Diffstat (limited to 'drivers/staging')
-rw-r--r-- drivers/staging/Kconfig | 16
-rw-r--r-- drivers/staging/Makefile | 7
-rw-r--r-- drivers/staging/apf/Kconfig | 20
-rw-r--r-- drivers/staging/apf/Makefile | 9
-rw-r--r-- drivers/staging/apf/dt-binding.txt | 17
-rw-r--r-- drivers/staging/apf/xilinx-dma-apf.c | 1232
-rw-r--r-- drivers/staging/apf/xilinx-dma-apf.h | 234
-rw-r--r-- drivers/staging/apf/xlnk-eng.c | 242
-rw-r--r-- drivers/staging/apf/xlnk-eng.h | 33
-rw-r--r-- drivers/staging/apf/xlnk-ioctl.h | 37
-rw-r--r-- drivers/staging/apf/xlnk-sysdef.h | 34
-rw-r--r-- drivers/staging/apf/xlnk.c | 1580
-rw-r--r-- drivers/staging/apf/xlnk.h | 175
-rw-r--r-- drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c | 398
-rw-r--r-- drivers/staging/clocking-wizard/dt-binding.txt | 4
-rw-r--r-- drivers/staging/comedi/drivers/adv_pci1760.c | 2
-rw-r--r-- drivers/staging/comedi/drivers/comedi_test.c | 30
-rw-r--r-- drivers/staging/comedi/drivers/vmk80xx.c | 35
-rw-r--r-- drivers/staging/emxx_udc/emxx_udc.c | 7
-rw-r--r-- drivers/staging/fclk/Kconfig | 9
-rw-r--r-- drivers/staging/fclk/Makefile | 1
-rw-r--r-- drivers/staging/fclk/TODO | 2
-rw-r--r-- drivers/staging/fclk/dt-binding.txt | 16
-rw-r--r-- drivers/staging/fclk/xilinx_fclk.c | 125
-rw-r--r-- drivers/staging/greybus/light.c | 8
-rw-r--r-- drivers/staging/iio/impedance-analyzer/ad5933.c | 2
-rw-r--r-- drivers/staging/iio/resolver/ad2s1210.c | 2
-rw-r--r-- drivers/staging/ks7010/ks_wlan_net.c | 6
-rw-r--r-- drivers/staging/media/imx/imx-media-csc-scaler.c | 1
-rw-r--r-- drivers/staging/media/ipu3/ipu3-v4l2.c | 16
-rw-r--r-- drivers/staging/media/meson/vdec/vdec.c | 2
-rw-r--r-- drivers/staging/media/sunxi/cedrus/cedrus.c | 4
-rw-r--r-- drivers/staging/most/dim2/dim2.c | 2
-rw-r--r-- drivers/staging/octeon/ethernet-tx.c | 2
-rw-r--r-- drivers/staging/rtl8192e/rtl8192e/rtl_core.c | 1
-rw-r--r-- drivers/staging/rtl8192e/rtl8192e/rtl_dm.c | 39
-rw-r--r-- drivers/staging/rtl8192e/rtllib_rx.c | 2
-rw-r--r-- drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c | 4
-rw-r--r-- drivers/staging/rtl8192u/r8192U.h | 2
-rw-r--r-- drivers/staging/rtl8192u/r8192U_dm.c | 38
-rw-r--r-- drivers/staging/rtl8192u/r8192U_dm.h | 2
-rw-r--r-- drivers/staging/rtl8712/os_intfs.c | 1
-rw-r--r-- drivers/staging/rtl8712/rtl8712_cmd.c | 36
-rw-r--r-- drivers/staging/rtl8712/usb_intf.c | 1
-rw-r--r-- drivers/staging/rtl8723bs/core/rtw_cmd.c | 16
-rw-r--r-- drivers/staging/speakup/main.c | 4
-rw-r--r-- drivers/staging/speakup/synth.c | 4
-rw-r--r-- drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c | 40
-rw-r--r-- drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h | 2
-rw-r--r-- drivers/staging/vt6655/device_main.c | 8
-rw-r--r-- drivers/staging/wilc1000/wilc_hif.c | 40
-rw-r--r-- drivers/staging/wilc1000/wilc_netdev.c | 7
-rw-r--r-- drivers/staging/wilc1000/wilc_sdio.c | 1
-rw-r--r-- drivers/staging/xlnx_ctrl_driver/Kconfig | 15
-rw-r--r-- drivers/staging/xlnx_ctrl_driver/MAINTAINERS | 4
-rw-r--r-- drivers/staging/xlnx_ctrl_driver/Makefile | 2
-rw-r--r-- drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c | 290
-rw-r--r-- drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c | 595
-rw-r--r-- drivers/staging/xlnx_ernic/Kconfig | 4
-rw-r--r-- drivers/staging/xlnx_ernic/MAINTAINERS | 4
-rw-r--r-- drivers/staging/xlnx_ernic/Makefile | 7
-rw-r--r-- drivers/staging/xlnx_ernic/dt-binding.txt | 29
-rw-r--r-- drivers/staging/xlnx_ernic/xcm.c | 1962
-rw-r--r-- drivers/staging/xlnx_ernic/xcm.h | 170
-rw-r--r-- drivers/staging/xlnx_ernic/xcommon.h | 73
-rw-r--r-- drivers/staging/xlnx_ernic/xernic_bw_test.c | 482
-rw-r--r-- drivers/staging/xlnx_ernic/xhw_config.h | 26
-rw-r--r-- drivers/staging/xlnx_ernic/xhw_def.h | 641
-rw-r--r-- drivers/staging/xlnx_ernic/xif.h | 239
-rw-r--r-- drivers/staging/xlnx_ernic/xioctl.h | 24
-rw-r--r-- drivers/staging/xlnx_ernic/xmain.c | 1592
-rw-r--r-- drivers/staging/xlnx_ernic/xmain.h | 33
-rw-r--r-- drivers/staging/xlnx_ernic/xmr.c | 413
-rw-r--r-- drivers/staging/xlnx_ernic/xmr.h | 68
-rw-r--r-- drivers/staging/xlnx_ernic/xperftest.h | 33
-rw-r--r-- drivers/staging/xlnx_ernic/xqp.c | 1310
-rw-r--r-- drivers/staging/xlnx_ernic/xqp.h | 114
-rw-r--r-- drivers/staging/xlnx_ernic/xrocev2.h | 409
-rw-r--r-- drivers/staging/xlnx_tsmux/Kconfig | 11
-rw-r--r-- drivers/staging/xlnx_tsmux/MAINTAINERS | 4
-rw-r--r-- drivers/staging/xlnx_tsmux/Makefile | 1
-rw-r--r-- drivers/staging/xlnx_tsmux/dt-binding.txt | 28
-rw-r--r-- drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c | 1510
-rw-r--r-- drivers/staging/xlnxsync/Kconfig | 11
-rw-r--r-- drivers/staging/xlnxsync/MAINTAINERS | 4
-rw-r--r-- drivers/staging/xlnxsync/Makefile | 1
-rw-r--r-- drivers/staging/xlnxsync/dt-binding.txt | 34
-rw-r--r-- drivers/staging/xlnxsync/xlnxsync.c | 1290
-rw-r--r-- drivers/staging/xroeframer/Kconfig | 18
-rw-r--r-- drivers/staging/xroeframer/Makefile | 12
-rw-r--r-- drivers/staging/xroeframer/README | 47
-rw-r--r-- drivers/staging/xroeframer/roe_framer_ctrl.h | 1088
-rw-r--r-- drivers/staging/xroeframer/sysfs_xroe.c | 562
-rw-r--r-- drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c | 718
-rw-r--r-- drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c | 571
-rw-r--r-- drivers/staging/xroeframer/sysfs_xroe_framer_stats.c | 401
-rw-r--r-- drivers/staging/xroeframer/sysfs_xroe_framer_udp.c | 181
-rw-r--r-- drivers/staging/xroeframer/xroe_framer.c | 155
-rw-r--r-- drivers/staging/xroeframer/xroe_framer.h | 63
-rw-r--r-- drivers/staging/xroetrafficgen/Kconfig | 14
-rw-r--r-- drivers/staging/xroetrafficgen/Makefile | 8
-rw-r--r-- drivers/staging/xroetrafficgen/README | 19
-rw-r--r-- drivers/staging/xroetrafficgen/roe_radio_ctrl.h | 183
-rw-r--r-- drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c | 824
-rw-r--r-- drivers/staging/xroetrafficgen/xroe-traffic-gen.c | 124
-rw-r--r-- drivers/staging/xroetrafficgen/xroe-traffic-gen.h | 15
106 files changed, 20782 insertions, 212 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 6f1fa4c849a1..614359bc16ab 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -78,6 +78,8 @@ source "drivers/staging/gs_fpgaboot/Kconfig"
source "drivers/staging/unisys/Kconfig"
+source "drivers/staging/apf/Kconfig"
+
source "drivers/staging/clocking-wizard/Kconfig"
source "drivers/staging/fbtft/Kconfig"
@@ -96,6 +98,8 @@ source "drivers/staging/vc04_services/Kconfig"
source "drivers/staging/pi433/Kconfig"
+source "drivers/staging/fclk/Kconfig"
+
source "drivers/staging/mt7621-pci/Kconfig"
source "drivers/staging/mt7621-pci-phy/Kconfig"
@@ -125,4 +129,16 @@ source "drivers/staging/exfat/Kconfig"
source "drivers/staging/qlge/Kconfig"
+source "drivers/staging/xlnx_ctrl_driver/Kconfig"
+
+source "drivers/staging/xlnx_ernic/Kconfig"
+
+source "drivers/staging/xroeframer/Kconfig"
+
+source "drivers/staging/xroetrafficgen/Kconfig"
+
+source "drivers/staging/xlnxsync/Kconfig"
+
+source "drivers/staging/xlnx_tsmux/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a90f9b308c8d..e3b3163a91a0 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,7 +29,9 @@ obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_UNISYSSPAR) += unisys/
+obj-$(CONFIG_XILINX_APF) += apf/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
+obj-$(CONFIG_XILINX_FCLK) += fclk/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/
obj-$(CONFIG_WILC1000) += wilc1000/
@@ -53,3 +55,8 @@ obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
+obj-y += xlnx_ctrl_driver/
+obj-$(CONFIG_ERNIC) += xlnx_ernic/
+obj-$(CONFIG_XROE_FRAMER) += xroeframer/
+obj-$(CONFIG_XLNX_SYNC) += xlnxsync/
+obj-$(CONFIG_XLNX_TSMUX) += xlnx_tsmux/
diff --git a/drivers/staging/apf/Kconfig b/drivers/staging/apf/Kconfig
new file mode 100644
index 000000000000..40d11d02e92d
--- /dev/null
+++ b/drivers/staging/apf/Kconfig
@@ -0,0 +1,20 @@
+#
+# APF driver configuration
+#
+
+menuconfig XILINX_APF
+ tristate "Xilinx APF Accelerator driver"
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP
+ default n
+ select UIO
+ select DMA_SHARED_BUFFER
+ help
+ Select this option to include the Xilinx APF accelerator driver.
+
+config XILINX_DMA_APF
+ bool "Xilinx APF DMA engines support"
+ depends on XILINX_APF
+ select DMA_ENGINE
+ select DMADEVICES
+ help
+ Enable support for the Xilinx APF DMA controllers.
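
A minimal .config fragment enabling both options introduced above might look like the following (illustrative assumption; XILINX_APF is tristate, so =m is also valid where the target defconfig builds it as a module):

    CONFIG_XILINX_APF=y
    CONFIG_XILINX_DMA_APF=y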
diff --git a/drivers/staging/apf/Makefile b/drivers/staging/apf/Makefile
new file mode 100644
index 000000000000..bf281a2c16df
--- /dev/null
+++ b/drivers/staging/apf/Makefile
@@ -0,0 +1,9 @@
+# Xilinx APF accelerator driver support
+
+ccflags-$(CONFIG_DEBUG_XILINX_APF) += -DDEBUG
+ccflags-$(CONFIG_XILINX_APF) += -Idrivers/dma
+
+obj-$(CONFIG_XILINX_APF) += xlnk.o
+obj-$(CONFIG_XILINX_APF) += xlnk-eng.o
+obj-$(CONFIG_XILINX_DMA_APF) += xilinx-dma-apf.o
+
diff --git a/drivers/staging/apf/dt-binding.txt b/drivers/staging/apf/dt-binding.txt
new file mode 100644
index 000000000000..fd73725fa589
--- /dev/null
+++ b/drivers/staging/apf/dt-binding.txt
@@ -0,0 +1,17 @@
+* Xilinx APF xlnk driver
+
+Required properties:
+- compatible: Should be "xlnx,xlnk"
+- clock-names: List of clock names
+- clocks: List of clock sources corresponding to the clock names
+
+The clock-names and clocks lists must contain the same number of elements.
+If there are no controllable clocks, the xlnk node should be omitted from the
+device tree.
+
+Example:
+ xlnk {
+ compatible = "xlnx,xlnk-1.0";
+ clock-names = "clk166", "clk150", "clk100", "clk200";
+ clocks = <&clkc 15>, <&clkc 16>, <&clkc 17>, <&clkc 18>;
+ };
diff --git a/drivers/staging/apf/xilinx-dma-apf.c b/drivers/staging/apf/xilinx-dma-apf.c
new file mode 100644
index 000000000000..55913130eafc
--- /dev/null
+++ b/drivers/staging/apf/xilinx-dma-apf.c
@@ -0,0 +1,1232 @@
+/*
+ * Xilinx AXI DMA Engine support
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * Description:
+ * This driver supports the Xilinx AXI DMA engine:
+ * . AXI DMA engine: transfers data between memory and a device. It can
+ * be configured with one or two channels; when configured with two
+ * channels, one transmits to the device and the other receives from
+ * the device.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/pagemap.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/pm.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <linux/sched.h>
+#include <linux/dma-buf.h>
+
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+
+#include "xilinx-dma-apf.h"
+
+#include "xlnk.h"
+
+static DEFINE_MUTEX(dma_list_mutex);
+static LIST_HEAD(dma_device_list);
+/* IO accessors */
+#define DMA_OUT_64(addr, val) (writeq(val, addr))
+#define DMA_OUT(addr, val) (iowrite32(val, addr))
+#define DMA_IN(addr) (ioread32(addr))
+
+#define GET_LOW(x) ((u32)((x) & 0xFFFFFFFF))
+#define GET_HI(x) ((u32)((x) / 0x100000000))
+
+static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt);
+/* Driver functions */
+static void xdma_clean_bd(struct xdma_desc_hw *bd)
+{
+ bd->src_addr = 0x0;
+ bd->control = 0x0;
+ bd->status = 0x0;
+ bd->app[0] = 0x0;
+ bd->app[1] = 0x0;
+ bd->app[2] = 0x0;
+ bd->app[3] = 0x0;
+ bd->app[4] = 0x0;
+ bd->dmahead = 0x0;
+ bd->sw_flag = 0x0;
+}
+
+static int dma_is_running(struct xdma_chan *chan)
+{
+ return !(DMA_IN(&chan->regs->sr) & XDMA_SR_HALTED_MASK) &&
+ (DMA_IN(&chan->regs->cr) & XDMA_CR_RUNSTOP_MASK);
+}
+
+static int dma_is_idle(struct xdma_chan *chan)
+{
+ return DMA_IN(&chan->regs->sr) & XDMA_SR_IDLE_MASK;
+}
+
+static void dma_halt(struct xdma_chan *chan)
+{
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) & ~XDMA_CR_RUNSTOP_MASK));
+}
+
+static void dma_start(struct xdma_chan *chan)
+{
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) | XDMA_CR_RUNSTOP_MASK));
+}
+
+static int dma_init(struct xdma_chan *chan)
+{
+ int loop = XDMA_RESET_LOOP;
+
+ DMA_OUT(&chan->regs->cr,
+ (DMA_IN(&chan->regs->cr) | XDMA_CR_RESET_MASK));
+
+ /* Wait for the hardware to finish reset
+ */
+ while (loop) {
+ if (!(DMA_IN(&chan->regs->cr) & XDMA_CR_RESET_MASK))
+ break;
+
+ loop -= 1;
+ }
+
+ if (!loop)
+ return 1;
+
+ return 0;
+}
+
+static int xdma_alloc_chan_descriptors(struct xdma_chan *chan)
+{
+ int i;
+ u8 *ptr;
+
+ /*
+ * The descriptors must be aligned to 64 bytes to meet the Xilinx DMA
+ * specification requirement.
+ */
+ ptr = (u8 *)dma_alloc_coherent(chan->dev,
+ (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT),
+ &chan->bd_phys_addr,
+ GFP_KERNEL);
+
+ if (!ptr) {
+ dev_err(chan->dev,
+ "unable to allocate channel %d descriptor pool\n",
+ chan->id);
+ return -ENOMEM;
+ }
+
+ memset(ptr, 0, (sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT));
+ chan->bd_cur = 0;
+ chan->bd_tail = 0;
+ chan->bd_used = 0;
+ chan->bd_chain_size = sizeof(struct xdma_desc_hw) * XDMA_MAX_BD_CNT;
+
+ /*
+ * Pre-link all the descriptors into a circular chain.
+ */
+ for (i = 0; i < XDMA_MAX_BD_CNT; i++) {
+ chan->bds[i] = (struct xdma_desc_hw *)
+ (ptr + (sizeof(struct xdma_desc_hw) * i));
+ chan->bds[i]->next_desc = chan->bd_phys_addr +
+ (sizeof(struct xdma_desc_hw) *
+ ((i + 1) % XDMA_MAX_BD_CNT));
+ }
+
+ /* there is at least one descriptor free to be allocated */
+ return 0;
+}
+
+static void xdma_free_chan_resources(struct xdma_chan *chan)
+{
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+ dma_free_coherent(chan->dev, (sizeof(struct xdma_desc_hw) *
+ XDMA_MAX_BD_CNT), chan->bds[0], chan->bd_phys_addr);
+}
+
+static void xilinx_chan_desc_reinit(struct xdma_chan *chan)
+{
+ struct xdma_desc_hw *desc;
+ unsigned int start, end;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ start = 0;
+ end = XDMA_MAX_BD_CNT;
+
+ while (start < end) {
+ desc = chan->bds[start];
+ xdma_clean_bd(desc);
+ start++;
+ }
+ /* Re-initialize bd_cur and bd_tail values */
+ chan->bd_cur = 0;
+ chan->bd_tail = 0;
+ chan->bd_used = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void xilinx_chan_desc_cleanup(struct xdma_chan *chan)
+{
+ struct xdma_head *dmahead;
+ struct xdma_desc_hw *desc;
+ struct completion *cmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+#define XDMA_BD_STS_RXEOF_MASK 0x04000000
+ desc = chan->bds[chan->bd_cur];
+ while (desc->status & XDMA_BD_STS_ALL_MASK) {
+ if ((desc->status & XDMA_BD_STS_RXEOF_MASK) &&
+ !(desc->dmahead)) {
+ pr_info("ERROR: premature EOF on DMA\n");
+ dma_init(chan); /* reset the dma HW */
+ while (!(desc->dmahead)) {
+ xdma_clean_bd(desc);
+ chan->bd_used--;
+ chan->bd_cur++;
+ if (chan->bd_cur >= XDMA_MAX_BD_CNT)
+ chan->bd_cur = 0;
+ desc = chan->bds[chan->bd_cur];
+ }
+ }
+ if (desc->dmahead) {
+ if ((desc->sw_flag & XDMA_BD_SF_POLL_MODE_MASK))
+ if (!(desc->sw_flag & XDMA_BD_SF_SW_DONE_MASK))
+ break;
+
+ dmahead = (struct xdma_head *)desc->dmahead;
+ cmp = (struct completion *)&dmahead->cmp;
+ if (dmahead->nappwords_o)
+ memcpy(dmahead->appwords_o, desc->app,
+ dmahead->nappwords_o * sizeof(u32));
+
+ if (chan->poll_mode)
+ cmp->done = 1;
+ else
+ complete(cmp);
+ }
+ xdma_clean_bd(desc);
+ chan->bd_used--;
+ chan->bd_cur++;
+ if (chan->bd_cur >= XDMA_MAX_BD_CNT)
+ chan->bd_cur = 0;
+ desc = chan->bds[chan->bd_cur];
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void xdma_err_tasklet(unsigned long data)
+{
+ struct xdma_chan *chan = (struct xdma_chan *)data;
+
+ if (chan->err) {
+ /* If reset failed, need to hard reset
+ * Channel is no longer functional
+ */
+ if (!dma_init(chan))
+ chan->err = 0;
+ else
+ dev_err(chan->dev, "DMA channel reset failed, please reset system\n");
+ }
+
+ /* Barrier to ensure the descriptor initialization has reached memory */
+ rmb();
+ xilinx_chan_desc_cleanup(chan);
+
+ xilinx_chan_desc_reinit(chan);
+}
+
+static void xdma_tasklet(unsigned long data)
+{
+ struct xdma_chan *chan = (struct xdma_chan *)data;
+
+ xilinx_chan_desc_cleanup(chan);
+}
+
+static void dump_cur_bd(struct xdma_chan *chan)
+{
+ u32 index;
+
+ index = (((u32)DMA_IN(&chan->regs->cdr)) - chan->bd_phys_addr) /
+ sizeof(struct xdma_desc_hw);
+
+ dev_err(chan->dev, "cur bd @ %08x\n", (u32)DMA_IN(&chan->regs->cdr));
+ dev_err(chan->dev, " buf = %p\n",
+ (void *)chan->bds[index]->src_addr);
+ dev_err(chan->dev, " ctrl = 0x%08x\n", chan->bds[index]->control);
+ dev_err(chan->dev, " sts = 0x%08x\n", chan->bds[index]->status);
+ dev_err(chan->dev, " next = %p\n",
+ (void *)chan->bds[index]->next_desc);
+}
+
+static irqreturn_t xdma_rx_intr_handler(int irq, void *data)
+{
+ struct xdma_chan *chan = data;
+ u32 stat;
+
+ stat = DMA_IN(&chan->regs->sr);
+
+ if (!(stat & XDMA_XR_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ /* Ack the interrupts */
+ DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));
+
+ if (stat & XDMA_XR_IRQ_ERROR_MASK) {
+ dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
+ chan->name, (unsigned int)stat,
+ (unsigned int)DMA_IN(&chan->regs->cdr),
+ (unsigned int)DMA_IN(&chan->regs->tdr));
+
+ dump_cur_bd(chan);
+
+ chan->err = 1;
+ tasklet_schedule(&chan->dma_err_tasklet);
+ }
+
+ if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
+ (stat & XDMA_XR_IRQ_IOC_MASK)))
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xdma_tx_intr_handler(int irq, void *data)
+{
+ struct xdma_chan *chan = data;
+ u32 stat;
+
+ stat = DMA_IN(&chan->regs->sr);
+
+ if (!(stat & XDMA_XR_IRQ_ALL_MASK))
+ return IRQ_NONE;
+
+ /* Ack the interrupts */
+ DMA_OUT(&chan->regs->sr, (stat & XDMA_XR_IRQ_ALL_MASK));
+
+ if (stat & XDMA_XR_IRQ_ERROR_MASK) {
+ dev_err(chan->dev, "Channel %s has errors %x, cdr %x tdr %x\n",
+ chan->name, (unsigned int)stat,
+ (unsigned int)DMA_IN(&chan->regs->cdr),
+ (unsigned int)DMA_IN(&chan->regs->tdr));
+
+ dump_cur_bd(chan);
+
+ chan->err = 1;
+ tasklet_schedule(&chan->dma_err_tasklet);
+ }
+
+ if (!(chan->poll_mode) && ((stat & XDMA_XR_IRQ_DELAY_MASK) ||
+ (stat & XDMA_XR_IRQ_IOC_MASK)))
+ tasklet_schedule(&chan->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void xdma_start_transfer(struct xdma_chan *chan,
+ int start_index,
+ int end_index)
+{
+ xlnk_intptr_type cur_phys;
+ xlnk_intptr_type tail_phys;
+ u32 regval;
+
+ if (chan->err)
+ return;
+
+ cur_phys = chan->bd_phys_addr + (start_index *
+ sizeof(struct xdma_desc_hw));
+ tail_phys = chan->bd_phys_addr + (end_index *
+ sizeof(struct xdma_desc_hw));
+ /* If hardware is busy, move the tail & return */
+ if (dma_is_running(chan) || dma_is_idle(chan)) {
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->tdr, tail_phys);
+#else
+ DMA_OUT_64(&chan->regs->tdr, tail_phys);
+#endif
+ return;
+ }
+
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->cdr, cur_phys);
+#else
+ DMA_OUT_64(&chan->regs->cdr, cur_phys);
+#endif
+
+ dma_start(chan);
+
+ /* Enable interrupts */
+ regval = DMA_IN(&chan->regs->cr);
+ regval |= (chan->poll_mode ? XDMA_XR_IRQ_ERROR_MASK
+ : XDMA_XR_IRQ_ALL_MASK);
+ DMA_OUT(&chan->regs->cr, regval);
+
+ /* Update tail ptr register and start the transfer */
+#if XLNK_SYS_BIT_WIDTH == 32
+ DMA_OUT(&chan->regs->tdr, tail_phys);
+#else
+ DMA_OUT_64(&chan->regs->tdr, tail_phys);
+#endif
+}
+
+static int xdma_setup_hw_desc(struct xdma_chan *chan,
+ struct xdma_head *dmahead,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_data_direction direction,
+ unsigned int nappwords_i,
+ u32 *appwords_i)
+{
+ struct xdma_desc_hw *bd = NULL;
+ size_t copy;
+ struct scatterlist *sg;
+ size_t sg_used;
+ dma_addr_t dma_src;
+ int i, start_index = -1, end_index1 = 0, end_index2 = -1;
+ int status;
+ unsigned long flags;
+ unsigned int bd_used_saved;
+
+ if (!chan) {
+ pr_err("Requested transfer on invalid channel\n");
+ return -ENODEV;
+ }
+
+ /* if we almost run out of bd, try to recycle some */
+ if ((chan->poll_mode) && (chan->bd_used >= XDMA_BD_CLEANUP_THRESHOLD))
+ xilinx_chan_desc_cleanup(chan);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ bd_used_saved = chan->bd_used;
+ /*
+ * Build transactions using information in the scatter gather list
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ sg_used = 0;
+
+ /* Loop until the entire scatterlist entry is used */
+ while (sg_used < sg_dma_len(sg)) {
+ /* Allocate the link descriptor from DMA pool */
+ bd = chan->bds[chan->bd_tail];
+ if ((bd->control) & (XDMA_BD_STS_ACTUAL_LEN_MASK)) {
+ end_index2 = chan->bd_tail;
+ status = -ENOMEM;
+ /* If first was not set, then we failed to
+ * allocate the very first descriptor,
+ * and we're done
+ */
+ if (start_index == -1)
+ goto out_unlock;
+ else
+ goto out_clean;
+ }
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the DMA controller limit
+ */
+ copy = min((size_t)(sg_dma_len(sg) - sg_used),
+ (size_t)chan->max_len);
+ /*
+ * Only the src address for DMA
+ */
+ dma_src = sg_dma_address(sg) + sg_used;
+ bd->src_addr = dma_src;
+
+ /* Fill in the descriptor */
+ bd->control = copy;
+
+ /*
+ * If this is not the first descriptor, chain the
+ * current descriptor after the previous descriptor
+ *
+ * For the first DMA_TO_DEVICE transfer, set SOP
+ */
+ if (start_index == -1) {
+ start_index = chan->bd_tail;
+
+ if (nappwords_i)
+ memcpy(bd->app, appwords_i,
+ nappwords_i * sizeof(u32));
+
+ if (direction == DMA_TO_DEVICE)
+ bd->control |= XDMA_BD_SOP;
+ }
+
+ sg_used += copy;
+ end_index2 = chan->bd_tail;
+ chan->bd_tail++;
+ chan->bd_used++;
+ if (chan->bd_tail >= XDMA_MAX_BD_CNT) {
+ end_index1 = XDMA_MAX_BD_CNT;
+ chan->bd_tail = 0;
+ }
+ }
+ }
+
+ if (start_index == -1) {
+ status = -EINVAL;
+ goto out_unlock;
+ }
+
+ bd->dmahead = (xlnk_intptr_type)dmahead;
+ bd->sw_flag = chan->poll_mode ? XDMA_BD_SF_POLL_MODE_MASK : 0;
+ dmahead->last_bd_index = end_index2;
+
+ if (direction == DMA_TO_DEVICE)
+ bd->control |= XDMA_BD_EOP;
+
+ /* Barrier to ensure the control word writes have committed */
+ wmb();
+
+ xdma_start_transfer(chan, start_index, end_index2);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+
+out_clean:
+ if (!end_index1) {
+ for (i = start_index; i < end_index2; i++)
+ xdma_clean_bd(chan->bds[i]);
+ } else {
+ /* clean till the end of bd list first, and then 2nd end */
+ for (i = start_index; i < end_index1; i++)
+ xdma_clean_bd(chan->bds[i]);
+
+ end_index1 = 0;
+ for (i = end_index1; i < end_index2; i++)
+ xdma_clean_bd(chan->bds[i]);
+ }
+ /* Move the bd_tail back */
+ chan->bd_tail = start_index;
+ chan->bd_used = bd_used_saved;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return status;
+}
+
+/*
+ * create minimal length scatter gather list for physically contiguous buffer
+ * that starts at phy_buf and has length phy_buf_len bytes
+ */
+static unsigned int phy_buf_to_sgl(xlnk_intptr_type phy_buf,
+ unsigned int phy_buf_len,
+ struct scatterlist *sgl)
+{
+ unsigned int sgl_cnt = 0;
+ struct scatterlist *sgl_head;
+ unsigned int dma_len;
+ unsigned int num_bd;
+
+ if (!phy_buf || !phy_buf_len) {
+ pr_err("phy_buf is NULL or phy_buf_len = 0\n");
+ return sgl_cnt;
+ }
+
+ num_bd = (phy_buf_len + (XDMA_MAX_TRANS_LEN - 1))
+ / XDMA_MAX_TRANS_LEN;
+ sgl_head = sgl;
+ sg_init_table(sgl, num_bd);
+
+ while (phy_buf_len > 0) {
+ xlnk_intptr_type page_id = phy_buf >> PAGE_SHIFT;
+ unsigned int offset = phy_buf - (page_id << PAGE_SHIFT);
+
+ sgl_cnt++;
+ if (sgl_cnt > XDMA_MAX_BD_CNT)
+ return 0;
+
+ dma_len = (phy_buf_len > XDMA_MAX_TRANS_LEN) ?
+ XDMA_MAX_TRANS_LEN : phy_buf_len;
+
+ sg_set_page(sgl_head, pfn_to_page(page_id), dma_len, offset);
+ sg_dma_address(sgl_head) = (dma_addr_t)phy_buf;
+ sg_dma_len(sgl_head) = dma_len;
+ sgl_head = sg_next(sgl_head);
+
+ phy_buf += dma_len;
+ phy_buf_len -= dma_len;
+ }
+
+ return sgl_cnt;
+}
+
+/* merge sg list, sgl, with length sgl_len, to sgl_merged, to save dma bds */
+static unsigned int sgl_merge(struct scatterlist *sgl,
+ unsigned int sgl_len,
+ struct scatterlist *sgl_merged)
+{
+ struct scatterlist *sghead, *sgend, *sgnext, *sg_merged_head;
+ unsigned int sg_visited_cnt = 0, sg_merged_num = 0;
+ unsigned int dma_len = 0;
+
+ sg_init_table(sgl_merged, sgl_len);
+ sg_merged_head = sgl_merged;
+ sghead = sgl;
+
+ while (sghead && (sg_visited_cnt < sgl_len)) {
+ dma_len = sg_dma_len(sghead);
+ sgend = sghead;
+ sg_visited_cnt++;
+ sgnext = sg_next(sgend);
+
+ while (sgnext && (sg_visited_cnt < sgl_len)) {
+ if ((sg_dma_address(sgend) + sg_dma_len(sgend)) !=
+ sg_dma_address(sgnext))
+ break;
+
+ if (dma_len + sg_dma_len(sgnext) >= XDMA_MAX_TRANS_LEN)
+ break;
+
+ sgend = sgnext;
+ dma_len += sg_dma_len(sgend);
+ sg_visited_cnt++;
+ sgnext = sg_next(sgnext);
+ }
+
+ sg_merged_num++;
+ if (sg_merged_num > XDMA_MAX_BD_CNT)
+ return 0;
+
+ memcpy(sg_merged_head, sghead, sizeof(struct scatterlist));
+
+ sg_dma_len(sg_merged_head) = dma_len;
+
+ sg_merged_head = sg_next(sg_merged_head);
+ sghead = sg_next(sgend);
+ }
+
+ return sg_merged_num;
+}
+
+static int pin_user_pages(xlnk_intptr_type uaddr,
+ unsigned int ulen,
+ int write,
+ struct scatterlist **scatterpp,
+ unsigned int *cntp,
+ unsigned int user_flags)
+{
+ int status;
+ struct mm_struct *mm = current->mm;
+ unsigned int first_page;
+ unsigned int last_page;
+ unsigned int num_pages;
+ struct scatterlist *sglist;
+ struct page **mapped_pages;
+
+ unsigned int pgidx;
+ unsigned int pglen;
+ unsigned int pgoff;
+ unsigned int sublen;
+
+ first_page = uaddr / PAGE_SIZE;
+ last_page = (uaddr + ulen - 1) / PAGE_SIZE;
+ num_pages = last_page - first_page + 1;
+ mapped_pages = vmalloc(sizeof(*mapped_pages) * num_pages);
+ if (!mapped_pages)
+ return -ENOMEM;
+
+ down_read(&mm->mmap_sem);
+ status = get_user_pages(uaddr, num_pages,
+ (write ? FOLL_WRITE : 0) | FOLL_FORCE,
+ mapped_pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ if (status == num_pages) {
+ sglist = kcalloc(num_pages,
+ sizeof(struct scatterlist),
+ GFP_KERNEL);
+ if (!sglist) {
+ pr_err("%s: kcalloc failed to create sg list\n",
+ __func__);
+ vfree(mapped_pages);
+ return -ENOMEM;
+ }
+ sg_init_table(sglist, num_pages);
+ sublen = 0;
+ for (pgidx = 0; pgidx < status; pgidx++) {
+ if (pgidx == 0 && num_pages != 1) {
+ pgoff = uaddr & (~PAGE_MASK);
+ pglen = PAGE_SIZE - pgoff;
+ } else if (pgidx == 0 && num_pages == 1) {
+ pgoff = uaddr & (~PAGE_MASK);
+ pglen = ulen;
+ } else if (pgidx == num_pages - 1) {
+ pgoff = 0;
+ pglen = ulen - sublen;
+ } else {
+ pgoff = 0;
+ pglen = PAGE_SIZE;
+ }
+
+ sublen += pglen;
+
+ sg_set_page(&sglist[pgidx],
+ mapped_pages[pgidx],
+ pglen, pgoff);
+
+ sg_dma_len(&sglist[pgidx]) = pglen;
+ }
+
+ *scatterpp = sglist;
+ *cntp = num_pages;
+
+ vfree(mapped_pages);
+ return 0;
+ }
+ pr_err("Failed to pin user pages\n");
+ for (pgidx = 0; pgidx < status; pgidx++)
+ put_page(mapped_pages[pgidx]);
+ vfree(mapped_pages);
+ return -ENOMEM;
+}
+
+static int unpin_user_pages(struct scatterlist *sglist, unsigned int cnt)
+{
+ struct page *pg;
+ unsigned int i;
+
+ if (!sglist)
+ return 0;
+
+ for (i = 0; i < cnt; i++) {
+ pg = sg_page(sglist + i);
+ if (pg)
+ put_page(pg);
+ }
+
+ kfree(sglist);
+ return 0;
+}
+
+struct xdma_chan *xdma_request_channel(char *name)
+{
+ int i;
+ struct xdma_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
+ for (i = 0; i < device->channel_count; i++) {
+ if (!strcmp(device->chan[i]->name, name))
+ return device->chan[i];
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(xdma_request_channel);
+
+void xdma_release_channel(struct xdma_chan *chan)
+{ }
+EXPORT_SYMBOL(xdma_release_channel);
+
+void xdma_release_all_channels(void)
+{
+ int i;
+ struct xdma_device *device, *tmp;
+
+ list_for_each_entry_safe(device, tmp, &dma_device_list, node) {
+ for (i = 0; i < device->channel_count; i++) {
+ if (device->chan[i]->client_count) {
+ dma_halt(device->chan[i]);
+ xilinx_chan_desc_reinit(device->chan[i]);
+ pr_info("%s: chan %s freed\n",
+ __func__,
+ device->chan[i]->name);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(xdma_release_all_channels);
+
+static void xdma_release(struct device *dev)
+{
+}
+
+int xdma_submit(struct xdma_chan *chan,
+ xlnk_intptr_type userbuf,
+ void *kaddr,
+ unsigned int size,
+ unsigned int nappwords_i,
+ u32 *appwords_i,
+ unsigned int nappwords_o,
+ unsigned int user_flags,
+ struct xdma_head **dmaheadpp,
+ struct xlnk_dmabuf_reg *dp)
+{
+ struct xdma_head *dmahead;
+ struct scatterlist *pagelist = NULL;
+ struct scatterlist *sglist = NULL;
+ unsigned int pagecnt = 0;
+ unsigned int sgcnt = 0;
+ enum dma_data_direction dmadir;
+ int status;
+ unsigned long attrs = 0;
+
+ dmahead = kzalloc(sizeof(*dmahead), GFP_KERNEL);
+ if (!dmahead)
+ return -ENOMEM;
+
+ dmahead->chan = chan;
+ dmahead->userbuf = userbuf;
+ dmahead->size = size;
+ dmahead->dmadir = chan->direction;
+ dmahead->userflag = user_flags;
+ dmahead->dmabuf = dp;
+ dmadir = chan->direction;
+
+ if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (dp) {
+ int i;
+ struct scatterlist *sg;
+ unsigned int remaining_size = size;
+
+ if (IS_ERR_OR_NULL(dp->dbuf_sg_table)) {
+ pr_err("%s dmabuf not mapped: %p\n",
+ __func__, dp->dbuf_sg_table);
+ return -EINVAL;
+ }
+ if (dp->dbuf_sg_table->nents == 0) {
+ pr_err("%s: cannot map a scatterlist with 0 entries\n",
+ __func__);
+ return -EINVAL;
+ }
+ sglist = kmalloc_array(dp->dbuf_sg_table->nents,
+ sizeof(*sglist),
+ GFP_KERNEL);
+ if (!sglist)
+ return -ENOMEM;
+
+ sg_init_table(sglist, dp->dbuf_sg_table->nents);
+ sgcnt = 0;
+ for_each_sg(dp->dbuf_sg_table->sgl,
+ sg,
+ dp->dbuf_sg_table->nents,
+ i) {
+ sg_set_page(sglist + i,
+ sg_page(sg),
+ sg_dma_len(sg),
+ sg->offset);
+ sg_dma_address(sglist + i) = sg_dma_address(sg);
+ if (remaining_size == 0) {
+ sg_dma_len(sglist + i) = 0;
+ } else if (sg_dma_len(sg) > remaining_size) {
+ sg_dma_len(sglist + i) = remaining_size;
+ sgcnt++;
+ } else {
+ sg_dma_len(sglist + i) = sg_dma_len(sg);
+ remaining_size -= sg_dma_len(sg);
+ sgcnt++;
+ }
+ }
+ dmahead->userbuf = (xlnk_intptr_type)sglist->dma_address;
+ pagelist = NULL;
+ pagecnt = 0;
+ } else if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
+ size_t elem_cnt;
+
+ elem_cnt = DIV_ROUND_UP(size, XDMA_MAX_TRANS_LEN);
+ sglist = kmalloc_array(elem_cnt, sizeof(*sglist), GFP_KERNEL);
+ sgcnt = phy_buf_to_sgl(userbuf, size, sglist);
+ if (!sgcnt)
+ return -ENOMEM;
+
+ status = get_dma_ops(chan->dev)->map_sg(chan->dev,
+ sglist,
+ sgcnt,
+ dmadir,
+ attrs);
+
+ if (!status) {
+ pr_err("sg contiguous mapping failed\n");
+ return -ENOMEM;
+ }
+ pagelist = NULL;
+ pagecnt = 0;
+ } else {
+ status = pin_user_pages(userbuf,
+ size,
+ dmadir != DMA_TO_DEVICE,
+ &pagelist,
+ &pagecnt,
+ user_flags);
+ if (status < 0) {
+ pr_err("pin_user_pages failed\n");
+ return status;
+ }
+
+ status = get_dma_ops(chan->dev)->map_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ if (!status) {
+ pr_err("dma_map_sg failed\n");
+ unpin_user_pages(pagelist, pagecnt);
+ return -ENOMEM;
+ }
+
+ sglist = kmalloc_array(pagecnt, sizeof(*sglist), GFP_KERNEL);
+ if (sglist)
+ sgcnt = sgl_merge(pagelist, pagecnt, sglist);
+ if (!sgcnt) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ unpin_user_pages(pagelist, pagecnt);
+ kfree(sglist);
+ return -ENOMEM;
+ }
+ }
+ dmahead->sglist = sglist;
+ dmahead->sgcnt = sgcnt;
+ dmahead->pagelist = pagelist;
+ dmahead->pagecnt = pagecnt;
+
+ /* skipping config */
+ init_completion(&dmahead->cmp);
+
+ if (nappwords_i > XDMA_MAX_APPWORDS)
+ nappwords_i = XDMA_MAX_APPWORDS;
+
+ if (nappwords_o > XDMA_MAX_APPWORDS)
+ nappwords_o = XDMA_MAX_APPWORDS;
+
+ dmahead->nappwords_o = nappwords_o;
+
+ status = xdma_setup_hw_desc(chan, dmahead, sglist, sgcnt,
+ dmadir, nappwords_i, appwords_i);
+ if (status) {
+ pr_err("setup hw desc failed\n");
+ if (dmahead->pagelist) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ pagelist,
+ pagecnt,
+ dmadir,
+ attrs);
+ unpin_user_pages(pagelist, pagecnt);
+ } else if (!dp) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ sglist,
+ sgcnt,
+ dmadir,
+ attrs);
+ }
+ kfree(dmahead->sglist);
+ return -ENOMEM;
+ }
+
+ *dmaheadpp = dmahead;
+ return 0;
+}
+EXPORT_SYMBOL(xdma_submit);
+
+int xdma_wait(struct xdma_head *dmahead,
+ unsigned int user_flags,
+ unsigned int *operating_flags)
+{
+ struct xdma_chan *chan = dmahead->chan;
+ unsigned long attrs = 0;
+
+ if (chan->poll_mode) {
+ xilinx_chan_desc_cleanup(chan);
+ *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+ } else {
+ if (*operating_flags & XDMA_FLAGS_TRYWAIT) {
+ if (!try_wait_for_completion(&dmahead->cmp))
+ return 0;
+ *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+ } else {
+ wait_for_completion(&dmahead->cmp);
+ *operating_flags |= XDMA_FLAGS_WAIT_COMPLETE;
+ }
+ }
+
+ if (!dmahead->dmabuf) {
+ if (!(user_flags & CF_FLAG_CACHE_FLUSH_INVALIDATE))
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ if (user_flags & CF_FLAG_PHYSICALLY_CONTIGUOUS) {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ dmahead->sglist,
+ dmahead->sgcnt,
+ dmahead->dmadir,
+ attrs);
+ } else {
+ get_dma_ops(chan->dev)->unmap_sg(chan->dev,
+ dmahead->pagelist,
+ dmahead->pagecnt,
+ dmahead->dmadir,
+ attrs);
+ unpin_user_pages(dmahead->pagelist, dmahead->pagecnt);
+ }
+ }
+ kfree(dmahead->sglist);
+
+ return 0;
+}
+EXPORT_SYMBOL(xdma_wait);
+
+int xdma_getconfig(struct xdma_chan *chan,
+ unsigned char *irq_thresh,
+ unsigned char *irq_delay)
+{
+ *irq_thresh = (DMA_IN(&chan->regs->cr) >> XDMA_COALESCE_SHIFT) & 0xff;
+ *irq_delay = (DMA_IN(&chan->regs->cr) >> XDMA_DELAY_SHIFT) & 0xff;
+ return 0;
+}
+EXPORT_SYMBOL(xdma_getconfig);
+
+int xdma_setconfig(struct xdma_chan *chan,
+ unsigned char irq_thresh,
+ unsigned char irq_delay)
+{
+ unsigned long val;
+
+ if (dma_is_running(chan))
+ return -EBUSY;
+
+ val = DMA_IN(&chan->regs->cr);
+ val &= ~((0xff << XDMA_COALESCE_SHIFT) |
+ (0xff << XDMA_DELAY_SHIFT));
+ val |= ((irq_thresh << XDMA_COALESCE_SHIFT) |
+ (irq_delay << XDMA_DELAY_SHIFT));
+
+ DMA_OUT(&chan->regs->cr, val);
+ return 0;
+}
+EXPORT_SYMBOL(xdma_setconfig);
+
+static const struct of_device_id gic_match[] = {
+ { .compatible = "arm,cortex-a9-gic", },
+ { .compatible = "arm,cortex-a15-gic", },
+ { },
+};
+
+static struct device_node *gic_node;
+
+unsigned int xlate_irq(unsigned int hwirq)
+{
+ struct of_phandle_args irq_data;
+ unsigned int irq;
+
+ if (!gic_node)
+ gic_node = of_find_matching_node(NULL, gic_match);
+
+ if (WARN_ON(!gic_node))
+ return hwirq;
+
+ irq_data.np = gic_node;
+ irq_data.args_count = 3;
+ irq_data.args[0] = 0;
+#if XLNK_SYS_BIT_WIDTH == 32
+ irq_data.args[1] = hwirq - 32; /* GIC SPI offset */
+#else
+ irq_data.args[1] = hwirq;
+#endif
+ irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ irq = irq_create_of_mapping(&irq_data);
+ if (WARN_ON(!irq))
+ irq = hwirq;
+
+ pr_info("%s: hwirq %d, irq %d\n", __func__, hwirq, irq);
+
+ return irq;
+}
+
+/* Brute-force probing for xilinx DMA
+ */
+static int xdma_probe(struct platform_device *pdev)
+{
+ struct xdma_device *xdev;
+ struct resource *res;
+ int err, i, j;
+ struct xdma_chan *chan;
+ struct xdma_device_config *dma_config;
+ int dma_chan_dir;
+ int dma_chan_reg_offset;
+
+ pr_info("%s: probe dma %p, nres %d, id %d\n", __func__,
+ &pdev->dev, pdev->num_resources, pdev->id);
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(struct xdma_device), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+ xdev->dev = &pdev->dev;
+
+ /* Set this as configurable once HPC works */
+ arch_setup_dma_ops(&pdev->dev, 0, 0, NULL, false);
+ dma_set_mask(&pdev->dev, 0xFFFFFFFFFFFFFFFFull);
+
+ dma_config = (struct xdma_device_config *)xdev->dev->platform_data;
+ if (dma_config->channel_count < 1 || dma_config->channel_count > 2)
+ return -EFAULT;
+
+ /* Get the memory resource */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xdev->regs)) {
+ dev_err(&pdev->dev, "unable to iomap registers\n");
+ return PTR_ERR(xdev->regs);
+ }
+
+ dev_info(&pdev->dev, "AXIDMA device %d physical base address=%pa\n",
+ pdev->id, &res->start);
+ dev_info(&pdev->dev, "AXIDMA device %d remapped to %pa\n",
+ pdev->id, &xdev->regs);
+
+ /* Allocate the channels */
+
+ dev_info(&pdev->dev, "has %d channel(s)\n", dma_config->channel_count);
+ for (i = 0; i < dma_config->channel_count; i++) {
+ chan = devm_kzalloc(&pdev->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ dma_chan_dir = strcmp(dma_config->channel_config[i].type,
+ "axi-dma-mm2s-channel") ?
+ DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ dma_chan_reg_offset = (dma_chan_dir == DMA_TO_DEVICE) ?
+ 0 :
+ 0x30;
+
+ /* Initialize channel parameters */
+ chan->id = i;
+ chan->regs = xdev->regs + dma_chan_reg_offset;
+ /* chan->regs = xdev->regs; */
+ chan->dev = xdev->dev;
+ chan->max_len = XDMA_MAX_TRANS_LEN;
+ chan->direction = dma_chan_dir;
+ sprintf(chan->name, "%s:%d", dma_config->name, chan->id);
+ pr_info(" chan %d name: %s\n", chan->id, chan->name);
+ pr_info(" chan %d direction: %s\n", chan->id,
+ dma_chan_dir == DMA_FROM_DEVICE ?
+ "FROM_DEVICE" : "TO_DEVICE");
+
+ spin_lock_init(&chan->lock);
+ tasklet_init(&chan->tasklet,
+ xdma_tasklet,
+ (unsigned long)chan);
+ tasklet_init(&chan->dma_err_tasklet,
+ xdma_err_tasklet,
+ (unsigned long)chan);
+
+ xdev->chan[chan->id] = chan;
+
+ /* The IRQ resource */
+ chan->irq = xlate_irq(dma_config->channel_config[i].irq);
+ if (chan->irq <= 0) {
+ pr_err("get_resource for IRQ for dev %d failed\n",
+ pdev->id);
+ return -ENODEV;
+ }
+
+ err = devm_request_irq(&pdev->dev,
+ chan->irq,
+ dma_chan_dir == DMA_TO_DEVICE ?
+ xdma_tx_intr_handler :
+ xdma_rx_intr_handler,
+ IRQF_SHARED,
+ pdev->name,
+ chan);
+ if (err) {
+ dev_err(&pdev->dev, "unable to request IRQ\n");
+ return err;
+ }
+ pr_info(" chan%d irq: %d\n", chan->id, chan->irq);
+
+ chan->poll_mode = dma_config->channel_config[i].poll_mode;
+ pr_info(" chan%d poll mode: %s\n",
+ chan->id,
+ chan->poll_mode ? "on" : "off");
+
+ /* Allocate channel BD's */
+ err = xdma_alloc_chan_descriptors(xdev->chan[chan->id]);
+ if (err) {
+ dev_err(&pdev->dev, "unable to allocate BD's\n");
+ return -ENOMEM;
+ }
+ pr_info(" chan%d bd ring @ 0x%p (size: 0x%x bytes)\n",
+ chan->id,
+ (void *)chan->bd_phys_addr,
+ chan->bd_chain_size);
+
+ err = dma_init(xdev->chan[chan->id]);
+ if (err) {
+ dev_err(&pdev->dev, "DMA init failed\n");
+ /* FIXME Check this - unregister all chan resources */
+ for (j = 0; j <= i; j++)
+ xdma_free_chan_resources(xdev->chan[j]);
+ return -EIO;
+ }
+ }
+ xdev->channel_count = dma_config->channel_count;
+ pdev->dev.release = xdma_release;
+ /* Add the DMA device to the global list */
+ mutex_lock(&dma_list_mutex);
+ list_add_tail(&xdev->node, &dma_device_list);
+ mutex_unlock(&dma_list_mutex);
+
+ platform_set_drvdata(pdev, xdev);
+
+ return 0;
+}
+
+static int xdma_remove(struct platform_device *pdev)
+{
+ int i;
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ /* Remove the DMA device from the global list */
+ mutex_lock(&dma_list_mutex);
+ list_del(&xdev->node);
+ mutex_unlock(&dma_list_mutex);
+
+ for (i = 0; i < XDMA_MAX_CHANS_PER_DEVICE; i++) {
+ if (xdev->chan[i])
+ xdma_free_chan_resources(xdev->chan[i]);
+ }
+
+ return 0;
+}
+
+static struct platform_driver xdma_driver = {
+ .probe = xdma_probe,
+ .remove = xdma_remove,
+ .driver = {
+ .name = "xilinx-axidma",
+ },
+};
+
+module_platform_driver(xdma_driver);
+
+MODULE_DESCRIPTION("Xilinx DMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xilinx-dma-apf.h b/drivers/staging/apf/xilinx-dma-apf.h
new file mode 100644
index 000000000000..8837fec01779
--- /dev/null
+++ b/drivers/staging/apf/xilinx-dma-apf.h
@@ -0,0 +1,234 @@
+/*
+ * Xilinx AXI DMA Engine support
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __XILINX_DMA_APF_H
+#define __XILINX_DMA_APF_H
+
+/* ioctls */
+#include <linux/ioctl.h>
+
+/* tasklet */
+#include <linux/interrupt.h>
+
+/* dma stuff */
+#include <linux/dma-mapping.h>
+
+/* xlnk structures */
+#include "xlnk.h"
+#include "xlnk-sysdef.h"
+
+#define XDMA_IOC_MAGIC 'X'
+#define XDMA_IOCRESET _IO(XDMA_IOC_MAGIC, 0)
+#define XDMA_IOCREQUEST _IOWR(XDMA_IOC_MAGIC, 1, unsigned long)
+#define XDMA_IOCRELEASE _IOWR(XDMA_IOC_MAGIC, 2, unsigned long)
+#define XDMA_IOCSUBMIT _IOWR(XDMA_IOC_MAGIC, 3, unsigned long)
+#define XDMA_IOCWAIT _IOWR(XDMA_IOC_MAGIC, 4, unsigned long)
+#define XDMA_IOCGETCONFIG _IOWR(XDMA_IOC_MAGIC, 5, unsigned long)
+#define XDMA_IOCSETCONFIG _IOWR(XDMA_IOC_MAGIC, 6, unsigned long)
+#define XDMA_IOC_MAXNR 6
+
+/* Specific hardware configuration-related constants
+ */
+#define XDMA_RESET_LOOP 1000000
+#define XDMA_HALT_LOOP 1000000
+#define XDMA_NO_CHANGE 0xFFFF
+
+/* General register bits definitions
+ */
+#define XDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
+#define XDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA engine */
+
+#define XDMA_SR_HALTED_MASK 0x00000001 /* DMA channel halted */
+#define XDMA_SR_IDLE_MASK 0x00000002 /* DMA channel idle */
+
+#define XDMA_SR_ERR_INTERNAL_MASK 0x00000010/* Datamover internal err */
+#define XDMA_SR_ERR_SLAVE_MASK 0x00000020 /* Datamover slave err */
+#define XDMA_SR_ERR_DECODE_MASK 0x00000040 /* Datamover decode err */
+#define XDMA_SR_ERR_SG_INT_MASK 0x00000100 /* SG internal err */
+#define XDMA_SR_ERR_SG_SLV_MASK 0x00000200 /* SG slave err */
+#define XDMA_SR_ERR_SG_DEC_MASK 0x00000400 /* SG decode err */
+#define XDMA_SR_ERR_ALL_MASK 0x00000770 /* All errors */
+
+#define XDMA_XR_IRQ_IOC_MASK 0x00001000 /* Completion interrupt */
+#define XDMA_XR_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */
+#define XDMA_XR_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
+#define XDMA_XR_IRQ_ALL_MASK 0x00007000 /* All interrupts */
+
+#define XDMA_XR_DELAY_MASK 0xFF000000 /* Delay timeout counter */
+#define XDMA_XR_COALESCE_MASK 0x00FF0000 /* Coalesce counter */
+
+#define XDMA_DELAY_SHIFT 24
+#define XDMA_COALESCE_SHIFT 16
+
+#define XDMA_DELAY_MAX 0xFF /**< Maximum delay counter value */
+#define XDMA_COALESCE_MAX 0xFF /**< Maximum coalescing counter value */
+
+/* BD definitions for Axi DMA
+ */
+#define XDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF
+#define XDMA_BD_STS_COMPL_MASK 0x80000000
+#define XDMA_BD_STS_ERR_MASK 0x70000000
+#define XDMA_BD_STS_ALL_MASK 0xF0000000
+
+/* DMA BD special bits definitions
+ */
+#define XDMA_BD_SOP 0x08000000 /* Start of packet bit */
+#define XDMA_BD_EOP 0x04000000 /* End of packet bit */
+
+/* BD Software Flag definitions for Axi DMA
+ */
+#define XDMA_BD_SF_POLL_MODE_MASK 0x00000002
+#define XDMA_BD_SF_SW_DONE_MASK 0x00000001
+
+/* driver defines */
+#define XDMA_MAX_BD_CNT 16384
+#define XDMA_MAX_CHANS_PER_DEVICE 2
+#define XDMA_MAX_TRANS_LEN 0x7FF000
+#define XDMA_MAX_APPWORDS 5
+#define XDMA_BD_CLEANUP_THRESHOLD ((XDMA_MAX_BD_CNT * 8) / 10)
+
+#define XDMA_FLAGS_WAIT_COMPLETE 1
+#define XDMA_FLAGS_TRYWAIT 2
+
+/* Platform data definition until ARM supports device tree */
+struct xdma_channel_config {
+ char *type;
+ unsigned int include_dre;
+ unsigned int datawidth;
+ unsigned int max_burst_len;
+ unsigned int irq;
+ unsigned int poll_mode;
+ unsigned int lite_mode;
+};
+
+struct xdma_device_config {
+ char *type;
+ char *name;
+ unsigned int include_sg;
+ unsigned int sg_include_stscntrl_strm; /* dma only */
+ unsigned int channel_count;
+ struct xdma_channel_config *channel_config;
+};
+
+struct xdma_desc_hw {
+ xlnk_intptr_type next_desc; /* 0x00 */
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 pad1; /* 0x04 */
+#endif
+ xlnk_intptr_type src_addr; /* 0x08 */
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 pad2; /* 0x0c */
+#endif
+ u32 addr_vsize; /* 0x10 */
+ u32 hsize; /* 0x14 */
+ u32 control; /* 0x18 */
+ u32 status; /* 0x1c */
+ u32 app[5]; /* 0x20 */
+ xlnk_intptr_type dmahead;
+#if XLNK_SYS_BIT_WIDTH == 32
+ u32 Reserved0;
+#endif
+ u32 sw_flag; /* 0x3C */
+} __aligned(64);
+
+/* shared by all Xilinx DMA engines */
+struct xdma_regs {
+ u32 cr; /* 0x00 Control Register */
+ u32 sr; /* 0x04 Status Register */
+ u32 cdr; /* 0x08 Current Descriptor Register */
+ u32 cdr_hi;
+ u32 tdr; /* 0x10 Tail Descriptor Register */
+ u32 tdr_hi;
+ u32 src; /* 0x18 Source Address Register (cdma) */
+ u32 src_hi;
+ u32 dst; /* 0x20 Destination Address Register (cdma) */
+ u32 dst_hi;
+ u32 btt_ref; /* 0x28 Bytes To Transfer (cdma) or
+ * park_ref (vdma)
+ */
+ u32 version; /* 0x2c version (vdma) */
+};
+
+/* Per DMA specific operations should be embedded in the channel structure */
+struct xdma_chan {
+ char name[64];
+ struct xdma_regs __iomem *regs;
+ struct device *dev; /* The dma device */
+ struct xdma_desc_hw *bds[XDMA_MAX_BD_CNT];
+ dma_addr_t bd_phys_addr;
+ u32 bd_chain_size;
+ int bd_cur;
+ int bd_tail;
+ unsigned int bd_used; /* # of BDs passed to hw chan */
+ enum dma_data_direction direction; /* Transfer direction */
+ int id; /* Channel ID */
+ int irq; /* Channel IRQ */
+ int poll_mode; /* Poll mode turned on? */
+ spinlock_t lock; /* Descriptor operation lock */
+ struct tasklet_struct tasklet; /* Cleanup work after irq */
+ struct tasklet_struct dma_err_tasklet; /* Cleanup work after irq */
+ int max_len; /* Maximum len per transfer */
+ int err; /* Channel has errors */
+ int client_count;
+};
+
+struct xdma_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct list_head node;
+ struct xdma_chan *chan[XDMA_MAX_CHANS_PER_DEVICE];
+ u8 channel_count;
+};
+
+struct xdma_head {
+ xlnk_intptr_type userbuf;
+ unsigned int size;
+ unsigned int dmaflag;
+ enum dma_data_direction dmadir;
+ struct scatterlist *sglist;
+ unsigned int sgcnt;
+ struct scatterlist *pagelist;
+ unsigned int pagecnt;
+ struct completion cmp;
+ struct xdma_chan *chan;
+ unsigned int nappwords_o;
+ u32 appwords_o[XDMA_MAX_APPWORDS];
+ unsigned int userflag;
+ u32 last_bd_index;
+ struct xlnk_dmabuf_reg *dmabuf;
+};
+
+struct xdma_chan *xdma_request_channel(char *name);
+void xdma_release_channel(struct xdma_chan *chan);
+void xdma_release_all_channels(void);
+int xdma_submit(struct xdma_chan *chan,
+ xlnk_intptr_type userbuf,
+ void *kaddr,
+ unsigned int size,
+ unsigned int nappwords_i,
+ u32 *appwords_i,
+ unsigned int nappwords_o,
+ unsigned int user_flags,
+ struct xdma_head **dmaheadpp,
+ struct xlnk_dmabuf_reg *dp);
+int xdma_wait(struct xdma_head *dmahead,
+ unsigned int user_flags,
+ unsigned int *operating_flags);
+int xdma_getconfig(struct xdma_chan *chan,
+ unsigned char *irq_thresh,
+ unsigned char *irq_delay);
+int xdma_setconfig(struct xdma_chan *chan,
+ unsigned char irq_thresh,
+ unsigned char irq_delay);
+unsigned int xlate_irq(unsigned int hwirq);
+
+#endif
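
The prototypes above are the whole in-kernel contract for the APF DMA engine. A minimal sketch of how a client driver might drive it follows; the channel name, the flag combination, and the assumption that the buffer is physically contiguous are illustrative and not taken from this patch (channel names are actually formed as "<platform-data name>:<id>" in xdma_probe):

    /* Illustrative client sketch -- not part of this patch. */
    #include <linux/slab.h>
    #include "xilinx-dma-apf.h"

    static int example_apf_xfer(dma_addr_t phys, void *vaddr, unsigned int len)
    {
            struct xdma_chan *chan;
            struct xdma_head *head;
            unsigned int op_flags = 0;
            unsigned int flags = CF_FLAG_PHYSICALLY_CONTIGUOUS |
                                 CF_FLAG_CACHE_FLUSH_INVALIDATE;
            int ret;

            chan = xdma_request_channel("xilinx-axidma:0");  /* assumed name */
            if (!chan)
                    return -ENODEV;

            /* Queue one transfer; userbuf carries the physical address here */
            ret = xdma_submit(chan, (xlnk_intptr_type)phys, vaddr, len,
                              0, NULL, 0, flags, &head, NULL);
            if (ret)
                    goto out;

            /* Block until the descriptor chain completes; this also unmaps */
            ret = xdma_wait(head, flags, &op_flags);
            kfree(head);
    out:
            xdma_release_channel(chan);
            return ret;
    }

The CF_FLAG_* values come from xlnk.h (not shown in this hunk), and the transfer direction is fixed by the channel's platform configuration rather than by this call.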
diff --git a/drivers/staging/apf/xlnk-eng.c b/drivers/staging/apf/xlnk-eng.c
new file mode 100644
index 000000000000..bc40128e93cf
--- /dev/null
+++ b/drivers/staging/apf/xlnk-eng.c
@@ -0,0 +1,242 @@
+/*
+ * Xilinx XLNK Engine Driver
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/uio_driver.h>
+
+
+#include "xlnk-eng.h"
+
+static DEFINE_MUTEX(xlnk_eng_list_mutex);
+static LIST_HEAD(xlnk_eng_list);
+
+int xlnk_eng_register_device(struct xlnk_eng_device *xlnk_dev)
+{
+ mutex_lock(&xlnk_eng_list_mutex);
+ /* todo: need to add more error checking */
+
+ list_add_tail(&xlnk_dev->global_node, &xlnk_eng_list);
+
+ mutex_unlock(&xlnk_eng_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(xlnk_eng_register_device);
+
+
+void xlnk_eng_unregister_device(struct xlnk_eng_device *xlnk_dev)
+{
+ mutex_lock(&xlnk_eng_list_mutex);
+ /* todo: need to add more error checking */
+
+ list_del(&xlnk_dev->global_node);
+
+ mutex_unlock(&xlnk_eng_list_mutex);
+}
+EXPORT_SYMBOL(xlnk_eng_unregister_device);
+
+struct xlnk_eng_device *xlnk_eng_request_by_name(char *name)
+{
+ struct xlnk_eng_device *device, *_d;
+ int found = 0;
+
+ mutex_lock(&xlnk_eng_list_mutex);
+
+ list_for_each_entry_safe(device, _d, &xlnk_eng_list, global_node) {
+ if (!strcmp(dev_name(device->dev), name)) {
+ found = 1;
+ break;
+ }
+ }
+ if (found)
+ device = device->alloc(device);
+ else
+ device = NULL;
+
+ mutex_unlock(&xlnk_eng_list_mutex);
+
+ return device;
+}
+EXPORT_SYMBOL(xlnk_eng_request_by_name);
+
+/**
+ * struct xilinx_xlnk_eng_device - device structure for xilinx_xlnk_eng
+ * @common: common device info
+ * @base: base address for device
+ * @lock: lock used by device
+ * @cnt: usage count
+ * @info: info for registering and unregistering uio device
+ */
+struct xilinx_xlnk_eng_device {
+ struct xlnk_eng_device common;
+ void __iomem *base;
+ spinlock_t lock;
+ int cnt;
+ struct uio_info *info;
+};
+
+static void xlnk_eng_release(struct device *dev)
+{
+ struct xilinx_xlnk_eng_device *xdev;
+ struct xlnk_eng_device *xlnk_dev;
+
+ xdev = dev_get_drvdata(dev);
+ xlnk_dev = &xdev->common;
+ if (!xlnk_dev)
+ return;
+
+ xlnk_dev->free(xlnk_dev);
+}
+
+#define DRIVER_NAME "xilinx-xlnk-eng"
+
+#define to_xilinx_xlnk(dev) container_of(dev, \
+ struct xilinx_xlnk_eng_device, common)
+
+static struct xlnk_eng_device *xilinx_xlnk_alloc(
+ struct xlnk_eng_device *xlnkdev)
+{
+ struct xilinx_xlnk_eng_device *xdev;
+ struct xlnk_eng_device *retdev;
+
+ xdev = to_xilinx_xlnk(xlnkdev);
+
+ if (xdev->cnt == 0) {
+ xdev->cnt++;
+ retdev = xlnkdev;
+ } else
+ retdev = NULL;
+
+ return retdev;
+}
+
+static void xilinx_xlnk_free(struct xlnk_eng_device *xlnkdev)
+{
+ struct xilinx_xlnk_eng_device *xdev;
+
+ xdev = to_xilinx_xlnk(xlnkdev);
+
+ xdev->cnt = 0;
+}
+
+static int xlnk_eng_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct xilinx_xlnk_eng_device *xdev;
+ struct uio_info *info;
+ char *devname;
+
+ pr_info("xlnk_eng_probe ...\n");
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev) {
+ dev_err(&pdev->dev, "Not enough memory for device\n");
+ return -ENOMEM;
+ }
+
+ /* more error handling */
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "Not enough memory for device\n");
+ return -ENOMEM;
+ }
+ xdev->info = info;
+ devname = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
+ if (!devname) {
+ dev_err(&pdev->dev, "Not enough memory for device\n");
+ return -ENOMEM;
+ }
+ sprintf(devname, "%s.%d", DRIVER_NAME, pdev->id);
+ pr_info("uio name %s\n", devname);
+ /* iomap registers */
+
+ /* Get the data from the platform device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xdev->base = devm_ioremap_resource(&pdev->dev, res);
+
+ /* %pa types should be used here */
+ dev_info(&pdev->dev, "physical base : 0x%lx\n",
+ (unsigned long)res->start);
+ dev_info(&pdev->dev, "register range : 0x%lx\n",
+ (unsigned long)resource_size(res));
+ dev_info(&pdev->dev, "base remapped to: 0x%lx\n",
+ (unsigned long)xdev->base);
+ if (IS_ERR(xdev->base)) {
+ dev_err(&pdev->dev, "unable to iomap registers\n");
+ return PTR_ERR(xdev->base);
+ }
+
+ info->mem[0].addr = res->start;
+ info->mem[0].size = resource_size(res);
+ info->mem[0].memtype = UIO_MEM_PHYS;
+ info->mem[0].internal_addr = xdev->base;
+
+ /* info->name = DRIVER_NAME; */
+ info->name = devname;
+ info->version = "0.0.1";
+
+ info->irq = -1;
+
+ xdev->common.dev = &pdev->dev;
+
+ xdev->common.alloc = xilinx_xlnk_alloc;
+ xdev->common.free = xilinx_xlnk_free;
+ xdev->common.dev->release = xlnk_eng_release;
+
+ dev_set_drvdata(&pdev->dev, xdev);
+
+ spin_lock_init(&xdev->lock);
+
+ xdev->cnt = 0;
+
+ xlnk_eng_register_device(&xdev->common);
+
+ if (uio_register_device(&pdev->dev, info)) {
+ dev_err(&pdev->dev, "uio_register_device failed\n");
+ return -ENODEV;
+ }
+ dev_info(&pdev->dev, "xilinx-xlnk-eng uio registered\n");
+
+ return 0;
+}
+
+static int xlnk_eng_remove(struct platform_device *pdev)
+{
+ struct uio_info *info;
+ struct xilinx_xlnk_eng_device *xdev;
+
+ xdev = dev_get_drvdata(&pdev->dev);
+ info = xdev->info;
+
+ uio_unregister_device(info);
+ dev_info(&pdev->dev, "xilinx-xlnk-eng uio unregistered\n");
+ xlnk_eng_unregister_device(&xdev->common);
+
+ return 0;
+}
+
+static struct platform_driver xlnk_eng_driver = {
+ .probe = xlnk_eng_probe,
+ .remove = xlnk_eng_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+module_platform_driver(xlnk_eng_driver);
+
+MODULE_DESCRIPTION("Xilinx xlnk engine generic driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xlnk-eng.h b/drivers/staging/apf/xlnk-eng.h
new file mode 100644
index 000000000000..9f9519664705
--- /dev/null
+++ b/drivers/staging/apf/xlnk-eng.h
@@ -0,0 +1,33 @@
+/*
+ * Xilinx XLNK Engine Driver
+ *
+ * Copyright (C) 2010 Xilinx, Inc. All rights reserved.
+ *
+ *
+ */
+
+#ifndef XLNK_ENG_H
+#define XLNK_ENG_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+
+struct xlnk_eng_device {
+ struct list_head global_node;
+ struct xlnk_eng_device * (*alloc)(struct xlnk_eng_device *xdev);
+ void (*free)(struct xlnk_eng_device *xdev);
+ struct device *dev;
+};
+extern int xlnk_eng_register_device(struct xlnk_eng_device *xlnk_dev);
+extern void xlnk_eng_unregister_device(struct xlnk_eng_device *xlnk_dev);
+extern struct xlnk_eng_device *xlnk_eng_request_by_name(char *name);
+
+#endif
+
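
For context, a hedged sketch of a provider registering an engine with the xlnk-eng list declared above; the wrapper structure and names are hypothetical, but the callback shape mirrors the xilinx_xlnk_eng_device example in xlnk-eng.c:

    /* Illustrative provider sketch -- not part of this patch. */
    #include "xlnk-eng.h"

    struct my_eng {                         /* hypothetical wrapper */
            struct xlnk_eng_device common;
            int in_use;
    };

    static struct xlnk_eng_device *my_eng_alloc(struct xlnk_eng_device *xdev)
    {
            struct my_eng *e = container_of(xdev, struct my_eng, common);

            if (e->in_use)
                    return NULL;            /* single-owner policy, as in xlnk-eng.c */
            e->in_use = 1;
            return xdev;
    }

    static void my_eng_free(struct xlnk_eng_device *xdev)
    {
            container_of(xdev, struct my_eng, common)->in_use = 0;
    }

    static int my_eng_register(struct device *dev)
    {
            struct my_eng *e = devm_kzalloc(dev, sizeof(*e), GFP_KERNEL);

            if (!e)
                    return -ENOMEM;
            e->common.dev = dev;
            e->common.alloc = my_eng_alloc;
            e->common.free = my_eng_free;
            return xlnk_eng_register_device(&e->common);
    }

A consumer would then claim the engine with xlnk_eng_request_by_name(dev_name(dev)), which invokes the alloc callback under the list mutex and returns NULL if the engine is already taken.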
diff --git a/drivers/staging/apf/xlnk-ioctl.h b/drivers/staging/apf/xlnk-ioctl.h
new file mode 100644
index 000000000000..d909fa65459f
--- /dev/null
+++ b/drivers/staging/apf/xlnk-ioctl.h
@@ -0,0 +1,37 @@
+#ifndef _XLNK_IOCTL_H
+#define _XLNK_IOCTL_H
+
+#include <linux/ioctl.h>
+
+#define XLNK_IOC_MAGIC 'X'
+
+#define XLNK_IOCRESET _IO(XLNK_IOC_MAGIC, 0)
+
+#define XLNK_IOCALLOCBUF _IOWR(XLNK_IOC_MAGIC, 2, unsigned long)
+#define XLNK_IOCFREEBUF _IOWR(XLNK_IOC_MAGIC, 3, unsigned long)
+#define XLNK_IOCADDDMABUF _IOWR(XLNK_IOC_MAGIC, 4, unsigned long)
+#define XLNK_IOCCLEARDMABUF _IOWR(XLNK_IOC_MAGIC, 5, unsigned long)
+
+#define XLNK_IOCDMAREQUEST _IOWR(XLNK_IOC_MAGIC, 7, unsigned long)
+#define XLNK_IOCDMASUBMIT _IOWR(XLNK_IOC_MAGIC, 8, unsigned long)
+#define XLNK_IOCDMAWAIT _IOWR(XLNK_IOC_MAGIC, 9, unsigned long)
+#define XLNK_IOCDMARELEASE _IOWR(XLNK_IOC_MAGIC, 10, unsigned long)
+
+#define XLNK_IOCMEMOP _IOWR(XLNK_IOC_MAGIC, 25, unsigned long)
+#define XLNK_IOCDEVREGISTER _IOWR(XLNK_IOC_MAGIC, 16, unsigned long)
+#define XLNK_IOCDMAREGISTER _IOWR(XLNK_IOC_MAGIC, 17, unsigned long)
+#define XLNK_IOCDEVUNREGISTER _IOWR(XLNK_IOC_MAGIC, 18, unsigned long)
+#define XLNK_IOCCDMAREQUEST _IOWR(XLNK_IOC_MAGIC, 19, unsigned long)
+#define XLNK_IOCCDMASUBMIT _IOWR(XLNK_IOC_MAGIC, 20, unsigned long)
+#define XLNK_IOCMCDMAREGISTER _IOWR(XLNK_IOC_MAGIC, 23, unsigned long)
+#define XLNK_IOCCACHECTRL _IOWR(XLNK_IOC_MAGIC, 24, unsigned long)
+
+#define XLNK_IOCIRQREGISTER _IOWR(XLNK_IOC_MAGIC, 35, unsigned long)
+#define XLNK_IOCIRQUNREGISTER _IOWR(XLNK_IOC_MAGIC, 36, unsigned long)
+#define XLNK_IOCIRQWAIT _IOWR(XLNK_IOC_MAGIC, 37, unsigned long)
+
+#define XLNK_IOCSHUTDOWN _IOWR(XLNK_IOC_MAGIC, 100, unsigned long)
+#define XLNK_IOCRECRES _IOWR(XLNK_IOC_MAGIC, 101, unsigned long)
+#define XLNK_IOC_MAXNR 101
+
+#endif
diff --git a/drivers/staging/apf/xlnk-sysdef.h b/drivers/staging/apf/xlnk-sysdef.h
new file mode 100644
index 000000000000..b6334be3b9c4
--- /dev/null
+++ b/drivers/staging/apf/xlnk-sysdef.h
@@ -0,0 +1,34 @@
+#ifndef XLNK_SYSDEF_H
+#define XLNK_SYSDEF_H
+
+#if __SIZEOF_POINTER__ == 4
+ #define XLNK_SYS_BIT_WIDTH 32
+#elif __SIZEOF_POINTER__ == 8
+ #define XLNK_SYS_BIT_WIDTH 64
+#endif
+
+#include <linux/types.h>
+
+#if XLNK_SYS_BIT_WIDTH == 32
+
+ typedef u32 xlnk_intptr_type;
+ typedef s32 xlnk_int_type;
+ typedef u32 xlnk_uint_type;
+ typedef u8 xlnk_byte_type;
+ typedef s8 xlnk_char_type;
+ #define xlnk_enum_type s32
+
+#elif XLNK_SYS_BIT_WIDTH == 64
+
+ typedef u64 xlnk_intptr_type;
+ typedef s32 xlnk_int_type;
+ typedef u32 xlnk_uint_type;
+ typedef u8 xlnk_byte_type;
+ typedef s8 xlnk_char_type;
+ #define xlnk_enum_type s32
+
+#else
+ #error "Please define application bit width and system bit width"
+#endif
+
+#endif
diff --git a/drivers/staging/apf/xlnk.c b/drivers/staging/apf/xlnk.c
new file mode 100644
index 000000000000..4701898cc5ec
--- /dev/null
+++ b/drivers/staging/apf/xlnk.c
@@ -0,0 +1,1580 @@
+/*
+ * xlnk.c
+ *
+ * Xilinx Accelerator driver support.
+ *
+ * Copyright (C) 2010 Xilinx Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+/* ----------------------------------- Host OS */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <linux/dma-buf.h>
+
+#include <linux/string.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/dmaengine.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/cdev.h>
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/errno.h> /* error codes */
+#include <linux/dma-mapping.h> /* dma */
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/dma/xilinx_dma.h>
+#include <linux/uio_driver.h>
+#include <asm/cacheflush.h>
+#include <linux/semaphore.h>
+
+#include "xlnk-ioctl.h"
+#include "xlnk-sysdef.h"
+#include "xlnk.h"
+
+#ifdef CONFIG_XILINX_DMA_APF
+#include "xilinx-dma-apf.h"
+#endif
+
+#define DRIVER_NAME "xlnk"
+#define DRIVER_VERSION "0.2"
+
+static struct platform_device *xlnk_pdev;
+static struct device *xlnk_dev;
+
+static struct cdev xlnk_cdev;
+
+static struct class *xlnk_class;
+
+static s32 driver_major;
+
+static char *driver_name = DRIVER_NAME;
+
+static void *xlnk_dev_buf;
+static ssize_t xlnk_dev_size;
+static int xlnk_dev_vmas;
+
+#define XLNK_BUF_POOL_SIZE 4096
+static unsigned int xlnk_bufpool_size = XLNK_BUF_POOL_SIZE;
+static void *xlnk_bufpool[XLNK_BUF_POOL_SIZE];
+static void *xlnk_bufpool_alloc_point[XLNK_BUF_POOL_SIZE];
+static xlnk_intptr_type xlnk_userbuf[XLNK_BUF_POOL_SIZE];
+static int xlnk_buf_process[XLNK_BUF_POOL_SIZE];
+static dma_addr_t xlnk_phyaddr[XLNK_BUF_POOL_SIZE];
+static size_t xlnk_buflen[XLNK_BUF_POOL_SIZE];
+static unsigned int xlnk_bufcacheable[XLNK_BUF_POOL_SIZE];
+static spinlock_t xlnk_buf_lock;
+
+#define XLNK_IRQ_POOL_SIZE 256
+static struct xlnk_irq_control *xlnk_irq_set[XLNK_IRQ_POOL_SIZE];
+static spinlock_t xlnk_irq_lock;
+
+static int xlnk_open(struct inode *ip, struct file *filp);
+static int xlnk_release(struct inode *ip, struct file *filp);
+static long xlnk_ioctl(struct file *filp, unsigned int code,
+ unsigned long args);
+static ssize_t xlnk_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *offp);
+static ssize_t xlnk_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp);
+static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma);
+static void xlnk_vma_open(struct vm_area_struct *vma);
+static void xlnk_vma_close(struct vm_area_struct *vma);
+
+static int xlnk_init_bufpool(void);
+static void xlnk_init_irqpool(void);
+
+LIST_HEAD(xlnk_dmabuf_list);
+
+static int xlnk_shutdown(unsigned long buf);
+static int xlnk_recover_resource(unsigned long buf);
+
+static const struct file_operations xlnk_fops = {
+ .open = xlnk_open,
+ .release = xlnk_release,
+ .read = xlnk_read,
+ .write = xlnk_write,
+ .unlocked_ioctl = xlnk_ioctl,
+ .mmap = xlnk_mmap,
+};
+
+#define MAX_XLNK_DMAS 128
+
+struct xlnk_device_pack {
+ char name[64];
+ struct platform_device pdev;
+ struct resource res[8];
+ struct uio_info *io_ptr;
+ int refs;
+
+#ifdef CONFIG_XILINX_DMA_APF
+ struct xdma_channel_config dma_chan_cfg[4]; /* for xidane dma only */
+ struct xdma_device_config dma_dev_cfg; /* for xidane dma only */
+#endif
+};
+
+static struct semaphore xlnk_devpack_sem;
+static struct xlnk_device_pack *xlnk_devpacks[MAX_XLNK_DMAS];
+static void xlnk_devpacks_init(void)
+{
+ unsigned int i;
+
+ sema_init(&xlnk_devpack_sem, 1);
+ for (i = 0; i < MAX_XLNK_DMAS; i++)
+ xlnk_devpacks[i] = NULL;
+}
+
+static struct xlnk_device_pack *xlnk_devpacks_alloc(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++) {
+ if (!xlnk_devpacks[i]) {
+ struct xlnk_device_pack *ret;
+
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return NULL;
+ ret->pdev.id = i;
+ xlnk_devpacks[i] = ret;
+
+ return ret;
+ }
+ }
+
+ return NULL;
+}
+
+static void xlnk_devpacks_delete(struct xlnk_device_pack *devpack)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++)
+ if (xlnk_devpacks[i] == devpack)
+ xlnk_devpacks[i] = NULL;
+ kfree(devpack);
+}
+
+static struct xlnk_device_pack *xlnk_devpacks_find(xlnk_intptr_type base)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++) {
+ if (xlnk_devpacks[i] &&
+ xlnk_devpacks[i]->res[0].start == base)
+ return xlnk_devpacks[i];
+ }
+ return NULL;
+}
+
+static void xlnk_devpacks_free(xlnk_intptr_type base)
+{
+ struct xlnk_device_pack *devpack;
+
+ down(&xlnk_devpack_sem);
+ devpack = xlnk_devpacks_find(base);
+ if (!devpack) {
+ up(&xlnk_devpack_sem);
+ return;
+ }
+ devpack->refs--;
+ if (devpack->refs) {
+ up(&xlnk_devpack_sem);
+ return;
+ }
+ platform_device_unregister(&devpack->pdev);
+ xlnk_devpacks_delete(devpack);
+ up(&xlnk_devpack_sem);
+}
+
+static void xlnk_devpacks_free_all(void)
+{
+ struct xlnk_device_pack *devpack;
+ unsigned int i;
+
+ for (i = 0; i < MAX_XLNK_DMAS; i++) {
+ devpack = xlnk_devpacks[i];
+ if (devpack) {
+ if (devpack->io_ptr) {
+ uio_unregister_device(devpack->io_ptr);
+ kfree(devpack->io_ptr);
+ } else {
+ platform_device_unregister(&devpack->pdev);
+ }
+ xlnk_devpacks_delete(devpack);
+ }
+ }
+}
+
+static int xlnk_probe(struct platform_device *pdev)
+{
+ int err;
+ dev_t dev = 0;
+
+ xlnk_dev_buf = NULL;
+ xlnk_dev_size = 0;
+ xlnk_dev_vmas = 0;
+
+ /* use 2.6 device model */
+ err = alloc_chrdev_region(&dev, 0, 1, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "%s: Can't get major %d\n",
+ __func__, driver_major);
+ goto err1;
+ }
+
+ cdev_init(&xlnk_cdev, &xlnk_fops);
+
+ xlnk_cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&xlnk_cdev, dev, 1);
+
+ if (err) {
+ dev_err(&pdev->dev, "%s: Failed to add XLNK device\n",
+ __func__);
+ goto err3;
+ }
+
+ /* udev support */
+ xlnk_class = class_create(THIS_MODULE, "xlnk");
+ if (IS_ERR(xlnk_class)) {
+ dev_err(&pdev->dev, "%s: Error creating xlnk class\n", __func__);
+ err = PTR_ERR(xlnk_class);
+ goto err3;
+ }
+
+ driver_major = MAJOR(dev);
+
+ dev_info(&pdev->dev, "Major %d\n", driver_major);
+
+ device_create(xlnk_class, NULL, MKDEV(driver_major, 0),
+ NULL, "xlnk");
+
+ err = xlnk_init_bufpool();
+ if (err) {
+ dev_err(&pdev->dev, "%s: Failed to allocate buffer pool\n",
+ __func__);
+ goto err3;
+ }
+
+ xlnk_init_irqpool();
+
+ dev_info(&pdev->dev, "%s driver loaded\n", DRIVER_NAME);
+
+ xlnk_pdev = pdev;
+ xlnk_dev = &pdev->dev;
+
+ if (xlnk_pdev)
+ dev_info(&pdev->dev, "xlnk_pdev is not null\n");
+ else
+ dev_info(&pdev->dev, "xlnk_pdev is null\n");
+
+ xlnk_devpacks_init();
+
+ return 0;
+err3:
+ cdev_del(&xlnk_cdev);
+ unregister_chrdev_region(dev, 1);
+err1:
+ return err;
+}
+
+static int xlnk_buf_findnull(void)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++) {
+ if (!xlnk_bufpool[i])
+ return i;
+ }
+
+ return 0;
+}
+
+static int xlnk_buf_find_by_phys_addr(xlnk_intptr_type addr)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++) {
+ if (xlnk_bufpool[i] &&
+ xlnk_phyaddr[i] <= addr &&
+ xlnk_phyaddr[i] + xlnk_buflen[i] > addr)
+ return i;
+ }
+
+ return 0;
+}
+
+static int xlnk_buf_find_by_user_addr(xlnk_intptr_type addr, int pid)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++) {
+ if (xlnk_bufpool[i] &&
+ xlnk_buf_process[i] == pid &&
+ xlnk_userbuf[i] <= addr &&
+ xlnk_userbuf[i] + xlnk_buflen[i] > addr)
+ return i;
+ }
+
+ return 0;
+}
+
+/*
+ * allocate and return an id
+ * id must be a positive number
+ */
+static int xlnk_allocbuf(unsigned int len, unsigned int cacheable)
+{
+ int id;
+ void *kaddr;
+ dma_addr_t phys_addr_anchor;
+ unsigned long attrs;
+
+ attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
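+ /*
+ * For cacheable buffers DMA_ATTR_NON_CONSISTENT requests a
+ * non-coherent mapping; cache maintenance is then performed
+ * explicitly through the XLNK_IOCCACHECTRL / XLNK_IOCMEMOP paths
+ * rather than by the coherent DMA API.
+ */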
+
+ kaddr = dma_alloc_attrs(xlnk_dev,
+ len,
+ &phys_addr_anchor,
+ GFP_KERNEL | GFP_DMA,
+ attrs);
+ if (!kaddr)
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ id = xlnk_buf_findnull();
+ if (id > 0 && id < XLNK_BUF_POOL_SIZE) {
+ xlnk_bufpool_alloc_point[id] = kaddr;
+ xlnk_bufpool[id] = kaddr;
+ xlnk_buflen[id] = len;
+ xlnk_bufcacheable[id] = cacheable;
+ xlnk_phyaddr[id] = phys_addr_anchor;
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ if (id <= 0 || id >= XLNK_BUF_POOL_SIZE) {
+ /* pool is full; do not leak the DMA allocation */
+ dma_free_attrs(xlnk_dev, len, kaddr, phys_addr_anchor, attrs);
+ return -ENOMEM;
+ }
+
+ return id;
+}
+
+static int xlnk_init_bufpool(void)
+{
+ unsigned int i;
+
+ spin_lock_init(&xlnk_buf_lock);
+ xlnk_dev_buf = kmalloc(8192, GFP_KERNEL | GFP_DMA);
+ if (!xlnk_dev_buf) {
+ dev_err(xlnk_dev, "%s: malloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ *((char *)xlnk_dev_buf) = '\0';
+
+ xlnk_bufpool[0] = xlnk_dev_buf;
+ for (i = 1; i < xlnk_bufpool_size; i++)
+ xlnk_bufpool[i] = NULL;
+
+ return 0;
+}
+
+static void xlnk_init_irqpool(void)
+{
+ int i;
+
+ spin_lock_init(&xlnk_irq_lock);
+ for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++)
+ xlnk_irq_set[i] = NULL;
+}
+
+#define XLNK_SUSPEND NULL
+#define XLNK_RESUME NULL
+
+static int xlnk_remove(struct platform_device *pdev)
+{
+ dev_t devno;
+
+ kfree(xlnk_dev_buf);
+ xlnk_dev_buf = NULL;
+
+ devno = MKDEV(driver_major, 0);
+ cdev_del(&xlnk_cdev);
+ unregister_chrdev_region(devno, 1);
+ if (xlnk_class) {
+ /* remove the device from sysfs */
+ device_destroy(xlnk_class, MKDEV(driver_major, 0));
+ class_destroy(xlnk_class);
+ }
+
+ xlnk_devpacks_free_all();
+
+ return 0;
+}
+
+static const struct of_device_id xlnk_match[] = {
+ { .compatible = "xlnx,xlnk-1.0", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xlnk_match);
+
+static struct platform_driver xlnk_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnk_match,
+ },
+ .probe = xlnk_probe,
+ .remove = xlnk_remove,
+ .suspend = XLNK_SUSPEND,
+ .resume = XLNK_RESUME,
+};
+
+static u64 dma_mask = 0xFFFFFFFFFFFFFFFFull;
+
+/*
+ * This function is called when an application opens a handle to the
+ * bridge driver.
+ */
+static int xlnk_open(struct inode *ip, struct file *filp)
+{
+ if ((filp->f_flags & O_ACCMODE) == O_WRONLY)
+ xlnk_dev_size = 0;
+
+ return 0;
+}
+
+static ssize_t xlnk_read(struct file *filp,
+ char __user *buf,
+ size_t count,
+ loff_t *offp)
+{
+ ssize_t retval = 0;
+
+ if (*offp >= xlnk_dev_size)
+ goto out;
+
+ if (*offp + count > xlnk_dev_size)
+ count = xlnk_dev_size - *offp;
+
+ if (copy_to_user(buf, xlnk_dev_buf + *offp, count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+ *offp += count;
+ retval = count;
+
+ out:
+ return retval;
+}
+
+static ssize_t xlnk_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offp)
+{
+ ssize_t retval = 0;
+
+ /* xlnk_dev_buf is the 8 KiB scratch buffer allocated in xlnk_init_bufpool() */
+ if (*offp < 0 || *offp >= 8192 || count > 8192 - *offp) {
+ retval = -ENOSPC;
+ goto out;
+ }
+
+ if (copy_from_user(xlnk_dev_buf + *offp, buf, count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+ *offp += count;
+ retval = count;
+
+ if (xlnk_dev_size < *offp)
+ xlnk_dev_size = *offp;
+
+ out:
+ return retval;
+}
+
+/*
+ * This function is called when an application closes a handle to the bridge
+ * driver.
+ */
+static int xlnk_release(struct inode *ip, struct file *filp)
+{
+ return 0;
+}
+
+static int xlnk_devregister(char *name,
+ unsigned int id,
+ xlnk_intptr_type base,
+ unsigned int size,
+ unsigned int *irqs,
+ xlnk_intptr_type *handle)
+{
+ unsigned int nres;
+ unsigned int nirq;
+ unsigned int *irqptr;
+ struct xlnk_device_pack *devpack;
+ unsigned int i;
+ int status;
+
+ down(&xlnk_devpack_sem);
+ devpack = xlnk_devpacks_find(base);
+ if (devpack) {
+ *handle = (xlnk_intptr_type)devpack;
+ devpack->refs++;
+ status = 0;
+ } else {
+ nirq = 0;
+ irqptr = irqs;
+
+ while (*irqptr) {
+ nirq++;
+ irqptr++;
+ }
+
+ if (nirq > 7) {
+ up(&xlnk_devpack_sem);
+ return -ENOMEM;
+ }
+
+ nres = nirq + 1;
+
+ devpack = xlnk_devpacks_alloc();
+ if (!devpack) {
+ up(&xlnk_devpack_sem);
+ pr_err("Failed to allocate device %s\n", name);
+ return -ENOMEM;
+ }
+ devpack->io_ptr = NULL;
+ strscpy(devpack->name, name, sizeof(devpack->name));
+ devpack->pdev.name = devpack->name;
+
+ devpack->pdev.dev.dma_mask = &dma_mask;
+ devpack->pdev.dev.coherent_dma_mask = dma_mask;
+
+ devpack->res[0].start = base;
+ devpack->res[0].end = base + size - 1;
+ devpack->res[0].flags = IORESOURCE_MEM;
+
+ for (i = 0; i < nirq; i++) {
+ devpack->res[i + 1].start = irqs[i];
+ devpack->res[i + 1].end = irqs[i];
+ devpack->res[i + 1].flags = IORESOURCE_IRQ;
+ }
+
+ devpack->pdev.resource = devpack->res;
+ devpack->pdev.num_resources = nres;
+
+ status = platform_device_register(&devpack->pdev);
+ if (status) {
+ xlnk_devpacks_delete(devpack);
+ *handle = 0;
+ } else {
+ *handle = (xlnk_intptr_type)devpack;
+ }
+ }
+ up(&xlnk_devpack_sem);
+
+ return status;
+}
+
+static int xlnk_dmaregister(char *name,
+ unsigned int id,
+ xlnk_intptr_type base,
+ unsigned int size,
+ unsigned int chan_num,
+ unsigned int chan0_dir,
+ unsigned int chan0_irq,
+ unsigned int chan0_poll_mode,
+ unsigned int chan0_include_dre,
+ unsigned int chan0_data_width,
+ unsigned int chan1_dir,
+ unsigned int chan1_irq,
+ unsigned int chan1_poll_mode,
+ unsigned int chan1_include_dre,
+ unsigned int chan1_data_width,
+ xlnk_intptr_type *handle)
+{
+ int status = 0;
+
+#ifdef CONFIG_XILINX_DMA_APF
+
+ struct xlnk_device_pack *devpack;
+
+ if (chan_num < 1 || chan_num > 2) {
+ pr_err("%s: Expected either 1 or 2 channels, got %d\n",
+ __func__, chan_num);
+ return -EINVAL;
+ }
+
+ down(&xlnk_devpack_sem);
+ devpack = xlnk_devpacks_find(base);
+ if (devpack) {
+ *handle = (xlnk_intptr_type)devpack;
+ devpack->refs++;
+ status = 0;
+ } else {
+ devpack = xlnk_devpacks_alloc();
+ if (!devpack) {
+ up(&xlnk_devpack_sem);
+ return -ENOMEM;
+ }
+ strscpy(devpack->name, name, sizeof(devpack->name));
+ devpack->pdev.name = "xilinx-axidma";
+
+ devpack->io_ptr = NULL;
+
+ devpack->dma_chan_cfg[0].include_dre = chan0_include_dre;
+ devpack->dma_chan_cfg[0].datawidth = chan0_data_width;
+ devpack->dma_chan_cfg[0].irq = chan0_irq;
+ devpack->dma_chan_cfg[0].poll_mode = chan0_poll_mode;
+ devpack->dma_chan_cfg[0].type =
+ (chan0_dir == XLNK_DMA_FROM_DEVICE) ?
+ "axi-dma-s2mm-channel" :
+ "axi-dma-mm2s-channel";
+
+ if (chan_num > 1) {
+ devpack->dma_chan_cfg[1].include_dre =
+ chan1_include_dre;
+ devpack->dma_chan_cfg[1].datawidth = chan1_data_width;
+ devpack->dma_chan_cfg[1].irq = chan1_irq;
+ devpack->dma_chan_cfg[1].poll_mode = chan1_poll_mode;
+ devpack->dma_chan_cfg[1].type =
+ (chan1_dir == XLNK_DMA_FROM_DEVICE) ?
+ "axi-dma-s2mm-channel" :
+ "axi-dma-mm2s-channel";
+ }
+
+ devpack->dma_dev_cfg.name = devpack->name;
+ devpack->dma_dev_cfg.type = "axi-dma";
+ devpack->dma_dev_cfg.include_sg = 1;
+ devpack->dma_dev_cfg.sg_include_stscntrl_strm = 1;
+ devpack->dma_dev_cfg.channel_count = chan_num;
+ devpack->dma_dev_cfg.channel_config = &devpack->dma_chan_cfg[0];
+
+ devpack->pdev.dev.platform_data = &devpack->dma_dev_cfg;
+
+ devpack->pdev.dev.dma_mask = &dma_mask;
+ devpack->pdev.dev.coherent_dma_mask = dma_mask;
+
+ devpack->res[0].start = base;
+ devpack->res[0].end = base + size - 1;
+ devpack->res[0].flags = IORESOURCE_MEM;
+
+ devpack->pdev.resource = devpack->res;
+ devpack->pdev.num_resources = 1;
+ status = platform_device_register(&devpack->pdev);
+ if (status) {
+ xlnk_devpacks_delete(devpack);
+ *handle = 0;
+ } else {
+ *handle = (xlnk_intptr_type)devpack;
+ }
+ }
+ up(&xlnk_devpack_sem);
+
+#endif
+ return status;
+}
+
+static int xlnk_allocbuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ xlnk_int_type id;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ id = xlnk_allocbuf(temp_args.allocbuf.len,
+ temp_args.allocbuf.cacheable);
+
+ if (id <= 0)
+ return -ENOMEM;
+
+ temp_args.allocbuf.id = id;
+ temp_args.allocbuf.phyaddr = (xlnk_intptr_type)(xlnk_phyaddr[id]);
+ status = copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args));
+
+ return status;
+}
+
+static int xlnk_freebuf(int id)
+{
+ void *alloc_point;
+ dma_addr_t p_addr;
+ size_t buf_len;
+ int cacheable;
+ unsigned long attrs;
+
+ if (id <= 0 || id >= xlnk_bufpool_size)
+ return -ENOMEM;
+
+ if (!xlnk_bufpool[id])
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ alloc_point = xlnk_bufpool_alloc_point[id];
+ p_addr = xlnk_phyaddr[id];
+ buf_len = xlnk_buflen[id];
+ xlnk_bufpool[id] = NULL;
+ xlnk_phyaddr[id] = (dma_addr_t)NULL;
+ xlnk_buflen[id] = 0;
+ cacheable = xlnk_bufcacheable[id];
+ xlnk_bufcacheable[id] = 0;
+ spin_unlock(&xlnk_buf_lock);
+
+ attrs = cacheable ? DMA_ATTR_NON_CONSISTENT : 0;
+
+ dma_free_attrs(xlnk_dev,
+ buf_len,
+ alloc_point,
+ p_addr,
+ attrs);
+
+ return 0;
+}
+
+static void xlnk_free_all_buf(void)
+{
+ int i;
+
+ for (i = 1; i < xlnk_bufpool_size; i++)
+ xlnk_freebuf(i);
+}
+
+static int xlnk_freebuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int id;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ id = temp_args.freebuf.id;
+ return xlnk_freebuf(id);
+}
+
+static int xlnk_adddmabuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ struct xlnk_dmabuf_reg *db;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ list_for_each_entry(db, &xlnk_dmabuf_list, list) {
+ if (db->user_vaddr == temp_args.dmabuf.user_addr) {
+ pr_err("Attempting to register DMA-BUF for addr %llx that is already registered\n",
+ (unsigned long long)temp_args.dmabuf.user_addr);
+ spin_unlock(&xlnk_buf_lock);
+ return -EINVAL;
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ db = kzalloc(sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return -ENOMEM;
+
+ db->dmabuf_fd = temp_args.dmabuf.dmabuf_fd;
+ db->user_vaddr = temp_args.dmabuf.user_addr;
+ db->dbuf = dma_buf_get(db->dmabuf_fd);
+ if (IS_ERR(db->dbuf)) {
+ pr_err("Failed to get DMA-BUF\n");
+ kfree(db);
+ return -EINVAL;
+ }
+
+ db->dbuf_attach = dma_buf_attach(db->dbuf, xlnk_dev);
+ if (IS_ERR(db->dbuf_attach)) {
+ dma_buf_put(db->dbuf);
+ pr_err("Failed DMA-BUF attach\n");
+ kfree(db);
+ return -EINVAL;
+ }
+
+ db->dbuf_sg_table = dma_buf_map_attachment(db->dbuf_attach,
+ DMA_BIDIRECTIONAL);
+
+ if (IS_ERR_OR_NULL(db->dbuf_sg_table)) {
+ pr_err("Failed DMA-BUF map_attachment\n");
+ dma_buf_detach(db->dbuf, db->dbuf_attach);
+ dma_buf_put(db->dbuf);
+ kfree(db);
+ return -EINVAL;
+ }
+
+ spin_lock(&xlnk_buf_lock);
+ INIT_LIST_HEAD(&db->list);
+ list_add_tail(&db->list, &xlnk_dmabuf_list);
+ spin_unlock(&xlnk_buf_lock);
+
+ return 0;
+}
+
+static int xlnk_cleardmabuf_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ struct xlnk_dmabuf_reg *dp, *dp_temp;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ spin_lock(&xlnk_buf_lock);
+ list_for_each_entry_safe(dp, dp_temp, &xlnk_dmabuf_list, list) {
+ if (dp->user_vaddr == temp_args.dmabuf.user_addr) {
+ dma_buf_unmap_attachment(dp->dbuf_attach,
+ dp->dbuf_sg_table,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(dp->dbuf, dp->dbuf_attach);
+ dma_buf_put(dp->dbuf);
+ list_del(&dp->list);
+ spin_unlock(&xlnk_buf_lock);
+ kfree(dp);
+ return 0;
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+ pr_err("Attempting to unregister a DMA-BUF that was not registered at addr %llx\n",
+ (unsigned long long)temp_args.dmabuf.user_addr);
+
+ return -ENOENT;
+}
+
+static int xlnk_dmarequest_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+ int status;
+ struct xdma_chan *chan;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ if (!temp_args.dmarequest.name[0])
+ return 0;
+
+ down(&xlnk_devpack_sem);
+ chan = xdma_request_channel(temp_args.dmarequest.name);
+ up(&xlnk_devpack_sem);
+ if (!chan)
+ return -ENOMEM;
+ temp_args.dmarequest.dmachan = (xlnk_intptr_type)chan;
+ temp_args.dmarequest.bd_space_phys_addr = chan->bd_phys_addr;
+ temp_args.dmarequest.bd_space_size = chan->bd_chain_size;
+
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+
+ return 0;
+#else
+ return -1;
+#endif
+}
+
+static int xlnk_dmasubmit_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+ struct xdma_head *dmahead;
+ struct xlnk_dmabuf_reg *dp, *cp = NULL;
+ int buf_id;
+ void *kaddr = NULL;
+ int status = -1;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ if (!temp_args.dmasubmit.dmachan)
+ return -ENODEV;
+
+ spin_lock(&xlnk_buf_lock);
+ buf_id = xlnk_buf_find_by_phys_addr(temp_args.dmasubmit.buf);
+ if (buf_id) {
+ xlnk_intptr_type addr_delta =
+ temp_args.dmasubmit.buf -
+ xlnk_phyaddr[buf_id];
+ kaddr = (u8 *)(xlnk_bufpool[buf_id]) + addr_delta;
+ } else {
+ list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
+ if (dp->user_vaddr == temp_args.dmasubmit.buf) {
+ cp = dp;
+ break;
+ }
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ status = xdma_submit((struct xdma_chan *)
+ (temp_args.dmasubmit.dmachan),
+ temp_args.dmasubmit.buf,
+ kaddr,
+ temp_args.dmasubmit.len,
+ temp_args.dmasubmit.nappwords_i,
+ temp_args.dmasubmit.appwords_i,
+ temp_args.dmasubmit.nappwords_o,
+ temp_args.dmasubmit.flag,
+ &dmahead,
+ cp);
+
+ if (!status) {
+ temp_args.dmasubmit.dmahandle = (xlnk_intptr_type)dmahead;
+ temp_args.dmasubmit.last_bd_index =
+ (xlnk_intptr_type)dmahead->last_bd_index;
+
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+ }
+ return status;
+#endif
+ return -ENOMEM;
+}
+
+static int xlnk_dmawait_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ int status = -1;
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+ struct xdma_head *dmahead;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ dmahead = (struct xdma_head *)temp_args.dmawait.dmahandle;
+ status = xdma_wait(dmahead,
+ dmahead->userflag,
+ &temp_args.dmawait.flags);
+ if (temp_args.dmawait.flags & XDMA_FLAGS_WAIT_COMPLETE) {
+ if (temp_args.dmawait.nappwords) {
+ memcpy(temp_args.dmawait.appwords,
+ dmahead->appwords_o,
+ dmahead->nappwords_o * sizeof(u32));
+ }
+ kfree(dmahead);
+ }
+ if (copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(union xlnk_args)))
+ return -EFAULT;
+#endif
+
+ return status;
+}
+
+static int xlnk_dmarelease_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ int status = -1;
+#ifdef CONFIG_XILINX_DMA_APF
+ union xlnk_args temp_args;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+ down(&xlnk_devpack_sem);
+ xdma_release_channel((struct xdma_chan *)
+ (temp_args.dmarelease.dmachan));
+ up(&xlnk_devpack_sem);
+#endif
+
+ return status;
+}
+
+static int xlnk_devregister_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ xlnk_intptr_type handle;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ status = xlnk_devregister(temp_args.devregister.name,
+ temp_args.devregister.id,
+ temp_args.devregister.base,
+ temp_args.devregister.size,
+ temp_args.devregister.irqs,
+ &handle);
+
+ return status;
+}
+
+static int xlnk_dmaregister_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ xlnk_intptr_type handle;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ status = xlnk_dmaregister(temp_args.dmaregister.name,
+ temp_args.dmaregister.id,
+ temp_args.dmaregister.base,
+ temp_args.dmaregister.size,
+ temp_args.dmaregister.chan_num,
+ temp_args.dmaregister.chan0_dir,
+ temp_args.dmaregister.chan0_irq,
+ temp_args.dmaregister.chan0_poll_mode,
+ temp_args.dmaregister.chan0_include_dre,
+ temp_args.dmaregister.chan0_data_width,
+ temp_args.dmaregister.chan1_dir,
+ temp_args.dmaregister.chan1_irq,
+ temp_args.dmaregister.chan1_poll_mode,
+ temp_args.dmaregister.chan1_include_dre,
+ temp_args.dmaregister.chan1_data_width,
+ &handle);
+
+ return status;
+}
+
+static int xlnk_devunregister_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+
+ status = copy_from_user(&temp_args, (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status)
+ return -ENOMEM;
+
+ xlnk_devpacks_free(temp_args.devunregister.base);
+
+ return 0;
+}
+
+static irqreturn_t xlnk_accel_isr(int irq, void *arg)
+{
+ struct xlnk_irq_control *irq_control = (struct xlnk_irq_control *)arg;
+
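+ /*
+ * Level-triggered accelerator interrupt: mask it here and complete
+ * the waiter; the XLNK_IOCIRQWAIT path re-enables the line once user
+ * space has serviced the device.
+ */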
+ disable_irq_nosync(irq);
+ complete(&irq_control->cmp);
+
+ return IRQ_HANDLED;
+}
+
+static int xlnk_irq_register_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int i;
+ struct xlnk_irq_control *ctrl;
+ int irq_id = -1;
+ int irq_entry_new = 0;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(temp_args.irqregister));
+ if (status)
+ return -ENOMEM;
+
+ if (temp_args.irqregister.type !=
+ (XLNK_IRQ_LEVEL | XLNK_IRQ_ACTIVE_HIGH)) {
+ dev_err(xlnk_dev, "Unsupported interrupt type %x\n",
+ temp_args.irqregister.type);
+ return -EINVAL;
+ }
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->irq = xlate_irq(temp_args.irqregister.irq);
+ ctrl->enabled = 0;
+ init_completion(&ctrl->cmp);
+
+ spin_lock(&xlnk_irq_lock);
+ for (i = 0; i < XLNK_IRQ_POOL_SIZE; i++) {
+ if (!xlnk_irq_set[i] && irq_id == -1) {
+ irq_entry_new = 1;
+ irq_id = i;
+ xlnk_irq_set[i] = ctrl;
+ } else if (xlnk_irq_set[i] &&
+ xlnk_irq_set[i]->irq == ctrl->irq) {
+ irq_id = i;
+ break;
+ }
+ }
+ spin_unlock(&xlnk_irq_lock);
+
+ if (irq_id == -1) {
+ kfree(ctrl);
+ return -ENOMEM;
+ }
+
+ if (!irq_entry_new) {
+ kfree(ctrl);
+ } else {
+ status = request_irq(ctrl->irq,
+ xlnk_accel_isr,
+ IRQF_SHARED,
+ "xlnk",
+ ctrl);
+ if (status) {
+ xlnk_irq_set[irq_id] = NULL;
+ kfree(ctrl);
+ return -EINVAL;
+ }
+ disable_irq_nosync(ctrl->irq);
+ }
+
+ temp_args.irqregister.irq_id = irq_id;
+
+ status = copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(temp_args.irqregister));
+
+ return status;
+}
+
+static int xlnk_irq_unregister_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int irq_id;
+ struct xlnk_irq_control *ctrl;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(union xlnk_args));
+ if (status)
+ return -ENOMEM;
+
+ irq_id = temp_args.irqunregister.irq_id;
+ if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
+ return -EINVAL;
+
+ ctrl = xlnk_irq_set[irq_id];
+ if (!ctrl)
+ return -EINVAL;
+
+ xlnk_irq_set[irq_id] = NULL;
+
+ if (ctrl->enabled) {
+ disable_irq_nosync(ctrl->irq);
+ complete(&ctrl->cmp);
+ }
+ free_irq(ctrl->irq, ctrl);
+ kfree(ctrl);
+
+ return 0;
+}
+
+static int xlnk_irq_wait_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status;
+ int irq_id;
+ struct xlnk_irq_control *ctrl;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(temp_args.irqwait));
+ if (status)
+ return -ENOMEM;
+
+ irq_id = temp_args.irqwait.irq_id;
+ if (irq_id < 0 || irq_id >= XLNK_IRQ_POOL_SIZE)
+ return -EINVAL;
+
+ ctrl = xlnk_irq_set[irq_id];
+ if (!ctrl)
+ return -EINVAL;
+
+ if (!ctrl->enabled) {
+ ctrl->enabled = 1;
+ enable_irq(ctrl->irq);
+ }
+
+ if (temp_args.irqwait.polling) {
+ if (!try_wait_for_completion(&ctrl->cmp))
+ temp_args.irqwait.success = 0;
+ else
+ temp_args.irqwait.success = 1;
+ } else {
+ wait_for_completion(&ctrl->cmp);
+ temp_args.irqwait.success = 1;
+ }
+
+ if (temp_args.irqwait.success) {
+ reinit_completion(&ctrl->cmp);
+ ctrl->enabled = 0;
+ }
+
+ status = copy_to_user((void __user *)args,
+ &temp_args,
+ sizeof(temp_args.irqwait));
+
+ return status;
+}
+
+static int xlnk_cachecontrol_ioctl(struct file *filp, unsigned int code,
+ unsigned long args)
+{
+ union xlnk_args temp_args;
+ int status, size;
+ void *kaddr;
+ xlnk_intptr_type paddr;
+ int buf_id;
+
+ status = copy_from_user(&temp_args,
+ (void __user *)args,
+ sizeof(union xlnk_args));
+
+ if (status) {
+ dev_err(xlnk_dev, "Error in copy_from_user. status = %d\n",
+ status);
+ return -ENOMEM;
+ }
+
+ if (!(temp_args.cachecontrol.action == 0 ||
+ temp_args.cachecontrol.action == 1)) {
+ dev_err(xlnk_dev, "Illegal action specified to cachecontrol_ioctl: %d\n",
+ temp_args.cachecontrol.action);
+ return -EINVAL;
+ }
+
+ size = temp_args.cachecontrol.size;
+ paddr = temp_args.cachecontrol.phys_addr;
+
+ spin_lock(&xlnk_buf_lock);
+ buf_id = xlnk_buf_find_by_phys_addr(paddr);
+ kaddr = xlnk_bufpool[buf_id];
+ spin_unlock(&xlnk_buf_lock);
+
+ if (buf_id == 0) {
+ pr_err("Illegal cachecontrol on non-sds_alloc memory");
+ return -EINVAL;
+ }
+
+#if XLNK_SYS_BIT_WIDTH == 32
+ __cpuc_flush_dcache_area(kaddr, size);
+ outer_flush_range(paddr, paddr + size);
+ if (temp_args.cachecontrol.action == 1)
+ outer_inv_range(paddr, paddr + size);
+#else
+ if (temp_args.cachecontrol.action == 1)
+ __dma_map_area(kaddr, size, DMA_FROM_DEVICE);
+ else
+ __dma_map_area(kaddr, size, DMA_TO_DEVICE);
+#endif
+ return 0;
+}
+
+static int xlnk_memop_ioctl(struct file *filp, unsigned long arg_addr)
+{
+ union xlnk_args args;
+ xlnk_intptr_type p_addr = 0;
+ int status = 0;
+ int buf_id;
+ struct xlnk_dmabuf_reg *cp = NULL;
+ int cacheable = 1;
+ enum dma_data_direction dmadir;
+ xlnk_intptr_type page_id;
+ unsigned int page_offset;
+ struct scatterlist sg;
+ unsigned long attrs = 0;
+
+ status = copy_from_user(&args,
+ (void __user *)arg_addr,
+ sizeof(union xlnk_args));
+
+ if (status) {
+ pr_err("Error in copy_from_user. status = %d\n", status);
+ return status;
+ }
+
+ if (!(args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) &&
+ !(args.memop.flags & XLNK_FLAG_MEM_RELEASE)) {
+ pr_err("memop lacks acquire or release flag\n");
+ return -EINVAL;
+ }
+
+ if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE &&
+ args.memop.flags & XLNK_FLAG_MEM_RELEASE) {
+ pr_err("memop has both acquire and release defined\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&xlnk_buf_lock);
+ buf_id = xlnk_buf_find_by_user_addr(args.memop.virt_addr,
+ current->pid);
+ if (buf_id > 0) {
+ cacheable = xlnk_bufcacheable[buf_id];
+ p_addr = xlnk_phyaddr[buf_id] +
+ (args.memop.virt_addr - xlnk_userbuf[buf_id]);
+ } else {
+ struct xlnk_dmabuf_reg *dp;
+
+ list_for_each_entry(dp, &xlnk_dmabuf_list, list) {
+ if (dp->user_vaddr == args.memop.virt_addr) {
+ cp = dp;
+ break;
+ }
+ }
+ }
+ spin_unlock(&xlnk_buf_lock);
+
+ if (buf_id <= 0 && !cp) {
+ pr_err("Error, buffer not found\n");
+ return -EINVAL;
+ }
+
+ dmadir = (enum dma_data_direction)args.memop.dir;
+
+ if (args.memop.flags & XLNK_FLAG_COHERENT || !cacheable)
+ attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
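+ /*
+ * For pool buffers, rebuild a single-entry scatterlist covering the
+ * physically contiguous allocation so it can be mapped/unmapped with
+ * the streaming DMA API below.
+ */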
+ if (buf_id > 0) {
+ page_id = p_addr >> PAGE_SHIFT;
+ page_offset = p_addr - (page_id << PAGE_SHIFT);
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg,
+ pfn_to_page(page_id),
+ args.memop.size,
+ page_offset);
+ sg_dma_len(&sg) = args.memop.size;
+ }
+
+ if (args.memop.flags & XLNK_FLAG_MEM_ACQUIRE) {
+ if (buf_id > 0) {
+ status = get_dma_ops(xlnk_dev)->map_sg(xlnk_dev,
+ &sg,
+ 1,
+ dmadir,
+ attrs);
+ if (!status) {
+ pr_err("Failed to map address\n");
+ return -EINVAL;
+ }
+ args.memop.phys_addr = (xlnk_intptr_type)
+ sg_dma_address(&sg);
+ args.memop.token = (xlnk_intptr_type)
+ sg_dma_address(&sg);
+ status = copy_to_user((void __user *)arg_addr,
+ &args,
+ sizeof(union xlnk_args));
+ if (status)
+ pr_err("Error in copy_to_user. status = %d\n",
+ status);
+ } else {
+ if (cp->dbuf_sg_table->nents != 1) {
+ pr_err("Non-SG-DMA datamovers require physically contiguous DMABUFs. DMABUF is not physically contiguous\n");
+ return -EINVAL;
+ }
+ args.memop.phys_addr = (xlnk_intptr_type)
+ sg_dma_address(cp->dbuf_sg_table->sgl);
+ args.memop.token = 0;
+ status = copy_to_user((void __user *)arg_addr,
+ &args,
+ sizeof(union xlnk_args));
+ if (status)
+ pr_err("Error in copy_to_user. status = %d\n",
+ status);
+ }
+ } else {
+ if (buf_id > 0) {
+ sg_dma_address(&sg) = (dma_addr_t)args.memop.token;
+ get_dma_ops(xlnk_dev)->unmap_sg(xlnk_dev,
+ &sg,
+ 1,
+ dmadir,
+ attrs);
+ }
+ }
+
+ return status;
+}
+
+/* This function provides the IO interface to the bridge driver. */
+static long xlnk_ioctl(struct file *filp,
+ unsigned int code,
+ unsigned long args)
+{
+ if (_IOC_TYPE(code) != XLNK_IOC_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(code) > XLNK_IOC_MAXNR)
+ return -ENOTTY;
+
+ /* some sanity check */
+ switch (code) {
+ case XLNK_IOCALLOCBUF:
+ return xlnk_allocbuf_ioctl(filp, code, args);
+ case XLNK_IOCFREEBUF:
+ return xlnk_freebuf_ioctl(filp, code, args);
+ case XLNK_IOCADDDMABUF:
+ return xlnk_adddmabuf_ioctl(filp, code, args);
+ case XLNK_IOCCLEARDMABUF:
+ return xlnk_cleardmabuf_ioctl(filp, code, args);
+ case XLNK_IOCDMAREQUEST:
+ return xlnk_dmarequest_ioctl(filp, code, args);
+ case XLNK_IOCDMASUBMIT:
+ return xlnk_dmasubmit_ioctl(filp, code, args);
+ case XLNK_IOCDMAWAIT:
+ return xlnk_dmawait_ioctl(filp, code, args);
+ case XLNK_IOCDMARELEASE:
+ return xlnk_dmarelease_ioctl(filp, code, args);
+ case XLNK_IOCDEVREGISTER:
+ return xlnk_devregister_ioctl(filp, code, args);
+ case XLNK_IOCDMAREGISTER:
+ return xlnk_dmaregister_ioctl(filp, code, args);
+ case XLNK_IOCDEVUNREGISTER:
+ return xlnk_devunregister_ioctl(filp, code, args);
+ case XLNK_IOCCACHECTRL:
+ return xlnk_cachecontrol_ioctl(filp, code, args);
+ case XLNK_IOCIRQREGISTER:
+ return xlnk_irq_register_ioctl(filp, code, args);
+ case XLNK_IOCIRQUNREGISTER:
+ return xlnk_irq_unregister_ioctl(filp, code, args);
+ case XLNK_IOCIRQWAIT:
+ return xlnk_irq_wait_ioctl(filp, code, args);
+ case XLNK_IOCSHUTDOWN:
+ return xlnk_shutdown(args);
+ case XLNK_IOCRECRES:
+ return xlnk_recover_resource(args);
+ case XLNK_IOCMEMOP:
+ return xlnk_memop_ioctl(filp, args);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct vm_operations_struct xlnk_vm_ops = {
+ .open = xlnk_vma_open,
+ .close = xlnk_vma_close,
+};
+
+/* This function maps kernel space memory to user space memory. */
+static int xlnk_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int bufid;
+ int status;
+
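+ /*
+ * The buffer id is encoded in the mmap offset: user space maps at a
+ * byte offset of (id << 16), so with vm_pgoff counted in pages the id
+ * is recovered by shifting right by (16 - PAGE_SHIFT).
+ */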
+ bufid = vma->vm_pgoff >> (16 - PAGE_SHIFT);
+ if (bufid < 0 || bufid >= XLNK_BUF_POOL_SIZE)
+ return -EINVAL;
+
+ if (bufid == 0) {
+ unsigned long paddr = virt_to_phys(xlnk_dev_buf);
+
+ status = remap_pfn_range(vma,
+ vma->vm_start,
+ paddr >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ } else {
+ if (xlnk_bufcacheable[bufid] == 0)
+ vma->vm_page_prot =
+ pgprot_noncached(vma->vm_page_prot);
+ status = remap_pfn_range(vma, vma->vm_start,
+ xlnk_phyaddr[bufid]
+ >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ xlnk_userbuf[bufid] = vma->vm_start;
+ xlnk_buf_process[bufid] = current->pid;
+ }
+ if (status) {
+ pr_err("%s failed with code %d\n", __func__, status);
+ return status;
+ }
+
+ xlnk_vma_open(vma);
+ vma->vm_ops = &xlnk_vm_ops;
+ vma->vm_private_data = xlnk_bufpool[bufid];
+
+ return 0;
+}
+
+static void xlnk_vma_open(struct vm_area_struct *vma)
+{
+ xlnk_dev_vmas++;
+}
+
+static void xlnk_vma_close(struct vm_area_struct *vma)
+{
+ xlnk_dev_vmas--;
+}
+
+static int xlnk_shutdown(unsigned long buf)
+{
+ return 0;
+}
+
+static int xlnk_recover_resource(unsigned long buf)
+{
+ xlnk_free_all_buf();
+#ifdef CONFIG_XILINX_DMA_APF
+ xdma_release_all_channels();
+#endif
+ return 0;
+}
+
+module_platform_driver(xlnk_driver);
+
+MODULE_DESCRIPTION("Xilinx APF driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/apf/xlnk.h b/drivers/staging/apf/xlnk.h
new file mode 100644
index 000000000000..cbc2334c2e82
--- /dev/null
+++ b/drivers/staging/apf/xlnk.h
@@ -0,0 +1,175 @@
+#ifndef _XLNK_OS_H
+#define _XLNK_OS_H
+
+#include <linux/stddef.h>
+#include <linux/dmaengine.h>
+#include "xilinx-dma-apf.h"
+#include "xlnk-sysdef.h"
+
+#define XLNK_FLAG_COHERENT 0x00000001
+#define XLNK_FLAG_KERNEL_BUFFER 0x00000002
+#define XLNK_FLAG_DMAPOLLING 0x00000004
+#define XLNK_FLAG_IOMMU_VALID 0x00000008
+#define XLNK_FLAG_PHYSICAL_ADDR 0x00000100
+#define XLNK_FLAG_VIRTUAL_ADDR 0x00000200
+#define XLNK_FLAG_MEM_ACQUIRE 0x00001000
+#define XLNK_FLAG_MEM_RELEASE 0x00002000
+#define CF_FLAG_CACHE_FLUSH_INVALIDATE 0x00000001
+#define CF_FLAG_PHYSICALLY_CONTIGUOUS 0x00000002
+#define CF_FLAG_DMAPOLLING 0x00000004
+#define XLNK_IRQ_LEVEL 0x00000001
+#define XLNK_IRQ_EDGE 0x00000002
+#define XLNK_IRQ_ACTIVE_HIGH 0x00000004
+#define XLNK_IRQ_ACTIVE_LOW 0x00000008
+#define XLNK_IRQ_RESET_REG_VALID 0x00000010
+
+enum xlnk_dma_direction {
+ XLNK_DMA_BI = 0,
+ XLNK_DMA_TO_DEVICE = 1,
+ XLNK_DMA_FROM_DEVICE = 2,
+ XLNK_DMA_NONE = 3,
+};
+
+struct xlnk_dma_transfer_handle {
+ dma_addr_t dma_addr;
+ unsigned long transfer_length;
+ void *kern_addr;
+ unsigned long user_addr;
+ enum dma_data_direction transfer_direction;
+ int sg_effective_length;
+ int flags;
+ struct dma_chan *channel;
+ dma_cookie_t dma_cookie;
+ struct dma_async_tx_descriptor *async_desc;
+ struct completion completion_handle;
+};
+
+struct xlnk_dmabuf_reg {
+ xlnk_int_type dmabuf_fd;
+ xlnk_intptr_type user_vaddr;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ struct sg_table *dbuf_sg_table;
+ int is_mapped;
+ int dma_direction;
+ struct list_head list;
+};
+
+struct xlnk_irq_control {
+ int irq;
+ int enabled;
+ struct completion cmp;
+};
+
+/* CROSSES KERNEL-USER BOUNDARY */
+union xlnk_args {
+ struct __attribute__ ((__packed__)) {
+ xlnk_uint_type len;
+ xlnk_int_type id;
+ xlnk_intptr_type phyaddr;
+ xlnk_byte_type cacheable;
+ } allocbuf;
+ struct __attribute__ ((__packed__)) {
+ xlnk_uint_type id;
+ xlnk_intptr_type buf;
+ } freebuf;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type dmabuf_fd;
+ xlnk_intptr_type user_addr;
+ } dmabuf;
+ struct __attribute__ ((__packed__)) {
+ xlnk_char_type name[64];
+ xlnk_intptr_type dmachan;
+ xlnk_uint_type bd_space_phys_addr;
+ xlnk_uint_type bd_space_size;
+ } dmarequest;
+#define XLNK_MAX_APPWORDS 5
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type dmachan;
+ xlnk_intptr_type buf;
+ xlnk_intptr_type buf2;
+ xlnk_uint_type buf_offset;
+ xlnk_uint_type len;
+ xlnk_uint_type bufflag;
+ xlnk_intptr_type sglist;
+ xlnk_uint_type sgcnt;
+ xlnk_enum_type dmadir;
+ xlnk_uint_type nappwords_i;
+ xlnk_uint_type appwords_i[XLNK_MAX_APPWORDS];
+ xlnk_uint_type nappwords_o;
+ xlnk_uint_type flag;
+ xlnk_intptr_type dmahandle; /* return value */
+ xlnk_uint_type last_bd_index;
+ } dmasubmit;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type dmahandle;
+ xlnk_uint_type nappwords;
+ xlnk_uint_type appwords[XLNK_MAX_APPWORDS];
+ /* appwords array; we only accept up to 5 */
+ xlnk_uint_type flags;
+ } dmawait;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type dmachan;
+ } dmarelease;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type base;
+ xlnk_uint_type size;
+ xlnk_uint_type irqs[8];
+ xlnk_char_type name[32];
+ xlnk_uint_type id;
+ } devregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type base;
+ } devunregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_char_type name[32];
+ xlnk_uint_type id;
+ xlnk_intptr_type base;
+ xlnk_uint_type size;
+ xlnk_uint_type chan_num;
+ xlnk_uint_type chan0_dir;
+ xlnk_uint_type chan0_irq;
+ xlnk_uint_type chan0_poll_mode;
+ xlnk_uint_type chan0_include_dre;
+ xlnk_uint_type chan0_data_width;
+ xlnk_uint_type chan1_dir;
+ xlnk_uint_type chan1_irq;
+ xlnk_uint_type chan1_poll_mode;
+ xlnk_uint_type chan1_include_dre;
+ xlnk_uint_type chan1_data_width;
+ } dmaregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type phys_addr;
+ xlnk_uint_type size;
+ xlnk_int_type action;
+ } cachecontrol;
+ struct __attribute__ ((__packed__)) {
+ xlnk_intptr_type virt_addr;
+ xlnk_int_type size;
+ xlnk_enum_type dir;
+ xlnk_int_type flags;
+ xlnk_intptr_type phys_addr;
+ xlnk_intptr_type token;
+ } memop;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type irq;
+ xlnk_int_type subirq;
+ xlnk_uint_type type;
+ xlnk_intptr_type control_base;
+ xlnk_intptr_type reset_reg_base;
+ xlnk_uint_type reset_offset;
+ xlnk_uint_type reset_valid_high;
+ xlnk_uint_type reset_valid_low;
+ xlnk_int_type irq_id;
+ } irqregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type irq_id;
+ } irqunregister;
+ struct __attribute__ ((__packed__)) {
+ xlnk_int_type irq_id;
+ xlnk_int_type polling;
+ xlnk_int_type success;
+ } irqwait;
+};
+
+#endif
diff --git a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
index 15b7a82f4b1e..eb329e0cdc84 100644
--- a/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
+++ b/drivers/staging/clocking-wizard/clk-xlnx-clock-wizard.c
@@ -26,19 +26,40 @@
#define WZRD_CLKFBOUT_MULT_SHIFT 8
#define WZRD_CLKFBOUT_MULT_MASK (0xff << WZRD_CLKFBOUT_MULT_SHIFT)
+#define WZRD_CLKFBOUT_FRAC_SHIFT 16
+#define WZRD_CLKFBOUT_FRAC_MASK (0x3ff << WZRD_CLKFBOUT_FRAC_SHIFT)
#define WZRD_DIVCLK_DIVIDE_SHIFT 0
#define WZRD_DIVCLK_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
#define WZRD_CLKOUT_DIVIDE_SHIFT 0
+#define WZRD_CLKOUT_DIVIDE_WIDTH 8
#define WZRD_CLKOUT_DIVIDE_MASK (0xff << WZRD_DIVCLK_DIVIDE_SHIFT)
+#define WZRD_CLKOUT_FRAC_SHIFT 8
+#define WZRD_CLKOUT_FRAC_MASK 0x3ff
+
+#define WZRD_DR_MAX_INT_DIV_VALUE 255
+#define WZRD_DR_NUM_RETRIES 10000
+#define WZRD_DR_STATUS_REG_OFFSET 0x04
+#define WZRD_DR_LOCK_BIT_MASK 0x00000001
+#define WZRD_DR_INIT_REG_OFFSET 0x25C
+#define WZRD_DR_DIV_TO_PHASE_OFFSET 4
+#define WZRD_DR_BEGIN_DYNA_RECONF 0x03
+
+/* Get the mask from width */
+#define div_mask(width) ((1 << (width)) - 1)
+
+/* Extract divider instance from clock hardware instance */
+#define to_clk_wzrd_divider(_hw) container_of(_hw, struct clk_wzrd_divider, hw)
enum clk_wzrd_int_clks {
wzrd_clk_mul,
wzrd_clk_mul_div,
+ wzrd_clk_mul_frac,
wzrd_clk_int_max
};
/**
- * struct clk_wzrd:
+ * struct clk_wzrd - Clock wizard private data structure
+ *
* @clk_data: Clock data
* @nb: Notifier block
* @base: Memory base
@@ -61,6 +82,29 @@ struct clk_wzrd {
bool suspended;
};
+/**
+ * struct clk_wzrd_divider - clock divider specific to clk_wzrd
+ *
+ * @hw: handle between common and hardware-specific interfaces
+ * @base: base address of register containing the divider
+ * @offset: offset address of register containing the divider
+ * @shift: shift to the divider bit field
+ * @width: width of the divider bit field
+ * @flags: clk_wzrd divider flags
+ * @table: array of value/divider pairs, last entry should have div = 0
+ * @lock: register lock
+ */
+struct clk_wzrd_divider {
+ struct clk_hw hw;
+ void __iomem *base;
+ u16 offset;
+ u8 shift;
+ u8 width;
+ u8 flags;
+ const struct clk_div_table *table;
+ spinlock_t *lock; /* divider lock */
+};
+
#define to_clk_wzrd(_nb) container_of(_nb, struct clk_wzrd, nb)
/* maximum frequencies for input/output clocks per speed grade */
@@ -70,6 +114,319 @@ static const unsigned long clk_wzrd_max_freq[] = {
1066000000UL
};
+/* spin lock variable for clk_wzrd */
+static DEFINE_SPINLOCK(clkwzrd_lock);
+
+static unsigned long clk_wzrd_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+ unsigned int val;
+
+ val = readl(div_addr) >> divider->shift;
+ val &= div_mask(divider->width);
+
+ return divider_recalc_rate(hw, parent_rate, val, divider->table,
+ divider->flags, divider->width);
+}
+
+static int clk_wzrd_dynamic_reconfig(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int err = 0;
+ u16 retries;
+ u32 value;
+ unsigned long flags = 0;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ value = DIV_ROUND_CLOSEST(parent_rate, rate);
+
+ /* Cap the value to max */
+ if (value > WZRD_DR_MAX_INT_DIV_VALUE)
+ value = WZRD_DR_MAX_INT_DIV_VALUE;
+
+ /* Set divisor and clear phase offset */
+ writel(value, div_addr);
+ writel(0x00, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ retries--;
+ }
+
+ if (retries == 0) {
+ err = -ETIMEDOUT;
+ goto err_reconfig;
+ }
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ retries--;
+ }
+
+ if (retries == 0)
+ err = -ETIMEDOUT;
+
+err_reconfig:
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return err;
+}
+
+static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u8 div;
+
+ /*
+ * since we do not change the parent rate we just round the rate to
+ * the closest achievable value
+ */
+ div = DIV_ROUND_CLOSEST(*prate, rate);
+
+ return (*prate / div);
+}
+
+static const struct clk_ops clk_wzrd_clk_divider_ops = {
+ .round_rate = clk_wzrd_round_rate,
+ .set_rate = clk_wzrd_dynamic_reconfig,
+ .recalc_rate = clk_wzrd_recalc_rate,
+};
+
+static unsigned long clk_wzrd_recalc_ratef(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned int val;
+ u32 div, frac;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+
+ val = readl(div_addr);
+ div = val & div_mask(divider->width);
+ frac = (val >> WZRD_CLKOUT_FRAC_SHIFT) & WZRD_CLKOUT_FRAC_MASK;
+
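+ /*
+ * Example: with parent_rate = 100 MHz, div = 4 and frac = 500 the
+ * output is 100000000 * 1000 / (4000 + 500) = 22222222 Hz, i.e. an
+ * effective divide by 4.5.
+ */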
+ return ((parent_rate * 1000) / ((div * 1000) + frac));
+}
+
+static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int err = 0;
+ u16 retries;
+ u32 value, pre;
+ unsigned long flags = 0;
+ unsigned long rate_div, f, clockout0_div;
+ struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
+ void __iomem *div_addr = divider->base + divider->offset;
+
+ if (divider->lock)
+ spin_lock_irqsave(divider->lock, flags);
+ else
+ __acquire(divider->lock);
+
+ rate_div = ((parent_rate * 1000) / rate);
+ clockout0_div = rate_div / 1000;
+
+ pre = DIV_ROUND_CLOSEST((parent_rate * 1000), rate);
+ f = (u32)(pre - (clockout0_div * 1000));
+ f = f & WZRD_CLKOUT_FRAC_MASK;
+
+ value = ((f << WZRD_CLKOUT_DIVIDE_WIDTH) | (clockout0_div &
+ WZRD_CLKOUT_DIVIDE_MASK));
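+ /*
+ * Example: parent_rate = 100 MHz and rate = 40 MHz give rate_div =
+ * 2500, clockout0_div = 2 and f = 500, so the register is programmed
+ * with an integer divide of 2 and a fractional part of 0.500, i.e. an
+ * effective divide by 2.5.
+ */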
+
+ /* Set divisor and clear phase offset */
+ writel(value, div_addr);
+ writel(0x0, div_addr + WZRD_DR_DIV_TO_PHASE_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ retries--;
+ }
+
+ if (!retries) {
+ err = -ETIMEDOUT;
+ goto err_reconfig;
+ }
+
+ /* Initiate reconfiguration */
+ writel(WZRD_DR_BEGIN_DYNA_RECONF,
+ divider->base + WZRD_DR_INIT_REG_OFFSET);
+
+ /* Check status register */
+ retries = WZRD_DR_NUM_RETRIES;
+ while (retries) {
+ if (readl(divider->base + WZRD_DR_STATUS_REG_OFFSET) &
+ WZRD_DR_LOCK_BIT_MASK)
+ break;
+ retries--;
+ }
+
+ if (!retries)
+ err = -ETIMEDOUT;
+
+err_reconfig:
+ if (divider->lock)
+ spin_unlock_irqrestore(divider->lock, flags);
+ else
+ __release(divider->lock);
+
+ return err;
+}
+
+static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return rate;
+}
+
+static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
+ .round_rate = clk_wzrd_round_rate_f,
+ .set_rate = clk_wzrd_dynamic_reconfig_f,
+ .recalc_rate = clk_wzrd_recalc_ratef,
+};
+
+static struct clk *clk_wzrd_register_divf(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base, u16 offset,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_wzrd_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_wzrd_clk_divider_ops_f;
+
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->base = base;
+ div->offset = offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ return ERR_PTR(ret);
+ }
+
+ return hw->clk;
+}
+
+static struct clk *clk_wzrd_register_divider(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ unsigned long flags,
+ void __iomem *base, u16 offset,
+ u8 shift, u8 width,
+ u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_wzrd_divider *div;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
+ if (width + shift > 16) {
+ pr_warn("divider value exceeds LOWORD field\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* allocate the divider */
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_ro_ops;
+ else
+ init.ops = &clk_wzrd_clk_divider_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+
+ /* struct clk_divider assignments */
+ div->base = base;
+ div->offset = offset;
+ div->shift = shift;
+ div->width = width;
+ div->flags = clk_divider_flags;
+ div->lock = lock;
+ div->hw.init = &init;
+ div->table = table;
+
+ /* register the clock */
+ hw = &div->hw;
+ ret = clk_hw_register(dev, hw);
+ if (ret) {
+ kfree(div);
+ return ERR_PTR(ret);
+ }
+
+ return hw->clk;
+}
+
static int clk_wzrd_clk_notifier(struct notifier_block *nb, unsigned long event,
void *data)
{
@@ -131,7 +488,7 @@ static SIMPLE_DEV_PM_OPS(clk_wzrd_dev_pm_ops, clk_wzrd_suspend,
static int clk_wzrd_probe(struct platform_device *pdev)
{
int i, ret;
- u32 reg;
+ u32 reg, reg_f, mult;
unsigned long rate;
const char *clk_name;
struct clk_wzrd *clk_wzrd;
@@ -183,17 +540,13 @@ static int clk_wzrd_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- /* we don't support fractional div/mul yet */
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
- WZRD_CLKFBOUT_FRAC_EN;
- reg |= readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2)) &
- WZRD_CLKOUT0_FRAC_EN;
- if (reg)
- dev_warn(&pdev->dev, "fractional div/mul not supported\n");
-
/* register multiplier */
reg = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
WZRD_CLKFBOUT_MULT_MASK) >> WZRD_CLKFBOUT_MULT_SHIFT;
+ reg_f = (readl(clk_wzrd->base + WZRD_CLK_CFG_REG(0)) &
+ WZRD_CLKFBOUT_FRAC_MASK) >> WZRD_CLKFBOUT_FRAC_SHIFT;
+
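+ /*
+ * Combine the integer and fractional multiplier fields: e.g. a
+ * multiply of 5.125 is stored as reg = 5, reg_f = 125, giving
+ * mult = 5125, which is then registered below as a fixed factor of
+ * 5125/1000 so the fractional part is preserved.
+ */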
+ mult = ((reg * 1000) + reg_f);
clk_name = kasprintf(GFP_KERNEL, "%s_mul", dev_name(&pdev->dev));
if (!clk_name) {
ret = -ENOMEM;
@@ -202,7 +555,7 @@ static int clk_wzrd_probe(struct platform_device *pdev)
clk_wzrd->clks_internal[wzrd_clk_mul] = clk_register_fixed_factor
(&pdev->dev, clk_name,
__clk_get_name(clk_wzrd->clk_in1),
- 0, reg, 1);
+ 0, mult, 1000);
kfree(clk_name);
if (IS_ERR(clk_wzrd->clks_internal[wzrd_clk_mul])) {
dev_err(&pdev->dev, "unable to register fixed-factor clock\n");
@@ -240,11 +593,24 @@ static int clk_wzrd_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_rm_int_clks;
}
- reg = readl(clk_wzrd->base + WZRD_CLK_CFG_REG(2) + i * 12);
- reg &= WZRD_CLKOUT_DIVIDE_MASK;
- reg >>= WZRD_CLKOUT_DIVIDE_SHIFT;
- clk_wzrd->clkout[i] = clk_register_fixed_factor
- (&pdev->dev, clkout_name, clk_name, 0, 1, reg);
+ if (!i)
+ clk_wzrd->clkout[i] = clk_wzrd_register_divf
+ (&pdev->dev, clkout_name,
+ clk_name, 0,
+ clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ NULL, &clkwzrd_lock);
+ else
+ clk_wzrd->clkout[i] = clk_wzrd_register_divider
+ (&pdev->dev, clkout_name,
+ clk_name, 0,
+ clk_wzrd->base, (WZRD_CLK_CFG_REG(2) + i * 12),
+ WZRD_CLKOUT_DIVIDE_SHIFT,
+ WZRD_CLKOUT_DIVIDE_WIDTH,
+ CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
+ NULL, &clkwzrd_lock);
if (IS_ERR(clk_wzrd->clkout[i])) {
int j;
diff --git a/drivers/staging/clocking-wizard/dt-binding.txt b/drivers/staging/clocking-wizard/dt-binding.txt
index 723271e93316..0439af67930b 100644
--- a/drivers/staging/clocking-wizard/dt-binding.txt
+++ b/drivers/staging/clocking-wizard/dt-binding.txt
@@ -9,6 +9,7 @@ http://www.xilinx.com/support/documentation/ip_documentation/clk_wiz/v5_1/pg065-
Required properties:
- compatible: Must be 'xlnx,clocking-wizard'
+ - #clock-cells: Number of cells in a clock specifier. Should be 1
- reg: Base and size of the cores register space
- clocks: Handle to input clock
- clock-names: Tuple containing 'clk_in1' and 's_axi_aclk'
@@ -19,12 +20,13 @@ Optional properties:
Example:
clock-generator@40040000 {
+ #clock-cells = <1>;
reg = <0x40040000 0x1000>;
compatible = "xlnx,clocking-wizard";
speed-grade = <1>;
clock-names = "clk_in1", "s_axi_aclk";
clocks = <&clkc 15>, <&clkc 15>;
- clock-output-names = "clk_out0", "clk_out1", "clk_out2",
+ clock-output-names = "clk_out1", "clk_out2",
"clk_out3", "clk_out4", "clk_out5",
"clk_out6", "clk_out7";
};
diff --git a/drivers/staging/comedi/drivers/adv_pci1760.c b/drivers/staging/comedi/drivers/adv_pci1760.c
index f460f21efb90..0f6faf263c82 100644
--- a/drivers/staging/comedi/drivers/adv_pci1760.c
+++ b/drivers/staging/comedi/drivers/adv_pci1760.c
@@ -59,7 +59,7 @@
#define PCI1760_CMD_CLR_IMB2 0x00 /* Clears IMB2 */
#define PCI1760_CMD_SET_DO 0x01 /* Set output state */
#define PCI1760_CMD_GET_DO 0x02 /* Read output status */
-#define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */
+#define PCI1760_CMD_GET_STATUS 0x07 /* Read current status */
#define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */
#define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */
#define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index ef4c7c8a2b71..9e60d2a0edc1 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -87,6 +87,8 @@ struct waveform_private {
struct comedi_device *dev; /* parent comedi device */
u64 ao_last_scan_time; /* time of previous AO scan in usec */
unsigned int ao_scan_period; /* AO scan period in usec */
+ bool ai_timer_enable:1; /* should AI timer be running? */
+ bool ao_timer_enable:1; /* should AO timer be running? */
unsigned short ao_loopbacks[N_CHANS];
};
@@ -236,8 +238,12 @@ static void waveform_ai_timer(struct timer_list *t)
time_increment = devpriv->ai_convert_time - now;
else
time_increment = 1;
- mod_timer(&devpriv->ai_timer,
- jiffies + usecs_to_jiffies(time_increment));
+ spin_lock(&dev->spinlock);
+ if (devpriv->ai_timer_enable) {
+ mod_timer(&devpriv->ai_timer,
+ jiffies + usecs_to_jiffies(time_increment));
+ }
+ spin_unlock(&dev->spinlock);
}
overrun:
@@ -393,9 +399,12 @@ static int waveform_ai_cmd(struct comedi_device *dev,
* Seem to need an extra jiffy here, otherwise timer expires slightly
* early!
*/
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ai_timer_enable = true;
devpriv->ai_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
add_timer(&devpriv->ai_timer);
+ spin_unlock_bh(&dev->spinlock);
return 0;
}
@@ -404,6 +413,9 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ai_timer_enable = false;
+ spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
del_timer(&devpriv->ai_timer);
@@ -495,8 +507,12 @@ static void waveform_ao_timer(struct timer_list *t)
unsigned int time_inc = devpriv->ao_last_scan_time +
devpriv->ao_scan_period - now;
- mod_timer(&devpriv->ao_timer,
- jiffies + usecs_to_jiffies(time_inc));
+ spin_lock(&dev->spinlock);
+ if (devpriv->ao_timer_enable) {
+ mod_timer(&devpriv->ao_timer,
+ jiffies + usecs_to_jiffies(time_inc));
+ }
+ spin_unlock(&dev->spinlock);
}
underrun:
@@ -517,9 +533,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
async->inttrig = NULL;
devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ao_timer_enable = true;
devpriv->ao_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
add_timer(&devpriv->ao_timer);
+ spin_unlock_bh(&dev->spinlock);
return 1;
}
@@ -604,6 +623,9 @@ static int waveform_ao_cancel(struct comedi_device *dev,
struct waveform_private *devpriv = dev->private;
s->async->inttrig = NULL;
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ao_timer_enable = false;
+ spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
del_timer(&devpriv->ao_timer);
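Editor's note: the comedi_test hunks above close a race between timer cancellation and the timer callback re-arming itself. The cancel paths clear a new ai_timer_enable/ao_timer_enable flag under dev->spinlock, and the callbacks only call mod_timer() while holding the same lock and seeing the flag still set. A minimal user-space sketch of that handshake follows, with a pthread mutex standing in for the spinlock and a worker thread standing in for the kernel timer; all names are illustrative and nothing below is taken from the driver.

/* Sketch: "re-arm only while enabled" pattern, user-space analogue. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool timer_enabled;              /* mirrors devpriv->ai_timer_enable */

static void *timer_cb(void *arg)
{
        (void)arg;
        for (;;) {
                usleep(1000);                   /* pretend periodic work */
                pthread_mutex_lock(&lock);
                if (!timer_enabled) {           /* cancel already ran: do not re-arm */
                        pthread_mutex_unlock(&lock);
                        return NULL;
                }
                /* re-arming would go here (mod_timer() in the driver) */
                pthread_mutex_unlock(&lock);
        }
}

static void cancel(pthread_t t)
{
        pthread_mutex_lock(&lock);
        timer_enabled = false;                  /* callback sees this before re-arming */
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);                  /* del_timer_sync() analogue */
}

int main(void)
{
        pthread_t t;

        pthread_mutex_lock(&lock);
        timer_enabled = true;
        pthread_mutex_unlock(&lock);
        pthread_create(&t, NULL, timer_cb, NULL);
        usleep(10000);
        cancel(&t);
        puts("stopped cleanly");
        return 0;
}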
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index ccc65cfc519f..51b814e44783 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -642,33 +642,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2)
- return -ENODEV;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep_desc) ||
- usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
+ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+ int ret;
- if (usb_endpoint_is_int_out(ep_desc) ||
- usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
+ if (devpriv->model == VMK8061_MODEL)
+ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+ &ep_tx_desc, NULL, NULL);
+ else
+ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+ &ep_rx_desc, &ep_tx_desc);
- if (!devpriv->ep_rx || !devpriv->ep_tx)
+ if (ret)
return -ENODEV;
+ devpriv->ep_rx = ep_rx_desc;
+ devpriv->ep_tx = ep_tx_desc;
+
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;
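Editor's note: the vmk80xx change replaces an open-coded endpoint scan with usb_find_common_endpoints(), which either returns the requested bulk or interrupt in/out endpoint descriptors or fails, so the driver can no longer proceed with a missing ep_rx or ep_tx. A rough user-space analogue of the "find both endpoints or bail out" shape, with made-up types, is shown below.

/* Sketch: find the two required endpoints or fail cleanly. */
#include <stdio.h>
#include <stddef.h>

enum ep_type { EP_BULK_IN, EP_BULK_OUT, EP_INT_IN, EP_INT_OUT };

struct ep { enum ep_type type; int addr; };

/* Returns 0 and fills *rx/*tx only if both wanted endpoints exist. */
static int find_endpoints(const struct ep *eps, size_t n,
                          enum ep_type want_rx, enum ep_type want_tx,
                          const struct ep **rx, const struct ep **tx)
{
        size_t i;

        *rx = *tx = NULL;
        for (i = 0; i < n; i++) {
                if (!*rx && eps[i].type == want_rx)
                        *rx = &eps[i];
                else if (!*tx && eps[i].type == want_tx)
                        *tx = &eps[i];
        }
        return (*rx && *tx) ? 0 : -1;   /* -ENODEV in the driver */
}

int main(void)
{
        const struct ep eps[] = { { EP_INT_IN, 0x81 }, { EP_INT_OUT, 0x01 } };
        const struct ep *rx, *tx;

        if (find_endpoints(eps, 2, EP_INT_IN, EP_INT_OUT, &rx, &tx))
                return 1;
        printf("rx=0x%02x tx=0x%02x\n", rx->addr, tx->addr);
        return 0;
}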
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index cc4c18c3fb36..7d18ad68be26 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -2593,10 +2593,15 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
req->unaligned = false;
if (req->unaligned) {
- if (!ep->virt_buf)
+ if (!ep->virt_buf) {
ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
&ep->phys_buf,
GFP_ATOMIC | GFP_DMA);
+ if (!ep->virt_buf) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -ENOMEM;
+ }
+ }
if (ep->epnum > 0) {
if (ep->direct == USB_DIR_IN)
memcpy(ep->virt_buf, req->req.buf,
diff --git a/drivers/staging/fclk/Kconfig b/drivers/staging/fclk/Kconfig
new file mode 100644
index 000000000000..5f68261a206d
--- /dev/null
+++ b/drivers/staging/fclk/Kconfig
@@ -0,0 +1,9 @@
+#
+# Xilinx PL clk enabler
+#
+
+config XILINX_FCLK
+ tristate "Xilinx PL clock enabler"
+ depends on COMMON_CLK && OF
+ ---help---
+ Support for the Xilinx fclk clock enabler.
diff --git a/drivers/staging/fclk/Makefile b/drivers/staging/fclk/Makefile
new file mode 100644
index 000000000000..71723036c94e
--- /dev/null
+++ b/drivers/staging/fclk/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XILINX_FCLK) += xilinx_fclk.o
diff --git a/drivers/staging/fclk/TODO b/drivers/staging/fclk/TODO
new file mode 100644
index 000000000000..912325fe5f4d
--- /dev/null
+++ b/drivers/staging/fclk/TODO
@@ -0,0 +1,2 @@
+TODO:
+ - Remove this hack and clock adapt all the drivers.
diff --git a/drivers/staging/fclk/dt-binding.txt b/drivers/staging/fclk/dt-binding.txt
new file mode 100644
index 000000000000..23521608b4a8
--- /dev/null
+++ b/drivers/staging/fclk/dt-binding.txt
@@ -0,0 +1,16 @@
+Binding for Xilinx pl clocks
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+ - compatible: Must be 'xlnx,fclk'
+ - clocks: Handle to input clock
+
+Example:
+ fclk3: fclk3 {
+ status = "disabled";
+ compatible = "xlnx,fclk";
+ clocks = <&clkc 71>;
+ };
diff --git a/drivers/staging/fclk/xilinx_fclk.c b/drivers/staging/fclk/xilinx_fclk.c
new file mode 100644
index 000000000000..189928b8dd79
--- /dev/null
+++ b/drivers/staging/fclk/xilinx_fclk.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2017 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct fclk_state {
+ struct device *dev;
+ struct clk *pl;
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id fclk_of_match[] = {
+ { .compatible = "xlnx,fclk",},
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, fclk_of_match);
+
+static ssize_t set_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fclk_state *st = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(st->pl));
+}
+
+static ssize_t set_rate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = 0;
+ unsigned long rate;
+ struct fclk_state *st = dev_get_drvdata(dev);
+
+ ret = kstrtoul(buf, 0, &rate);
+ if (ret)
+ return -EINVAL;
+
+ rate = clk_round_rate(st->pl, rate);
+ ret = clk_set_rate(st->pl, rate);
+
+ return ret ? ret : count;
+}
+
+static DEVICE_ATTR_RW(set_rate);
+
+static const struct attribute *fclk_ctrl_attrs[] = {
+ &dev_attr_set_rate.attr,
+ NULL,
+};
+
+static const struct attribute_group fclk_ctrl_attr_grp = {
+ .attrs = (struct attribute **)fclk_ctrl_attrs,
+};
+
+static int fclk_probe(struct platform_device *pdev)
+{
+ struct fclk_state *st;
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->dev = dev;
+ platform_set_drvdata(pdev, st);
+
+ st->pl = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(st->pl))
+ return PTR_ERR(st->pl);
+
+ ret = clk_prepare_enable(st->pl);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
+ ret = sysfs_create_group(&dev->kobj, &fclk_ctrl_attr_grp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int fclk_remove(struct platform_device *pdev)
+{
+ struct fclk_state *st = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(st->pl);
+ return 0;
+}
+
+static struct platform_driver fclk_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = fclk_of_match,
+ },
+ .probe = fclk_probe,
+ .remove = fclk_remove,
+};
+
+module_platform_driver(fclk_driver);
+
+MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>");
+MODULE_DESCRIPTION("fclk enable");
+MODULE_LICENSE("GPL v2");
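Editor's note: the new fclk driver exposes a single read/write sysfs attribute, set_rate, whose show callback reports clk_get_rate() and whose store callback rounds and applies the requested rate. Assuming the device ends up at a path such as /sys/devices/platform/fclk3 (hypothetical; the real path depends on the device-tree node), a small user-space C snippet to request and read back a rate could look like the sketch below.

/* Sketch: driving the fclk "set_rate" sysfs attribute from user space.
 * The path is hypothetical and only for illustration.
 */
#include <stdio.h>

int main(void)
{
        const char *attr = "/sys/devices/platform/fclk3/set_rate";
        unsigned long rate;
        FILE *f;

        f = fopen(attr, "w");
        if (!f)
                return 1;
        fprintf(f, "%lu\n", 100000000UL);       /* ask for 100 MHz */
        fclose(f);

        f = fopen(attr, "r");
        if (!f)
                return 1;
        if (fscanf(f, "%lu", &rate) == 1)
                printf("effective rate: %lu Hz\n", rate);
        fclose(f);
        return 0;
}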
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index d2672b65c3f4..e59bb27236b9 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -100,15 +100,15 @@ static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
static struct gb_channel *get_channel_from_mode(struct gb_light *light,
u32 mode)
{
- struct gb_channel *channel = NULL;
+ struct gb_channel *channel;
int i;
for (i = 0; i < light->channels_count; i++) {
channel = &light->channels[i];
- if (channel && channel->mode == mode)
- break;
+ if (channel->mode == mode)
+ return channel;
}
- return channel;
+ return NULL;
}
static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
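Editor's note: the greybus change fixes get_channel_from_mode() so a failed lookup returns NULL instead of the last channel scanned. The general shape, returning from inside the loop on a match and NULL after it, in a standalone form with invented names:

/* Sketch: "return the match or NULL" lookup. */
#include <stdio.h>
#include <stddef.h>

struct channel { unsigned int mode; const char *name; };

static struct channel *find_by_mode(struct channel *chans, size_t n, unsigned int mode)
{
        size_t i;

        for (i = 0; i < n; i++) {
                if (chans[i].mode == mode)
                        return &chans[i];       /* found: return immediately */
        }
        return NULL;                            /* no match: never the last entry */
}

int main(void)
{
        struct channel chans[] = { { 1, "torch" }, { 2, "flash" } };
        struct channel *c = find_by_mode(chans, 2, 3);

        printf("%s\n", c ? c->name : "not found");
        return 0;
}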
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index af0bcf95ee8a..54388660021e 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -624,7 +624,7 @@ static void ad5933_work(struct work_struct *work)
struct ad5933_state, work.work);
struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
__be16 buf[2];
- int val[2];
+ u16 val[2];
unsigned char status;
int ret;
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index ed404355ea4c..a00693c61870 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -101,7 +101,7 @@ struct ad2s1210_state {
static const int ad2s1210_mode_vals[4][2] = {
[MOD_POS] = { 0, 0 },
[MOD_VEL] = { 0, 1 },
- [MOD_CONFIG] = { 1, 0 },
+ [MOD_CONFIG] = { 1, 1 },
};
static inline void ad2s1210_set_mode(enum ad2s1210_mode mode,
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index e61bd8e1d246..b565c26ca72f 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -1584,8 +1584,10 @@ static int ks_wlan_set_encode_ext(struct net_device *dev,
commit |= SME_WEP_FLAG;
}
if (enc->key_len) {
- memcpy(&key->key_val[0], &enc->key[0], enc->key_len);
- key->key_len = enc->key_len;
+ int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX);
+
+ memcpy(&key->key_val[0], &enc->key[0], key_len);
+ key->key_len = key_len;
commit |= (SME_WEP_VAL1 << index);
}
break;
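Editor's note: the ks7010 fix clamps the key length to IW_ENCODING_TOKEN_MAX before the memcpy(), so an oversized enc->key_len can no longer overflow key->key_val. The same bounded-copy idea in a self-contained form (the buffer size is an arbitrary stand-in):

/* Sketch: clamp a caller-supplied length before copying into a fixed buffer. */
#include <stdio.h>
#include <string.h>

#define TOKEN_MAX 64    /* stand-in for IW_ENCODING_TOKEN_MAX */

struct key {
        unsigned char val[TOKEN_MAX];
        size_t len;
};

static void set_key(struct key *k, const unsigned char *src, size_t src_len)
{
        size_t len = src_len > TOKEN_MAX ? TOKEN_MAX : src_len; /* clamp_val() analogue */

        memcpy(k->val, src, len);
        k->len = len;
}

int main(void)
{
        unsigned char big[128] = { 0 };
        struct key k;

        set_key(&k, big, sizeof(big));  /* 128 requested, only 64 copied */
        printf("stored %zu bytes\n", k.len);
        return 0;
}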
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index a15d970adb98..135e1e64c244 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -800,6 +800,7 @@ static int ipu_csc_scaler_release(struct file *file)
dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 53239ea67fe4..ee9fed6fc67e 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -1115,6 +1115,11 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
/* Initialize subdev media entity */
+ imgu_sd->subdev.entity.ops = &imgu_media_ops;
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
imgu_sd->subdev_pads);
if (r) {
@@ -1122,11 +1127,6 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
"failed initialize subdev media entity (%d)\n", r);
return r;
}
- imgu_sd->subdev.entity.ops = &imgu_media_ops;
- for (i = 0; i < IMGU_NODE_NUM; i++) {
- imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
/* Initialize subdev */
v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
@@ -1221,15 +1221,15 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
}
/* Initialize media entities */
+ node->vdev_pad.flags = node->output ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+ vdev->entity.ops = NULL;
r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
if (r) {
dev_err(dev, "failed initialize media entity (%d)\n", r);
mutex_destroy(&node->lock);
return r;
}
- node->vdev_pad.flags = node->output ?
- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
- vdev->entity.ops = NULL;
/* Initialize vbq */
vbq->type = node->vdev_fmt.type;
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 8dd1396909d7..a242bbe23ba2 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -1074,6 +1074,7 @@ static int vdec_probe(struct platform_device *pdev)
err_vdev_release:
video_device_release(vdev);
+ v4l2_device_unregister(&core->v4l2_dev);
return ret;
}
@@ -1082,6 +1083,7 @@ static int vdec_remove(struct platform_device *pdev)
struct amvdec_core *core = platform_get_drvdata(pdev);
video_unregister_device(core->vdev_dec);
+ v4l2_device_unregister(&core->v4l2_dev);
return 0;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index e80e82a276e9..6ad448899779 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -323,6 +323,8 @@ static int cedrus_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
+ platform_set_drvdata(pdev, dev);
+
dev->vfd = cedrus_video_device;
dev->dev = &pdev->dev;
dev->pdev = pdev;
@@ -392,8 +394,6 @@ static int cedrus_probe(struct platform_device *pdev)
goto err_m2m_mc;
}
- platform_set_drvdata(pdev, dev);
-
return 0;
err_m2m_mc:
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 64c979155a49..774abedad987 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -47,7 +47,7 @@ MODULE_PARM_DESC(fcnt, "Num of frames per sub-buffer for sync channels as a powe
static DEFINE_SPINLOCK(dim_lock);
static void dim2_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(dim2_tasklet, dim2_tasklet_fn, 0);
+static DECLARE_TASKLET_OLD(dim2_tasklet, dim2_tasklet_fn);
/**
* struct hdm_channel - private structure to keep channel specific data
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index a28ade050e34..d84a4cb9dd67 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -41,7 +41,7 @@
#endif
static void cvm_oct_tx_do_cleanup(unsigned long arg);
-static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
+static DECLARE_TASKLET_OLD(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup);
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index bcbf0c8cd420..be377e75703b 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -767,6 +767,7 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
else
netif_wake_queue(dev);
+ priv->bfirst_after_down = false;
return 0;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 20e494186c9e..458ecca00ba1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -185,7 +185,6 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev);
static void _rtl92e_dm_deinit_fsync(struct net_device *dev);
static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev);
-static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev);
static void _rtl92e_dm_check_fsync(struct net_device *dev);
static void _rtl92e_dm_check_rf_ctrl_gpio(void *data);
static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t);
@@ -238,8 +237,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
if (priv->being_init_adapter)
return;
- _rtl92e_dm_check_ac_dc_power(dev);
-
_rtl92e_dm_check_txrateandretrycount(dev);
_rtl92e_dm_check_edca_turbo(dev);
@@ -257,30 +254,6 @@ void rtl92e_dm_watchdog(struct net_device *dev)
_rtl92e_dm_cts_to_self(dev);
}
-static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev)
-{
- struct r8192_priv *priv = rtllib_priv(dev);
- static char const ac_dc_script[] = "/etc/acpi/wireless-rtl-ac-dc-power.sh";
- char *argv[] = {(char *)ac_dc_script, DRV_NAME, NULL};
- static char *envp[] = {"HOME=/",
- "TERM=linux",
- "PATH=/usr/bin:/bin",
- NULL};
-
- if (priv->ResetProgress == RESET_TYPE_SILENT) {
- RT_TRACE((COMP_INIT | COMP_POWER | COMP_RF),
- "GPIOChangeRFWorkItemCallBack(): Silent Reset!!!!!!!\n");
- return;
- }
-
- if (priv->rtllib->state != RTLLIB_LINKED)
- return;
- call_usermodehelper(ac_dc_script, argv, envp, UMH_WAIT_PROC);
-
- return;
-};
-
-
void rtl92e_init_adaptive_rate(struct net_device *dev)
{
@@ -1800,10 +1773,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
u8 tmp1byte;
enum rt_rf_power_state eRfPowerStateToSet;
bool bActuallySet = false;
- char *argv[3];
- static char const RadioPowerPath[] = "/etc/acpi/events/RadioPower.sh";
- static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin",
- NULL};
bActuallySet = false;
@@ -1835,14 +1804,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
mdelay(1000);
priv->bHwRfOffAction = 1;
rtl92e_set_rf_state(dev, eRfPowerStateToSet, RF_CHANGE_BY_HW);
- if (priv->bHwRadioOff)
- argv[1] = "RFOFF";
- else
- argv[1] = "RFON";
-
- argv[0] = (char *)RadioPowerPath;
- argv[2] = NULL;
- call_usermodehelper(RadioPowerPath, argv, envp, UMH_WAIT_PROC);
}
}
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 83c30e2d82f5..a78f914082fe 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1490,9 +1490,9 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
hdrlen += 4;
}
- rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
ieee->stats.rx_packets++;
ieee->stats.rx_bytes += skb->len;
+ rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
return 1;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 00e34c392a38..d51f734aca26 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -943,9 +943,11 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
#endif
if (ieee->iw_mode == IW_MODE_MONITOR) {
+ unsigned int len = skb->len;
+
ieee80211_monitor_rx(ieee, skb, rx_stats);
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += len;
return 1;
}
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index ec33fb9122e9..57badc1e91e3 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1013,7 +1013,7 @@ typedef struct r8192_priv {
bool bis_any_nonbepkts;
bool bcurrent_turbo_EDCA;
bool bis_cur_rdlstate;
- struct timer_list fsync_timer;
+ struct delayed_work fsync_work;
bool bfsync_processing; /* 500ms Fsync timer is active or not */
u32 rate_record;
u32 rateCountDiffRecord;
diff --git a/drivers/staging/rtl8192u/r8192U_dm.c b/drivers/staging/rtl8192u/r8192U_dm.c
index c23e43b095d9..30b272da36f5 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.c
+++ b/drivers/staging/rtl8192u/r8192U_dm.c
@@ -2585,19 +2585,20 @@ static void dm_init_fsync(struct net_device *dev)
priv->ieee80211->fsync_seconddiff_ratethreshold = 200;
priv->ieee80211->fsync_state = Default_Fsync;
priv->framesyncMonitor = 1; /* current default 0xc38 monitor on */
- timer_setup(&priv->fsync_timer, dm_fsync_timer_callback, 0);
+ INIT_DELAYED_WORK(&priv->fsync_work, dm_fsync_work_callback);
}
static void dm_deInit_fsync(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- del_timer_sync(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
}
-void dm_fsync_timer_callback(struct timer_list *t)
+void dm_fsync_work_callback(struct work_struct *work)
{
- struct r8192_priv *priv = from_timer(priv, t, fsync_timer);
+ struct r8192_priv *priv =
+ container_of(work, struct r8192_priv, fsync_work.work);
struct net_device *dev = priv->ieee80211->dev;
u32 rate_index, rate_count = 0, rate_count_diff = 0;
bool bSwitchFromCountDiff = false;
@@ -2664,17 +2665,16 @@ void dm_fsync_timer_callback(struct timer_list *t)
}
}
if (bDoubleTimeInterval) {
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval*priv->ieee80211->fsync_multiple_timeinterval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv
+ ->ieee80211->fsync_time_interval *
+ priv->ieee80211->fsync_multiple_timeinterval));
} else {
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv
+ ->ieee80211->fsync_time_interval));
}
} else {
/* Let Register return to default value; */
@@ -2702,7 +2702,7 @@ static void dm_EndSWFsync(struct net_device *dev)
struct r8192_priv *priv = ieee80211_priv(dev);
RT_TRACE(COMP_HALDM, "%s\n", __func__);
- del_timer_sync(&(priv->fsync_timer));
+ cancel_delayed_work_sync(&priv->fsync_work);
/* Let Register return to default value; */
if (priv->bswitch_fsync) {
@@ -2744,11 +2744,9 @@ static void dm_StartSWFsync(struct net_device *dev)
if (priv->ieee80211->fsync_rate_bitmap & rateBitmap)
priv->rate_record += priv->stats.received_rate_histogram[1][rateIndex];
}
- if (timer_pending(&priv->fsync_timer))
- del_timer_sync(&priv->fsync_timer);
- priv->fsync_timer.expires = jiffies +
- msecs_to_jiffies(priv->ieee80211->fsync_time_interval);
- add_timer(&priv->fsync_timer);
+ cancel_delayed_work_sync(&priv->fsync_work);
+ schedule_delayed_work(&priv->fsync_work,
+ msecs_to_jiffies(priv->ieee80211->fsync_time_interval));
write_nic_dword(dev, rOFDM0_RxDetector2, 0x465c12cd);
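Editor's note: the r8192U change converts the fsync timer into a delayed work item. The periodic callback then runs in process context, re-queueing becomes cancel_delayed_work_sync() plus schedule_delayed_work() instead of the del_timer_sync()/add_timer() dance, and teardown is a single cancel_delayed_work_sync(). A minimal sketch of a self-rescheduling delayed work item follows; it only builds against a kernel tree (as an obj-m module) and all names are invented for illustration.

/* Sketch: self-rescheduling delayed_work, mirroring the fsync conversion. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work fsync_like_work;
static unsigned int interval_ms = 500; /* stand-in for fsync_time_interval */

static void fsync_like_cb(struct work_struct *work)
{
        /* ... inspect counters, decide whether to keep monitoring ... */
        pr_info("periodic work ran\n");

        /* Re-queue ourselves, as dm_fsync_work_callback() does. */
        schedule_delayed_work(&fsync_like_work, msecs_to_jiffies(interval_ms));
}

static int __init demo_init(void)
{
        INIT_DELAYED_WORK(&fsync_like_work, fsync_like_cb);
        schedule_delayed_work(&fsync_like_work, msecs_to_jiffies(interval_ms));
        return 0;
}

static void __exit demo_exit(void)
{
        /* One synchronous cancel replaces the old del_timer_sync() handling. */
        cancel_delayed_work_sync(&fsync_like_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");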
diff --git a/drivers/staging/rtl8192u/r8192U_dm.h b/drivers/staging/rtl8192u/r8192U_dm.h
index 0b2a1c688597..2159018b4e38 100644
--- a/drivers/staging/rtl8192u/r8192U_dm.h
+++ b/drivers/staging/rtl8192u/r8192U_dm.h
@@ -166,7 +166,7 @@ void dm_force_tx_fw_info(struct net_device *dev,
void dm_init_edca_turbo(struct net_device *dev);
void dm_rf_operation_test_callback(unsigned long data);
void dm_rf_pathcheck_workitemcallback(struct work_struct *work);
-void dm_fsync_timer_callback(struct timer_list *t);
+void dm_fsync_work_callback(struct work_struct *work);
void dm_cck_txpower_adjust(struct net_device *dev, bool binch14);
void dm_shadow_init(struct net_device *dev);
void dm_initialize_txpower_tracking(struct net_device *dev);
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 0c3ae8495afb..c0982c13ece7 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -323,6 +323,7 @@ int r8712_init_drv_sw(struct _adapter *padapter)
mp871xinit(padapter);
init_default_value(padapter);
r8712_InitSwLeds(padapter);
+ mutex_init(&padapter->mutex_start);
return ret;
}
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index ff3cb09c57a6..30e965c410ff 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf)
kfree(pdrvcmd->pbuf);
}
-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
@@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
pcmd_r = NULL;
switch (pcmd->cmdcode) {
- case GEN_CMD_CODE(_Read_MACREG):
- read_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
- case GEN_CMD_CODE(_Write_MACREG):
- write_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
case GEN_CMD_CODE(_Read_BBREG):
read_bbreg_hdl(padapter, (u8 *)pcmd);
break;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index f7c1258eaa39..fef9233cef42 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -570,7 +570,6 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
if (rtl871x_load_fw(padapter))
goto error;
spin_lock_init(&padapter->lock_rx_ff0_filter);
- mutex_init(&padapter->mutex_start);
return 0;
error:
usb_put_dev(udev);
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index 8d93c2f26890..a82114de21a7 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -165,8 +165,6 @@ No irqsave is necessary.
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
- int res = 0;
-
init_completion(&pcmdpriv->cmd_queue_comp);
init_completion(&pcmdpriv->terminate_cmdthread_comp);
@@ -178,18 +176,16 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
- if (!pcmdpriv->cmd_allocated_buf) {
- res = -ENOMEM;
- goto exit;
- }
+ if (!pcmdpriv->cmd_allocated_buf)
+ return -ENOMEM;
pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1));
pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
if (!pcmdpriv->rsp_allocated_buf) {
- res = -ENOMEM;
- goto exit;
+ kfree(pcmdpriv->cmd_allocated_buf);
+ return -ENOMEM;
}
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3);
@@ -197,8 +193,8 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->cmd_issued_cnt = pcmdpriv->cmd_done_cnt = pcmdpriv->rsp_cnt = 0;
mutex_init(&pcmdpriv->sctx_mutex);
-exit:
- return res;
+
+ return 0;
}
static void c2h_wk_callback(_workitem *work);
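Editor's note: the rtw_init_cmd_priv() rework makes the second allocation failure free the first buffer instead of leaking it behind a shared exit label. The underlying rule, that each failure path undoes exactly what has already succeeded, in a standalone form:

/* Sketch: unwind earlier allocations when a later one fails. */
#include <stdlib.h>
#include <errno.h>

struct cmd_priv_like {
        void *cmd_buf;
        void *rsp_buf;
};

static int init_bufs(struct cmd_priv_like *p, size_t cmd_sz, size_t rsp_sz)
{
        p->cmd_buf = calloc(1, cmd_sz);
        if (!p->cmd_buf)
                return -ENOMEM;

        p->rsp_buf = calloc(1, rsp_sz);
        if (!p->rsp_buf) {
                free(p->cmd_buf);       /* undo the first allocation */
                p->cmd_buf = NULL;
                return -ENOMEM;
        }
        return 0;
}

int main(void)
{
        struct cmd_priv_like p;

        if (init_bufs(&p, 1024, 512))
                return 1;
        free(p.rsp_buf);
        free(p.cmd_buf);
        return 0;
}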
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 81ecfd1a200d..08b07e77bb9d 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -577,7 +577,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
@@ -1781,7 +1781,7 @@ static void speakup_con_update(struct vc_data *vc)
{
unsigned long flags;
- if (!speakup_console[vc->vc_num] || spk_parked)
+ if (!speakup_console[vc->vc_num] || spk_parked || !synth)
return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 3568bfb89912..b5944e7bdbf6 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -208,8 +208,10 @@ void spk_do_flush(void)
wake_up_process(speakup_task);
}
-void synth_write(const char *buf, size_t count)
+void synth_write(const char *_buf, size_t count)
{
+ const unsigned char *buf = (const unsigned char *) _buf;
+
while (count--)
synth_buffer_add(*buf++);
synth_start();
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
index 1c180ead4a20..ad143f601974 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c
@@ -31,8 +31,11 @@
#define USE_VCHIQ_ARM
#include "interface/vchi/vchi.h"
-/* maximum number of components supported */
-#define VCHIQ_MMAL_MAX_COMPONENTS 4
+/*
+ * maximum number of components supported.
+ * This matches the maximum permitted by default on the VPU
+ */
+#define VCHIQ_MMAL_MAX_COMPONENTS 64
/*#define FULL_MSG_DUMP 1*/
@@ -167,8 +170,6 @@ struct vchiq_mmal_instance {
/* protect accesses to context_map */
struct mutex context_map_lock;
- /* component to use next */
- int component_idx;
struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
/* ordered workqueue to process all bulk operations */
@@ -927,9 +928,10 @@ static int create_component(struct vchiq_mmal_instance *instance,
/* build component create message */
m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
- m.u.component_create.client_component = (u32)(unsigned long)component;
- strncpy(m.u.component_create.name, name,
- sizeof(m.u.component_create.name));
+ m.u.component_create.client_component = component->client_component;
+ strscpy_pad(m.u.component_create.name, name,
+ sizeof(m.u.component_create.name));
+ m.u.component_create.pid = 0;
ret = send_synchronous_mmal_msg(instance, &m,
sizeof(m.u.component_create),
@@ -1616,17 +1618,29 @@ int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
{
int ret;
int idx; /* port index */
- struct vchiq_mmal_component *component;
+ struct vchiq_mmal_component *component = NULL;
if (mutex_lock_interruptible(&instance->vchiq_mutex))
return -EINTR;
- if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
+ for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
+ if (!instance->component[idx].in_use) {
+ component = &instance->component[idx];
+ component->in_use = 1;
+ break;
+ }
+ }
+
+ if (!component) {
ret = -EINVAL; /* todo is this correct error? */
goto unlock;
}
- component = &instance->component[instance->component_idx];
+ /* We need a handle to reference back to our component structure.
+ * Use the array index in instance->component rather than rolling
+ * another IDR.
+ */
+ component->client_component = idx;
ret = create_component(instance, component, name);
if (ret < 0) {
@@ -1678,8 +1692,6 @@ int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
goto release_component;
}
- instance->component_idx++;
-
*component_out = component;
mutex_unlock(&instance->vchiq_mutex);
@@ -1689,6 +1701,8 @@ int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
release_component:
destroy_component(instance, component);
unlock:
+ if (component)
+ component->in_use = 0;
mutex_unlock(&instance->vchiq_mutex);
return ret;
@@ -1710,6 +1724,8 @@ int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
ret = destroy_component(instance, component);
+ component->in_use = 0;
+
mutex_unlock(&instance->vchiq_mutex);
return ret;
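Editor's note: instead of a monotonically increasing component_idx, vchiq_mmal_component_init() now scans the fixed component array for a free slot, marks it in_use, records the slot index as client_component, and clears the flag again on any failure or on finalise, so slots can be reused. The slot-allocation shape in isolation (locking, which the driver does with the instance mutex, is omitted here):

/* Sketch: claim the first free slot in a fixed array, release it when done. */
#include <stdio.h>
#include <stddef.h>

#define MAX_COMPONENTS 64       /* mirrors VCHIQ_MMAL_MAX_COMPONENTS */

struct component { int in_use; unsigned int client_component; };

static struct component pool[MAX_COMPONENTS];

static struct component *claim_component(void)
{
        size_t idx;

        for (idx = 0; idx < MAX_COMPONENTS; idx++) {
                if (!pool[idx].in_use) {
                        pool[idx].in_use = 1;
                        /* remember our own index so replies can refer back to us */
                        pool[idx].client_component = idx;
                        return &pool[idx];
                }
        }
        return NULL;    /* all slots taken */
}

static void release_component(struct component *c)
{
        c->in_use = 0;
}

int main(void)
{
        struct component *c = claim_component();

        if (!c)
                return 1;
        printf("claimed slot %u\n", c->client_component);
        release_component(c);
        return 0;
}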
diff --git a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
index 47897e81ec58..a75c5f0a770e 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.h
@@ -82,6 +82,7 @@ struct vchiq_mmal_port {
};
struct vchiq_mmal_component {
+ u32 in_use:1;
u32 enabled:1;
u32 handle; /* VideoCore handle for component */
u32 inputs; /* Number of input ports */
@@ -91,6 +92,7 @@ struct vchiq_mmal_component {
struct vchiq_mmal_port input[MAX_PORT_COUNT]; /* input ports */
struct vchiq_mmal_port output[MAX_PORT_COUNT]; /* output ports */
struct vchiq_mmal_port clock[MAX_PORT_COUNT]; /* clock ports */
+ u32 client_component; /* Used to ref back to client struct */
};
int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 082302944c37..18284c427b7e 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -560,7 +560,7 @@ err_free_rd:
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD0Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -606,7 +606,7 @@ err_free_rd:
kfree(desc->rd_info);
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->aRD1Ring[i];
device_free_rx_buf(priv, desc);
kfree(desc->rd_info);
@@ -670,7 +670,7 @@ static int device_init_td0_ring(struct vnt_private *priv)
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD0Rings[i];
kfree(desc->td_info);
}
@@ -710,7 +710,7 @@ static int device_init_td1_ring(struct vnt_private *priv)
return 0;
err_free_desc:
- while (--i) {
+ while (i--) {
desc = &priv->apTD1Rings[i];
kfree(desc->td_info);
}
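Editor's note: the vt6655 hunks swap while (--i) for while (i--) in the error-unwind loops. The old form skipped index 0 and, if the very first allocation failed (i == 0), decremented past zero and walked the whole ring. A tiny demonstration of the difference:

/* Sketch: unwinding i already-initialised entries; while (i--) visits i-1 .. 0. */
#include <stdio.h>

static void unwind(int initialised)
{
        int i = initialised;

        printf("freeing after %d successes:", initialised);
        while (i--)             /* with while (--i), entry 0 is never freed */
                printf(" %d", i);
        printf("\n");
}

int main(void)
{
        unwind(3);      /* frees 2 1 0 */
        unwind(0);      /* frees nothing; while (--i) would walk off the end here */
        return 0;
}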
diff --git a/drivers/staging/wilc1000/wilc_hif.c b/drivers/staging/wilc1000/wilc_hif.c
index 221e3d93db14..22e02fd068b4 100644
--- a/drivers/staging/wilc1000/wilc_hif.c
+++ b/drivers/staging/wilc1000/wilc_hif.c
@@ -441,38 +441,49 @@ out:
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto)
{
- struct wilc_join_bss_param *param;
- struct ieee80211_p2p_noa_attr noa_attr;
- u8 rates_len = 0;
- const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+ const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
+ struct ieee80211_p2p_noa_attr noa_attr;
+ const struct cfg80211_bss_ies *ies;
+ struct wilc_join_bss_param *param;
+ u8 rates_len = 0, ies_len;
int ret;
- const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
return NULL;
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
+ if (!ies_data) {
+ rcu_read_unlock();
+ kfree(param);
+ return NULL;
+ }
+ ies_len = ies->len;
+ rcu_read_unlock();
+
param->beacon_period = cpu_to_le16(bss->beacon_interval);
param->cap_info = cpu_to_le16(bss->capability);
param->bss_type = WILC_FW_BSS_TYPE_INFRA;
param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
ether_addr_copy(param->bssid, bss->bssid);
- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
if (ssid_elm) {
if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
}
- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
+ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
if (tim_elm && tim_elm[1] >= 2)
param->dtim_period = tim_elm[3];
memset(param->p_suites, 0xFF, 3);
memset(param->akm_suites, 0xFF, 3);
- rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
if (rates_ie) {
rates_len = rates_ie[1];
if (rates_len > WILC_MAX_RATES_SUPPORTED)
@@ -483,7 +494,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
if (rates_len < WILC_MAX_RATES_SUPPORTED) {
supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
- ies->data, ies->len);
+ ies_data, ies_len);
if (supp_rates_ie) {
u8 ext_rates = supp_rates_ie[1];
@@ -498,11 +509,11 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
}
- ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
if (ht_ie)
param->ht_capable = true;
- ret = cfg80211_get_p2p_attr(ies->data, ies->len,
+ ret = cfg80211_get_p2p_attr(ies_data, ies_len,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
@@ -526,7 +537,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wmm_ie) {
struct ieee80211_wmm_param_ie *ie;
@@ -541,13 +552,13 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wpa_ie) {
param->mode_802_11i = 1;
param->rsn_found = true;
}
- rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
if (rsn_ie) {
int offset = 8;
@@ -570,6 +581,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
}
+ kfree(ies_data);
return (void *)param;
}
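Editor's note: the wilc_hif change stops dereferencing bss->ies outside of RCU protection. The IE blob is copied with kmemdup() inside rcu_read_lock()/rcu_read_unlock(), the (potentially slow) parsing then runs on the private copy, and the copy is freed at the end. A user-space analogue of "snapshot under the lock, parse after unlocking", with a mutex standing in for the RCU read-side section:

/* Sketch: snapshot shared data under a lock, parse the private copy afterwards. */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

static pthread_mutex_t ies_lock = PTHREAD_MUTEX_INITIALIZER;   /* stands in for RCU */
static unsigned char shared_ies[32];
static size_t shared_len = sizeof(shared_ies);

int main(void)
{
        unsigned char *copy;
        size_t len;

        /* Take a private copy while the data cannot change under us. */
        pthread_mutex_lock(&ies_lock);
        len = shared_len;
        copy = malloc(len);
        if (copy)
                memcpy(copy, shared_ies, len);  /* kmemdup() analogue */
        pthread_mutex_unlock(&ies_lock);
        if (!copy)
                return 1;

        /* Long-running parsing happens on the copy, with no lock held. */
        printf("parsed %zu bytes\n", len);
        free(copy);
        return 0;
}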
diff --git a/drivers/staging/wilc1000/wilc_netdev.c b/drivers/staging/wilc1000/wilc_netdev.c
index 508acb8bb089..f34b1f0d3a80 100644
--- a/drivers/staging/wilc1000/wilc_netdev.c
+++ b/drivers/staging/wilc1000/wilc_netdev.c
@@ -717,14 +717,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb->dev != ndev) {
netdev_err(ndev, "Packet not destined to this device\n");
- return 0;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
if (!tx_data) {
dev_kfree_skb(skb);
netif_wake_queue(ndev);
- return 0;
+ return NETDEV_TX_OK;
}
tx_data->buff = skb->data;
@@ -748,7 +749,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
mutex_unlock(&wilc->vif_mutex);
}
- return 0;
+ return NETDEV_TX_OK;
}
static int wilc_mac_close(struct net_device *ndev)
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index c787c5da8f2b..22a30da011e1 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -20,6 +20,7 @@ static const struct sdio_device_id wilc_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_WILC, SDIO_DEVICE_ID_WILC) },
{ },
};
+MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids);
#define WILC_SDIO_BLOCK_SIZE 512
diff --git a/drivers/staging/xlnx_ctrl_driver/Kconfig b/drivers/staging/xlnx_ctrl_driver/Kconfig
new file mode 100644
index 000000000000..3bff5e6d1aca
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/Kconfig
@@ -0,0 +1,15 @@
+config XLNX_CTRL_FRMBUF
+ tristate "FB Control driver"
+ help
+ This driver supports the Xilinx Framebuffer read and write IP. It is a
+ simple control-plane driver, controlled through ioctls from userspace.
+ It does not use any media framework such as V4L2 or DRM and therefore
+ does not need to adhere to either.
+
+config XLNX_CTRL_VPSS
+ tristate "VPSS Control driver"
+ help
+ This driver supports the Xilinx VPSS IP. It is a simple control-plane
+ driver, controlled through ioctls from userspace. It does not use any
+ media framework such as V4L2 or DRM and therefore does not need to
+ adhere to either.
diff --git a/drivers/staging/xlnx_ctrl_driver/MAINTAINERS b/drivers/staging/xlnx_ctrl_driver/MAINTAINERS
new file mode 100644
index 000000000000..bcfd70d359ec
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX CONTROL DRIVER
+M: Saurabh Sengar <saurabh.singh@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_ctrl_driver
diff --git a/drivers/staging/xlnx_ctrl_driver/Makefile b/drivers/staging/xlnx_ctrl_driver/Makefile
new file mode 100644
index 000000000000..312bd1f5d233
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_XLNX_CTRL_FRMBUF) += xlnx_frmb.o
+obj-$(CONFIG_XLNX_CTRL_VPSS) += xlnx_vpss.o
diff --git a/drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c b/drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c
new file mode 100644
index 000000000000..0b36575e493b
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/xlnx_frmb.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx Framebuffer read control driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabh.singh@xilinx.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/xlnx_ctrl.h>
+
+/* TODO: clock framework */
+
+#define XFBWR_FB_CTRL 0x00
+#define XFBWR_FB_WIDTH 0x10
+#define XFBWR_FB_HEIGHT 0x18
+#define XFBWR_FB_STRIDE 0x20
+#define XFBWR_FB_COLOR 0x28
+#define XFBWR_FB_PLANE1 0x30
+#define XFBWR_FB_PLANE2 0x3C
+
+#define XFBWR_FB_CTRL_START BIT(0)
+#define XFBWR_FB_CTRL_IDLE BIT(2)
+#define XFBWR_FB_CTRL_RESTART BIT(7)
+#define XFBWR_FB_CTRL_OFF 0
+
+static u64 dma_mask = -1ULL;
+
+struct frmb_dmabuf_reg {
+ s32 dmabuf_fd;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *dbuf_attach;
+ struct sg_table *dbuf_sg_table;
+};
+
+/**
+ * struct frmb_struct - Xilinx framebuffer ctrl object
+ *
+ * @dev: device structure
+ * @db: framebuffer ctrl driver dmabuf structure
+ * @frmb_miscdev: The misc device registered
+ * @regs: Base address of framebuffer IP
+ * @is_fbrd: True for framebuffer Read else false
+ */
+struct frmb_struct {
+ struct device *dev;
+ struct frmb_dmabuf_reg db;
+ struct miscdevice frmb_miscdev;
+ void __iomem *regs;
+ bool is_fbrd;
+};
+
+struct frmb_data {
+ u32 fd;
+ u32 height;
+ u32 width;
+ u32 stride;
+ u32 color;
+ u32 n_planes;
+ u32 offset;
+};
+
+struct match_struct {
+ char name[8];
+ bool is_read;
+};
+
+static const struct match_struct read_struct = {
+ .name = "fbrd",
+ .is_read = true,
+};
+
+static const struct match_struct write_struct = {
+ .name = "fbwr",
+ .is_read = false,
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id frmb_of_match[] = {
+ { .compatible = "xlnx,ctrl-fbwr-1.0", .data = &write_struct},
+ { .compatible = "xlnx,ctrl-fbrd-1.0", .data = &read_struct},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, frmb_of_match);
+
+static inline struct frmb_struct *to_frmb_struct(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct frmb_struct, frmb_miscdev);
+}
+
+static inline u32 frmb_ior(void __iomem *lp, off_t offset)
+{
+ return readl(lp + offset);
+}
+
+static inline void frmb_iow(void __iomem *lp, off_t offset, u32 value)
+{
+ writel(value, (lp + offset));
+}
+
+phys_addr_t frmb_add_dmabuf(u32 fd, struct frmb_struct *frmb_g)
+{
+ frmb_g->db.dbuf = dma_buf_get(fd);
+ frmb_g->db.dbuf_attach = dma_buf_attach(frmb_g->db.dbuf, frmb_g->dev);
+ if (IS_ERR(frmb_g->db.dbuf_attach)) {
+ dma_buf_put(frmb_g->db.dbuf);
+ dev_err(frmb_g->dev, "Failed DMA-BUF attach\n");
+ return -EINVAL;
+ }
+
+ frmb_g->db.dbuf_sg_table = dma_buf_map_attachment(frmb_g->db.dbuf_attach
+ , DMA_BIDIRECTIONAL);
+
+ if (!frmb_g->db.dbuf_sg_table) {
+ dev_err(frmb_g->dev, "Failed DMA-BUF map_attachment\n");
+ dma_buf_detach(frmb_g->db.dbuf, frmb_g->db.dbuf_attach);
+ dma_buf_put(frmb_g->db.dbuf);
+ return -EINVAL;
+ }
+
+ return (u32)sg_dma_address(frmb_g->db.dbuf_sg_table->sgl);
+}
+
+static void xlnk_clear_dmabuf(struct frmb_struct *frmb_g)
+{
+ dma_buf_unmap_attachment(frmb_g->db.dbuf_attach,
+ frmb_g->db.dbuf_sg_table,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(frmb_g->db.dbuf, frmb_g->db.dbuf_attach);
+ dma_buf_put(frmb_g->db.dbuf);
+}
+
+static long frmb_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long retval = 0;
+ struct frmb_data data;
+ phys_addr_t phys_y = 0, phys_uv = 0;
+ struct frmb_struct *frmb_g = to_frmb_struct(file);
+
+ switch (cmd) {
+ case XSET_FB_POLL:
+ retval = frmb_ior(frmb_g->regs, XFBWR_FB_CTRL);
+ if (retval == XFBWR_FB_CTRL_IDLE)
+ retval = 0;
+ else
+ retval = 1;
+ break;
+ case XSET_FB_ENABLE_SNGL:
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL, XFBWR_FB_CTRL_START);
+ break;
+ case XSET_FB_ENABLE:
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL, XFBWR_FB_CTRL_START);
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL,
+ XFBWR_FB_CTRL_RESTART | XFBWR_FB_CTRL_START);
+ break;
+ case XSET_FB_DISABLE:
+ frmb_iow(frmb_g->regs, XFBWR_FB_CTRL, XFBWR_FB_CTRL_OFF);
+ break;
+ case XSET_FB_CONFIGURE:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ break;
+ }
+ frmb_iow(frmb_g->regs, XFBWR_FB_WIDTH, data.width);
+ frmb_iow(frmb_g->regs, XFBWR_FB_HEIGHT, data.height);
+ frmb_iow(frmb_g->regs, XFBWR_FB_STRIDE, data.stride);
+ frmb_iow(frmb_g->regs, XFBWR_FB_COLOR, data.color);
+ break;
+ case XSET_FB_CAPTURE:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EINVAL;
+ break;
+ }
+ phys_y = frmb_add_dmabuf(data.fd, frmb_g);
+ frmb_iow(frmb_g->regs, XFBWR_FB_PLANE1, phys_y);
+ if (data.n_planes == 2) {
+ phys_uv = phys_y + data.offset;
+ frmb_iow(frmb_g->regs, XFBWR_FB_PLANE2, phys_uv);
+ }
+ break;
+ case XSET_FB_RELEASE:
+ xlnk_clear_dmabuf(frmb_g);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+ return retval;
+}
+
+static const struct file_operations frmb_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = frmb_ioctl,
+};
+
+static int frmb_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ int ret;
+ struct resource *res_frmb;
+ const struct of_device_id *match;
+ struct frmb_struct *frmb_g;
+ struct gpio_desc *reset_gpio;
+ const struct match_struct *config;
+
+ pdev->dev.dma_mask = &dma_mask;
+ pdev->dev.coherent_dma_mask = dma_mask;
+
+ frmb_g = devm_kzalloc(&pdev->dev, sizeof(*frmb_g), GFP_KERNEL);
+ if (!frmb_g)
+ return -ENOMEM;
+
+ reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio)) {
+ ret = PTR_ERR(reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "No gpio probed, Deferring...\n");
+ else
+ dev_err(&pdev->dev, "No reset gpio info from dts\n");
+ return ret;
+ }
+ gpiod_set_value_cansleep(reset_gpio, 0);
+
+ platform_set_drvdata(pdev, frmb_g);
+ frmb_g->dev = &pdev->dev;
+ res_frmb = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ frmb_g->regs = devm_ioremap_resource(&pdev->dev, res_frmb);
+ if (IS_ERR(frmb_g->regs))
+ return PTR_ERR(frmb_g->regs);
+
+ match = of_match_node(frmb_of_match, node);
+ if (!match)
+ return -ENODEV;
+
+ config = match->data;
+ frmb_g->frmb_miscdev.name = config->name;
+ frmb_g->is_fbrd = config->is_read;
+
+ frmb_g->frmb_miscdev.minor = MISC_DYNAMIC_MINOR;
+ frmb_g->frmb_miscdev.fops = &frmb_fops;
+ frmb_g->frmb_miscdev.parent = NULL;
+ ret = misc_register(&frmb_g->frmb_miscdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "FrameBuffer control driver registration failed!\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "FrameBuffer control driver success!\n");
+
+ return ret;
+}
+
+static int frmb_remove(struct platform_device *pdev)
+{
+ struct frmb_struct *frmb_g = platform_get_drvdata(pdev);
+
+ misc_deregister(&frmb_g->frmb_miscdev);
+ return 0;
+}
+
+static struct platform_driver frmb_driver = {
+ .probe = frmb_probe,
+ .remove = frmb_remove,
+ .driver = {
+ .name = "xlnx_ctrl-frmb",
+ .of_match_table = frmb_of_match,
+ },
+};
+
+module_platform_driver(frmb_driver);
+
+MODULE_DESCRIPTION("Xilinx Framebuffer control driver");
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c b/drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c
new file mode 100644
index 000000000000..017ad0a4cffd
--- /dev/null
+++ b/drivers/staging/xlnx_ctrl_driver/xlnx_vpss.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx VPSS control driver.
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author: Saurabh Sengar <saurabh.singh@xilinx.com>
+ */
+
+/* TODO: clock framework */
+
+#include <linux/fs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/xlnx_ctrl.h>
+
+/* VPSS block offset */
+#define XHSCALER_OFFSET 0
+#define XSAXIS_RST_OFFSET 0x10000
+#define XVSCALER_OFFSET 0x20000
+
+#define XVPSS_GPIO_CHAN 8
+
+#define XVPSS_MAX_WIDTH 3840
+#define XVPSS_MAX_HEIGHT 2160
+
+#define XVPSS_STEPPREC 65536
+
+/* Video IP PPC */
+#define XVPSS_PPC_1 1
+#define XVPSS_PPC_2 2
+
+#define XVPSS_MAX_TAPS 12
+#define XVPSS_PHASES 64
+#define XVPSS_TAPS_6 6
+
+/* Mask definitions for Low and high 16 bits in a 32 bit number */
+#define XVPSS_MASK_LOW_16BITS GENMASK(15, 0)
+#define XVPSS_MASK_LOW_32BITS GENMASK(31, 0)
+#define XVPSS_STEP_PRECISION_SHIFT (16)
+#define XVPSS_PHASE_SHIFT_BY_6 (6)
+#define XVPSS_PHASE_MULTIPLIER (9)
+#define XVPSS_BITSHIFT_16 (16)
+
+/* VPSS AP Control Registers */
+#define XVPSS_START BIT(0)
+#define XVPSS_RESTART BIT(7)
+#define XVPSS_STREAM_ON (XVPSS_START | XVPSS_RESTART)
+
+/* H-scaler registers */
+#define XVPSS_H_AP_CTRL (0x0000)
+#define XVPSS_H_GIE (0x0004)
+#define XVPSS_H_IER (0x0008)
+#define XVPSS_H_ISR (0x000c)
+#define XVPSS_H_HEIGHT (0x0010)
+#define XVPSS_H_WIDTHIN (0x0018)
+#define XVPSS_H_WIDTHOUT (0x0020)
+#define XVPSS_H_COLOR (0x0028)
+#define XVPSS_H_PIXELRATE (0x0030)
+#define XVPSS_H_COLOROUT (0X0038)
+#define XVPSS_H_HFLTCOEFF_BASE (0x0800)
+#define XVPSS_H_HFLTCOEFF_HIGH (0x0bff)
+#define XVPSS_H_PHASESH_V_BASE (0x2000)
+#define XVPSS_H_PHASESH_V_HIGH (0x3fff)
+
+/* H-scaler masks */
+#define XVPSS_PHASESH_WR_EN BIT(8)
+
+/* V-scaler registers */
+#define XVPSS_V_AP_CTRL (0x000)
+#define XVPSS_V_GIE (0x004)
+#define XVPSS_V_IER (0x008)
+#define XVPSS_V_ISR (0x00c)
+#define XVPSS_V_HEIGHTIN (0x010)
+#define XVPSS_V_WIDTH (0x018)
+#define XVPSS_V_HEIGHTOUT (0x020)
+#define XVPSS_V_LINERATE (0x028)
+#define XVPSS_V_COLOR (0x030)
+#define XVPSS_V_VFLTCOEFF_BASE (0x800)
+#define XVPSS_V_VFLTCOEFF_HIGH (0xbff)
+
+#define XVPSS_GPIO_RST_SEL 1
+#define XVPSS_GPIO_VIDEO_IN BIT(0)
+#define XVPSS_RST_IP_AXIS BIT(1)
+#define XVPSS_GPIO_MASK_ALL (XVPSS_GPIO_VIDEO_IN | XVPSS_RST_IP_AXIS)
+
+enum xvpss_color {
+ XVPSS_YUV_RGB,
+ XVPSS_YUV_444,
+ XVPSS_YUV_422,
+ XVPSS_YUV_420,
+};
+
+/* VPSS coefficients for 6 tap filters */
+static const u16
+xvpss_coeff_taps6[XVPSS_PHASES][XVPSS_TAPS_6] = {
+ { -132, 236, 3824, 236, -132, 64, },
+ { -116, 184, 3816, 292, -144, 64, },
+ { -100, 132, 3812, 348, -160, 64, },
+ { -88, 84, 3808, 404, -176, 64, },
+ { -72, 36, 3796, 464, -192, 64, },
+ { -60, -8, 3780, 524, -208, 68, },
+ { -48, -52, 3768, 588, -228, 68, },
+ { -32, -96, 3748, 652, -244, 68, },
+ { -20, -136, 3724, 716, -260, 72, },
+ { -8, -172, 3696, 784, -276, 72, },
+ { 0, -208, 3676, 848, -292, 72, },
+ { 12, -244, 3640, 920, -308, 76, },
+ { 20, -276, 3612, 988, -324, 76, },
+ { 32, -304, 3568, 1060, -340, 80, },
+ { 40, -332, 3532, 1132, -356, 80, },
+ { 48, -360, 3492, 1204, -372, 84, },
+ { 56, -384, 3448, 1276, -388, 88, },
+ { 64, -408, 3404, 1352, -404, 88, },
+ { 72, -428, 3348, 1428, -416, 92, },
+ { 76, -448, 3308, 1500, -432, 92, },
+ { 84, -464, 3248, 1576, -444, 96, },
+ { 88, -480, 3200, 1652, -460, 96, },
+ { 92, -492, 3140, 1728, -472, 100, },
+ { 96, -504, 3080, 1804, -484, 104, },
+ { 100, -516, 3020, 1880, -492, 104, },
+ { 104, -524, 2956, 1960, -504, 104, },
+ { 104, -532, 2892, 2036, -512, 108, },
+ { 108, -540, 2832, 2108, -520, 108, },
+ { 108, -544, 2764, 2184, -528, 112, },
+ { 112, -544, 2688, 2260, -532, 112, },
+ { 112, -548, 2624, 2336, -540, 112, },
+ { 112, -548, 2556, 2408, -544, 112, },
+ { 112, -544, 2480, 2480, -544, 112, },
+ { 112, -544, 2408, 2556, -548, 112, },
+ { 112, -540, 2336, 2624, -548, 112, },
+ { 112, -532, 2260, 2688, -544, 112, },
+ { 112, -528, 2184, 2764, -544, 108, },
+ { 108, -520, 2108, 2832, -540, 108, },
+ { 108, -512, 2036, 2892, -532, 104, },
+ { 104, -504, 1960, 2956, -524, 104, },
+ { 104, -492, 1880, 3020, -516, 100, },
+ { 104, -484, 1804, 3080, -504, 96, },
+ { 100, -472, 1728, 3140, -492, 92, },
+ { 96, -460, 1652, 3200, -480, 88, },
+ { 96, -444, 1576, 3248, -464, 84, },
+ { 92, -432, 1500, 3308, -448, 76, },
+ { 92, -416, 1428, 3348, -428, 72, },
+ { 88, -404, 1352, 3404, -408, 64, },
+ { 88, -388, 1276, 3448, -384, 56, },
+ { 84, -372, 1204, 3492, -360, 48, },
+ { 80, -356, 1132, 3532, -332, 40, },
+ { 80, -340, 1060, 3568, -304, 32, },
+ { 76, -324, 988, 3612, -276, 20, },
+ { 76, -308, 920, 3640, -244, 12, },
+ { 72, -292, 848, 3676, -208, 0, },
+ { 72, -276, 784, 3696, -172, -8, },
+ { 72, -260, 716, 3724, -136, -20, },
+ { 68, -244, 652, 3748, -96, -32, },
+ { 68, -228, 588, 3768, -52, -48, },
+ { 68, -208, 524, 3780, -8, -60, },
+ { 64, -192, 464, 3796, 36, -72, },
+ { 64, -176, 404, 3808, 84, -88, },
+ { 64, -160, 348, 3812, 132, -100, },
+ { 64, -144, 292, 3816, 184, -116, }
+};
+
+/**
+ * struct xvpss_struct - Xilinx VPSS ctrl object
+ *
+ * @dev: device structure
+ * @xvpss_miscdev: The misc device registered
+ * @regs: Base address of VPSS
+ * @n_taps: number of horizontal/vertical taps
+ * @ppc: Pixels per Clock cycle the IP operates upon
+ * @is_polyphase: True for polyphase, else false
+ * @vpss_coeff: The complete array of H-scaler/V-scaler coefficients
+ * @H_phases: The phases needed to program the H-scaler for different taps
+ * @reset_gpio: GPIO reset line to bring VPSS Scaler out of reset
+ */
+struct xvpss_struct {
+ struct device *dev;
+ struct miscdevice xvpss_miscdev;
+ void __iomem *regs;
+ int n_taps;
+ int ppc;
+ bool is_polyphase;
+ short vpss_coeff[XVPSS_PHASES][XVPSS_MAX_TAPS];
+ u32 H_phases[XVPSS_MAX_WIDTH];
+ struct gpio_desc *reset_gpio;
+};
+
+struct xvpss_data {
+ u32 height_in;
+ u32 width_in;
+ u32 height_out;
+ u32 width_out;
+ u32 color_in;
+ u32 color_out;
+};
+
+/* Match table for of_platform binding */
+static const struct of_device_id xvpss_of_match[] = {
+ { .compatible = "xlnx,ctrl-xvpss-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xvpss_of_match);
+
+static inline struct xvpss_struct *to_xvpss_struct(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct xvpss_struct, xvpss_miscdev);
+}
+
+static inline u32 xvpss_ior(void __iomem *lp, off_t offset)
+{
+ return readl(lp + offset);
+}
+
+static inline void xvpss_iow(void __iomem *lp, off_t offset, u32 value)
+{
+ writel(value, (lp + offset));
+}
+
+static inline void xvpss_clr(void __iomem *base, u32 offset, u32 clr)
+{
+ xvpss_iow(base, offset, xvpss_ior(base, offset) & ~clr);
+}
+
+static inline void xvpss_set(void __iomem *base, u32 offset, u32 set)
+{
+ xvpss_iow(base, offset, xvpss_ior(base, offset) | set);
+}
+
+static inline void xvpss_disable_block(struct xvpss_struct *xvpss_g,
+ u32 channel, u32 ip_block)
+{
+ xvpss_clr(xvpss_g->regs, ((channel - 1) * XVPSS_GPIO_CHAN) +
+ XSAXIS_RST_OFFSET, ip_block);
+}
+
+static inline void
+xvpss_enable_block(struct xvpss_struct *xvpss_g, u32 channel, u32 ip_block)
+{
+ xvpss_set(xvpss_g->regs, ((channel - 1) * XVPSS_GPIO_CHAN) +
+ XSAXIS_RST_OFFSET, ip_block);
+}
+
+static void xvpss_reset(struct xvpss_struct *xvpss_g)
+{
+ xvpss_disable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_GPIO_MASK_ALL);
+ xvpss_enable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_RST_IP_AXIS);
+}
+
+static void xvpss_enable(struct xvpss_struct *xvpss_g)
+{
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET +
+ XVPSS_H_AP_CTRL, XVPSS_STREAM_ON);
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET +
+ XVPSS_V_AP_CTRL, XVPSS_STREAM_ON);
+ xvpss_enable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_RST_IP_AXIS);
+}
+
+static void xvpss_disable(struct xvpss_struct *xvpss_g)
+{
+ xvpss_disable_block(xvpss_g, XVPSS_GPIO_RST_SEL, XVPSS_GPIO_MASK_ALL);
+}
+
+static void xvpss_set_input(struct xvpss_struct *xvpss_g,
+ u32 width, u32 height, u32 color)
+{
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_HEIGHTIN, height);
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_WIDTH, width);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_WIDTHIN, width);
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_COLOR, color);
+}
+
+static void xvpss_set_output(struct xvpss_struct *xvpss_g, u32 width,
+ u32 height, u32 color)
+{
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_HEIGHTOUT, height);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_HEIGHT, height);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_WIDTHOUT, width);
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_COLOROUT, color);
+}
+
+static void xvpss_load_ext_coeff(struct xvpss_struct *xvpss_g,
+ const short *coeff, u32 ntaps)
+{
+ unsigned int i, j, pad, offset;
+
+ /* Determine if coefficient needs padding (effective vs. max taps) */
+ pad = XVPSS_MAX_TAPS - ntaps;
+ offset = pad >> 1;
+ /* Load coefficients into vpss coefficient table */
+ for (i = 0; i < XVPSS_PHASES; i++) {
+ for (j = 0; j < ntaps; ++j)
+ xvpss_g->vpss_coeff[i][j + offset] =
+ coeff[i * ntaps + j];
+ }
+ if (pad) {
+ for (i = 0; i < XVPSS_PHASES; i++) {
+ for (j = 0; j < offset; j++)
+ xvpss_g->vpss_coeff[i][j] = 0;
+ j = ntaps + offset;
+ for (; j < XVPSS_MAX_TAPS; j++)
+ xvpss_g->vpss_coeff[i][j] = 0;
+ }
+ }
+}
+
+static void xvpss_select_coeff(struct xvpss_struct *xvpss_g)
+{
+ const short *coeff;
+ u32 ntaps;
+
+ coeff = &xvpss_coeff_taps6[0][0];
+ ntaps = XVPSS_TAPS_6;
+
+ xvpss_load_ext_coeff(xvpss_g, coeff, ntaps);
+}
+
+static void xvpss_set_coeff(struct xvpss_struct *xvpss_g)
+{
+ u32 nphases = XVPSS_PHASES;
+ u32 ntaps = xvpss_g->n_taps;
+ int val, i, j, offset, rd_indx;
+ u32 v_addr, h_addr;
+
+ offset = (XVPSS_MAX_TAPS - ntaps) / 2;
+ v_addr = XVSCALER_OFFSET + XVPSS_V_VFLTCOEFF_BASE;
+ h_addr = XHSCALER_OFFSET + XVPSS_H_HFLTCOEFF_BASE;
+
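+ /*
+ * Each 32-bit coefficient word packs two consecutive 16-bit taps
+ * (odd tap in the upper half-word); the same packed value is written
+ * to both the vertical and horizontal scaler coefficient tables.
+ */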
+ for (i = 0; i < nphases; i++) {
+ for (j = 0; j < ntaps / 2; j++) {
+ rd_indx = j * 2 + offset;
+ val = (xvpss_g->vpss_coeff[i][rd_indx + 1] <<
+ XVPSS_BITSHIFT_16) | (xvpss_g->vpss_coeff[i][rd_indx] &
+ XVPSS_MASK_LOW_16BITS);
+ xvpss_iow(xvpss_g->regs, v_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ xvpss_iow(xvpss_g->regs, h_addr +
+ ((i * ntaps / 2 + j) * 4), val);
+ }
+ }
+}
+
+static void xvpss_h_calculate_phases(struct xvpss_struct *xvpss_g,
+ u32 width_in, u32 width_out,
+ u32 pixel_rate)
+{
+ unsigned int loop_width, x, s, nphases = XVPSS_PHASES;
+ unsigned int nppc = xvpss_g->ppc;
+ unsigned int shift = XVPSS_STEP_PRECISION_SHIFT - ilog2(nphases);
+ int offset = 0, xwrite_pos = 0, nr_rds = 0, nr_rds_clck;
+ bool output_write_en, get_new_pix;
+ u64 phaseH;
+ u32 array_idx = 0;
+
+ loop_width = max_t(u32, width_in, width_out);
+ loop_width = ALIGN(loop_width + nppc - 1, nppc);
+
+ memset(xvpss_g->H_phases, 0, sizeof(xvpss_g->H_phases));
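+ /*
+ * Walk the output line in fixed point: "offset" accumulates pixel_rate
+ * for every pixel written out and, whenever it crosses one full input
+ * step (1 << XVPSS_STEP_PRECISION_SHIFT), a new input pixel is fetched.
+ * Phase, input array index and write-enable are packed per pixel into
+ * H_phases[] for the H-scaler.
+ */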
+ for (x = 0; x < loop_width; x++) {
+ nr_rds_clck = 0;
+ for (s = 0; s < nppc; s++) {
+ phaseH = (offset >> shift) & (nphases - 1);
+ get_new_pix = false;
+ output_write_en = false;
+ if ((offset >> XVPSS_STEP_PRECISION_SHIFT) != 0) {
+ get_new_pix = true;
+ offset -= (1 << XVPSS_STEP_PRECISION_SHIFT);
+ array_idx++;
+ }
+
+ if (((offset >> XVPSS_STEP_PRECISION_SHIFT) == 0) &&
+ xwrite_pos < width_out) {
+ offset += pixel_rate;
+ output_write_en = true;
+ xwrite_pos++;
+ }
+
+ xvpss_g->H_phases[x] |= (phaseH <<
+ (s * XVPSS_PHASE_MULTIPLIER));
+ xvpss_g->H_phases[x] |= (array_idx <<
+ (XVPSS_PHASE_SHIFT_BY_6 +
+ (s * XVPSS_PHASE_MULTIPLIER)));
+ if (output_write_en) {
+ xvpss_g->H_phases[x] |= (XVPSS_PHASESH_WR_EN <<
+ (s * XVPSS_PHASE_MULTIPLIER));
+ }
+
+ if (get_new_pix)
+ nr_rds_clck++;
+ }
+ if (array_idx >= nppc)
+ array_idx &= (nppc - 1);
+
+ nr_rds += nr_rds_clck;
+ if (nr_rds >= nppc)
+ nr_rds -= nppc;
+ }
+}
+
+static void xvpss_h_set_phases(struct xvpss_struct *xvpss_g)
+{
+ u32 loop_width, index, val, offset, i, lsb, msb;
+
+ loop_width = XVPSS_MAX_WIDTH / xvpss_g->ppc;
+ offset = XHSCALER_OFFSET + XVPSS_H_PHASESH_V_BASE;
+
+ switch (xvpss_g->ppc) {
+ case XVPSS_PPC_1:
+ index = 0;
+ for (i = 0; i < loop_width; i += 2) {
+ lsb = xvpss_g->H_phases[i] & XVPSS_MASK_LOW_16BITS;
+ msb = xvpss_g->H_phases[i + 1] & XVPSS_MASK_LOW_16BITS;
+ val = (msb << 16 | lsb);
+ xvpss_iow(xvpss_g->regs, offset +
+ (index * 4), val);
+ ++index;
+ }
+ return;
+ case XVPSS_PPC_2:
+ for (i = 0; i < loop_width; i++) {
+ val = (xvpss_g->H_phases[i] & XVPSS_MASK_LOW_32BITS);
+ xvpss_iow(xvpss_g->regs, offset + (i * 4), val);
+ }
+ return;
+ }
+}
+
+static void xvpss_algo_config(struct xvpss_struct *xvpss_g,
+ struct xvpss_data data)
+{
+ u32 pxl_rate, line_rate;
+ u32 width_in = data.width_in;
+ u32 width_out = data.width_out;
+ u32 height_in = data.height_in;
+ u32 height_out = data.height_out;
+
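+ /*
+ * Scaling ratios are computed in fixed point with XVPSS_STEPPREC
+ * precision: line_rate programs the V-scaler, pixel_rate the H-scaler.
+ */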
+ line_rate = (height_in * XVPSS_STEPPREC) / height_out;
+
+ if (xvpss_g->is_polyphase) {
+ xvpss_select_coeff(xvpss_g);
+ xvpss_set_coeff(xvpss_g);
+ }
+ xvpss_iow(xvpss_g->regs, XVSCALER_OFFSET + XVPSS_V_LINERATE, line_rate);
+ pxl_rate = (width_in * XVPSS_STEPPREC) / width_out;
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_PIXELRATE, pxl_rate);
+
+ xvpss_h_calculate_phases(xvpss_g, width_in, width_out, pxl_rate);
+ xvpss_h_set_phases(xvpss_g);
+}
+
+static long xvpss_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long retval = 0;
+ struct xvpss_data data;
+ struct xvpss_struct *xvpss_g = to_xvpss_struct(file);
+ u32 hcol;
+
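+ /*
+ * XVPSS_SET_CONFIGURE reprograms both scalers from the user-supplied
+ * geometry; SET_ENABLE/SET_DISABLE start and stop streaming.
+ */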
+ switch (cmd) {
+ case XVPSS_SET_CONFIGURE:
+ if (copy_from_user(&data, (char __user *)arg, sizeof(data))) {
+ pr_err("Copy from user failed\n");
+ retval = -EFAULT;
+ goto end;
+ }
+ xvpss_reset(xvpss_g);
+ xvpss_set_input(xvpss_g, data.width_in, data.height_in,
+ data.color_in);
+ hcol = data.color_in;
+ if (hcol == XVPSS_YUV_420)
+ hcol = XVPSS_YUV_422;
+ xvpss_iow(xvpss_g->regs, XHSCALER_OFFSET + XVPSS_H_COLOR, hcol);
+ xvpss_set_output(xvpss_g, data.width_out, data.height_out,
+ data.color_out);
+ xvpss_algo_config(xvpss_g, data);
+ break;
+ case XVPSS_SET_ENABLE:
+ xvpss_enable(xvpss_g);
+ break;
+ case XVPSS_SET_DISABLE:
+ xvpss_disable(xvpss_g);
+ break;
+ default:
+ retval = -EINVAL;
+ }
+end:
+ return retval;
+}
+
+static const struct file_operations xvpss_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = xvpss_ioctl,
+};
+
+static int xvpss_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *res;
+ struct xvpss_struct *xvpss_g;
+ struct device_node *node;
+
+ xvpss_g = devm_kzalloc(&pdev->dev, sizeof(*xvpss_g), GFP_KERNEL);
+ if (!xvpss_g)
+ return -ENOMEM;
+
+ xvpss_g->reset_gpio = devm_gpiod_get(&pdev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(xvpss_g->reset_gpio)) {
+ ret = PTR_ERR(xvpss_g->reset_gpio);
+ if (ret == -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "No gpio probed, Deferring...\n");
+ else
+ dev_err(&pdev->dev, "No reset gpio info from dts\n");
+ return ret;
+ }
+ gpiod_set_value_cansleep(xvpss_g->reset_gpio, 0);
+
+ platform_set_drvdata(pdev, xvpss_g);
+ xvpss_g->dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xvpss_g->regs = devm_ioremap_resource(xvpss_g->dev, res);
+ if (IS_ERR(xvpss_g->regs))
+ return PTR_ERR(xvpss_g->regs);
+
+ node = pdev->dev.of_node;
+ ret = of_property_read_u32(node, "xlnx,vpss-taps", &xvpss_g->n_taps);
+ if (ret < 0) {
+ dev_err(xvpss_g->dev, "taps not present in DT\n");
+ return ret;
+ }
+
+ switch (xvpss_g->n_taps) {
+ case 2:
+ case 4:
+ break;
+ case 6:
+ xvpss_g->is_polyphase = true;
+ break;
+ default:
+ dev_err(xvpss_g->dev, "taps value not supported\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(node, "xlnx,vpss-ppc", &xvpss_g->ppc);
+ if (ret < 0) {
+ dev_err(xvpss_g->dev, "PPC is missing in DT\n");
+ return ret;
+ }
+ if (xvpss_g->ppc != XVPSS_PPC_1 && xvpss_g->ppc != XVPSS_PPC_2) {
+ dev_err(xvpss_g->dev, "Unsupported ppc: %d", xvpss_g->ppc);
+ return -EINVAL;
+ }
+
+ xvpss_g->xvpss_miscdev.minor = MISC_DYNAMIC_MINOR;
+ xvpss_g->xvpss_miscdev.name = "xvpss";
+ xvpss_g->xvpss_miscdev.fops = &xvpss_fops;
+ ret = misc_register(&xvpss_g->xvpss_miscdev);
+ if (ret < 0) {
+ pr_err("Xilinx VPSS registration failed!\n");
+ return ret;
+ }
+
+ dev_info(xvpss_g->dev, "Xlnx VPSS control driver initialized!\n");
+
+ return ret;
+}
+
+static int xvpss_remove(struct platform_device *pdev)
+{
+ struct xvpss_struct *xvpss_g = platform_get_drvdata(pdev);
+
+ misc_deregister(&xvpss_g->xvpss_miscdev);
+ return 0;
+}
+
+static struct platform_driver xvpss_driver = {
+ .probe = xvpss_probe,
+ .remove = xvpss_remove,
+ .driver = {
+ .name = "xlnx_vpss",
+ .of_match_table = xvpss_of_match,
+ },
+};
+
+module_platform_driver(xvpss_driver);
+
+MODULE_DESCRIPTION("Xilinx VPSS control driver");
+MODULE_AUTHOR("Saurabh Sengar");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnx_ernic/Kconfig b/drivers/staging/xlnx_ernic/Kconfig
new file mode 100644
index 000000000000..2d83fea0f3b9
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/Kconfig
@@ -0,0 +1,4 @@
+config ERNIC
+ tristate "Xilinx ERNIC driver"
+ help
+ Driver for the Xilinx Embedded Remote DMA (RDMA) Enabled NIC.
diff --git a/drivers/staging/xlnx_ernic/MAINTAINERS b/drivers/staging/xlnx_ernic/MAINTAINERS
new file mode 100644
index 000000000000..0355f5d3320f
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX EMBEDDED REMOTE DMA ENABLED NIC
+M: Sandeep Dhanvada <sandeep.dhanvada@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_ernic
diff --git a/drivers/staging/xlnx_ernic/Makefile b/drivers/staging/xlnx_ernic/Makefile
new file mode 100644
index 000000000000..564933fa42d7
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/Makefile
@@ -0,0 +1,7 @@
+#TODO: Need to remove these flags and fix compilation warnings.
+ccflags-y := -Wno-incompatible-pointer-types -Wno-packed-bitfield-compat
+
+obj-m += xernic.o
+obj-m += xernic_bw_test.o
+
+xernic-objs := xmain.o xcm.o xqp.o xmr.o
diff --git a/drivers/staging/xlnx_ernic/dt-binding.txt b/drivers/staging/xlnx_ernic/dt-binding.txt
new file mode 100644
index 000000000000..2a9d098125b7
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/dt-binding.txt
@@ -0,0 +1,29 @@
+Xilinx Embedded RDMA NIC (ERNIC)
+--------------------------------
+
+The Xilinx Embedded Remote DMA (RDMA) NIC is an implementation of
+RDMA over Converged Ethernet (RoCEv2) enabled NIC functionality.
+
+Features supported by ERNIC:
+1. Both IPv4 and IPv6.
+2. 100 Gb/s data path.
+3. Incoming and outgoing RDMA READ, RDMA WRITE and RDMA SEND.
+
+Required properties:
+- compatible : Must contain "xlnx,ernic-1.0".
+- interrupts: Contains the interrupt line numbers.
+- reg: Physical base address and length of the registers set for the device.
+
+ernic_0: ernic@84000000 {
+ compatible = "xlnx,ernic-1.0";
+ interrupts = <4 2
+ 5 2
+ 6 2
+ 7 2
+ 8 2
+ 9 2
+ 10 2
+ 11 2
+ 12 2>;
+ reg = <0x84000000 0x40000>;
+};
diff --git a/drivers/staging/xlnx_ernic/xcm.c b/drivers/staging/xlnx_ernic/xcm.c
new file mode 100644
index 000000000000..64d102e540b4
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcm.c
@@ -0,0 +1,1962 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+
+unsigned int psn_num;
+unsigned int mad_tid = 0x11223344;
+/*****************************************************************************/
+
+/**
+ * xrnic_cm_prepare_mra() - Prepares Message Receipt Acknowledgment packet
+ * @qp_attr: qp info for which mra packet is prepared
+ * @msg : message being MRAed. 0x0- REQ, 0x1-REP, 0x2-LAP
+ * @rq_buf: Buffer to store the message
+ */
+static void xrnic_cm_prepare_mra(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_msg_mra msg, void *rq_buf)
+{
+ struct mra *mra;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ mra = (struct mra *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(MSG_RSP_ACK);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ mra = (struct mra *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(MSG_RSP_ACK);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ mra->local_cm_id = qp_attr->local_cm_id;
+ mra->remote_comm_id = qp_attr->remote_cm_id;
+ pr_info("[%d %s] remote_comm_id 0%x\n", __LINE__, __func__,
+ mra->remote_comm_id);
+ mra->message_mraed = msg;
+ mra->service_timeout = XRNIC_MRA_SERVICE_TIMEOUT;
+ /* 4.096 us * 2 service timeout */
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_rep() - Prepares Reply packet
+ * @qp_attr: qp info for which reply packet is prepared
+ * @rq_buf: Buffer to store the data indicating the acceptance
+ */
+static void xrnic_cm_prepare_rep(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct rdma_qp_attr *rdma_qp_attr = (struct rdma_qp_attr *)
+ &((struct xrnic_reg_map *)xrnic_dev->xrnic_mmap.xrnic_regs)
+ ->rdma_qp_attr[qp_attr->qp_num - 2];
+ struct ethhdr_t *eth_hdr;
+ struct ipv4hdr *ipv4 = NULL;
+ struct ipv6hdr *ipv6 = NULL;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4 = NULL;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6 = NULL;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+ struct rep *rep;
+ struct req *req;
+ unsigned short temp;
+ unsigned char rq_opcode;
+ unsigned int config_value, start_psn_value;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ rep = (struct rep *)&send_sgl_temp_ipv4->mad.data;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv4);
+ ipv4 = (struct ipv4hdr *)
+ ((char *)recv_qp_pkt_ipv4 + XRNIC_ETH_HLEN);
+ req = (struct req *)&recv_qp_pkt_ipv4->mad.data;
+ temp = htons(CONNECT_REPLY);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ rep = (struct rep *)&send_sgl_temp_ipv6->mad.data;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv6);
+ ipv6 = (struct ipv6hdr *)
+ ((char *)recv_qp_pkt_ipv6 + XRNIC_ETH_HLEN);
+ req = (struct req *)&recv_qp_pkt_ipv6->mad.data;
+ temp = htons(CONNECT_REPLY);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ DEBUG_LOG("qp_num:%x\n", qp_attr->qp_num);
+
+ rep->local_cm_id = qp_attr->local_cm_id;
+ rep->remote_comm_id = qp_attr->remote_cm_id;
+
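+ /* The 24-bit local QPN is byte-swapped into on-wire order for the REP. */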
+ rep->local_qpn = ((qp_attr->qp_num >> 16) & 0xFF) |
+ (((qp_attr->qp_num >> 8) & 0xFF) << 8) |
+ ((qp_attr->qp_num & 0xFF) << 16);
+ DEBUG_LOG("local_qpn %d qp_num %d\n",
+ rep->local_qpn, qp_attr->qp_num);
+
+ memcpy((void *)rep->private_data,
+ (void *)&cm_id->conn_param.private_data,
+ cm_id->conn_param.private_data_len);
+
+ DEBUG_LOG("cm_id->conn_param.private_data_len %d\n",
+ cm_id->conn_param.private_data_len);
+ DEBUG_LOG("cm_id->conn_param.responder_resources %d\n",
+ cm_id->conn_param.responder_resources);
+ DEBUG_LOG("cm_id->conn_param.initiator_depth %d\n",
+ cm_id->conn_param.initiator_depth);
+ DEBUG_LOG("cm_id->conn_param.flow_control %d\n",
+ cm_id->conn_param.flow_control);
+ DEBUG_LOG("cm_id->conn_param.retry_count %d\n",
+ cm_id->conn_param.retry_count);
+ DEBUG_LOG("cm_id->conn_param.rnr_retry_count %d\n",
+ cm_id->conn_param.rnr_retry_count);
+
+ /* Initiator depth is not required for the target. */
+ rep->initiator_depth = cm_id->conn_param.initiator_depth;
+ rep->responder_resources = cm_id->conn_param.responder_resources;
+ rep->end_end_flow_control = cm_id->conn_param.flow_control;
+ rep->rnr_retry_count = cm_id->conn_param.rnr_retry_count;
+ rep->target_ack_delay = XRNIC_REP_TARGET_ACK_DELAY;
+ rep->fail_over_accepted = XRNIC_REP_FAIL_OVER_ACCEPTED;
+
+ DEBUG_LOG("req->initiator_depth %x\n", rep->initiator_depth);
+ DEBUG_LOG("rep->responder_resources %x\n", rep->responder_resources);
+
+ rep->sqr = XRNIC_REQ_SRQ;
+ rep->local_ca_guid[0] = 0x7c;
+ rep->local_ca_guid[1] = 0xfe;
+ rep->local_ca_guid[2] = 0x90;
+ rep->local_ca_guid[3] = 0x03;
+ rep->local_ca_guid[4] = 0x00;
+ rep->local_ca_guid[5] = 0xb8;
+ rep->local_ca_guid[6] = 0x57;
+ rep->local_ca_guid[7] = 0x70;
+
+ qp_attr->remote_qpn = req->local_qpn;
+
+ DEBUG_LOG("local_qpn [0x%x] [%d]\n", req->local_qpn,
+ ntohl(req->local_qpn));
+ config_value = ((req->local_qpn & 0xFF) << 16)
+ | (((req->local_qpn >> 8) & 0xFF) << 8)
+ | ((req->local_qpn >> 16) & 0xFF);
+
+ pr_info("config_value:%d req->local_qpn %d qp_attr->remote_qpn %d\n",
+ config_value, req->local_qpn, qp_attr->remote_qpn);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->dest_qp_conf)));
+
+ /* Set the MAC address */
+ config_value = eth_hdr->h_source[5] | (eth_hdr->h_source[4] << 8) |
+ (eth_hdr->h_source[3] << 16) |
+ (eth_hdr->h_source[2] << 24);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->mac_dest_addr_lsb)));
+ DEBUG_LOG("mac_xrnic_src_addr_lsb->0x%x\n", config_value);
+
+ config_value = eth_hdr->h_source[1] | (eth_hdr->h_source[0] << 8);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->mac_dest_addr_msb)));
+ DEBUG_LOG("mac_xrnic_src_addr_msb->0x%x\n", config_value);
+
+ config_value = 0;
+ DEBUG_LOG("req->start_psn:%x %x %x\n", req->start_psn[0],
+ req->start_psn[1], req->start_psn[2]);
+ config_value = (req->start_psn[2] | (req->start_psn[1] << 8) |
+ (req->start_psn[0] << 16));
+ DEBUG_LOG("req->start psn 0x%x\n", config_value);
+ start_psn_value = config_value;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_psn)));
+ memcpy(rep->start_psn, req->start_psn, 3);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ config_value = ipv4->src_addr;
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr1)));
+ config_value = ioread32((void *)&rdma_qp_attr->ip_dest_addr1);
+ DEBUG_LOG("read ipaddress:%x\n", config_value);
+ } else {
+ config_value = ipv6->saddr.in6_u.u6_addr32[3];
+ DEBUG_LOG("ipaddress1:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr1)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[2];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr2)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[1];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr3)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[0];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr4)));
+ config_value = ioread32((void *)&rdma_qp_attr->qp_conf);
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ DEBUG_LOG("read ipaddress:%x\n", config_value);
+ }
+ rq_opcode = XRNIC_RDMA_READ;
+ config_value = ((start_psn_value - 1) | (rq_opcode << 24));
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->last_rq_req)));
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_rej() - Prepares Reject packet
+ * @qp_attr: qp info for which reply packet is prepared
+ * @reason: reason for the rejection
+ * @msg: message whose contents caused the sender to reject communication
+ * 0x0-REQ, 0x1-REP, 0x2-No message
+ */
+void xrnic_cm_prepare_rej(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_rej_reason reason, enum xrnic_msg_rej msg)
+{
+ struct rej *rej;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ rej = (struct rej *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(CONNECT_REJECT);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ rej = (struct rej *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(CONNECT_REJECT);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ pr_info("Sending rej\n");
+
+ rej->local_cm_id = qp_attr->local_cm_id;
+ rej->remote_comm_id = qp_attr->remote_cm_id;
+ rej->message_rejected = msg;
+ rej->reason = htons(reason);
+ rej->reject_info_length = XRNIC_REJ_INFO_LEN;
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_prepare_initial_headers() - Retrieves information from the response
+ * @qp_attr: qp info on which the response is sent
+ * @rq_buf: receive queue buffer
+ */
+void xrnic_prepare_initial_headers(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct mad *mad;
+ unsigned char temp;
+ struct ethhdr_t *eth_hdr;
+ struct ipv4hdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct udphdr *udp;
+ struct bth *bthp;
+ struct deth *dethp;
+ unsigned short *ipv4_hdr_ptr;
+ unsigned int ipv4_hdr_chksum;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ int i;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv4);
+ ipv4 = (struct ipv4hdr *)
+ ((char *)recv_qp_pkt_ipv4 + XRNIC_ETH_HLEN);
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ /* In the ethernet header swap source and destination MAC */
+ memcpy(send_sgl_temp_ipv4->eth.h_source,
+ eth_hdr->h_dest, XRNIC_ETH_ALEN);
+ memcpy(send_sgl_temp_ipv4->eth.h_dest,
+ eth_hdr->h_source, XRNIC_ETH_ALEN);
+ /* Copy the ethernet type field */
+ send_sgl_temp_ipv4->eth.eth_type = eth_hdr->eth_type;
+
+ /* In the IP header swap source IP and destination IP */
+ memcpy(&send_sgl_temp_ipv4->ipv4, ipv4,
+ sizeof(struct ipv4hdr));
+ send_sgl_temp_ipv4->ipv4.dest_addr = ipv4->src_addr;
+ send_sgl_temp_ipv4->ipv4.src_addr = ipv4->dest_addr;
+ ipv4->total_length = (sizeof(struct ipv4hdr) +
+ sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad)) + 4;
+ DEBUG_LOG("ipv4->total_length:%d\n", ipv4->total_length);
+ DEBUG_LOG("ipv4 length:%d\n", sizeof(struct ipv4hdr));
+ DEBUG_LOG("udp length:%d\n", sizeof(struct udphdr));
+ DEBUG_LOG("ethhdr length:%d\n", sizeof(struct ethhdr_t));
+ DEBUG_LOG("bth length:%d\n", sizeof(struct bth));
+ DEBUG_LOG("deth length:%d\n", sizeof(struct deth));
+
+ send_sgl_temp_ipv4->ipv4.total_length =
+ htons(ipv4->total_length);
+ send_sgl_temp_ipv4->ipv4.hdr_chksum = 0;
+ send_sgl_temp_ipv4->ipv4.id = ipv4->id;
+
+ ipv4_hdr_ptr = (unsigned short *)
+ (&send_sgl_temp_ipv4->ipv4);
+ ipv4_hdr_chksum = 0;
+
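+ /*
+ * Standard IPv4 header checksum: one's complement sum over the ten
+ * 16-bit words of the 20-byte header (checksum field zeroed above).
+ */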
+ for (i = 0; i < 10; i++) {
+ ipv4_hdr_chksum += *ipv4_hdr_ptr;
+ ipv4_hdr_ptr++;
+ }
+
+ ipv4_hdr_chksum = ~((ipv4_hdr_chksum & 0x0000FFFF) +
+ (ipv4_hdr_chksum >> 16));
+ send_sgl_temp_ipv4->ipv4.hdr_chksum = ipv4_hdr_chksum;
+ DEBUG_LOG("check sum :%x\n", ipv4_hdr_chksum);
+ udp = (struct udphdr *)((char *)recv_qp_pkt_ipv4 +
+ XRNIC_ETH_HLEN + sizeof(struct ipv4hdr));
+ /* Copy the UDP packets and update length field */
+ send_sgl_temp_ipv4->udp.source = udp->source;
+ send_sgl_temp_ipv4->udp.dest = udp->dest;
+ udp->len = sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad) +
+ XRNIC_ICRC_SIZE;
+ DEBUG_LOG("udp total_length:%x\n", udp->len);
+ DEBUG_LOG("mad size:%d\n", sizeof(struct mad));
+ send_sgl_temp_ipv4->udp.len = htons(udp->len);
+ udp->check = 0;
+ send_sgl_temp_ipv4->udp.check = htons(udp->check);
+
+ /* Base Transport header settings */
+ bthp = (struct bth *)((char *)udp + sizeof(struct udphdr));
+
+ /* Fill bth fields */
+ send_sgl_temp_ipv4->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ send_sgl_temp_ipv4->bth.solicited_event =
+ XRNIC_SET_SOLICT_EVENT;
+ send_sgl_temp_ipv4->bth.migration_req =
+ XRNIC_MIGRATION_REQ;
+ send_sgl_temp_ipv4->bth.pad_count = XRNIC_PAD_COUNT;
+ send_sgl_temp_ipv4->bth.transport_hdr_ver =
+ XRNIC_TRANSPORT_HDR_VER;
+ DEBUG_LOG("bth transport hdr ver:%x\n",
+ bthp->transport_hdr_ver);
+ send_sgl_temp_ipv4->bth.transport_hdr_ver =
+ bthp->transport_hdr_ver;
+ send_sgl_temp_ipv4->bth.destination_qp[0] = 0;
+ send_sgl_temp_ipv4->bth.destination_qp[1] = 0;
+ send_sgl_temp_ipv4->bth.destination_qp[2] =
+ XRNIC_DESTINATION_QP;
+ send_sgl_temp_ipv4->bth.reserved1 = XRNIC_RESERVED1;
+ send_sgl_temp_ipv4->bth.ack_request = XRNIC_ACK_REQ;
+ send_sgl_temp_ipv4->bth.reserved2 = XRNIC_RESERVED2;
+ send_sgl_temp_ipv4->bth.pkt_seq_num = 1;
+ send_sgl_temp_ipv4->bth.partition_key = 65535;
+
+ /* DETH settings */
+ dethp = (struct deth *)((char *)bthp + sizeof(struct bth));
+ send_sgl_temp_ipv4->deth.q_key = dethp->q_key;
+ send_sgl_temp_ipv4->deth.reserved = XRNIC_DETH_RESERVED;
+ send_sgl_temp_ipv4->deth.src_qp = dethp->src_qp;
+
+ /* MAD settings */
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ send_sgl_temp_ipv4->mad.base_ver = XRNIC_MAD_BASE_VER;
+ send_sgl_temp_ipv4->mad.class_version = 2;
+ DEBUG_LOG("class:%x\n", send_sgl_temp_ipv4->mad.class_version);
+ send_sgl_temp_ipv4->mad.mgmt_class = XRNIC_MAD_MGMT_CLASS;
+ temp = (XRNIC_MAD_RESP_BIT << 7) | XRNIC_MAD_COMM_SEND;
+ send_sgl_temp_ipv4->mad.resp_bit_method = temp;
+ DEBUG_LOG("mad method:%x\n",
+ send_sgl_temp_ipv4->mad.resp_bit_method);
+ send_sgl_temp_ipv4->mad.reserved = XRNIC_MAD_RESERVED;
+ send_sgl_temp_ipv4->mad.transaction_id = mad->transaction_id;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv6);
+ ipv6 = (struct ipv6hdr *)
+ ((char *)recv_qp_pkt_ipv6 + XRNIC_ETH_HLEN);
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ /* In the ethernet header swap source and destination MAC */
+ memcpy(send_sgl_temp_ipv6->eth.h_source,
+ eth_hdr->h_dest, XRNIC_ETH_ALEN);
+ memcpy(send_sgl_temp_ipv6->eth.h_dest,
+ eth_hdr->h_source, XRNIC_ETH_ALEN);
+ send_sgl_temp_ipv6->eth.eth_type = eth_hdr->eth_type;
+ memcpy(&send_sgl_temp_ipv6->ipv6, ipv6,
+ sizeof(struct ipv6hdr));
+ /* In the IP header swap source IP and destination IP */
+ memcpy(&send_sgl_temp_ipv6->ipv6.daddr, &ipv6->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&send_sgl_temp_ipv6->ipv6.saddr, &ipv6->daddr,
+ sizeof(struct in6_addr));
+ udp = (struct udphdr *)((char *)recv_qp_pkt_ipv6 +
+ XRNIC_ETH_HLEN + sizeof(struct ipv6hdr));
+ /* Copy the UDP packets and update length field */
+ send_sgl_temp_ipv6->udp.source = udp->source;
+ send_sgl_temp_ipv6->udp.dest = udp->dest;
+ udp->len = sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad) +
+ XRNIC_ICRC_SIZE;
+ DEBUG_LOG("udp total_length:%x\n", udp->len);
+ DEBUG_LOG("mad size:%d\n", sizeof(struct mad));
+ send_sgl_temp_ipv6->udp.len = htons(udp->len);
+ udp->check = 0;
+ send_sgl_temp_ipv6->udp.check = htons(udp->check);
+
+ /* Base Transport header settings */
+ bthp = (struct bth *)((char *)udp + sizeof(struct udphdr));
+
+ /* Fill bth fields */
+ send_sgl_temp_ipv6->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ send_sgl_temp_ipv6->bth.solicited_event =
+ XRNIC_SET_SOLICT_EVENT;
+ send_sgl_temp_ipv6->bth.migration_req = XRNIC_MIGRATION_REQ;
+ send_sgl_temp_ipv6->bth.pad_count = XRNIC_PAD_COUNT;
+ send_sgl_temp_ipv6->bth.transport_hdr_ver =
+ XRNIC_TRANSPORT_HDR_VER;
+ DEBUG_LOG("bth transport_hdr_ver:%x\n",
+ bthp->transport_hdr_ver);
+ send_sgl_temp_ipv6->bth.transport_hdr_ver =
+ bthp->transport_hdr_ver;
+ send_sgl_temp_ipv6->bth.destination_qp[0] = 0;
+ send_sgl_temp_ipv6->bth.destination_qp[1] = 0;
+ send_sgl_temp_ipv6->bth.destination_qp[2] =
+ XRNIC_DESTINATION_QP;
+ send_sgl_temp_ipv6->bth.reserved1 = XRNIC_RESERVED1;
+ send_sgl_temp_ipv6->bth.ack_request = XRNIC_ACK_REQ;
+ send_sgl_temp_ipv6->bth.reserved2 = XRNIC_RESERVED2;
+ send_sgl_temp_ipv6->bth.pkt_seq_num = 1;
+ send_sgl_temp_ipv6->bth.partition_key = 65535;
+
+ /* DETH settings */
+ dethp = (struct deth *)((char *)bthp + sizeof(struct bth));
+ send_sgl_temp_ipv6->deth.q_key = dethp->q_key;
+ send_sgl_temp_ipv6->deth.reserved = XRNIC_DETH_RESERVED;
+ send_sgl_temp_ipv6->deth.src_qp = dethp->src_qp;
+
+ /* MAD settings */
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ send_sgl_temp_ipv6->mad.base_ver = XRNIC_MAD_BASE_VER;
+ send_sgl_temp_ipv6->mad.class_version = 2;
+ DEBUG_LOG("class:%x\n", send_sgl_temp_ipv6->mad.class_version);
+ send_sgl_temp_ipv6->mad.mgmt_class = XRNIC_MAD_MGMT_CLASS;
+ temp = (XRNIC_MAD_RESP_BIT << 7) | XRNIC_MAD_COMM_SEND;
+ send_sgl_temp_ipv6->mad.resp_bit_method = temp;
+ DEBUG_LOG("mad method:%x\n",
+ send_sgl_temp_ipv6->mad.resp_bit_method);
+ send_sgl_temp_ipv6->mad.reserved = XRNIC_MAD_RESERVED;
+ send_sgl_temp_ipv6->mad.transaction_id = mad->transaction_id;
+ }
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_dreq() - Prepares Disconnection Request Packet
+ * @qp_attr: qp info to be released
+ */
+static void xrnic_cm_prepare_dreq(struct xrnic_qp_attr *qp_attr)
+{
+ struct dreq *dreq;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ dreq = (struct dreq *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(DISCONNECT_REQUEST);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ dreq = (struct dreq *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(DISCONNECT_REQUEST);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ dreq->local_cm_id = qp_attr->local_cm_id;
+ dreq->remote_comm_id = qp_attr->remote_cm_id;
+ dreq->remote_qpn_eecn = qp_attr->remote_qpn;
+
+ DEBUG_LOG("Exiting %s %d %d\n",
+ __func__, qp_attr->remote_qpn, dreq->remote_qpn_eecn);
+}
+
+/**
+ * xrnic_cm_disconnect_send_handler() - Sends Disconnection Request and frees
+ * all the attributes related to the qp
+ * @qp_attr: qp info to be released by dreq
+ */
+void xrnic_cm_disconnect_send_handler(struct xrnic_qp_attr *qp_attr)
+{
+ int qp1_send_pkt_size;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+
+ xrnic_cm_prepare_dreq(qp_attr);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_DREQ_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_drep() - Prepares disconnect reply packet
+ * @qp_attr: qp info for which drep packet is prepared
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_prepare_drep(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct drep *drep;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Enteing %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ drep = (struct drep *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(DISCONNECT_REPLY);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ drep = (struct drep *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(DISCONNECT_REPLY);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ drep->local_cm_id = qp_attr->local_cm_id;
+ drep->remote_comm_id = qp_attr->remote_cm_id;
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_disconnect_request_handler() - Handles Disconnection Request.
+ * @qp_attr: qp info on which the reply is to be sent
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_disconnect_request_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ int qp1_send_pkt_size;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s qp_num %d\n", __func__, qp_attr->qp_num);
+ if (qp_attr->cm_id) {
+ DEBUG_LOG("cm id is not clean qp_num %d\n", qp_attr->qp_num);
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_DREQ_RCVD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("CM ID is NULL\n");
+ }
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ qp_attr->curr_state = XRNIC_DREQ_RCVD;
+ xrnic_cm_prepare_drep(qp_attr, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->resend_count = 0;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_disconnect_reply_handler() - Handles disconnect reply packets.
+ * @qp_attr: qp info of which qp to be destroyed
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_disconnect_reply_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->curr_state = XRNIC_DREQ_RCVD;
+ /* Callback to nvmeof. */
+
+ /* TBD: Need to change state while handling with the timer. */
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->resend_count = 0;
+
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_connect_reject_handler() - Handles connect reject packets.
+ * @qp_attr: qp info
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_connect_reject_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct rej *rej;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ rej = (struct rej *)&mad->data;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ rej = (struct rej *)&mad->data;
+ }
+
+ if (rej->message_rejected == XRNIC_REJ_REP ||
+ rej->message_rejected == XRNIC_REJ_REQ ||
+ rej->message_rejected == XRNIC_REJ_OTHERS) {
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+ qp_attr->cm_id = NULL;
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_REJ_RECV;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ }
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_msg_rsp_ack_handler() - Handles message response packets.
+ * @qp_attr: qp info
+ * @rq_buf: receive queue buffer
+ */
+void xrnic_cm_msg_rsp_ack_handler(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct mra *mra;
+
+ DEBUG_LOG("Enter ing %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ mra = (struct mra *)&mad->data;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ mra = (struct mra *)&mad->data;
+ }
+
+ if (mra->message_mraed == XRNIC_MRA_REP) {
+ qp_attr->curr_state = XRNIC_MRA_RCVD;
+ qp_attr->resend_count = 0;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_connect_rep_handler() - handles connect reply packets
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_connect_rep_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REP_RCVD;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_REP_RCVD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ pr_info("Connection Established Local QPn=%#x\n", qp_attr->qp_num);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_ready_to_use_handler() - handles ready to use packets
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_ready_to_use_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_ESTABLISHD;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_ESTABLISHD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ pr_info("Connection Established Local QPn=%x\n", qp_attr->qp_num);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_create_child_cm() - creates child cm.
+ * @cm_id_info : to update child cm info after creation
+ */
+static void xrnic_create_child_cm(struct xrnic_rdma_cm_id_info *cm_id_info)
+{
+ struct xrnic_rdma_cm_id *ch_cm;
+
+ ch_cm = kzalloc(sizeof(*ch_cm), GFP_ATOMIC);
+ cm_id_info->child_cm_id = ch_cm;
+}
+
+/**
+ * xrnic_cm_connect_request_handler() - handles connect request packets.
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_connect_request_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4 = NULL;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6 = NULL;
+ struct mad *mad = NULL;
+ struct req *req = NULL;
+ int qp1_send_pkt_size, child_qp_num, status;
+ enum xrnic_rej_reason reason = XRNIC_REJ_CONSUMER_REJECT;
+ enum xrnic_msg_rej msg_rej;
+ enum xrnic_msg_mra msg_mra;
+ u16 port_num;
+ void *temp;
+ struct xrnic_rdma_cm_id *child_cm_id;
+ struct xrnic_rdma_cm_id *parent_cm_id;
+ struct xrnic_rdma_cm_id_info *child_cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ }
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REQ_RCVD;
+
+ DEBUG_LOG("req-> local_cm_resp_tout:%x.\n", req->local_cm_resp_tout);
+ DEBUG_LOG("req-> path_packet_payload_mtu:%x.\n",
+ req->path_packet_payload_mtu);
+ if (req->remote_cm_resp_tout < XRNIC_REQ_REMOTE_CM_RESP_TOUT) {
+ pr_info("remote_cm_resp_tout:%x", req->remote_cm_resp_tout);
+
+ msg_mra = XRNIC_MRA_REQ;
+ xrnic_cm_prepare_mra(qp_attr, msg_mra, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+ qp_attr->curr_state = XRNIC_MRA_SENT;
+ }
+
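+ /*
+ * The consumer port number is carried in the low two bytes of the CM
+ * REQ service ID; the private data handed to the consumer starts 36
+ * bytes in, past the CMA header at the front of the REQ private data.
+ */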
+ temp = (char *)&req->private_data;
+ temp += 36;
+ port_num = htons(req->service_id[6] | req->service_id[7] << 8);
+ DEBUG_LOG("req-> service_id[0]:%x.\n", req->service_id[0]);
+ DEBUG_LOG("req-> service_id[1]:%x.\n", req->service_id[1]);
+ DEBUG_LOG("req-> service_id[2]:%x.\n", req->service_id[2]);
+ DEBUG_LOG("req-> service_id[3]:%x.\n", req->service_id[3]);
+ DEBUG_LOG("req-> service_id[4]:%x.\n", req->service_id[4]);
+ DEBUG_LOG("req-> service_id[5]:%x.\n", req->service_id[5]);
+ DEBUG_LOG("req-> service_id[6]:%x.\n", req->service_id[6]);
+ DEBUG_LOG("req-> service_id[7]:%x.\n", req->service_id[7]);
+ DEBUG_LOG("req->port_num:%d,%x\n", port_num, port_num);
+
+ if (xrnic_dev->port_status[port_num - 1] == XRNIC_PORT_QP_FREE ||
+ port_num < 1 || port_num > XRNIC_MAX_PORT_SUPPORT) {
+ /* We need to validate that. */
+ pr_err("PORT number is not correct sending rej.\n");
+ reason = XRNIC_REJ_PRIM_LID_PORT_NOT_EXIST;
+ msg_rej = XRNIC_REJ_REQ;
+ goto send_rep_rej;
+ }
+
+ xrnic_create_child_cm(xrnic_dev->cm_id_info[port_num - 1]);
+ child_qp_num =
+ xrnic_dev->cm_id_info[port_num - 1]->parent_cm_id.child_qp_num++;
+ child_cm_id = xrnic_dev->cm_id_info[port_num - 1]->child_cm_id;
+ parent_cm_id = &xrnic_dev->cm_id_info[port_num - 1]->parent_cm_id;
+ child_cm_id->cm_id_info = xrnic_dev->cm_id_info[port_num - 1];
+ child_cm_id->cm_context = parent_cm_id->cm_context;
+ child_cm_id->ps = parent_cm_id->ps;
+ child_cm_id->xrnic_cm_handler = parent_cm_id->xrnic_cm_handler;
+ child_cm_id->local_cm_id = qp_attr->local_cm_id;
+ child_cm_id->port_num = port_num;
+ child_cm_id->child_qp_num = child_qp_num + 1;
+ child_cm_id->qp_info.qp_num = qp_attr->qp_num;
+ child_cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ child_cm_id_info = child_cm_id->cm_id_info;
+ child_cm_id_info->conn_event_info.cm_event = XRNIC_REQ_RCVD;
+ child_cm_id_info->conn_event_info.status = 0;
+ child_cm_id_info->conn_event_info.private_data = (void *)temp;
+ child_cm_id_info->conn_event_info.private_data_len = 32;
+ list_add_tail(&child_cm_id->list, &cm_id_list);
+ status = parent_cm_id->xrnic_cm_handler(child_cm_id,
+ &child_cm_id_info->conn_event_info);
+ if (status) {
+ pr_err("xrnic_cm_handler failed sending rej.\n");
+ reason = XRNIC_REJ_CONSUMER_REJECT;
+ msg_rej = XRNIC_REJ_REQ;
+ goto send_rep_rej;
+ }
+
+ qp_attr->remote_cm_id = req->local_cm_id;
+ qp_attr->cm_id = child_cm_id;
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ qp_attr->ipv4_addr = recv_qp_pkt_ipv4->ipv4.src_addr;
+ memcpy(&qp_attr->mac_addr,
+ &recv_qp_pkt_ipv4->eth.h_source, XRNIC_ETH_ALEN);
+ qp_attr->source_qp_num = recv_qp_pkt_ipv4->deth.src_qp;
+ } else {
+ memcpy(&qp_attr->ipv6_addr,
+ &recv_qp_pkt_ipv6->ipv6.saddr,
+ sizeof(struct in6_addr));
+ memcpy(&qp_attr->mac_addr,
+ &recv_qp_pkt_ipv6->eth.h_source, XRNIC_ETH_ALEN);
+ qp_attr->source_qp_num = recv_qp_pkt_ipv6->deth.src_qp;
+ }
+
+ xrnic_cm_prepare_rep(qp_attr, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REP_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+ return;
+send_rep_rej:
+
+ qp_attr->remote_cm_id = req->local_cm_id;
+
+ xrnic_cm_prepare_rej(qp_attr, reason, msg_rej);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REJ_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s with reject reason [%d]\n", __func__, reason);
+}
+
+/**
+ * fill_cm_rtu_data() - Fills rtu data to send rtu packet.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ * @return: send_sgl_qp1 data pointer
+ */
+static char *fill_cm_rtu_data(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct cma_rtu *rtu_data;
+
+ SET_CM_HDR(send_sgl_qp1);
+ rtu_data = (struct cma_rtu *)send_sgl_qp1;
+ memset(rtu_data, 0, sizeof(*rtu_data));
+ rtu_data->local_comm_id = cm_id->local_cm_id;
+ rtu_data->remote_comm_id = cm_id->remote_cm_id;
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_cm_req_data() - Fills request data to send in request packet.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ * @return: send_sgl_qp1 data pointer
+ */
+static char *fill_cm_req_data(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct ernic_cm_req *cm_req;
+ struct cma_hdr data;
+ int val;
+ int sgid, dgid;
+ unsigned int psn;
+ struct sockaddr_in *sin4, *din4;
+
+ sin4 = (struct sockaddr_in *)&cm_id->route.s_addr;
+ din4 = (struct sockaddr_in *)&cm_id->route.d_addr;
+
+ SET_CM_HDR(send_sgl_qp1);
+ cm_req = (struct ernic_cm_req *)send_sgl_qp1;
+ memset(cm_req, 0, sizeof(*cm_req));
+
+ cm_req->local_comm_id = cpu_to_be32(cm_id->local_cm_id);
+ cm_req->service_id = cpu_to_be64((cm_id->ps << 16) |
+ be16_to_cpu(din4->sin_port));
+ ether_addr_copy(&cm_req->local_ca_guid, &cm_id->route.smac);
+ cm_req->local_qkey = 0;
+ cm_req->offset32 = cpu_to_be32((cm_id->local_cm_id << 8) |
+ cm_id->conn_param.responder_resources);
+ cm_req->offset36 = cpu_to_be32 (cm_id->conn_param.initiator_depth);
+
+ val = (XRNIC_REQ_LOCAL_CM_RESP_TOUT | (XRNIC_SVC_TYPE_UC << 5) |
+ (cm_id->conn_param.flow_control << 7));
+ cm_req->offset40 = cpu_to_be32(val);
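+ /* The starting PSN is a random 24-bit value packed into offset44. */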
+ get_random_bytes(&psn, sizeof(psn));
+ psn &= 0xFFFFFF;
+ val = ((psn << 8) | XRNIC_REQ_REMOTE_CM_RESP_TOUT |
+ (cm_id->conn_param.retry_count << 5));
+ cm_req->offset44 = cpu_to_be32(val);
+ cm_id->qp_info.starting_psn = psn;
+
+ cm_req->pkey = 0xFFFF;
+ cm_req->offset50 = ((1 << 4) |
+ (cm_id->conn_param.rnr_retry_count << 5));
+ cm_req->offset51 = (1 << 4);
+ cm_req->local_lid = cpu_to_be16(0xFFFF);
+ cm_req->remote_lid = cpu_to_be16(0xFFFF);
+ sgid = sin4->sin_addr.s_addr;
+ dgid = din4->sin_addr.s_addr;
+ val = cpu_to_be32(0xFFFF);
+ memcpy(cm_req->local_gid.raw + 8, &val, 4);
+ memcpy(cm_req->local_gid.raw + 12, &sgid, 4);
+ memcpy(cm_req->remote_gid.raw + 8, &val, 4);
+ memcpy(cm_req->remote_gid.raw + 12, &dgid, 4);
+ cm_req->offset88 = cpu_to_be32(1 << 2);
+ cm_req->traffic_class = 0;
+ cm_req->hop_limit = 0x40;
+ cm_req->offset94 = 0;
+ cm_req->offset95 = 0x18;
+
+ data.cma_version = CMA_VERSION;
+ data.ip_version = (4 << 4);
+ data.port = din4->sin_port;
+ data.src_addr.ip4.addr = sin4->sin_addr.s_addr;
+ data.dst_addr.ip4.addr = din4->sin_addr.s_addr;
+ memcpy(cm_req->private_data, &data, sizeof(data));
+
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_ipv4_cm_req() - fills cm request data for rdma connect.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ */
+void fill_ipv4_cm_req(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ send_sgl_qp1 = fill_ipv4_headers(cm_id, send_sgl_qp1, cm_req_size);
+ send_sgl_qp1 = fill_mad_common_header(cm_id, send_sgl_qp1,
+ cm_req_size, CM_REQ_ATTR_ID);
+ send_sgl_qp1 = fill_cm_req_data(cm_id, send_sgl_qp1, cm_req_size);
+}
+
+/**
+ * xrnic_cm_send_rtu() - Sends Ready to use packet.
+ * @cm_id : CM ID
+ * @cm_rep : IPV4 mad data
+ */
+static void xrnic_cm_send_rtu(struct xrnic_rdma_cm_id *cm_id,
+ struct rep *cm_rep)
+{
+ int cm_req_size;
+ char *send_sgl_qp1, *head;
+
+ cm_req_size = sizeof(struct ethhdr) + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + IB_BTH_BYTES + IB_DETH_BYTES +
+ sizeof(struct ib_mad_hdr) + sizeof(struct cma_rtu) +
+ EXTRA_PKT_LEN;
+
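+ /*
+ * Build the complete QP1 UD packet in one linear buffer: Ethernet +
+ * IPv4 + UDP + BTH + DETH + MAD header + RTU payload, with
+ * EXTRA_PKT_LEN of slack that is not transmitted.
+ */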
+ head = kmalloc(cm_req_size, GFP_ATOMIC);
+ if (!head)
+ return;
+ send_sgl_qp1 = head;
+ send_sgl_qp1 = fill_ipv4_headers(cm_id, send_sgl_qp1, cm_req_size);
+ send_sgl_qp1 = fill_mad_common_header(cm_id, send_sgl_qp1,
+ cm_req_size, CM_RTU_ATTR_ID);
+ send_sgl_qp1 = fill_cm_rtu_data(cm_id, send_sgl_qp1, cm_req_size);
+ xrnic_send_mad(head, cm_req_size - EXTRA_PKT_LEN);
+}
+
+/*
+ * xrnic_rdma_accept() - Accepts an incoming connection request.
+ * @cm_id : CM ID of the incoming connect request
+ * @conn_param : Connection parameters
+ * @return: XRNIC_SUCCESS if the connection is accepted successfully,
+ * otherwise an error representative value
+ */
+int xrnic_rdma_accept(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param)
+{
+ struct xrnic_qp_info *qp_info;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1] !=
+ XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_info = &cm_id->qp_info;
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->sq_depth > XRNIC_MAX_SQ_DEPTH ||
+ qp_info->rq_depth > XRNIC_MAX_RQ_DEPTH ||
+ qp_info->send_sge_size > XRNIC_MAX_SEND_SGL_SIZE ||
+ qp_info->send_pkt_size > XRNIC_MAX_SEND_PKT_SIZE)
+ return -XRNIC_INVALID_QP_INIT_ATTR;
+
+ /* Return an error if an invalid conn_param is passed. */
+ if (conn_param->private_data_len > XRNIC_CM_PRVATE_DATA_LENGTH ||
+ conn_param->responder_resources > XRNIC_RESPONDER_RESOURCES ||
+ conn_param->initiator_depth > XRNIC_INITIATOR_DEPTH ||
+ conn_param->flow_control > 1 ||
+ conn_param->retry_count > XRNIC_REQ_RETRY_COUNT ||
+ conn_param->rnr_retry_count > XRNIC_REP_RNR_RETRY_COUNT)
+ return -XRNIC_INVALID_QP_CONN_PARAM;
+
+ memcpy((void *)&cm_id->conn_param.private_data,
+ (void *)&conn_param->private_data,
+ conn_param->private_data_len);
+ cm_id->conn_param.private_data_len = conn_param->private_data_len;
+ cm_id->conn_param.responder_resources =
+ conn_param->responder_resources;
+ cm_id->conn_param.initiator_depth = conn_param->initiator_depth;
+ cm_id->conn_param.flow_control = conn_param->flow_control;
+ cm_id->conn_param.retry_count = conn_param->retry_count;
+ cm_id->conn_param.rnr_retry_count = conn_param->rnr_retry_count;
+
+ xrnic_qp_app_configuration(qp_info->qp_num, XRNIC_HW_QP_ENABLE);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_accept);
+
+/*
+ * xrnic_rdma_disconnect() - This function implements RDMA disconnect.
+ * @cm_id : CM ID to destroy or disconnect
+ * @return: XRNIC_SUCCESS on successful disconnect,
+ * otherwise an error representative value
+ */
+int xrnic_rdma_disconnect(struct xrnic_rdma_cm_id *cm_id)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ int i;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1]) {
+ if (cm_id->local_cm_id >= 2) {
+ if (cm_id->child_qp_num < 1)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_info.qp_num) {
+ pr_err("CM ID of QP is not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ if (cm_id->qp_status == XRNIC_PORT_QP_FREE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ pr_info("Free local cm id[%d] ", cm_id->local_cm_id);
+ pr_info("Child qp number [%d] ", cm_id->child_qp_num);
+ pr_info("qp_num [%d]\n", cm_id->qp_info.qp_num);
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ } else if (cm_id->local_cm_id == 1) {
+ if (cm_id->qp_status == XRNIC_PORT_QP_FREE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ cm_id_info = (struct xrnic_rdma_cm_id_info *)
+ cm_id->cm_id_info;
+ for (i = 0; i < cm_id_info->num_child; i++) {
+ if (cm_id_info->child_cm_id[i].qp_status ==
+ XRNIC_PORT_QP_IN_USE){
+ pr_err("child CM IDs not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ }
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ } else {
+ pr_err("Received invalid CM ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ } else {
+ pr_err("Received invalid Port ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_disconnect);
+
+/*
+ * xrnic_rdma_destroy_id() - Destroys the CM ID of the channel.
+ * @cm_id : CM ID of the incoming connect request
+ * @flag : Flag to indicate disconnect send
+ * @return: XRNIC_SUCCESS if successful,
+ * otherwise an error representative value
+ */
+int xrnic_rdma_destroy_id(struct xrnic_rdma_cm_id *cm_id, int flag)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ int i;
+ u32 local_cm_id = cm_id->local_cm_id;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1]) {
+ if (local_cm_id >= 2) {
+ if (cm_id->child_qp_num < 1)
+ return -XRNIC_INVALID_CM_ID;
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE) {
+ pr_err("CM ID is not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ if (flag)
+ xrnic_cm_disconnect_send_handler
+ (&xrnic_dev->qp_attr[local_cm_id - 2]);
+
+ pr_info("Free local cm id[%d] ", cm_id->local_cm_id);
+ pr_info("Child qp number [%d] ", cm_id->child_qp_num);
+ pr_info("qp_num [%d]\n", cm_id->qp_info.qp_num);
+
+ cm_id_info =
+ xrnic_dev->cm_id_info[cm_id->port_num - 1];
+ cm_id_info->parent_cm_id.child_qp_num--;
+ __list_del_entry(&cm_id->list);
+ kfree(cm_id);
+ } else if (local_cm_id == 1) {
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+
+ cm_id_info = (struct xrnic_rdma_cm_id_info *)
+ cm_id->cm_id_info;
+ for (i = 0; i < cm_id_info->num_child; i++) {
+ if (cm_id_info->child_cm_id[i].qp_status ==
+ XRNIC_PORT_QP_IN_USE) {
+ pr_err("child CM IDs not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ }
+ xrnic_dev->io_qp_count = xrnic_dev->io_qp_count +
+ cm_id_info->num_child;
+ xrnic_dev->cm_id_info[cm_id->port_num - 1] = NULL;
+ xrnic_dev->port_status[cm_id->port_num - 1] =
+ XRNIC_PORT_QP_FREE;
+ __list_del_entry(&cm_id->list);
+ kfree(cm_id_info->child_cm_id);
+ kfree(cm_id_info);
+ } else {
+ pr_err("Received invalid CM ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ } else {
+ return -XRNIC_INVALID_CM_ID;
+ }
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_destroy_id);
+
+/*
+ * xrnic_send_mad() - This function initiates sending a management packet on
+ * QP1.
+ * @send_buf : buffer holding the MAD packet to send
+ * @size : Size of the send buffer
+ */
+void xrnic_send_mad(void *send_buf, u32 size)
+{
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+
+ xrnic_qp1_send_mad_pkt(send_buf, qp1_attr, size);
+}
+EXPORT_SYMBOL(xrnic_send_mad);
+
+/*
+ * xrnic_identify_remote_host() - Searches internal data structures for
+ * remote end info
+ * @rq_buf : received data buffer from other end
+ * @qp_num : QP number on which packet has been received
+ * @return: XRNIC_SUCCESS if remote end info is available,
+ * XRNIC_FAILED otherwise
+ */
+int xrnic_identify_remote_host(void *rq_buf, int qp_num)
+{
+ /* First find out which IP version the packet carries and accordingly
+ * compare the IP address as either AF_INET or AF_INET6.
+ */
+ /* There are two possible failure conditions: either we simply bypass
+ * this CONNECT_REQUEST because we already have it, or there
+ * is no QP free at all.
+ */
+ struct mad *mad;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ }
+
+ if (htons(mad->attribute_id) == CONNECT_REQUEST) {
+ if (qp1_attr->ip_addr_type == AF_INET6) {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ (!memcmp(&xrnic_dev->qp1_attr.ipv6_addr,
+ &xrnic_dev->qp_attr[qp_num].ipv6_addr,
+ sizeof(struct in6_addr))))
+ return XRNIC_SUCCESS;
+ } else {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ xrnic_dev->qp1_attr.ipv4_addr ==
+ xrnic_dev->qp_attr[qp_num].ipv4_addr)
+ return XRNIC_SUCCESS;
+ }
+ } else {
+ /* Need to compare udp->source_port, ethernet->source_mac,
+ * ip->source_ip, deth->source_qp == 1; local_cm_id is little endian
+ */
+
+ if (qp1_attr->ip_addr_type == AF_INET6) {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ mad->data[1] ==
+ xrnic_dev->qp_attr[qp_num].local_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ (!memcmp(&xrnic_dev->qp1_attr.ipv6_addr,
+ &xrnic_dev->qp_attr[qp_num].ipv6_addr,
+ sizeof(struct in6_addr))))
+
+ return XRNIC_SUCCESS;
+ } else {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ mad->data[1] ==
+ xrnic_dev->qp_attr[qp_num].local_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ xrnic_dev->qp1_attr.ipv4_addr ==
+ xrnic_dev->qp_attr[qp_num].ipv4_addr)
+
+ return XRNIC_SUCCESS;
+ }
+ }
+ return XRNIC_FAILED;
+}
+
+/**
+ * xrnic_rdma_resolve_addr() - Resolves the destination address and
+ * initiates ARP if required.
+ * @cm_id: CM channel ID which is being used for connection set up
+ * @src_addr: IPv4/IPv6 address of the source
+ * @dst_addr: IPv4/IPv6 address of the destination
+ * @timeout: Address resolve timeout
+ * @return: SUCCESS value if the route is resolved or a representative
+ * error value otherwise
+ */
+int xrnic_rdma_resolve_addr(struct xrnic_rdma_cm_id *cm_id,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int timeout)
+{
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct neighbour *n;
+ int arp_retry = 3;
+ int ret = 0;
+ struct sockaddr_in sin4, *din4;
+ struct net_device *net_dev;
+ struct xrnic_rdma_cm_event_info event;
+
+ net_dev = dev_get_by_name(&init_net, "eth0");
+ if (!net_dev)
+ return -ENODEV;
+ memset(&fl4, 0, sizeof(fl4));
+ din4 = (struct sockaddr_in *)dst_addr;
+ fl4.daddr = din4->sin_addr.s_addr;
+ rt = ip_route_output_key(&init_net, &fl4);
+ if (IS_ERR(rt)) {
+ event.cm_event = XRNIC_CM_EVENT_ADDR_ERROR;
+ event.status = PTR_ERR(rt);
+ cm_id->xrnic_cm_handler(cm_id, &event);
+ ret = PTR_ERR(rt);
+ goto err;
+ }
+
+ event.cm_event = XRNIC_CM_EVENT_ADDR_RESOLVED;
+ event.status = 0;
+ cm_id->xrnic_cm_handler(cm_id, &event);
+
+ sin4.sin_addr.s_addr = fl4.saddr;
+ sin4.sin_port = cpu_to_be16(ERNIC_UDP_SRC_PORT);
+ sin4.sin_family = dst_addr->sa_family;
+
+ /* HACK: ARP is not resolved on the first attempt; retries are needed */
+ do {
+ n = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
+ } while (arp_retry-- > 0);
+
+ if (IS_ERR(n))
+ pr_info("ERNIC neigh lookup failed\n");
+
+ memcpy(&cm_id->route.s_addr, &sin4, sizeof(sin4));
+ memcpy(&cm_id->route.d_addr, dst_addr, sizeof(*dst_addr));
+ ether_addr_copy(cm_id->route.smac, net_dev->dev_addr);
+ ether_addr_copy(cm_id->route.dmac, n->ha);
+ event.cm_event = XRNIC_CM_EVENT_ROUTE_RESOLVED;
+ event.status = 0;
+ cm_id->xrnic_cm_handler(cm_id, &event);
+err:
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_rdma_resolve_addr);
+
+/**
+ * fill_ipv4_headers() - Fills the Ethernet, IPv4 and UDP headers for an
+ * outgoing packet.
+ * @cm_id: CM ID info for addresses
+ * @send_sgl_qp1: SGL info
+ * @cm_req_size: request size
+ * @return: pointer to SGL info
+ */
+char *fill_ipv4_headers(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *udph;
+ struct sockaddr_in *sin4, *din4;
+
+ sin4 = (struct sockaddr_in *)&cm_id->route.s_addr;
+ din4 = (struct sockaddr_in *)&cm_id->route.d_addr;
+
+ SET_ETH_HDR(send_sgl_qp1);
+ eth = (struct ethhdr *)send_sgl_qp1;
+ ether_addr_copy(eth->h_dest, cm_id->route.dmac);
+ ether_addr_copy(eth->h_source, cm_id->route.smac);
+ eth->h_proto = cpu_to_be16(ETH_P_IP);
+
+ SET_IP_HDR(send_sgl_qp1);
+ iph = (struct iphdr *)send_sgl_qp1;
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->ttl = 32;
+ iph->tos = 0;
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = sin4->sin_addr.s_addr;
+ iph->daddr = din4->sin_addr.s_addr;
+ iph->id = 0;
+ iph->frag_off = cpu_to_be16(0x2 << 13);
+ iph->tot_len = cpu_to_be16(cm_req_size - ETH_HLEN);
+
+ ip_send_check(iph);
+
+ SET_NET_HDR(send_sgl_qp1);
+ udph = (struct udphdr *)send_sgl_qp1;
+ udph->source = sin4->sin_port;
+ udph->dest = din4->sin_port;
+ udph->len = cpu_to_be16(cm_req_size - ETH_HLEN - (iph->ihl * 4));
+ udph->check = 0;
+
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_mad_common_header() - Fills the BTH, DETH and common MAD headers.
+ * @cm_id: CM ID info
+ * @send_sgl_qp1: SGL info
+ * @cm_req_size: request size
+ * @cm_attr: CM attribute ID
+ * @return: pointer to SGL info
+ */
+char *fill_mad_common_header(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size,
+ int cm_attr)
+{
+ struct ib_bth *bth;
+ struct ib_deth *deth;
+ struct ib_mad_hdr *madh;
+ int val;
+
+ SET_BTH_HDR(send_sgl_qp1);
+ bth = (struct ib_bth *)send_sgl_qp1;
+ memset(bth, 0, sizeof(*bth));
+ val = (BTH_SET(OPCODE, IB_OPCODE_UD_SEND_ONLY) |
+ BTH_SET(SE, XRNIC_SET_SOLICT_EVENT) |
+ BTH_SET(MIG, XRNIC_MIGRATION_REQ) |
+ BTH_SET(PAD, XRNIC_PAD_COUNT) |
+ BTH_SET(TVER, XRNIC_TRANSPORT_HDR_VER) |
+ BTH_SET(PKEY, 65535));
+ bth->offset0 = cpu_to_be32(val);
+ bth->offset4 = cpu_to_be32(BTH_SET(DEST_QP, 1));
+ bth->offset8 = cpu_to_be32(BTH_SET(PSN, psn_num++));
+
+ SET_DETH_HDR(send_sgl_qp1);
+ deth = (struct ib_deth *)send_sgl_qp1;
+ deth->offset0 = cpu_to_be32(IB_ENFORCED_QEY);
+ deth->offset4 = cpu_to_be32(DETH_SET(SQP, 2));
+
+ SET_MAD_HDR(send_sgl_qp1);
+ madh = (struct ib_mad_hdr *)send_sgl_qp1;
+ memset(madh, 0, sizeof(*madh));
+ madh->base_version = IB_MGMT_BASE_VERSION;
+ madh->mgmt_class = IB_MGMT_CLASS_CM;
+ madh->class_version = IB_CM_CLASS_VER;
+ madh->method = IB_MGMT_METHOD_SEND;
+ madh->attr_id = cm_attr;
+ madh->tid = cpu_to_be64(mad_tid++);
+ madh->status = 0;
+ madh->class_specific = 0;
+ madh->attr_mod = 0;
+
+ return send_sgl_qp1;
+}
+
+/**
+ * xrnic_rdma_connect() - Initiates the connection process.
+ * @cm_id: CM ID info
+ * @conn_param: Connection parameters for the new connection
+ * @return: XRNIC_SUCCESS on success or a negative error code on failure
+ */
+int xrnic_rdma_connect(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param)
+{
+ int cm_req_size;
+ char *send_sgl_qp1, *head;
+
+ cm_req_size = sizeof(struct ethhdr) + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + IB_BTH_BYTES + IB_DETH_BYTES +
+ sizeof(struct ib_mad_hdr) +
+ sizeof(struct ernic_cm_req) + EXTRA_PKT_LEN;
+
+ head = kmalloc(cm_req_size, GFP_ATOMIC);
+ if (!head)
+ return -ENOMEM;
+ send_sgl_qp1 = head;
+ memcpy(&cm_id->conn_param, conn_param, sizeof(*conn_param));
+ fill_ipv4_cm_req(cm_id, send_sgl_qp1, cm_req_size);
+ xrnic_send_mad(head, cm_req_size - EXTRA_PKT_LEN);
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_connect);
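+
+/* Illustrative client-side usage sketch (not part of this driver): a kernel
+ * module could set up a connection with the APIs exported above roughly as
+ * follows. The handler my_cm_handler, the addresses and the parameter values
+ * are assumptions made only for this example.
+ *
+ *	struct xrnic_rdma_cm_id *id;
+ *	struct xrnic_rdma_conn_param param = { 0 };
+ *	struct sockaddr_in dst = { 0 };
+ *
+ *	id = xrnic_rdma_create_id(my_cm_handler, NULL, XRNIC_PS_TCP,
+ *				  XRNIC_QPT_RC, 1);
+ *	dst.sin_family = AF_INET;
+ *	dst.sin_port = htons(18515);
+ *	dst.sin_addr.s_addr = in_aton("192.168.1.10");
+ *	xrnic_rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 1000);
+ *	param.initiator_depth = 16;
+ *	param.responder_resources = 16;
+ *	xrnic_rdma_connect(id, &param);
+ */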
+
+/**
+ * xrnic_process_mad_pkt() - Processes a received MAD packet.
+ * @rq_buf: receive queue pointer
+ * @return: XRNIC_SUCCESS if the MAD packet was processed successfully,
+ * XRNIC_FAILED otherwise
+ */
+static int xrnic_process_mad_pkt(void *rq_buf)
+{
+ int ret = 0;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct deth *deth;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ deth = (struct deth *)&recv_qp_pkt_ipv4->deth;
+ qp1_attr->ipv4_addr = recv_qp_pkt_ipv4->ipv4.src_addr;
+ memcpy(&qp1_attr->mac_addr,
+ &recv_qp_pkt_ipv4->eth.h_source, XRNIC_ETH_ALEN);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ deth = (struct deth *)&recv_qp_pkt_ipv6->deth;
+ memcpy(&qp1_attr->ipv6_addr,
+ &recv_qp_pkt_ipv6->ipv6.saddr,
+ sizeof(struct in6_addr));
+ memcpy(&qp1_attr->mac_addr,
+ &recv_qp_pkt_ipv6->eth.h_source,
+ XRNIC_ETH_ALEN);
+ }
+ qp1_attr->source_qp_num = deth->src_qp;
+
+ ret = xrnic_cm_establishment_handler(rq_buf);
+ if (ret) {
+ pr_err("cm establishment failed with ret code %d\n", ret);
+ return XRNIC_FAILED;
+ }
+
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_mad_pkt_recv_intr_handler() - Interrupt handler for the MAD packet
+ * received interrupt.
+ * @data: XRNIC device info
+ */
+void xrnic_mad_pkt_recv_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ qp1_attr->xrnic_mmap;
+ struct rdma_qp1_attr *rdma_qp1_attr = (struct rdma_qp1_attr *)
+ &xrnic_mmap->xrnic_regs->rdma_qp1_attr;
+ u32 config_value = 0;
+ u8 rq_buf[XRNIC_RECV_PKT_SIZE];
+ void *rq_buf_temp, *rq_buf_unaligned;
+ int ret = 0, j, rq_pkt_num = 0, rq_pkt_count = 0;
+ struct ethhdr_t *ethhdr;
+ unsigned long flag;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ rq_buf_unaligned = (void *)rq_buf;
+
+ /* We need to maintain sq_cmpl_db_local as per the hardware update
+ * for the queue specific sq_cmpl_db_local register. This variable
+ * must also be maintained in case some packets have to be resent.
+ */
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp1_attr->qp_num - 1)));
+ pr_info("config_value = %d, db_local = %d\n",
+ config_value, qp1_attr->rq_wrptr_db_local);
+ if (qp1_attr->rq_wrptr_db_local == config_value) {
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+ return;
+ }
+
+ if (qp1_attr->rq_wrptr_db_local > config_value)
+ rq_pkt_count = (config_value + XRNIC_RQ_DEPTH) -
+ qp1_attr->rq_wrptr_db_local;
+ else
+ rq_pkt_count = config_value - qp1_attr->rq_wrptr_db_local;
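+ /* Worked example (illustrative): with XRNIC_RQ_DEPTH == 64, if the
+ * hardware write pointer has wrapped, e.g. rq_wrptr_db_local == 62
+ * while the register reads 2, then rq_pkt_count = (2 + 64) - 62 = 4.
+ */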
+
+ DEBUG_LOG("rx pkt count = 0x%x\n", rq_pkt_count);
+ for (j = 0 ; j < rq_pkt_count ; j++) {
+ config_value = ioread32((char *)xrnic_mmap->sq_cmpl_db_add +
+ (4 * (qp1_attr->qp_num - 1)));
+
+ rq_pkt_num = qp1_attr->rq_wrptr_db_local;
+ if (rq_pkt_num >= XRNIC_RQ_DEPTH)
+ rq_pkt_num = rq_pkt_num - XRNIC_RQ_DEPTH;
+
+ ethhdr = (struct ethhdr_t *)((char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE));
+
+ if (ethhdr->eth_type == htons(XRNIC_ETH_P_IP)) {
+ rq_buf_temp = (char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE);
+ memcpy((char *)rq_buf_unaligned,
+ (char *)rq_buf_temp, XRNIC_RECV_PKT_SIZE);
+ qp1_attr->ip_addr_type = AF_INET;
+ } else {
+ rq_buf_temp = (char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE);
+ memcpy((char *)rq_buf_unaligned,
+ (char *)rq_buf_temp, XRNIC_RECV_PKT_SIZE);
+ qp1_attr->ip_addr_type = AF_INET6;
+ }
+ ret = xrnic_process_mad_pkt(rq_buf_unaligned);
+
+ if (ret) {
+ DEBUG_LOG("MAD pkt processing failed for pkt num %d\n",
+ rq_pkt_num);
+ }
+
+ qp1_attr->rq_wrptr_db_local = qp1_attr->rq_wrptr_db_local + 1;
+ config_value = qp1_attr->rq_wrptr_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->rq_ci_db)));
+
+ if (qp1_attr->rq_wrptr_db_local == XRNIC_RQ_DEPTH)
+ qp1_attr->rq_wrptr_db_local = 0;
+ }
+
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_cm_establishment_handler() - handles the CM connection
+ * establishment state machine for received MAD packets.
+ * @rq_buf: receive queue buffer
+ * @return: 0 on success, -1 in case of failure
+ */
+int xrnic_cm_establishment_handler(void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct req *req;
+ struct rep *rep;
+ struct deth *deth;
+ struct xrnic_qp_attr *qp_attr;
+ int i = 0, ret;
+ enum xrnic_rej_reason reason;
+ enum xrnic_msg_rej msg;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ int qp1_send_pkt_size;
+ struct xrnic_rdma_cm_id *cm_id, *tmp;
+ struct sockaddr_in *din4;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ }
+ switch (htons(mad->attribute_id)) {
+ case CONNECT_REQUEST:
+ DEBUG_LOG("Connect request recevied\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ ret = xrnic_find_free_qp();
+ DEBUG_LOG("Q pair no:%x, i = %d\n", ret, i);
+ if (ret < 0) {
+ qp_attr = qp1_attr;
+ qp_attr->ip_addr_type = qp1_attr->ip_addr_type;
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ pr_err("no QP is free for connection.\n");
+ reason = XRNIC_REJ_NO_QP_AVAILABLE;
+ msg = XRNIC_REJ_REQ;
+ qp_attr->remote_cm_id = req->local_cm_id;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ return XRNIC_FAILED;
+ }
+ i = ret;
+ }
+
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_LISTEN ||
+ qp_attr->curr_state == XRNIC_MRA_SENT ||
+ qp_attr->curr_state == XRNIC_REJ_SENT ||
+ qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_ESTABLISHD) {
+ qp_attr->ip_addr_type = qp1_attr->ip_addr_type;
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_connect_request_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state for Connect Request\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case READY_TO_USE:
+ DEBUG_LOG("RTU received\n");
+
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection. in RTU\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_ready_to_use_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve RTU\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case MSG_RSP_ACK:
+ DEBUG_LOG("Message received Ack interrupt\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_REP_SENT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_msg_rsp_ack_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve MSG RSP ACK\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case CONNECT_REPLY:
+ DEBUG_LOG("Connect reply received\n");
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ rep = (struct rep *)&recv_qp_pkt_ipv4->mad.data;
+ deth = (struct deth *)&recv_qp_pkt_ipv4->deth;
+ list_for_each_entry_safe(cm_id, tmp, &cm_id_list, list) {
+ if (cm_id->local_cm_id ==
+ be32_to_cpu(rep->remote_comm_id))
+ break;
+ }
+ /* Something is wrong if the QP num is 0; don't send the Reply.
+ * TODO: Send a Reject instead of muting the Reply.
+ */
+ if (cm_id->qp_info.qp_num == 0)
+ goto done;
+ cm_id->local_cm_id = rep->remote_comm_id;
+ cm_id->remote_cm_id = rep->local_cm_id;
+ qp_attr = &xrnic_dev->qp_attr[(cm_id->qp_info.qp_num - 2)];
+ qp_attr->local_cm_id = rep->remote_comm_id;
+ qp_attr->remote_cm_id = rep->local_cm_id;
+ qp_attr->remote_qp = (be32_to_cpu(rep->local_qpn) >> 8);
+ qp_attr->source_qp_num = (deth->src_qp);
+ qp_attr->starting_psn = (cm_id->qp_info.starting_psn - 1);
+ qp_attr->rem_starting_psn = (rep->start_psn[2] |
+ rep->start_psn[1] << 8 |
+ rep->start_psn[0] << 16);
+ ether_addr_copy(qp_attr->mac_addr, cm_id->route.dmac);
+ din4 = &cm_id->route.d_addr;
+ cm_id->port_num = be16_to_cpu(din4->sin_port);
+ xrnic_dev->port_status[cm_id->port_num - 1] =
+ XRNIC_PORT_QP_IN_USE;
+ qp_attr->ipv4_addr = din4->sin_addr.s_addr;
+ qp_attr->ip_addr_type = AF_INET;
+ qp_attr->cm_id = cm_id;
+ xrnic_qp_app_configuration(cm_id->qp_info.qp_num,
+ XRNIC_HW_QP_ENABLE);
+ xrnic_cm_connect_rep_handler(qp_attr, NULL);
+ xrnic_cm_send_rtu(cm_id, rep);
+ qp_attr->curr_state = XRNIC_ESTABLISHD;
+done:
+ break;
+
+ case CONNECT_REJECT:
+ DEBUG_LOG("Connect Reject received\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection.\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_MRA_SENT ||
+ qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_connect_reject_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve connect reject\n");
+ return XRNIC_FAILED;
+ }
+
+ break;
+
+ case DISCONNECT_REQUEST:
+ DEBUG_LOG("Disconnect request received\n");
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QPis free for connection.\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_ESTABLISHD ||
+ qp_attr->curr_state == XRNIC_DREQ_SENT ||
+ qp_attr->curr_state == XRNIC_TIMEWAIT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_disconnect_request_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to for Disconnect request\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case DISCONNECT_REPLY:
+ DEBUG_LOG("Disconnect reply received\n");
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection.\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_DREQ_SENT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_disconnect_reply_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to for Disconnect reply\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case SERVICE_ID_RESOLUTION_REQ:
+ DEBUG_LOG("Received service ID resolution request\n");
+ pr_err("Not handling service ID resolution request\n");
+ return XRNIC_FAILED;
+
+ case SERVICE_ID_RESOLUTION_REQ_REPLY:
+ DEBUG_LOG("Received service ID resolution reply\n");
+ pr_err("Not handling service ID resolution reply\n");
+ return XRNIC_FAILED;
+
+ case LOAD_ALTERNATE_PATH:
+ DEBUG_LOG("Received Load Alternate Path request\n");
+ pr_err("Not handling Load Alternate Path request\n");
+ return XRNIC_FAILED;
+
+ case ALTERNATE_PATH_RESPONSE:
+ DEBUG_LOG("Received LAP response\n");
+ pr_err("Not handling LAP response\n");
+ return XRNIC_FAILED;
+
+ default:
+ pr_err("default mad attribute 0x%x\n", mad->attribute_id);
+ break;
+ }
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+ return XRNIC_SUCCESS;
+}
diff --git a/drivers/staging/xlnx_ernic/xcm.h b/drivers/staging/xlnx_ernic/xcm.h
new file mode 100644
index 000000000000..6640b83e5166
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcm.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+#ifndef _CM_H
+#define _CM_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_cm.h>
+
+/************************** Constant Definitions *****************************/
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+/* As per RoCEv2 Annex 17, the UDP source port can be fixed to avoid
+ * ordering issues. So, to keep things simple, ERNIC also uses a constant
+ * UDP source port.
+ */
+#define ERNIC_UDP_SRC_PORT 0xA000
+
+#define SET_VAL(start, size, val) ((((val) & ((1U << (size)) - 1)) << (start)))
+#define GET_VAL(start, size, val) (((val) >> (start)) & ((1U << (size)) - 1))
+#define BTH_SET(FIELD, v) SET_VAL(BTH_##FIELD##_OFF, \
+ BTH_##FIELD##_SZ, v)
+#define DETH_SET(FIELD, v) SET_VAL(DETH_##FIELD##_OFF, \
+ DETH_##FIELD##_SZ, v)
+
+#define SET_HDR_OFFSET(ptr, off) ((ptr) += (off))
+#define SET_CM_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct ib_mad_hdr))
+#define SET_ETH_HDR(ptr) SET_HDR_OFFSET(ptr, 0)
+#define SET_IP_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct ethhdr))
+#define SET_NET_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct iphdr))
+#define SET_BTH_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct udphdr))
+#define SET_DETH_HDR(ptr) SET_HDR_OFFSET(ptr, IB_BTH_BYTES)
+#define SET_MAD_HDR(ptr) SET_HDR_OFFSET(ptr, IB_DETH_BYTES)
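+
+/* Worked example (illustrative): the SET_*_HDR() macros above advance a
+ * single buffer pointer through the packet, so for a pointer p at the start
+ * of the frame, SET_IP_HDR(p) moves p past the Ethernet header,
+ * SET_NET_HDR(p) past the IP header, SET_BTH_HDR(p) past the UDP header,
+ * and so on. BTH_SET()/DETH_SET() pack a value at a field's bit offset, e.g.
+ * BTH_SET(OPCODE, IB_OPCODE_UD_SEND_ONLY) expands to
+ * SET_VAL(24, 8, IB_OPCODE_UD_SEND_ONLY), placing the opcode in bits 31:24
+ * of offset0 of struct ib_bth.
+ */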
+
+#define CMA_VERSION 0
+#define IB_ENFORCED_QEY 0x80010000
+#define IB_CM_CLASS_VER 2
+/*****************************************************************************/
+struct ib_bth {
+ __be32 offset0;
+#define BTH_PKEY_OFF 0
+#define BTH_PKEY_SZ 16
+#define BTH_TVER_OFF 16
+#define BTH_TVER_SZ 4
+#define BTH_PAD_OFF 20
+#define BTH_PAD_SZ 2
+#define BTH_MIG_OFF 22
+#define BTH_MIG_SZ 1
+#define BTH_SE_OFF 23
+#define BTH_SE_SZ 1
+#define BTH_OPCODE_OFF 24
+#define BTH_OPCODE_SZ 8
+ __be32 offset4;
+#define BTH_DEST_QP_OFF 0
+#define BTH_DEST_QP_SZ 24
+ __be32 offset8;
+#define BTH_PSN_OFF 0
+#define BTH_PSN_SZ 24
+#define BTH_ACK_OFF 31
+#define BTH_ACK_SZ 1
+};
+
+struct ib_deth {
+ __be32 offset0;
+#define DETH_QKEY_OFF 0
+#define DETH_QKEY_SZ 32
+ __be32 offset4;
+#define DETH_SQP_OFF 0
+#define DETH_SQP_SZ 24
+};
+
+struct cma_rtu {
+ u32 local_comm_id;
+ u32 remote_comm_id;
+ u8 private_data[224];
+};
+
+union cma_ip_addr {
+ struct in6_addr ip6;
+ struct {
+ __be32 pad[3];
+ __be32 addr;
+ } ip4;
+};
+
+/* CA11-1: IP Addressing CM REQ Message Private Data Format */
+struct cma_hdr {
+ u8 cma_version;
+ u8 ip_version; /* IP version: 7:4 */
+ __be16 port;
+ union cma_ip_addr src_addr;
+ union cma_ip_addr dst_addr;
+};
+
+enum transport_svc_type {
+ XRNIC_SVC_TYPE_RC = 0,
+ XRNIC_SVC_TYPE_UC,
+ XRNIC_SVC_TYPE_RD,
+ XRNIC_SVC_TYPE_RSVD,
+};
+
+extern struct list_head cm_id_list;
+
+void xrnic_qp1_send_mad_pkt(void *send_sgl_temp,
+ struct xrnic_qp_attr *qp1_attr, u32 send_pkt_size);
+void xrnic_reset_io_qp(struct xrnic_qp_attr *qp_attr);
+void fill_ipv4_cm_req(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size);
+char *fill_ipv4_headers(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size);
+int xrnic_cm_establishment_handler(void *rq_buf);
+char *fill_mad_common_header(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size,
+ int cm_attr);
+void xrnic_prepare_initial_headers(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf);
+void xrnic_cm_msg_rsp_ack_handler(struct xrnic_qp_attr *qp_attr, void *rq_buf);
+void xrnic_cm_disconnect_send_handler(struct xrnic_qp_attr *qp_attr);
+void xrnic_cm_prepare_rej(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_msg_rej msg,
+ enum xrnic_rej_reason reason);
+void xrnic_send_mad(void *send_buf, u32 size);
+int xrnic_identify_remote_host(void *rq_buf, int qp_num);
+void xrnic_mad_pkt_recv_intr_handler(unsigned long data);
+
+struct ernic_cm_req {
+ u32 local_comm_id;
+ u32 rsvd1;
+ __u64 service_id;
+ __u64 local_ca_guid;
+ u32 rsvd2;
+ u32 local_qkey;
+ u32 offset32;
+ u32 offset36;
+ u32 offset40;
+ u32 offset44;
+ u16 pkey;
+ u8 offset50;
+ u8 offset51;
+ u16 local_lid;
+ u16 remote_lid;
+ union ib_gid local_gid;
+ union ib_gid remote_gid;
+ u32 offset88;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 offset94;
+ u8 offset95;
+ u8 rsvd3[45];
+ u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+} __packed;
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _CM_H*/
diff --git a/drivers/staging/xlnx_ernic/xcommon.h b/drivers/staging/xlnx_ernic/xcommon.h
new file mode 100644
index 000000000000..c7d9ff6c84b6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcommon.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef COMMON_INCL_H
+#define COMMON_INCL_H
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include "xif.h"
+#include "xrocev2.h"
+#include "xhw_def.h"
+#include "xqp.h"
+#include "xcm.h"
+#include "xmr.h"
+#include "xmain.h"
+
+#define XRNIC_FAILED -1
+#define XRNIC_SUCCESS 0
+#define DEBUG_LOG(x, ...) do { \
+ if (debug)\
+ pr_info(x, ##__VA_ARGS__); \
+ } while (0)
+
+extern int debug;
+
+struct xrnic_dev_info {
+ struct xrnic_memory_map xrnic_mmap;
+ struct xrnic_qp_attr qp1_attr;
+ /* TODO: Need to allocate qp_attr on the heap.
+ * When the max number of Queue Pairs in the design increases, the
+ * static memory requirement will be huge.
+ */
+ struct xrnic_qp_attr qp_attr[XRNIC_MAX_QP_SUPPORT];
+ /* DESTINATION ADDR_FAMILY - IPv4/V6 */
+ u16 ip_addr_type;
+ /* DESTINATION addr in NBO */
+ u8 ipv6_addr[16];
+ u32 pmtu;
+ /* IPV4 address */
+ u8 ipv4_addr[4];
+ u32 qp_falat_local_ptr;
+ struct xrnic_rdma_cm_id_info *curr_cm_id_info;
+ /* TODO: Need to allocate cm_id_info and port_status on heap. */
+ struct xrnic_rdma_cm_id_info *cm_id_info[XRNIC_MAX_PORT_SUPPORT];
+ enum xrnic_port_qp_status port_status[XRNIC_MAX_PORT_SUPPORT];
+ /* Interrupt for RNIC */
+ u32 xrnic_irq;
+ struct tasklet_struct mad_pkt_recv_task;
+ struct tasklet_struct qp_pkt_recv_task;
+ struct tasklet_struct qp_fatal_task;
+ struct tasklet_struct wqe_completed_task;
+ u32 io_qp_count;
+ /*Character Driver Interface*/
+ struct device_node *dev_node;
+ struct resource resource;
+ struct cdev cdev;
+ char pkt_buffer[512];
+ struct device *dev;
+};
+
+extern struct xrnic_dev_info *xrnic_dev;
+#ifdef __cplusplus
+ }
+#endif
+#endif
diff --git a/drivers/staging/xlnx_ernic/xernic_bw_test.c b/drivers/staging/xlnx_ernic/xernic_bw_test.c
new file mode 100644
index 000000000000..0f0977660621
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xernic_bw_test.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC perftest driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <net/addrconf.h>
+#include "xcommon.h"
+#include "xperftest.h"
+
+/* Default Port Number for Perftest and Depths for XRNIC */
+#define PERFTEST_PORT 18515
+#define PERFTEST_SQ_DEPTH 0x80
+#define PERFTEST_RQ_DEPTH 0x40
+/* Admin and IO QPs */
+#define PERFTEST_ADMIN_QPS 1
+#define PERFTEST_IO_QPS 1
+#define PERFTEST_MAX_QPS (PERFTEST_ADMIN_QPS + PERFTEST_IO_QPS)
+#define PERFTEST_DEFAULT_MEM_SIZE (4 * 1024 * 1024)
+
+#define _1MB_BUF_SIZ (1024 * 1024)
+#define PERF_TEST_RQ_BUF_SIZ ((_1MB_BUF_SIZ + XRNIC_RECV_PKT_SIZE) *\
+ PERFTEST_RQ_DEPTH)
+
+struct xrnic_rdma_cm_id *cm_id;
+static char server_ip[32] = "0.0.0.0";
+struct ernic_pd *pd;
+int prev_qpn;
+
+/* TODO: currently, we have a single instance.
+ * This needs to be converted to a per-instance context.
+ */
+struct perftest_ctx {
+ struct xrnic_rdma_cm_id *cm_id;
+ struct ernic_pd *pd;
+ struct mr *reg_mr; /*registered MR */
+};
+
+phys_addr_t phys_mem[PERFTEST_MAX_QPS];
+int io_mr_idx;
+struct mr *perftest_io_mr[PERFTEST_IO_QPS];
+
+struct perftest_ctx perf_context[PERFTEST_MAX_QPS];
+
+struct perftest_wr {
+ union ctx ctx;
+ __u8 reserved1[2];
+ __u32 local_offset[2];
+ __u32 length;
+ __u8 opcode;
+ __u8 reserved2[3];
+ __u32 remote_offset[2];
+ __u32 remote_tag;
+ __u32 completion_info[4];
+ __u8 reserved4[16];
+} __packed;
+
+struct xrnic_qp_init_attr qp_attr;
+
+struct perftest_trinfo {
+ phys_addr_t rq_buf_ba_phys;
+ phys_addr_t send_sgl_phys;
+ phys_addr_t sq_ba_phys;
+ phys_addr_t cq_ba_phys;
+ phys_addr_t rq_wptr_db_phys;
+ phys_addr_t sq_cmpl_db_phys;
+ void __iomem *rq_buf_ba;
+ void __iomem *send_sgl;
+ void __iomem *sq_ba;
+ void __iomem *cq_ba;
+};
+
+struct perftest_trinfo trinfo;
+struct xrnic_rdma_conn_param conn_param;
+int rq_ci_db, sq_cmpl_db;
+
+int port = -1;
+module_param_string(server_ip, server_ip, sizeof(server_ip), 0444);
+module_param(port, int, 0444);
+MODULE_PARM_DESC(server_ip, "Target server ip address");
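+
+/* Example module load (illustrative; the IP address is an assumption):
+ *	insmod xernic_bw_test.ko server_ip=192.168.1.10 port=18515
+ * If port is not given, the module falls back to PERFTEST_PORT.
+ */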
+
+/**
+ * perftest_parse_addr() - Parses the input IP address string.
+ * @s_addr: Output socket address structure.
+ * @buf: Input IP address string.
+ * return: 0 if the address is a valid IPv4 or IPv6 address,
+ *         -EINVAL otherwise.
+ */
+int perftest_parse_addr(struct sockaddr_storage *s_addr, char *buf)
+{
+ size_t buflen = strlen(buf);
+ int ret;
+ const char *delim;
+
+ if (buflen <= INET_ADDRSTRLEN) {
+ struct sockaddr_in *sin_addr = (struct sockaddr_in *)s_addr;
+
+ ret = in4_pton(buf, buflen, (u8 *)&sin_addr->sin_addr.s_addr,
+ '\0', NULL);
+ if (!ret)
+ goto fail;
+
+ sin_addr->sin_family = AF_INET;
+ return 0;
+ }
+ if (buflen <= INET6_ADDRSTRLEN) {
+ struct sockaddr_in6 *sin6_addr = (struct sockaddr_in6 *)s_addr;
+
+ ret = in6_pton(buf, buflen,
+ (u8 *)&sin6_addr->sin6_addr.s6_addr,
+ -1, &delim);
+ if (!ret)
+ goto fail;
+
+ sin6_addr->sin6_family = AF_INET6;
+ return 0;
+ }
+fail:
+ return -EINVAL;
+}
+
+/**
+ * rq_handler() - receive packet callback routine.
+ * @rq_count: Rx packet count.
+ * @rq_context: context info.
+ */
+void rq_handler(u32 rq_count, void *rq_context)
+{
+ int i, qp_num, offset;
+ struct ernic_bwtest_struct *rq_buf;
+ struct xrnic_rdma_cm_id *cm_id;
+ struct perftest_wr *sq_wr;
+ struct mr *mem;
+ struct perftest_ctx *ctx;
+
+ ctx = (struct perftest_ctx *)rq_context;
+ cm_id = ctx->cm_id;
+ qp_num = cm_id->child_qp_num;
+ offset = sq_cmpl_db * XRNIC_SEND_SGL_SIZE;
+ for (i = 0; i < rq_count; i++) {
+ if (qp_num == 1) {
+ rq_buf = (struct ernic_bwtest_struct *)
+ ((char *)cm_id->qp_info.rq_buf_ba_ca +
+ ((qp_num - 1) * rq_ci_db *
+ XRNIC_RECV_PKT_SIZE));
+ if (io_mr_idx > PERFTEST_IO_QPS)
+ goto done;
+ mem = perftest_io_mr[io_mr_idx];
+
+ rq_buf->rkey = htonl((unsigned int)mem->rkey);
+ rq_buf->vaddr = cpu_to_be64(mem->vaddr);
+
+ memcpy((u8 *)(trinfo.send_sgl + offset),
+ (u8 *)rq_buf,
+ sizeof(struct ernic_bwtest_struct));
+
+ sq_wr = (struct perftest_wr *)trinfo.sq_ba +
+ sq_cmpl_db;
+ sq_wr->ctx.wr_id = sq_cmpl_db;
+ sq_wr->length = sizeof(struct ernic_bwtest_struct);
+ sq_wr->remote_tag = ntohl(0xDEAD);
+ sq_wr->local_offset[0] = trinfo.send_sgl_phys + offset;
+ sq_wr->local_offset[1] = 0;
+
+ sq_wr->remote_offset[0] = 0x12345678;
+ sq_wr->remote_offset[1] = 0xABCDABCD;
+ sq_wr->completion_info[0] = htonl(0x11111111);
+ sq_wr->completion_info[1] = htonl(0x22222222);
+ sq_wr->completion_info[2] = htonl(0x33333333);
+ sq_wr->completion_info[3] = htonl(0x44444444);
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ }
+ xrnic_post_recv(&cm_id->qp_info, 1);
+ if (qp_num == 1) {
+ xrnic_post_send(&cm_id->qp_info, 1);
+ if (prev_qpn != rq_buf->qp_number) {
+ if (prev_qpn != 0)
+ io_mr_idx++;
+ prev_qpn = rq_buf->qp_number;
+ }
+ }
+
+done:
+ rq_ci_db++;
+
+ if (rq_ci_db >= (PERFTEST_RQ_DEPTH - 20))
+ rq_ci_db = 0;
+ if (qp_num == 1) {
+ sq_cmpl_db++;
+ if (sq_cmpl_db >= PERFTEST_SQ_DEPTH)
+ sq_cmpl_db = 0;
+ }
+ }
+}
+
+/**
+ * sq_handler() - completion call back.
+ * @sq_count: Tx packet count.
+ * @sq_context: context info.
+ */
+void sq_handler(u32 sq_count, void *sq_context)
+{
+/* TODO: This function is just a place holder for now.
+ * This function should handle completions for outgoing
+ * RDMA_SEND, RDMA_READ and RDMA_WRITE.
+ */
+ pr_info("XLNX[%d:%s]\n", __LINE__, __func__);
+}
+
+/**
+ * perftest_fill_wr() - Fills the workrequest in send queue base address.
+ * @sq_ba: send queue base address of the QP.
+ */
+void perftest_fill_wr(void __iomem *sq_ba)
+{
+ struct perftest_wr *sq_wr;
+ int i;
+
+ for (i = 0; i < XRNIC_SQ_DEPTH; i++) {
+ sq_wr = (struct perftest_wr *)sq_ba + i;
+ sq_wr->ctx.wr_id = i;
+ sq_wr->length = 16;
+ sq_wr->completion_info[0] = 0xAAAAAAAA;
+ sq_wr->completion_info[1] = 0xBBBBBBBB;
+ sq_wr->completion_info[2] = 0xCCCCCCCC;
+ sq_wr->completion_info[3] = 0xDDDDDDDD;
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ }
+}
+
+/**
+ * perftest_cm_handler() - CM handler call back routine.
+ * @cm_id: CM ID on which event received.
+ * @conn_event: Event information on the CM.
+ * @return: 0 on success or error code on failure.
+ */
+static int perftest_cm_handler(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event)
+{
+ int qp_num, per_qp_size;
+ struct perftest_ctx *ctx;
+
+ qp_num = cm_id->child_qp_num;
+ memset(&qp_attr, 0, sizeof(struct xrnic_qp_init_attr));
+ ctx = &perf_context[qp_num - 1];
+ switch (conn_event->cm_event) {
+ case XRNIC_REQ_RCVD:
+ qp_attr.xrnic_rq_event_handler = rq_handler;
+ qp_attr.xrnic_sq_event_handler = sq_handler;
+ qp_attr.qp_type = XRNIC_QPT_RC;
+ if (qp_num > 1) {
+ qp_attr.recv_pkt_size = _1MB_BUF_SIZ;
+ per_qp_size = (qp_num - 2) * _1MB_BUF_SIZ *
+ PERFTEST_RQ_DEPTH + XRNIC_RECV_PKT_SIZE *
+ PERFTEST_RQ_DEPTH;
+ } else {
+ qp_attr.recv_pkt_size = XRNIC_RECV_PKT_SIZE;
+ per_qp_size = 0;
+ }
+ qp_attr.rq_buf_ba_ca_phys = trinfo.rq_buf_ba_phys +
+ per_qp_size;
+ qp_attr.rq_buf_ba_ca = (char *)trinfo.rq_buf_ba +
+ per_qp_size;
+ per_qp_size = (qp_num - 1) * sizeof(struct perftest_wr) *
+ PERFTEST_SQ_DEPTH;
+ qp_attr.sq_ba_phys = trinfo.sq_ba_phys + per_qp_size;
+ qp_attr.sq_ba = (char *)trinfo.sq_ba + per_qp_size;
+ per_qp_size = (qp_num - 1) * (PERFTEST_SQ_DEPTH * 4);
+ qp_attr.cq_ba_phys = trinfo.cq_ba_phys + per_qp_size;
+ qp_attr.cq_ba = (char *)trinfo.cq_ba + per_qp_size;
+ qp_attr.rq_context = ctx;
+ qp_attr.sq_context = ctx;
+ ctx->cm_id = cm_id;
+ qp_attr.sq_depth = PERFTEST_SQ_DEPTH;
+ qp_attr.rq_depth = PERFTEST_RQ_DEPTH;
+ ctx->reg_mr = reg_phys_mr(pd, phys_mem[qp_num - 1],
+ PERFTEST_DEFAULT_MEM_SIZE,
+ MR_ACCESS_RDWR, NULL);
+ if (qp_num > 1)
+ perftest_io_mr[qp_num - 2] = ctx->reg_mr;
+
+ xrnic_rdma_create_qp(cm_id, ctx->reg_mr->pd,
+ &qp_attr);
+
+ memset(&conn_param, 0, sizeof(conn_param));
+ conn_param.initiator_depth = 16;
+ conn_param.responder_resources = 16;
+ xrnic_rdma_accept(cm_id, &conn_param);
+ break;
+ case XRNIC_ESTABLISHD:
+ if (cm_id->child_qp_num > 1) {
+ perftest_fill_wr((char *)trinfo.sq_ba +
+ ((qp_num - 1) *
+ sizeof(struct perftest_wr) *
+ PERFTEST_SQ_DEPTH));
+ xrnic_hw_hs_reset_sq_cq(&cm_id->qp_info, NULL);
+ }
+ break;
+ case XRNIC_DREQ_RCVD:
+ xrnic_destroy_qp(&cm_id->qp_info);
+ xrnic_rdma_disconnect(cm_id);
+ xrnic_rdma_destroy_id(cm_id, 0);
+ dereg_mr(ctx->reg_mr);
+ io_mr_idx = 0;
+ prev_qpn = 0;
+ rq_ci_db = 0;
+ sq_cmpl_db = 0;
+ break;
+ default:
+ pr_info("Unhandled CM Event: %d\n",
+ conn_event->cm_event);
+ }
+ return 0;
+}
+
+/**
+ * perftest_init() - Perf test init function.
+ * @return: 0 on success or error code on failure.
+ */
+static int __init perftest_init(void)
+{
+ int ret, i;
+ struct sockaddr_storage s_addr;
+ struct sockaddr_in *sin_addr;
+ struct sockaddr_in6 *sin6_addr;
+
+ if (strcmp(server_ip, "0.0.0.0") == 0) {
+ pr_err("server ip module parameter not provided\n");
+ return -EINVAL;
+ }
+
+ /* If port number is not set, then it should point to the default */
+ if (-1 == port) {
+ port = PERFTEST_PORT;
+ pr_info("Using app default port number: %d\n", port);
+ } else if (port < 0) {
+ /* Any other negative value */
+ /* Some ports are reserved and a few others may be in use;
+ * a check could be added here to validate whether the given
+ * port number is free to use.
+ */
+ pr_err("port number should not be a negative value\n");
+ return -EINVAL;
+ }
+ pr_info("Using port number %d\n", port);
+
+ cm_id = xrnic_rdma_create_id(perftest_cm_handler, NULL, XRNIC_PS_TCP,
+ XRNIC_QPT_UC, PERFTEST_MAX_QPS);
+ if (!cm_id)
+ goto err;
+
+ if (perftest_parse_addr(&s_addr, server_ip))
+ goto err;
+
+ if (s_addr.ss_family == AF_INET) {
+ sin_addr = (struct sockaddr_in *)&s_addr;
+ ret = xrnic_rdma_bind_addr(cm_id,
+ (u8 *)&sin_addr->sin_addr.s_addr,
+ port, AF_INET);
+ if (ret < 0) {
+ pr_err("RDMA BIND Failed for IPv4\n");
+ goto err;
+ }
+ }
+ if (s_addr.ss_family == AF_INET6) {
+ sin6_addr = (struct sockaddr_in6 *)&s_addr;
+ ret = xrnic_rdma_bind_addr(cm_id,
+ (u8 *)&sin6_addr->sin6_addr.s6_addr,
+ port, AF_INET6);
+ if (ret < 0) {
+ pr_err("RDMA BIND Failed for IPv6\n");
+ goto err;
+ }
+ }
+
+ if (xrnic_rdma_listen(cm_id, 1) != XRNIC_SUCCESS)
+ goto err;
+
+ trinfo.rq_buf_ba_phys = alloc_mem(NULL, PERF_TEST_RQ_BUF_SIZ);
+ if (-ENOMEM == trinfo.rq_buf_ba_phys)
+ goto err;
+ trinfo.rq_buf_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr
+ (trinfo.rq_buf_ba_phys);
+
+ trinfo.send_sgl_phys = alloc_mem(NULL, 0x400000);
+ if (-ENOMEM == trinfo.send_sgl_phys)
+ goto err;
+ trinfo.send_sgl =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.send_sgl_phys);
+
+ trinfo.sq_ba_phys = alloc_mem(NULL, 0x100000);
+ if (-ENOMEM == trinfo.sq_ba_phys)
+ goto err;
+ trinfo.sq_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.sq_ba_phys);
+
+ trinfo.cq_ba_phys = alloc_mem(NULL, 0x40000);
+ if (-ENOMEM == trinfo.cq_ba_phys)
+ goto err;
+ trinfo.cq_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.cq_ba_phys);
+ trinfo.rq_wptr_db_phys = alloc_mem(NULL, 8);
+ trinfo.sq_cmpl_db_phys = alloc_mem(NULL, 8);
+ pd = alloc_pd();
+ for (i = 0; i < PERFTEST_MAX_QPS; i++) {
+ phys_mem[i] = alloc_mem(pd, PERFTEST_DEFAULT_MEM_SIZE);
+ if (IS_ERR_VALUE(phys_mem[i])) {
+ pr_err("PERFTEST[%d:%s] Mem registration failed: %lld\n",
+ __LINE__, __func__, phys_mem[i]);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+/* free_mem() works only on valid physical addresses returned from
+ * alloc_mem() and ignores NULL or invalid addresses.
+ * So, even if any of the above allocations fail in the middle,
+ * we can safely call free_mem() on all addresses.
+ *
+ * We are using carve-out memory for the requirements of ERNIC,
+ * so we cannot use devm_kzalloc() as the kernel cannot see these
+ * memories until they are ioremapped.
+ */
+ free_mem(trinfo.rq_buf_ba_phys);
+ free_mem(trinfo.send_sgl_phys);
+ free_mem(trinfo.sq_ba_phys);
+ free_mem(trinfo.cq_ba_phys);
+ free_mem(trinfo.rq_wptr_db_phys);
+ free_mem(trinfo.sq_cmpl_db_phys);
+ for (i = 0; i < PERFTEST_MAX_QPS; i++)
+ free_mem(phys_mem[i]);
+
+ dealloc_pd(pd);
+
+ return -EINVAL;
+}
+
+/**
+ * perftest_exit() - perftest module exit function.
+ */
+static void __exit perftest_exit(void)
+{
+ int i;
+
+ free_mem(trinfo.rq_buf_ba_phys);
+ free_mem(trinfo.send_sgl_phys);
+ free_mem(trinfo.sq_ba_phys);
+ free_mem(trinfo.cq_ba_phys);
+ free_mem(trinfo.rq_wptr_db_phys);
+ free_mem(trinfo.sq_cmpl_db_phys);
+ for (i = 0; i < PERFTEST_MAX_QPS; i++)
+ free_mem(phys_mem[i]);
+
+ dealloc_pd(pd);
+}
+
+/* This driver is an example driver which uses the APIs exported by the
+ * ERNIC driver to demonstrate RDMA communication between peers on the
+ * InfiniBand network. The remote peer can be any RDMA enabled NIC.
+ * There is no real device for this driver, so a compatibility string and
+ * probe function are not needed.
+ */
+module_init(perftest_init);
+module_exit(perftest_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Perftest Example driver");
+MODULE_AUTHOR("SDHANVAD");
diff --git a/drivers/staging/xlnx_ernic/xhw_config.h b/drivers/staging/xlnx_ernic/xhw_config.h
new file mode 100644
index 000000000000..7846abd18bec
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xhw_config.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_HW_CONFIG_H
+#define _XRNIC_HW_CONFIG_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+
+#define XRNIC_HW_MAX_QP_ENABLE 30
+#define XRNIC_HW_MAX_QP_SUPPORT 28
+#define XRNIC_HW_FLOW_CONTROL_VALUE 0
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_HW_CONFIG_H*/
diff --git a/drivers/staging/xlnx_ernic/xhw_def.h b/drivers/staging/xlnx_ernic/xhw_def.h
new file mode 100644
index 000000000000..c59f266c03f6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xhw_def.h
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_HW_DEF_H
+#define _XRNIC_HW_DEF_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include "xhw_config.h"
+
+#define XRNIC_MAX_QP_ENABLE XRNIC_HW_MAX_QP_ENABLE
+#define XRNIC_MAX_QP_SUPPORT XRNIC_HW_MAX_QP_SUPPORT
+#define XRNIC_MAX_PORT_SUPPORT 0xFFFE
+#define XRNIC_REG_WIDTH 32
+#define XRNIC_QPS_ENABLED XRNIC_MAX_QP_ENABLE
+#define XRNIC_QP1_SEND_PKT_SIZE 512
+#define XRNIC_FLOW_CONTROL_VALUE XRNIC_HW_FLOW_CONTROL_VALUE
+#define XRNIC_CONFIG_XRNIC_EN 0x1
+#define XRNIC_UDP_SRC_PORT 0x12B7
+#define XRNIC_CONFIG_IP_VERSION (0x1 << 1)
+#define XRNIC_CONFIG_DEPKT_BYPASS_EN (0x1 << 2)
+#define XRNIC_CONFIG_ERR_BUF_EN (0x1 << 5)
+#define XRNIC_CONFIG_FLOW_CONTROL_EN (XRNIC_FLOW_CONTROL_VALUE << 6)
+#define XRNIC_CONFIG_NUM_QPS_ENABLED (XRNIC_QPS_ENABLED << 8)
+#define XRNIC_CONFIG_UDP_SRC_PORT (XRNIC_UDP_SRC_PORT << 16)
+
+#define XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED 1
+
+/* Clear the interrupt by writing that bit to the interrupt status register. */
+#define RDMA_READ 4
+#define RDMA_SEND 2
+#define RDMA_WRITE 0
+
+#define XRNIC_QP_TIMEOUT_RETRY_CNT 0x3 /*0x3*/
+#define XRNIC_QP_TIMEOUT_RNR_NAK_TVAL 0x1F /*MAX*/
+#define XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT 0x1F /*MAX 0x1f*/
+#define XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT \
+ (XRNIC_QP_TIMEOUT_RETRY_CNT << 8)
+#define XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT \
+ (XRNIC_QP_TIMEOUT_RETRY_CNT << 11)
+#define XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL \
+ (XRNIC_QP_TIMEOUT_RNR_NAK_TVAL << 16)
+
+#define XRNIC_QP_PMTU 0x4
+#define XRNIC_QP_MAX_RD_OS 0xFF
+#define XRNIC_QP_RQ_BUFF_SZ 0x2
+#define XRNIC_QP1_RQ_BUFF_SZ 0x02
+#define XRNIC_QP_CONFIG_QP_ENABLE 0x1
+#define XRNIC_QP_CONFIG_ACK_COALSE_EN BIT(1)
+#define XRNIC_QP_CONFIG_RQ_INTR_EN BIT(2)
+#define XRNIC_QP_CONFIG_CQE_INTR_EN BIT(3)
+#define XRNIC_QP_CONFIG_HW_HNDSHK_DIS BIT(4)
+#define XRNIC_QP_CONFIG_CQE_WRITE_EN BIT(5)
+#define XRNIC_QP_CONFIG_UNDER_RECOVERY BIT(6)
+#define XRNIC_QP_CONFIG_IPV6_EN BIT(7)
+#define XRNIC_QP_CONFIG_PMTU (0x4 << 8)
+#define XRNIC_QP_CONFIG_PMTU_256 (0x0 << 8)
+#define XRNIC_QP_CONFIG_PMTU_512 (0x1 << 8)
+#define XRNIC_QP_CONFIG_PMTU_1024 (0x2 << 8)
+#define XRNIC_QP_CONFIG_PMTU_2048 (0x3 << 8)
+#define XRNIC_QP_CONFIG_PMTU_4096 (0x4 << 8)
+#define XRNIC_QP_RQ_BUF_SIZ_DIV (256)
+#define XRNIC_QP_RQ_BUF_CFG_REG_BIT_OFS (16)
+#define XRNIC_QP_CONFIG_RQ_BUFF_SZ(x) (((x) / XRNIC_QP_RQ_BUF_SIZ_DIV)\
+ << XRNIC_QP_RQ_BUF_CFG_REG_BIT_OFS)
+#define XRNIC_QP1_CONFIG_RQ_BUFF_SZ (XRNIC_QP1_RQ_BUFF_SZ << 16)
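+
+/* Worked example (illustrative): XRNIC_QP_CONFIG_RQ_BUFF_SZ() encodes the RQ
+ * buffer size in units of 256 bytes in bits 31:16 of the QP config register,
+ * e.g. XRNIC_QP_CONFIG_RQ_BUFF_SZ(1024) == (1024 / 256) << 16 == 0x00040000.
+ */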
+
+#define XRNIC_QP_PARTITION_KEY 0xFFFF
+#define XRNIC_QP_TIME_TO_LIVE 0x40
+
+#define XRNIC_QP_ADV_CONFIG_TRAFFIC_CLASS 0x3F
+#define XRNIC_QP_ADV_CONFIG_TIME_TO_LIVE (XRNIC_QP_TIME_TO_LIVE << 8)
+#define XRNIC_QP_ADV_CONFIG_PARTITION_KEY (XRNIC_QP_PARTITION_KEY << 16)
+
+#define XRNIC_REJ_RESEND_COUNT 3
+#define XRNIC_REP_RESEND_COUNT 3
+#define XRNIC_DREQ_RESEND_COUNT 3
+
+#define XNVEMEOF_RNIC_IF_RHOST_BASE_ADDRESS 0x8c000000
+#define XRNIC_CONFIG_ENABLE 1
+#define XRNIC_RESERVED_SPACE 0x4000
+#define XRNIC_NUM_OF_TX_HDR 128
+#define XRNIC_SIZE_OF_TX_HDR 128
+#define XRNIC_NUM_OF_TX_SGL 256
+#define XRNIC_SIZE_OF_TX_SGL 64
+#define XRNIC_NUM_OF_BYPASS_BUF 32
+#define XRNIC_SIZE_OF_BYPASS_BUF 512
+#define XRNIC_NUM_OF_ERROR_BUF 64
+#define XRNIC_SIZE_OF_ERROR_BUF 256
+#define XRNIC_OUT_ERRST_Q_NUM_ENTRIES 0x40
+#define XRNIC_OUT_ERRST_Q_WRPTR 0x0
+#define XRNIC_IN_ERRST_Q_NUM_ENTRIES 0x40
+#define XRNIC_IN_ERRST_Q_WRPTR 0x0
+#define XRNIC_NUM_OF_DATA_BUF 4096
+#define XRNIC_SIZE_OF_DATA_BUF 4096
+#define XRNIC_NUM_OF_RESP_ERR_BUF 64
+#define XRNIC_SIZE_OF_RESP_ERR_BUF 256
+#define XRNIC_MAD_HEADER 24
+#define XRNIC_MAD_DATA 232
+#define XRNIC_RECV_PKT_SIZE 512
+#define XRNIC_SEND_PKT_SIZE 64
+#define XRNIC_SEND_SGL_SIZE 4096
+#define XRNIC_MAX_SEND_SGL_SIZE 4096
+#define XRNIC_MAX_SEND_PKT_SIZE 4096
+#define XRNIC_MAX_RECV_PKT_SIZE 4096
+#define XRNIC_MAX_SQ_DEPTH 256
+#define XRNIC_MAX_RQ_DEPTH 256
+#define XRNIC_SQ_DEPTH 128
+#define XRNIC_RQ_DEPTH 64
+#define XRNIC_RQ_WRPTR_DBL 0xBC004000
+#define XRNIC_BYPASS_BUF_WRPTR 0xBC00C000
+#define XRNIC_ERROR_BUF_WRPTR 0xBC010000
+
+#define PKT_VALID_ERR_INTR_EN 0x1
+#define MAD_PKT_RCVD_INTR_EN (0x1 << 1)
+#define BYPASS_PKT_RCVD_INTR_EN (0x1 << 2)
+#define RNR_NACK_GEN_INTR_EN (0x1 << 3)
+#define WQE_COMPLETED_INTR_EN (0x1 << 4)
+#define ILL_OPC_SENDQ_INTR_EN (0x1 << 5)
+#define QP_PKT_RCVD_INTR_EN (0x1 << 6)
+#define FATAL_ERR_INTR_EN (0x1 << 7)
+#define ERNIC_MEM_REGISTER
+
+#define XRNIC_INTR_ENABLE_DEFAULT 0x000000FF
+#define XRNIC_VALID_INTR_ENABLE 0
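+
+/* Worked example (illustrative): XRNIC_INTR_ENABLE_DEFAULT is the OR of the
+ * eight per-event enable bits defined above, i.e.
+ * (PKT_VALID_ERR_INTR_EN | MAD_PKT_RCVD_INTR_EN | BYPASS_PKT_RCVD_INTR_EN |
+ * RNR_NACK_GEN_INTR_EN | WQE_COMPLETED_INTR_EN | ILL_OPC_SENDQ_INTR_EN |
+ * QP_PKT_RCVD_INTR_EN | FATAL_ERR_INTR_EN) == 0x000000FF.
+ */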
+
+/* XRNIC Controller global configuration registers */
+
+struct xrnic_conf {
+ __u32 xrnic_en:1;
+ __u32 ip_version:1; //IPv6 or IPv4
+ __u32 depkt_bypass_en:1;
+ __u32 reserved:5;
+ __u32 num_qps_enabled:8;
+ __u32 udp_src_port:16;
+} __packed;
+
+struct tx_hdr_buf_sz {
+ __u32 num_hdrs:16;
+ __u32 buffer_sz:16; //in bytes
+} __packed;
+
+struct tx_sgl_buf_sz {
+ __u32 num_sgls:16;
+ __u32 buffer_sz:16; //in bytes
+} __packed;
+
+struct bypass_buf_sz {
+ __u32 num_bufs:16;
+ __u32 buffer_sz:16;
+} __packed;
+
+struct err_pkt_buf_sz {
+ __u32 num_bufs:16;
+ __u32 buffer_sz:16;
+} __packed;
+
+struct timeout_conf {
+ __u32 timeout:5;
+ __u32 reserved:3;
+ __u32 retry_cnt:3;
+ __u32 retry_cnt_rnr:3;
+ __u32 reserved1:2;
+ __u32 rnr_nak_tval:5;
+ __u32 reserved2:11;
+
+} __packed;
+
+struct out_errsts_q_sz {
+ __u32 num_entries:16;
+ __u32 reserved:16;
+} __packed;
+
+struct in_errsts_q_sz {
+ __u32 num_entries:16;
+ __u32 reserved:16;
+} __packed;
+
+struct inc_sr_pkt_cnt {
+ __u32 inc_send_cnt:16;
+ __u32 inc_rresp_cnt:16;
+} __packed;
+
+struct inc_am_pkt_cnt {
+ __u32 inc_acknack_cnt:16;
+ __u32 inc_mad_cnt:16;
+} __packed;
+
+struct out_io_pkt_cnt {
+ __u32 inc_send_cnt:16;
+ __u32 inc_rw_cnt:16;
+} __packed;
+
+struct out_am_pkt_cnt {
+ __u32 inc_acknack_cnt:16;
+ __u32 inc_mad_cnt:16;
+} __packed;
+
+struct last_in_pkt {
+ __u32 opcode:8;
+ __u32 qpid:8;
+ __u32 psn_lsb:16;
+} __packed;
+
+struct last_out_pkt {
+ __u32 opcode:8;
+ __u32 qpid:8;
+ __u32 psn_lsb:16;
+} __packed;
+
+/*Interrupt register definition.*/
+struct intr_en {
+ __u32 pkt_valdn_err_intr_en:1;
+ __u32 mad_pkt_rcvd_intr_en:1;
+ __u32 bypass_pkt_rcvd_intr_en:1;
+ __u32 rnr_nack_gen_intr_en:1;
+ __u32 wqe_completed_i:1;
+ __u32 ill_opc_in_sq_intr_en:1;
+ __u32 qp_pkt_rcvd_intr_en:1;
+ __u32 fatal_err_intr_en:1;
+ __u32 reverved:24;
+} __packed;
+
+struct data_buf_sz {
+ __u16 num_bufs;
+ __u16 buffer_sz;
+};
+
+struct resp_err_buf_sz {
+ __u16 num_bufs;
+ __u16 buffer_sz;
+};
+
+/*Global register configuration*/
+struct xrnic_ctrl_config {
+ struct xrnic_conf xrnic_conf;
+ __u32 xrnic_adv_conf;
+ __u32 reserved1[2];
+ __u32 mac_xrnic_src_addr_lsb;
+ __u32 mac_xrnic_src_addr_msb;
+ __u32 reserved2[2];
+ __u32 ip_xrnic_addr1; //0x0020
+ __u32 ip_xrnic_addr2; //0x0024
+ __u32 ip_xrnic_addr3; //0x0028
+ __u32 ip_xrnic_addr4; //0x002C
+ __u32 tx_hdr_buf_ba; //0x0030
+ __u32 reserved_0x34; //0x0034
+ struct tx_hdr_buf_sz tx_hdr_buf_sz; //0x0038
+ __u32 reserved_0x3c;
+
+ __u32 tx_sgl_buf_ba; //0x0040
+ __u32 reserved_0x44; //0x0044
+ struct tx_sgl_buf_sz tx_sgl_buf_sz; //0x0048
+ __u32 reserved_0x4c;
+
+ __u32 bypass_buf_ba; //0x0050
+ __u32 reserved_0x54; //0x0054
+ struct bypass_buf_sz bypass_buf_sz; //0x0058
+ __u32 bypass_buf_wrptr; //0x005C
+ __u32 err_pkt_buf_ba; //0x0060
+ __u32 reserved_0x64; //0x0064
+ struct err_pkt_buf_sz err_pkt_buf_sz; //0x0068
+ __u32 err_buf_wrptr; //0x006C
+ __u32 ipv4_address; //0x0070
+ __u32 reserved_0x74;
+
+ __u32 out_errsts_q_ba; //0x0078
+ __u32 reserved_0x7c;
+ struct out_errsts_q_sz out_errsts_q_sz; //0x0080
+ __u32 out_errsts_q_wrptr; //0x0084
+
+ __u32 in_errsts_q_ba; //0x0088
+ __u32 reserved_0x8c;
+ struct in_errsts_q_sz in_errsts_q_sz; //0x0090
+ __u32 in_errsts_q_wrptr; //0x0094
+
+ __u32 reserved_0x98; //0x0098
+ __u32 reserved_0x9c; //0x009C
+
+ __u32 data_buf_ba; //0x00A0
+ __u32 reserved_0xa4; //0x00A4
+ struct data_buf_sz data_buf_sz; //0x00A8
+
+ __u32 cnct_io_conf; //0x00AC
+
+ __u32 resp_err_pkt_buf_ba; //0x00B0
+ __u32 reserved_0xb4; //0x00B4
+ struct resp_err_buf_sz resp_err_buf_sz; //0x00B8
+
+ __u32 reserved3[17]; //0x0095
+
+ struct inc_sr_pkt_cnt inc_sr_pkt_cnt;//0x0100
+ struct inc_am_pkt_cnt inc_am_pkt_cnt;//0x0104
+ struct out_io_pkt_cnt out_io_pkt_cnt;//0x108
+ struct out_am_pkt_cnt out_am_pkt_cnt;//0x010c
+ struct last_in_pkt last_in_pkt; //0x0110
+ struct last_out_pkt last_out_pkt; //0x0114
+
+ __u32 inv_dup_pkt_cnt; //0x0118 incoming invalid duplicate
+
+ __u32 rnr_in_pkt_sts; //0x011C
+ __u32 rnr_out_pkt_sts; //0x0120
+
+ __u32 wqe_proc_sts; //0x0124
+
+ __u32 pkt_hdr_vld_sts; //0x0128
+ __u32 qp_mgr_sts; //0x012C
+
+ __u32 incoming_all_drop_count; //0x130
+ __u32 incoming_nack_pkt_count; //0x134
+ __u32 outgoing_nack_pkt_count; //0x138
+ __u32 resp_handler_status; //0x13C
+
+ __u32 reserved4[16];
+
+ struct intr_en intr_en; //0x0180
+ __u32 intr_sts; //0x0184
+ __u32 reserved5[2];
+ __u32 rq_intr_sts_1; //0x0190
+ __u32 rq_intr_sts_2; //0x0194
+ __u32 rq_intr_sts_3; //0x0198
+ __u32 rq_intr_sts_4; //0x019C
+ __u32 rq_intr_sts_5; //0x01A0
+ __u32 rq_intr_sts_6; //0x01A4
+ __u32 rq_intr_sts_7; //0x01A8
+ __u32 rq_intr_sts_8; //0x01AC
+
+ __u32 cq_intr_sts_1; //0x01B0
+ __u32 cq_intr_sts_2; //0x01B4
+ __u32 cq_intr_sts_3; //0x01B8
+ __u32 cq_intr_sts_4; //0x01BC
+ __u32 cq_intr_sts_5; //0x01B0
+ __u32 cq_intr_sts_6; //0x01B4
+ __u32 cq_intr_sts_7; //0x01B8
+ __u32 cq_intr_sts_8; //0x01BC
+
+ __u32 reserved6[12];
+};
+
+struct qp_conf {
+ __u32 qp_enable:1;
+ __u32 ack_coalsc_en:1;
+ __u32 rq_intr_en:1;
+ __u32 cq_intr_en:1;
+ __u32 hw_hndshk_dis:1;
+ __u32 cqe_write_en:1;
+ __u32 qp_under_recovery:1;
+ __u32 ip_version:1;
+ __u32 pmtu :3;
+ __u32 reserved2:5;
+ __u32 rq_buf_sz:16; //RQ buffer size (in multiples of 256B)
+} __packed;
+
+struct qp_adv_conf {
+ __u32 traffic_class:6;
+ __u32 reserved1 :2;
+ __u32 time_to_live:8;
+ __u32 partition_key:16;
+} __packed;
+
+struct time_out {
+ __u32 timeout:5;
+ __u32 reserved1:3;
+ __u32 retry_cnt:3;
+ __u32 reserved2:5;
+ __u32 rnr_nak_tval:5;
+ __u32 reserved3:3;
+ __u32 curr_retry_cnt:3;
+ __u32 reserved4:2;
+ __u32 curr_rnr_nack_cnt:3;
+ __u32 reserved:1;
+} __packed;
+
+struct qp_status {
+ __u32 qp_fatal:1;
+ __u32 rq_ovfl:1;
+ __u32 sq_full:1;
+ __u32 osq_full:1;
+ __u32 cq_full:1;
+ __u32 reserved1:4;
+ __u32 sq_empty:1;
+ __u32 osq_empty:1;
+ __u32 qp_retried:1;
+ __u32 reserved2:4;
+ __u32 nak_syndr_rcvd:7;
+ __u32 reserved3:1;
+ __u32 curr_retry_cnt:3;
+ __u32 reserved4:1;
+ __u32 curr_rnr_nack_cnt:3;
+ __u32 reserved5:1;
+} __packed;
+
+// This structure is applicable to RDMA queue pairs other than QP1.
+struct rq_buf_ba_ca {
+ __u32 reserved:8; //0x308
+ __u32 rq_buf_ba:24;
+} __packed;
+
+struct sq_ba {
+ __u32 reserved1:5; //0x310
+ __u32 sq_ba:27;
+} __packed;
+
+struct cq_ba {
+ __u32 reserved2:5; //0x318
+ __u32 cq_ba:27;
+} __packed;
+
+struct cq_head {
+ __u32 cq_head:16; //0x330
+ __u32 reserved5:16;
+} __packed;
+
+struct rq_ci_db {
+ __u32 rq_ci_db:16; //0x334
+ __u32 reserved6:16;
+} __packed;
+
+struct sq_pi_db {
+ __u32 sq_pi_db:16; //0x338
+ __u32 reserved7:16;
+} __packed;
+
+struct q_depth {
+ __u32 sq_depth:16; //0x33c
+ __u32 cq_depth:16;
+} __packed;
+
+struct sq_psn {
+ __u32 sq_psn:24; //0x340
+ __u32 reserved8:8;
+} __packed;
+
+struct last_rq_req {
+ __u32 rq_psn:24; //0x344
+ __u32 rq_opcode:8;
+} __packed;
+
+struct dest_qp_conf {
+ __u32 dest_qpid:24; //0x348
+ __u32 reserved9:8;
+} __packed;
+
+struct stat_ssn {
+ __u32 exp_ssn:24; //0x380
+ __u32 reserved10:8;
+} __packed;
+
+struct stat_msn {
+ __u32 curr_msn:24; //0x384
+ __u32 reserved11:8;
+
+} __packed;
+
+struct stat_curr_sqptr_pro {
+ __u32 curr_sqptr_proc:16;
+ __u32 reserved12:16;
+} __packed;
+
+struct stat_resp_psn {
+ __u32 exp_resp_psn:24;
+ __u32 reserved:8;
+} __packed;
+
+struct stat_rq_buf_ca {
+ __u32 reserved:8;
+ __u32 rq_buf_ca:24;
+} __packed;
+
+/* QP1 is a special QP for all the management packets as per the RoCEv2 spec */
+struct rdma_qp1_attr {
+ struct qp_conf qp_conf; //0x200
+ struct qp_adv_conf qp_adv_conf; //0x204
+ struct rq_buf_ba_ca rq_buf_ba_ca; //0x208
+ __u32 reserved_0x20c; //0x20c
+ struct sq_ba sq_ba; //0x210
+ __u32 reserved_0x214; //0x214
+ struct cq_ba cq_ba; //0x218
+ __u32 reserved_0x21c; //0x2c0
+ __u32 rq_wrptr_db_add; //0x220
+ __u32 reserved_0x224; //0x224
+ __u32 sq_cmpl_db_add; //0x228
+ __u32 reserved_0x22c; //0x22c
+ struct cq_head cq_head; //0x230
+ struct rq_ci_db rq_ci_db; //0x234
+ struct sq_pi_db sq_pi_db; //0x238
+ struct q_depth q_depth; //0x23c
+ __u32 reserved1[2]; //0x240
+ struct dest_qp_conf dest_qp_conf; //0x248
+ struct timeout_conf timeout_conf; //0x24C
+ __u32 mac_dest_addr_lsb; //0x250
+ __u32 mac_dest_addr_msb; //0x254
+ __u32 reserved2[2];
+ __u32 ip_dest_addr1; //0x260
+ __u32 ip_dest_addr2; //0x264
+ __u32 ip_dest_addr3; //0x268
+ __u32 ip_dest_addr4; //0x26C
+ __u32 reserved3[6]; //0x270-287(inclusive)
+ struct qp_status qp_status; //0x288
+ __u32 reserved4[2]; //0x240-287(inclusive)
+ struct stat_rq_buf_ca stat_rq_buf_ca;//0x294
+ __u32 reserved5[26]; //0x298-2Ff(inclusive)
+};
+
+/* General RDMA QP attribute*/
+struct rdma_qp_attr {
+ struct qp_conf qp_conf; //0x300
+ struct qp_adv_conf qp_adv_conf; //0x304
+ struct rq_buf_ba_ca rq_buf_ba_ca;//0x308
+ __u32 reserved_0x30c; //0x30c
+ struct sq_ba sq_ba; //0x310
+ __u32 reserved_0x314; //0x214
+ struct cq_ba cq_ba; //0x318
+ __u32 reserved_0x31c; //0x31c
+ __u32 rq_wrptr_db_add; //0x320
+ __u32 reserved_0x324; //0x324
+ __u32 sq_cmpl_db_add; //0x328
+ __u32 reserved_0x32c; //0x22c
+ struct cq_head cq_head; //0x330
+ struct rq_ci_db rq_ci_db;//0x334
+ struct sq_pi_db sq_pi_db; //0x338
+ struct q_depth q_depth;//0x33c
+ struct sq_psn sq_psn; //0x340
+ struct last_rq_req last_rq_req;//0x344
+ struct dest_qp_conf dest_qp_conf; //0x348
+ struct timeout_conf timeout_conf; //0x34C
+ __u32 mac_dest_addr_lsb; //0x350
+ __u32 mac_dest_addr_msb; //0x354
+ __u32 reserved1[2]; //0x358
+ __u32 ip_dest_addr1; //0x360
+ __u32 ip_dest_addr2; //0x364
+ __u32 ip_dest_addr3; //0x368
+ __u32 ip_dest_addr4; //0x36C
+ __u32 reserved2[4];
+ struct stat_ssn stat_ssn;//0x380
+ struct stat_msn stat_msn;//0x384
+ struct qp_status qp_status; //0x388
+ struct stat_curr_sqptr_pro stat_curr_sqptr_pro;//0x38C
+ struct stat_resp_psn stat_resp_psn; //0x0390
+ struct stat_rq_buf_ca stat_rq_buf_ca;//0x0394
+ __u32 stat_wqe; //0x398
+ __u32 stat_rq_pi_db; //0x39C
+#ifdef ERNIC_MEM_REGISTER
+ __u32 reserved3[4];
+ __u32 pd;
+ __u32 reserved[19];
+#else
+ __u32 reserved3[24];
+#endif
+};
+
+union ctx { // 2 Byte
+ __u16 context;
+ __u16 wr_id;
+} __packed;
+
+//Work request 64Byte size
+struct wr {
+ union ctx ctx; // 2 Byte
+ __u8 reserved1[2];
+ __u32 local_offset[2];
+ __u32 length;
+ __u8 opcode;
+ __u8 reserved2[3];
+ __u32 remote_offset[2];
+ __u32 remote_tag;
+ __u32 completion_info[4];
+ __u8 reserved4[16];
+} __packed;
+
+union ctxe {
+ __u16 context :16;
+ __u16 wr_id:16;
+} __packed;
+
+//Completion Queue Entry 16 Byte
+struct cqe {
+ union ctxe ctxe; // 2 Byte
+ __u8 opcode;
+ __u8 err_flag;
+} __packed;
+
+struct xrnic_reg_map {
+ struct xrnic_ctrl_config xrnic_ctrl_config;
+ struct rdma_qp1_attr rdma_qp1_attr;
+ struct rdma_qp_attr rdma_qp_attr[255];
+
+};
+
+struct xrnic_memory_map {
+ struct xrnic_reg_map *xrnic_regs;
+ u64 xrnic_regs_phys;
+ void *send_sgl;
+ u64 send_sgl_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ struct wr *sq_ba;
+ u64 sq_ba_phys;
+ void *tx_hdr_buf_ba;
+ u64 tx_hdr_buf_ba_phys;
+ void *tx_sgl_buf_ba;
+ u64 tx_sgl_buf_ba_phys;
+ void *bypass_buf_ba;
+ u64 bypass_buf_ba_phys;
+ void *err_pkt_buf_ba;
+ u64 err_pkt_buf_ba_phys;
+ void *out_errsts_q_ba;
+ u64 out_errsts_q_ba_phys;
+ void *in_errsts_q_ba;
+ u64 in_errsts_q_ba_phys;
+ void *rq_wrptr_db_add;
+ u64 rq_wrptr_db_add_phys;
+ void *sq_cmpl_db_add;
+ u64 sq_cmpl_db_add_phys;
+ void *stat_rq_buf_ca;
+ u64 stat_rq_buf_ca_phys;
+ void *data_buf_ba;
+ u64 data_buf_ba_phys;
+ u64 resp_err_pkt_buf_ba_phys;
+ void *resp_err_pkt_buf_ba;
+ u32 intr_en;
+ u32 cq_intr[8];
+ u32 rq_intr[8];
+ u64 xrnicif_phys;
+};
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_HW_DEF_H*/
diff --git a/drivers/staging/xlnx_ernic/xif.h b/drivers/staging/xlnx_ernic/xif.h
new file mode 100644
index 000000000000..fb5f02d8c08c
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xif.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_IF_H
+#define _XRNIC_IF_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/udp.h>
+
+#define XRNIC_MAX_CHILD_CM_ID 255
+#define XRNIC_CM_PRVATE_DATA_LENGTH 32
+
+enum xrnic_wc_event {
+ XRNIC_WC_RDMA_WRITE = 0x0,
+ XRNIC_WC_SEND = 0x2,
+ XRNIC_WC_RDMA_READ = 0x4,
+};
+
+union xrnic_ctxe { // 2 Byte
+ __u16 context :16;
+ __u16 wr_id:16;
+} __packed;
+
+struct xrnic_cqe {
+ union xrnic_ctxe ctxe; // 2 Byte
+ __u8 opcode; // 1 Byte
+ __u8 err_flag; // 1 Byte
+} __packed;
+
+enum xrnic_port_space {
+ XRNIC_PS_SDP = 0x0001,
+ XRNIC_PS_IPOIB = 0x0002,
+ XRNIC_PS_IB = 0x013F,
+ XRNIC_PS_TCP = 0x0106,
+ XRNIC_PS_UDP = 0x0111,
+};
+
+enum xrnic_cm_error {
+ XRNIC_INVALID_CM_ID = 2,
+ XRNIC_INVALID_CM_OUTSTANDING = 3,
+ XRNIC_INVALID_QP_ID = 4,
+ XRNIC_INVALID_QP_INIT_ATTR = 5,
+ XRNIC_INVALID_NUM_CHILD = 6,
+ XRNIC_INVALID_CHILD_ID = 7,
+ XRNIC_INVALID_CHILD_NUM = 8,
+ XRNIC_INVALID_QP_TYPE = 9,
+ XRNIC_INVALID_PORT = 10,
+ XRNIC_INVALID_ADDR = 11,
+ XRNIC_INVALID_PKT_CNT = 12,
+ XRNIC_INVALID_ADDR_TYPE = 13,
+ XRNIC_INVALID_QP_CONN_PARAM = 14,
+ XRNIC_INVALID_QP_STATUS = 15,
+};
+
+enum xrnic_qp_type {
+ XRNIC_QPT_RC,
+ XRNIC_QPT_UC,
+ XRNIC_QPT_UD,
+};
+
+enum xrnic_rdma_cm_event_type {
+ XRNIC_LISTEN = 1,
+ XRNIC_REQ_RCVD,
+ XRNIC_MRA_SENT,
+ XRNIC_REJ_SENT,
+ XRNIC_REJ_RECV,
+ XRNIC_REP_SENT,
+ XRNIC_MRA_RCVD,
+ XRNIC_ESTABLISHD,
+ XRNIC_DREQ_RCVD,
+ XRNIC_DREQ_SENT,
+ XRNIC_RTU_TIMEOUT,
+ XRNIC_TIMEWAIT,
+ XRNIC_DREP_TIMEOUT,
+ XRNIC_REP_RCVD,
+ XRNIC_CM_EVENT_ADDR_ERROR,
+ XRNIC_CM_EVENT_ADDR_RESOLVED,
+ XRNIC_CM_EVENT_ROUTE_RESOLVED,
+};
+
+struct xrnic_hw_handshake_info {
+ u32 rq_wrptr_db_add;
+ u32 sq_cmpl_db_add;
+ u32 cnct_io_conf_l_16b;
+};
+
+struct xrnic_qp_info {
+ void (*xrnic_rq_event_handler)(u32 rq_count, void *rp_context);
+ void *rq_context;
+ void (*xrnic_sq_event_handler)(u32 cq_head, void *sp_context);
+ void *sq_context;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+ u32 qp_num;
+ u32 starting_psn;
+ struct ernic_pd *pd;
+};
+
+struct xrnic_qp_init_attr {
+ void (*xrnic_rq_event_handler)(u32 rq_count, void *rp_context);
+ void *rq_context;
+ void (*xrnic_sq_event_handler)(u32 cq_head, void *sp_context);
+ void *sq_context;
+ enum xrnic_qp_type qp_type;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+};
+
+struct xrnic_rdma_route {
+ u8 src_addr[16];
+ u8 dst_addr[16];
+ u16 ip_addr_type;
+ u8 smac[ETH_ALEN];
+ u8 dmac[ETH_ALEN];
+ struct sockaddr_storage s_addr;
+ struct sockaddr_storage d_addr;
+};
+
+enum xrnic_port_qp_status {
+ XRNIC_PORT_QP_FREE,
+ XRNIC_PORT_QP_IN_USE,
+};
+
+struct xrnic_rdma_cm_event_info {
+ enum xrnic_rdma_cm_event_type cm_event;
+ int status;
+ void *private_data;
+ u32 private_data_len;
+};
+
+struct xrnic_rdma_conn_param {
+ u8 private_data[XRNIC_CM_PRVATE_DATA_LENGTH];
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u32 qp_num;
+ u32 srq;
+};
+
+enum xrnic_cm_state {
+ XRNIC_CM_REQ_SENT = 0,
+ XRNIC_CM_REP_RCVD,
+ XRNIC_CM_ESTABLISHED,
+};
+
+struct xrnic_rdma_cm_id {
+ int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *event);
+ void *cm_context;
+ u32 local_cm_id;
+ u32 remote_cm_id;
+ struct xrnic_qp_info qp_info;
+ struct xrnic_rdma_route route;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ enum xrnic_port_space ps;
+ enum xrnic_qp_type qp_type;
+ u16 port_num;
+ u16 child_qp_num;
+ struct xrnic_rdma_conn_param conn_param;
+ enum xrnic_port_qp_status qp_status;
+ int cm_state;
+ struct list_head list;
+};
+
+struct xrnic_rdma_cm_id_info {
+ struct xrnic_rdma_cm_id parent_cm_id;
+ struct xrnic_rdma_cm_id *child_cm_id;
+ u32 num_child;
+ struct xrnic_rdma_cm_event_info conn_event_info;
+};
+
+void xrnic_rq_event_handler (u32 rq_count, void *user_arg);
+void xrnic_sq_event_handler (u32 cq_head, void *user_arg);
+int xrnic_cm_handler (struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info);
+
+struct xrnic_rdma_cm_id *xrnic_rdma_create_id
+ (int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info), void *cm_context,
+ enum xrnic_port_space ps, enum xrnic_qp_type qp_type,
+ int num_child_qp);
+
+int xrnic_rdma_bind_addr(struct xrnic_rdma_cm_id *cm_id,
+ u8 *addr, u16 port_num, u16 ip_addr_type);
+
+int xrnic_rdma_listen(struct xrnic_rdma_cm_id *cm_id, int outstanding);
+int xrnic_rdma_create_qp(struct xrnic_rdma_cm_id *cm_id, struct ernic_pd *pd,
+ struct xrnic_qp_init_attr *init_attr);
+int xrnic_rdma_accept(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param);
+int xrnic_post_recv(struct xrnic_qp_info *qp_info, u32 rq_count);
+int xrnic_post_send(struct xrnic_qp_info *qp_info, u32 sq_count);
+int xrnic_destroy_qp(struct xrnic_qp_info *qp_info);
+int xrnic_rdma_disconnect(struct xrnic_rdma_cm_id *cm_id);
+int xrnic_rdma_destroy_id(struct xrnic_rdma_cm_id *cm_id, int flag);
+int xrnic_hw_hs_reset_sq_cq(struct xrnic_qp_info *qp_info,
+ struct xrnic_hw_handshake_info *hw_hs_info);
+int xrnic_hw_hs_reset_rq(struct xrnic_qp_info *qp_info);
+
+int xrnic_rdma_resolve_addr(struct xrnic_rdma_cm_id *cm_id,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int timeout);
+int xrnic_rdma_connect(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param);
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_IF_H*/
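
For illustration, a kernel module consuming the connection-manager interface declared in this header might set up a passive (listening) endpoint roughly as sketched below. The handler body, the bound address and the port index are placeholders, and the port index must stay within the range accepted by xrnic_rdma_bind_addr():

    static int my_cm_handler(struct xrnic_rdma_cm_id *cm_id,
    			 struct xrnic_rdma_cm_event_info *event)
    {
    	if (event->cm_event == XRNIC_ESTABLISHD)
    		pr_info("xrnic: connection established on port %u\n",
    			cm_id->port_num);
    	return 0;
    }

    static int my_listen_setup(void)
    {
    	u8 ipv4[4] = { 192, 168, 1, 10 };	/* placeholder local address */
    	struct xrnic_rdma_cm_id *cm_id;
    	int ret;

    	cm_id = xrnic_rdma_create_id(my_cm_handler, NULL, XRNIC_PS_TCP,
    				     XRNIC_QPT_RC, 8);
    	if (IS_ERR_OR_NULL(cm_id))
    		return -ENOMEM;

    	ret = xrnic_rdma_bind_addr(cm_id, ipv4, 1, AF_INET);
    	if (ret)
    		return ret;

    	return xrnic_rdma_listen(cm_id, 8);
    }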
diff --git a/drivers/staging/xlnx_ernic/xioctl.h b/drivers/staging/xlnx_ernic/xioctl.h
new file mode 100644
index 000000000000..8c9738e69383
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xioctl.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+#ifndef _XRNIC_IOCTL_H_
+#define _XRNIC_IOCTL_H_
+
+#include <asm/ioctl.h>
+#include "xlog.h"
+
+#define XRNIC_MAGIC 'L'
+
+#define XRNIC_DISPLAY_MMAP_ALL _IOW(XRNIC_MAGIC, 1, uint)
+#define XRNIC_DISPLAY_MMAP_CONFIG _IOW(XRNIC_MAGIC, 2, uint)
+#define XRNIC_DISPLAY_MMAP_QP1 _IOW(XRNIC_MAGIC, 3, uint)
+#define XRNIC_DISPLAY_MMAP_QPX _IOW(XRNIC_MAGIC, 4, uint)
+#define XRNIC_DISPLAY_PKT _IOW(XRNIC_MAGIC, 5, uint)
+
+#define XRNIC_MAX_CMDS 5
+
+#endif /* _XRNIC_IOCTL_H_ */
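
The ioctls above are debug hooks; the file operations registered in xmain.c are currently stubs, so the following user-space sketch only shows the intended invocation pattern once unlocked_ioctl is implemented. It assumes the ioctl definitions are made available to user space and that udev exposes the character device created in xmain.c as /dev/xrnic0:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "xioctl.h"	/* assumes these definitions are installed for user space */

    int main(void)
    {
    	unsigned int arg = 0;
    	int fd = open("/dev/xrnic0", O_RDWR);

    	if (fd < 0) {
    		perror("open /dev/xrnic0");
    		return 1;
    	}
    	if (ioctl(fd, XRNIC_DISPLAY_MMAP_ALL, &arg) < 0)
    		perror("XRNIC_DISPLAY_MMAP_ALL");
    	close(fd);
    	return 0;
    }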
diff --git a/drivers/staging/xlnx_ernic/xmain.c b/drivers/staging/xlnx_ernic/xmain.c
new file mode 100644
index 000000000000..67d525b51716
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmain.c
@@ -0,0 +1,1592 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author : Sandeep Dhanvada <sandeep.dhanvada@xilinx.com>
+ * : Anjaneyulu Reddy Mule <anjaneyulu.reddy.mule@xilinx.com>
+ * : Srija Malyala <srija.malyala@xilinx.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/inet.h>
+#include <linux/time.h>
+#include <linux/cdev.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <net/addrconf.h>
+#include <linux/types.h>
+#include "xcommon.h"
+
+/* TODO: Remove this macro once all the experimental code is verified;
+ * the non-experimental code should then be deleted.
+ */
+#define EXPERIMENTAL_CODE
+int debug;
+struct class *xrnic_class;
+/* Need to enable this using sysfs.*/
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none, 1=all)");
+
+#define XRNIC_REG_MAP_NODE 0
+#define cpu_to_be24(x) ((x) << 16)
+
+struct xrnic_conn_param {
+ const void *private_data;
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u8 srq;
+ u8 qp_num;
+};
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+
+struct xrnic_dev_info *xrnic_dev;
+static dev_t xrnic_dev_number;
+
+/*
+ * To store the IP address of the controller, which is passed as a
+ * module param
+ */
+static char server_ip[16];
+/* To store the port number. This is passed as a module param */
+static unsigned short port_num;
+/* To store the mac_address. This is passed as a module param */
+static ushort mac_address[6] = {0x1, 0x0, 0x0, 0x35, 0x0a, 0x00};
+/* To store the ethernet interface name, which is passed as a module param */
+static char *ifname = "eth0";
+
+module_param(port_num, ushort, 0444);
+MODULE_PARM_DESC(port_num, "network port number");
+
+module_param_array(mac_address, ushort, NULL, 0444);
+MODULE_PARM_DESC(mac_address, "mac address");
+
+module_param_string(server_ip, server_ip, sizeof(server_ip), 0444);
+MODULE_PARM_DESC(server_ip, "Target server ip address");
+
+module_param(ifname, charp, 0444);
+MODULE_PARM_DESC(ifname, "Target server interface name (default: eth0)");
+
+/**
+ * xrnic_rdma_create_id() - Creates an RDMA CM ID
+ * @xrnic_cm_handler: communication event handler
+ * @cm_context: CM context
+ * @ps: Port space
+ * @qp_type: Queue transport type
+ * @num_child: Max QP count
+ *
+ * @return: valid CM ID pointer on success, NULL or error pointer in case of failure
+ */
+struct xrnic_rdma_cm_id *xrnic_rdma_create_id
+ (int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info), void *cm_context,
+ enum xrnic_port_space ps, enum xrnic_qp_type qp_type, int num_child)
+{
+ struct xrnic_qp_attr *qp1_attr = NULL;
+ struct xrnic_rdma_cm_id *cm_id = NULL;
+ struct xrnic_qp_info *qp_info = NULL;
+ struct xrnic_rdma_cm_id_info *cm_id_info = NULL;
+
+ if (!xrnic_dev) {
+ pr_err("Received NULL pointer\n");
+ return (struct xrnic_rdma_cm_id *)NULL;
+ }
+
+ qp1_attr = &xrnic_dev->qp1_attr;
+ if (xrnic_dev->io_qp_count < num_child ||
+ num_child < 0 || qp_type != qp1_attr->qp_type) {
+ pr_err("Invalid info received\n");
+ return NULL;
+ }
+
+ cm_id_info = kzalloc(sizeof(*cm_id_info), GFP_KERNEL);
+ if (!cm_id_info)
+ return ERR_PTR(-ENOMEM);
+
+ xrnic_dev->curr_cm_id_info = cm_id_info;
+ cm_id = (struct xrnic_rdma_cm_id *)&cm_id_info->parent_cm_id;
+ cm_id->xrnic_cm_handler = xrnic_cm_handler;
+ cm_id->cm_context = cm_context;
+ cm_id->ps = ps;
+ cm_id->qp_type = qp_type;
+ cm_id->cm_id_info = cm_id_info;
+ cm_id->child_qp_num = 0;
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+
+ qp_info = &cm_id->qp_info;
+ memset(qp_info, 0, sizeof(*qp_info));
+
+ qp_info->qp_num = qp1_attr->qp_num;
+ list_add_tail(&cm_id->list, &cm_id_list);
+
+ return cm_id;
+}
+EXPORT_SYMBOL(xrnic_rdma_create_id);
+
+/**
+ * ipv6_addr_compare() - Compares IPV6 addresses (addr1 in reverse byte order)
+ * @addr1: Address 1 to compare
+ * @addr2: Address 2 to compare
+ * @size: size of the address
+ *
+ * @return: 0 on success, -1 in case of a mismatch
+ */
+static int ipv6_addr_compare(u8 *addr1, u8 *addr2, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (addr1[(size - 1) - i] != addr2[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * xrnic_rdma_bind_addr() - Binds IP-V4/V6 addresses
+ * @cm_id: CM ID to which the address info is bound
+ * @addr: Address to bind to
+ * @port_num: Transport port number
+ * @ip_addr_type: IP-V4/V6
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+int xrnic_rdma_bind_addr(struct xrnic_rdma_cm_id *cm_id,
+ u8 *addr, u16 port_num, u16 ip_addr_type)
+{
+ if (!cm_id || !xrnic_dev) {
+ pr_err("Invalid CM ID or XRNIC device info\n");
+ return -EINVAL;
+ }
+
+ if (xrnic_dev->curr_cm_id_info != cm_id->cm_id_info)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (port_num < 1UL || port_num > XRNIC_MAX_PORT_SUPPORT)
+ return -XRNIC_INVALID_PORT;
+
+ if (cm_id->child_qp_num)
+ return -XRNIC_INVALID_CHILD_NUM;
+
+ if (xrnic_dev->cm_id_info[port_num - 1])
+ return -XRNIC_INVALID_PORT;
+
+ if (xrnic_dev->port_status[port_num - 1] == XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_CM_ID;
+
+ if (ip_addr_type == AF_INET6) {
+ if (ipv6_addr_compare((u8 *)&xrnic_dev->ipv6_addr, addr,
+ sizeof(struct in6_addr)))
+ return -XRNIC_INVALID_ADDR;
+ memcpy((void *)&cm_id->route.src_addr, (void *)addr,
+ sizeof(struct in6_addr));
+ } else if (ip_addr_type == AF_INET) {
+ if (memcmp(&xrnic_dev->ipv4_addr, addr,
+ sizeof(struct in_addr)))
+ return -XRNIC_INVALID_ADDR;
+ memcpy((void *)&cm_id->route.src_addr, (void *)addr,
+ sizeof(struct in_addr));
+ } else {
+ return -XRNIC_INVALID_ADDR_TYPE;
+ }
+ xrnic_dev->cm_id_info[port_num - 1] = cm_id->cm_id_info;
+ cm_id->port_num = port_num;
+ cm_id->route.ip_addr_type = ip_addr_type;
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_bind_addr);
+
+/**
+ * xrnic_rdma_listen() - Initiates listen on the socket
+ * @cm_id: CM ID
+ * @backlog: maximum number of outstanding connection requests
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+int xrnic_rdma_listen(struct xrnic_rdma_cm_id *cm_id, int backlog)
+{
+ if (!cm_id || !xrnic_dev) {
+ pr_err("Rx invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (xrnic_dev->curr_cm_id_info != cm_id->cm_id_info)
+ return XRNIC_INVALID_CM_ID;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1] ==
+ XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_PORT;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_QP_ID;
+
+ xrnic_dev->port_status[cm_id->port_num - 1] = XRNIC_PORT_QP_IN_USE;
+ xrnic_dev->curr_cm_id_info = NULL;
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_listen);
+
+/**
+ * xrnic_hw_hs_reset_sq_cq() - Enables HW Handshake for a given QP
+ * @qp_info: QP which should be enabled for HW Handshake
+ * @hw_hs_info: HW Handshake info with which QP config needs to be updated
+ *
+ * @return: XRNIC_SUCCESS on success, error indicative value in case of failure
+ */
+int xrnic_hw_hs_reset_sq_cq(struct xrnic_qp_info *qp_info,
+ struct xrnic_hw_handshake_info *hw_hs_info)
+{
+ struct xrnic_qp_attr *qp_attr;
+
+ if (!qp_info) {
+ pr_err("Rx invalid qp info\n");
+ return -EINVAL;
+ }
+
+ if (!xrnic_dev) {
+ pr_err("Invalid ERNIC info\n");
+ return -EINVAL;
+ }
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ xrnic_reset_io_qp_sq_cq_ptr(qp_attr, hw_hs_info);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_hw_hs_reset_sq_cq);
+
+/**
+ * xrnic_hw_hs_reset_rq() - Updates HW handshake for RQ
+ * @qp_info: QP which should be enabled for HW Handshake
+ *
+ * @return: XRNIC_SUCCESS on success, error indicative value in case of failure
+ */
+int xrnic_hw_hs_reset_rq(struct xrnic_qp_info *qp_info)
+{
+ struct xrnic_qp_attr *qp_attr;
+
+ if (!qp_info) {
+ pr_err("Rx invalid qp info\n");
+ return -EINVAL;
+ }
+
+ if (!xrnic_dev) {
+ pr_err("Invalid ERNIC info\n");
+ return -EINVAL;
+ }
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ xrnic_reset_io_qp_rq_ptr(qp_attr);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_hw_hs_reset_rq);
+
+/**
+ * set_ipv4_ipaddress() - Configures XRNIC IP address
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int set_ipv4_ipaddress(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ u32 ipv4_addr = 0;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+ struct in_device *inet_dev;
+
+	if (!dev) {
+		pr_err("CMAC interface not configured\n");
+		return XRNIC_FAILED;
+	}
+
+	inet_dev = (struct in_device *)dev->ip_ptr;
+
+	if (inet_dev->ifa_list) {
+ ipv4_addr = inet_dev->ifa_list->ifa_address;
+ if (!ipv4_addr) {
+ pr_err("cmac ip addr: ifa_address not available\n");
+ return XRNIC_FAILED;
+ }
+ snprintf(server_ip, 16, "%pI4", &ipv4_addr);
+ in4_pton(server_ip, strlen(server_ip), xrnic_dev->ipv4_addr,
+ '\0', NULL);
+ DEBUG_LOG("xcmac ip_address:%s\n", server_ip);
+ } else {
+ pr_info("xcmac ip address: not available at present\n");
+ return 0;
+ }
+
+ switch (dev->mtu) {
+ case 340:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_256;
+ break;
+ case 592:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_512;
+ break;
+ case 1500:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_1024;
+ break;
+ case 2200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_2048;
+ break;
+ case 4200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ break;
+ default:
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ }
+ config_value = (xrnic_dev->ipv4_addr[3] << 0) |
+ (xrnic_dev->ipv4_addr[2] << 8) |
+ (xrnic_dev->ipv4_addr[1] << 16) |
+ (xrnic_dev->ipv4_addr[0] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->ipv4_address)));
+ DEBUG_LOG("XRNIC IPV4 address [%x]\n", config_value);
+ return 0;
+}
+
+/**
+ * set_ipv6_ipaddress() - Configures XRNIC IPV6 address
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int set_ipv6_ipaddress(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_conf;
+ u32 config_value = 0;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ u8 i, ip6_set = 0;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+
+ xrnic_ctrl_conf = &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ idev = __in6_dev_get(dev);
+ if (!idev) {
+ pr_err("ipv6 inet device not found\n");
+ return 0;
+ }
+
+ list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
+ DEBUG_LOG("IP=%pI6, MAC=%pM\n", &ifp->addr, dev->dev_addr);
+ for (i = 0; i < 16; i++) {
+ DEBUG_LOG("IP=%x\n", ifp->addr.s6_addr[i]);
+ xrnic_dev->ipv6_addr[15 - i] = ifp->addr.s6_addr[i];
+ }
+ ip6_set = 1;
+ }
+ if (ip6_set == 0) {
+ pr_info("xcmac ipv6 address: not available at present\n");
+ return 0;
+ }
+
+ switch (dev->mtu) {
+ case 340:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_256;
+ break;
+ case 592:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_512;
+ break;
+ case 1500:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_1024;
+ break;
+ case 2200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_2048;
+ break;
+ case 4200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ break;
+ default:
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ }
+ config_value = (xrnic_dev->ipv6_addr[0] << 0) |
+ (xrnic_dev->ipv6_addr[1] << 8) |
+ (xrnic_dev->ipv6_addr[2] << 16) |
+ (xrnic_dev->ipv6_addr[3] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr1)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[4] << 0) |
+ (xrnic_dev->ipv6_addr[5] << 8) |
+ (xrnic_dev->ipv6_addr[6] << 16) |
+ (xrnic_dev->ipv6_addr[7] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr2)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[8] << 0) |
+ (xrnic_dev->ipv6_addr[9] << 8) |
+ (xrnic_dev->ipv6_addr[10] << 16) |
+ (xrnic_dev->ipv6_addr[11] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr3)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[12] << 0) |
+ (xrnic_dev->ipv6_addr[13] << 8) |
+ (xrnic_dev->ipv6_addr[14] << 16) |
+ (xrnic_dev->ipv6_addr[15] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr4)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+ return 0;
+}
+
+/**
+ * cmac_inet6addr_event() - Handles IPV6 events
+ * @notifier: notifier info
+ * @event: Rx event
+ * @data: Event specific data
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int cmac_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case NETDEV_DOWN:
+		pr_info("Driver link down\n");
+ break;
+ case NETDEV_UP:
+		pr_info("Driver link up ipv6\n");
+ if (set_ipv6_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ case NETDEV_CHANGEADDR:
+		pr_info("Driver link change address ipv6\n");
+ if (set_ipv6_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * cmac_inetaddr_event() - Handles IPV4 events
+ * @notifier: notifier info
+ * @event: Rx event
+ * @data: Event specific data
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int cmac_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ struct in_ifaddr *ifa = data;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ if (event_netdev != dev)
+ return 0;
+ pr_info("Xrnic: event = %ld\n", event);
+ switch (event) {
+ case NETDEV_DOWN:
+ pr_info("Xrnic: link down\n");
+ break;
+ case NETDEV_UP:
+ pr_info("Xrnic: link up\n");
+ if (set_ipv4_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ case NETDEV_CHANGEADDR:
+ pr_info("Xrnic: ip address change detected\n");
+ if (set_ipv4_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ }
+ return 0;
+}
+
+struct notifier_block cmac_inetaddr_notifier = {
+ .notifier_call = cmac_inetaddr_event
+};
+
+struct notifier_block cmac_inet6addr_notifier = {
+ .notifier_call = cmac_inet6addr_event
+};
+
+static const struct file_operations xrnic_fops = {
+ /*TODO: Implement read/write/ioctl operations. */
+ .owner = THIS_MODULE, /* Owner */
+};
+
+/**
+ * xrnic_irq_handler() - XRNIC interrupt handler
+ * @irq: Irq number
+ * @data: Pointer to XRNIC device info structure
+ *
+ * @return: IRQ_HANDLED in case of success or other value in case of failure
+ */
+static irqreturn_t xrnic_irq_handler(int irq, void *data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ unsigned long flag;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ config_value = ioread32((void *)&xrnic_ctrl_config->intr_sts);
+
+	/* Process only the interrupts that are currently enabled. */
+ config_value = config_value & xrnic_dev->xrnic_mmap.intr_en;
+ if (!config_value)
+ pr_err("Rx disabled or masked interrupt\n");
+
+ if (config_value & PKT_VALID_ERR_INTR_EN) {
+ pr_info("Packet validation fail interrupt rx\n");
+ iowrite32(PKT_VALID_ERR_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & MAD_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("MAD Packet rx interrupt\n");
+ /* Clear the interrupt */
+ iowrite32(MAD_PKT_RCVD_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ /* process the MAD pkt */
+ tasklet_schedule(&xrnic_dev->mad_pkt_recv_task);
+ }
+
+ if (config_value & BYPASS_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("Bypass packet Rx interrupt\n");
+ iowrite32(BYPASS_PKT_RCVD_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & RNR_NACK_GEN_INTR_EN) {
+ DEBUG_LOG("Rx RNR Nack interrupt\n");
+ iowrite32(RNR_NACK_GEN_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & WQE_COMPLETED_INTR_EN) {
+ DEBUG_LOG("Rx WQE completion interrupt\n");
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en &
+ (~WQE_COMPLETED_INTR_EN);
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ tasklet_schedule(&xrnic_dev->wqe_completed_task);
+ }
+
+ if (config_value & ILL_OPC_SENDQ_INTR_EN) {
+ DEBUG_LOG("Rx illegal opcode interrupt\n");
+ iowrite32(ILL_OPC_SENDQ_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & QP_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("Rx data packet interrupt\n");
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en &
+ (~QP_PKT_RCVD_INTR_EN);
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ tasklet_schedule(&xrnic_dev->qp_pkt_recv_task);
+ }
+
+ if (config_value & FATAL_ERR_INTR_EN) {
+ pr_info("Rx Fatal error interrupt\n");
+
+ iowrite32(FATAL_ERR_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+		/* 0 is a placeholder argument */
+ xrnic_qp_fatal_handler(0);
+ }
+
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xrnic_ctrl_hw_configuration() - XRNIC control configuration initialization
+ * @return: 0 on success, other value in case of failure
+ */
+static int xrnic_ctrl_hw_configuration(void)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_ctrl_conf;
+ u32 config_value = 0;
+ struct net_device *dev = NULL;
+
+ xrnic_ctrl_conf = &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+
+ if (!xrnic_dev || !xrnic_dev->xrnic_mmap.xrnic_regs ||
+ !xrnic_ctrl_conf) {
+ pr_err("Invalid device pointers\n");
+ return -EINVAL;
+ }
+
+ xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ dev = __dev_get_by_name(&init_net, ifname);
+ if (!dev) {
+ pr_err("Ethernet mac address not configured\n");
+ return XRNIC_FAILED;
+ }
+ /* Set the MAC address */
+ config_value = dev->dev_addr[5] | (dev->dev_addr[4] << 8) |
+ (dev->dev_addr[3] << 16) | (dev->dev_addr[2] << 24);
+ DEBUG_LOG("Source MAC address LSB [%x]\n", config_value);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->mac_xrnic_src_addr_lsb)));
+
+ config_value = dev->dev_addr[1] | (dev->dev_addr[0] << 8);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->mac_xrnic_src_addr_msb)));
+ DEBUG_LOG("Source MAC address MSB [%x]\n", config_value);
+
+ if (set_ipv4_ipaddress() == XRNIC_FAILED) {
+ pr_err("ETH0 AF_INET address: ifa_list not available.\n");
+ return XRNIC_FAILED;
+ }
+
+ if (set_ipv6_ipaddress() == XRNIC_FAILED) {
+ pr_err("ETH0 AF_INET6 address: ifa_list not available.\n");
+ return XRNIC_FAILED;
+ }
+
+ /* At present 128 TX headers and each size 128 bytes */
+ config_value = xrnic_mmap->tx_hdr_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_hdr_buf_ba)));
+ DEBUG_LOG("Tx header buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_TX_HDR | (XRNIC_SIZE_OF_TX_HDR << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_hdr_buf_sz)));
+ DEBUG_LOG("Tx header buf size [0x%x]\n", config_value);
+
+ /* At present 256 TX SGL and each size 16 bytes */
+ config_value = xrnic_mmap->tx_sgl_buf_ba_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_sgl_buf_ba)));
+ DEBUG_LOG("Tx SGL buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_TX_SGL | (XRNIC_SIZE_OF_TX_SGL << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_sgl_buf_sz)));
+ DEBUG_LOG("Tx SGL buf size [0x%x]\n", config_value);
+
+ /* At present 32 Bypass buffers and each size 512 bytes */
+ config_value = xrnic_mmap->bypass_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->bypass_buf_ba)));
+ DEBUG_LOG("Bypass buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_BYPASS_BUF |
+ (XRNIC_SIZE_OF_BYPASS_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->bypass_buf_sz)));
+ DEBUG_LOG("Bypass buf size [0x%x]\n", config_value);
+
+ config_value = XRNIC_BYPASS_BUF_WRPTR;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->bypass_buf_wrptr)));
+ DEBUG_LOG("Bypass buffer write pointer [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->err_pkt_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_pkt_buf_ba)));
+ DEBUG_LOG("Error packet buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_ERROR_BUF |
+ (XRNIC_SIZE_OF_ERROR_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_pkt_buf_sz)));
+ DEBUG_LOG("Error packet buf size [0x%x]\n", config_value);
+
+ config_value = XRNIC_ERROR_BUF_WRPTR;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_buf_wrptr)));
+	DEBUG_LOG("Error packet buf write pointer [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->out_errsts_q_ba_phys;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->out_errsts_q_ba)));
+ DEBUG_LOG("Outgoing error status queue base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_OUT_ERRST_Q_NUM_ENTRIES;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->out_errsts_q_sz)));
+ DEBUG_LOG("Outgoing error status queue size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->in_errsts_q_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->in_errsts_q_ba)));
+ DEBUG_LOG("Incoming error status queue base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_IN_ERRST_Q_NUM_ENTRIES;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->in_errsts_q_sz)));
+ DEBUG_LOG("Incoming error status queue size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->data_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->data_buf_ba)));
+ DEBUG_LOG("RDMA Outgoing data buf base addr [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_DATA_BUF | (XRNIC_SIZE_OF_DATA_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->data_buf_sz)));
+ DEBUG_LOG("RDMA Outgoing data buf size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->resp_err_pkt_buf_ba_phys;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->resp_err_pkt_buf_ba)));
+ DEBUG_LOG("Response error packet buf base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_NUM_OF_RESP_ERR_BUF |
+ (XRNIC_SIZE_OF_RESP_ERR_BUF << 16);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->resp_err_buf_sz)));
+ DEBUG_LOG("Response error packet buf size [0x%x]\n", config_value);
+
+ /* Enable the RNIC configuration*/
+ config_value = (XRNIC_CONFIG_XRNIC_EN |
+ XRNIC_CONFIG_ERR_BUF_EN |
+ XRNIC_CONFIG_NUM_QPS_ENABLED |
+ XRNIC_CONFIG_FLOW_CONTROL_EN |
+ XRNIC_CONFIG_UDP_SRC_PORT);
+
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->xrnic_conf)));
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_ctrl_hw_init() - XRNIC control H/W initialization
+ * @return: 0 on success, other value in case of failure
+ */
+static int xrnic_ctrl_hw_init(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ int ret = 0, i;
+
+ /* Invoking rnic global initialization configuration */
+ ret = xrnic_ctrl_hw_configuration();
+ if (ret) {
+ pr_err("xrnic hw config failed with ret code [%d]\n", ret);
+ return ret;
+ }
+
+ /* Invoking RDMA QP1 configuration */
+ ret = xrnic_qp1_hw_configuration();
+ if (ret) {
+ pr_err("xrnic qp1 config failed with ret code [%d]\n", ret);
+ return ret;
+ }
+
+	/* Invoking RDMA data path QP configuration. As we are not
+	 * registering any data path interrupt handler, the return value
+	 * is not checked.
+	 */
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++)
+ xrnic_qp_hw_configuration(i);
+
+ /* Enabling xrnic interrupts. */
+ config_value = MAD_PKT_RCVD_INTR_EN |
+ RNR_NACK_GEN_INTR_EN |
+ WQE_COMPLETED_INTR_EN | ILL_OPC_SENDQ_INTR_EN |
+ QP_PKT_RCVD_INTR_EN | FATAL_ERR_INTR_EN;
+
+ if (config_value & ~XRNIC_INTR_ENABLE_DEFAULT) {
+ DEBUG_LOG("Setting the default interrupt enable config\n");
+ config_value = XRNIC_INTR_ENABLE_DEFAULT;
+ }
+
+ /*Writing to interrupt enable register.*/
+ xrnic_dev->xrnic_mmap.intr_en = config_value;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->intr_en)));
+
+ DEBUG_LOG("Interrupt enable reg value [%#x]\n",
+ ioread32((void __iomem *)&xrnic_ctrl_config->intr_en));
+ return ret;
+}
+
+/**
+ * xrnic_fill_wr() - This function fills the Send queue work request info
+ * @qp_attr: qp config info to fill the WR
+ * @qp_depth: Depth of the Queue
+ */
+void xrnic_fill_wr(struct xrnic_qp_attr *qp_attr, u32 qp_depth)
+{
+ int i;
+ struct wr *sq_wr; /*sq_ba*/
+
+ for (i = 0; i < qp_depth; i++) {
+ sq_wr = (struct wr *)qp_attr->sq_ba + i;
+ sq_wr->ctx.wr_id = i;
+ sq_wr->local_offset[0] = (qp_attr->send_sgl_phys & 0xffffffff)
+ + (i * XRNIC_SEND_SGL_SIZE);
+ sq_wr->local_offset[1] = 0;
+ sq_wr->length = XRNIC_SEND_SGL_SIZE;
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ sq_wr->remote_offset[0] = 0;
+ sq_wr->remote_offset[1] = 0;
+ sq_wr->remote_tag = 0;
+ }
+}
+
+static int xernic_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device_node *np = NULL;
+ struct resource resource;
+ void __iomem *virt_addr;
+ u64 start_addr;
+ int status;
+ int len;
+/* TODO: Not using pdev. Rather using a global data structure xrnic_dev,
+ * which is shared among all the objects in ernic driver.
+ * Need to set xrnic_dev as the platform private data, and all the objects of
+ * the ernic driver have to retrieve it from the platform_device pointer.
+ */
+#ifdef EXPERIMENTAL_CODE
+ int val = 0;
+#endif
+ phys_addr_t phy_addr;
+
+ pr_info("XRNIC driver Version = %s\n", XRNIC_VERSION);
+
+ register_inetaddr_notifier(&cmac_inetaddr_notifier);
+ register_inet6addr_notifier(&cmac_inet6addr_notifier);
+ init_mr(MEMORY_REGION_BASE, MEMORY_REGION_LEN);
+
+ np = of_find_node_by_name(NULL, "ernic");
+ if (!np) {
+		pr_err("xrnic can't find the ernic node in the device tree.\n");
+ return -ENODEV;
+ }
+
+ xrnic_dev = kzalloc(sizeof(*xrnic_dev), GFP_KERNEL);
+ if (!xrnic_dev)
+ return -ENOMEM;
+ ret = alloc_chrdev_region(&xrnic_dev_number, 0,
+ NUM_XRNIC_DEVS, DEVICE_NAME);
+ if (ret) {
+ DEBUG_LOG("XRNIC:: Failed to register char device\n");
+ goto alloc_failed;
+ } else {
+ DEBUG_LOG(KERN_INFO "XRNIC Registered with :\n");
+ DEBUG_LOG(KERN_INFO "Major : %u || ", MAJOR(xrnic_dev_number));
+ DEBUG_LOG(KERN_INFO "Minor : %u\n", MINOR(xrnic_dev_number));
+ }
+/* TODO: xrnic_class is created but not used. Need to enable debug and
+ * statistic counters through this interface.
+ */
+ xrnic_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(xrnic_class)) {
+ ret = PTR_ERR(xrnic_class);
+ goto class_failed;
+ }
+
+ /* Connect the file operations with the cdev */
+ /* TODO: cdev created but not used. Need to implement when
+ * userspace applications are implemented. Currently all the
+ * callbacks in xrnic_fops are dummy.
+ */
+ cdev_init(&xrnic_dev->cdev, &xrnic_fops);
+ xrnic_dev->cdev.owner = THIS_MODULE;
+
+ /* Connect the major/minor number to the cdev */
+ ret = cdev_add(&xrnic_dev->cdev, xrnic_dev_number, 1);
+	if (ret) {
+ DEBUG_LOG("ERROR: XRNIC cdev allocation failed\n");
+ goto cdev_failed;
+ }
+
+ device_create(xrnic_class, NULL, xrnic_dev_number, NULL,
+ "%s", "xrnic0");
+
+	/* Node offset 0: xrnic register map at 0x0 0x84000000, length 128K */
+ ret = of_address_to_resource(np, XRNIC_REG_MAP_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 0.\n");
+ goto dev_failed;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_REG_MAP_NODE);
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.xrnic_regs_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.xrnic_regs = (struct xrnic_reg_map *)virt_addr;
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Tx HDR BUF Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+#else
+ /*Mapping for Xrnic TX HEADERS 0x20100000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_TX_HDR_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 5.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_TX_HDR_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory TX header 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Tx SGL Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+#else
+ /*Mapping for Xrnic TX DMA SGL 0xB4000000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_TX_SGL_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 6.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_TX_SGL_BUF_NODE);
+ DEBUG_LOG("xrnic memory TX SGL 0x%llx of size=%x\n",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba = (void *)(uintptr_t)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.bypass_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.bypass_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.bypass_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Bypass Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+#else
+ /*Mapping for Xrnic BYPASS PL 0x20120000 to 16 kb.*/
+ /*Mapping for Xrnic BYPASS PS 0x20120000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_BYPASS_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 7.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_BYPASS_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory BYPASS:0x%llx of siz:%xb mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.bypass_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.bypass_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_ERROR_BUF * XRNIC_SIZE_OF_ERROR_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.err_pkt_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory ERR PKT Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+#else
+ /*Mapping for Xrnic ERROR-DROPP PL 0x20110000 to 16 kb.*/
+ /*Mapping for Xrnic ERROR-DROPP PS 0x20110000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_ERRPKT_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 8.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_ERRPKT_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory ERROR PKT 0x%llx of size=%x\n",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_OUT_ERRST_Q_NUM_ENTRIES;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.out_errsts_q_ba, 0, len);
+ DEBUG_LOG("xrnic memory OUT ERR STS Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+#else
+ /*Mapping for Xrnic OUT ERR_STS 0x29000000 to 4 kb.*/
+ ret = of_address_to_resource(np, XRNIC_OUTERR_STS_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 9.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_OUTERR_STS_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_IN_ERRST_Q_NUM_ENTRIES;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.in_errsts_q_ba, 0, len);
+ DEBUG_LOG("xrnic memory IN ERR STS Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+#else
+ /*Mapping for Xrnic IN ERR_STS PL 0x29001000 to 16 kb.*/
+ /*Mapping for Xrnic IN ERR_STS PS 0x29001000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_INERR_STS_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 10.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_INERR_STS_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba = (void *)virt_addr;
+#endif
+
+ /*Mapping for Xrnic RQ WR DBRL PL 0x29002000 to 4 kb.*/
+ /*Mapping for Xrnic RQ WR DBRL PS 0x29002000 to 4 kb.*/
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_DATA_BUF * XRNIC_SIZE_OF_DATA_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.data_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.data_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.data_buf_ba, 0, len);
+#else
+ /*Mapping for Xrnic RQ STATUS PER QP 0x29040000 to 4 kb.*/
+ ret = of_address_to_resource(np, XRNIC_DATA_BUF_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 14.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_DATA_BUF_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory DATA BUFF BA 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.data_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.data_buf_ba = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_RESP_ERR_BUF * XRNIC_SIZE_OF_RESP_ERR_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba, 0, len);
+#else
+ /*Mapping for Xrnic RQ STATUS PER QP 0x20130000 to 16kb.*/
+ ret = of_address_to_resource(np, XRNIC_RESP_ERR_PKT_BUF_BA, &resource);
+ if (ret < 0) {
+		pr_err("xrnic can't find resource 15.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RESP_ERR_PKT_BUF_BA);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic response error packet buffer base address [0x%llx]",
+ start_addr);
+ DEBUG_LOG(" of size=%x bytes mapped at 0x%p\n",
+ (u32)resource.end - (u32)resource.start, virt_addr);
+
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SEND_SGL_SIZE * XRNIC_SQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.send_sgl_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.send_sgl =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+
+ memset(xrnic_dev->xrnic_mmap.send_sgl, 0, len);
+ DEBUG_LOG("xrnic memory Send SGL Base Addr = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.send_sgl_phys);
+
+#else /* EXPERIMENTAL_CODE */
+ ret = of_address_to_resource(np, XRNIC_SEND_SGL_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 1.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SEND_SGL_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+
+ DEBUG_LOG("xrnic memory send sgl 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.send_sgl_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.send_sgl = (void *)virt_addr;
+#endif /* EXPERIMENTAL_CODE */
+
+ DEBUG_LOG("send SGL physical address :%llx\n",
+ xrnic_dev->xrnic_mmap.send_sgl_phys);
+ DEBUG_LOG("xrnic mmap:%p\n", &xrnic_dev->xrnic_mmap);
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SQ_DEPTH * sizeof(struct xrnic_cqe);
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.cq_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.cq_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.cq_ba, 0, len);
+ DEBUG_LOG("xrnic memory CQ BA Base Addr = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.cq_ba_phys);
+
+#else
+ ret = of_address_to_resource(np, XRNIC_CQ_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 2.\n");
+ goto mem_config_err;
+ }
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_CQ_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory send CQ 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.cq_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.cq_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_RECV_PKT_SIZE * XRNIC_RQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+
+ memset(xrnic_dev->xrnic_mmap.rq_buf_ba_ca, 0, len);
+ DEBUG_LOG("xrnic memory Receive Q Buffer = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+
+#else /* EXPERIMENTAL_CODE */
+ ret = of_address_to_resource(np, XRNIC_RQ_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 3.\n");
+ goto mem_config_err;
+ }
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RQ_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory receive Q Buf 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca = (void *)virt_addr;
+#endif /* EXPERIMENTAL_CODE */
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SEND_PKT_SIZE * XRNIC_SQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.sq_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.sq_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.sq_ba, 0, len);
+ DEBUG_LOG("xrnic memory Send Q Base Addr = %#x, %llx.\n",
+ val, xrnic_dev->xrnic_mmap.sq_ba_phys);
+#else
+ ret = of_address_to_resource(np, XRNIC_SQ_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 4.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SQ_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory SEND Q 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.sq_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.sq_ba = (struct wr *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.rq_wrptr_db_add, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_RQWR_PTR_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 11.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RQWR_PTR_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory RQ WPTR 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.sq_cmpl_db_add, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_SQ_CMPL_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 12.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SQ_CMPL_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory SQ CMPL 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG("bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.stat_rq_buf_ca, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_STAT_XRNIC_RQ_BUF_NODE,
+ &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 13.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_STAT_XRNIC_RQ_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory STAT RQ BUF 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca = (void *)virt_addr;
+#endif
+ xrnic_dev->io_qp_count = XRNIC_MAX_QP_SUPPORT;
+ /* XRNIC controller H/W configuration which includes XRNIC
+ * global configuration, QP1 initialization and interrupt enable.
+ */
+ ret = xrnic_ctrl_hw_init();
+ if (ret < 0) {
+ pr_err("xrnic hw init failed.\n");
+ goto mem_config_err;
+ }
+ /* TODO: Currently, ERNIC IP is exporting 8 interrupt lines in DTS.
+	 * But the IP asserts only the first interrupt line for all 8 sources.
+ * Internally, all 8 lines are logically ORed and given as
+ * Single interrupt with interrupt status register showing which
+ * line is asserted. So, we are parsing just the 0th index of irq_map
+ * from DTS and in interrupt handler routine, we are reading the
+ * interrupt status register to identify which interrupt is asserted.
+ *
+ * Need to fix the design to export only 1 interrupt line in DTS.
+ */
+ xrnic_dev->xrnic_irq = irq_of_parse_and_map(np, 0);
+	if (!xrnic_dev->xrnic_irq) {
+		pr_err("xrnic can't determine irq.\n");
+		ret = XRNIC_FAILED;
+		goto err_irq;
+	}
+ status = request_irq(xrnic_dev->xrnic_irq, xrnic_irq_handler, 0,
+ "xrnic_irq", xrnic_dev);
+ if (status) {
+ pr_err("XRNIC irq request handler failed\n");
+ goto err_irq;
+ }
+
+ tasklet_init(&xrnic_dev->mad_pkt_recv_task,
+ xrnic_mad_pkt_recv_intr_handler,
+ (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->qp_pkt_recv_task,
+ xrnic_qp_pkt_recv_intr_handler, (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->qp_fatal_task,
+ xrnic_qp_fatal_handler, (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->wqe_completed_task,
+ xrnic_wqe_completed_intr_handler,
+ (unsigned long)xrnic_dev);
+ INIT_LIST_HEAD(&cm_id_list);
+
+ return XRNIC_SUCCESS;
+err_irq:
+mem_config_err:
+/* free_mem() works on only valid physical address returned from alloc_mem(),
+ * and ignores if NULL or invalid address is passed.
+ * So, even if any of the above allocations fail in the middle,
+ * we can safely call free_mem() on all addresses.
+ *
+ * we are using carve-out memory for the requirements of ERNIC.
+ * so, we cannot use devm_kzalloc() as kernel cannot see these
+ * memories until ioremapped.
+ */
+ iounmap(xrnic_dev->xrnic_mmap.xrnic_regs);
+ free_mem(xrnic_dev->xrnic_mmap.send_sgl_phys);
+ free_mem(xrnic_dev->xrnic_mmap.cq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.data_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys);
+
+dev_failed:
+ /* Remove the cdev */
+ cdev_del(&xrnic_dev->cdev);
+
+ /* Remove the device node entry */
+ device_destroy(xrnic_class, xrnic_dev_number);
+
+cdev_failed:
+ /* Destroy xrnic_class */
+ class_destroy(xrnic_class);
+
+class_failed:
+ /* Release the major number */
+ unregister_chrdev_region(MAJOR(xrnic_dev_number), 1);
+
+alloc_failed:
+ kfree(xrnic_dev);
+ return ret;
+}
+
+static int xernic_remove(struct platform_device *pdev)
+{
+/* TODO: Not using pdev. Rather using a global data structure xrnic_dev,
+ * which is shared among all the objects in ernic driver.
+ * Need to get xrnic_dev from platform_device pointer.
+ */
+ iounmap(xrnic_dev->xrnic_mmap.xrnic_regs);
+ free_mem(xrnic_dev->xrnic_mmap.send_sgl_phys);
+ free_mem(xrnic_dev->xrnic_mmap.cq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.data_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys);
+
+ cdev_del(&xrnic_dev->cdev);
+ device_destroy(xrnic_class, xrnic_dev_number);
+ unregister_chrdev_region(MAJOR(xrnic_dev_number), 1);
+ free_irq(xrnic_dev->xrnic_irq, xrnic_dev);
+ kfree(xrnic_dev);
+ class_destroy(xrnic_class);
+ unregister_inetaddr_notifier(&cmac_inetaddr_notifier);
+ unregister_inet6addr_notifier(&cmac_inet6addr_notifier);
+
+ return 0;
+}
+
+static const struct of_device_id xernic_of_match[] = {
+ { .compatible = "xlnx,ernic-1.0", },
+ { /* end of table*/ }
+};
+MODULE_DEVICE_TABLE(of, xernic_of_match);
+
+static struct platform_driver xernic_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xernic_of_match,
+ },
+ .probe = xernic_probe,
+ .remove = xernic_remove,
+};
+
+module_platform_driver(xernic_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xilinx RNIC driver");
+MODULE_AUTHOR("Sandeep Dhanvada");
diff --git a/drivers/staging/xlnx_ernic/xmain.h b/drivers/staging/xlnx_ernic/xmain.h
new file mode 100644
index 000000000000..2f45f94d2f85
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmain.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XLNX_MAIN_H_
+#define _XLNX_MAIN_H_
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#define XRNIC_VERSION "1.2"
+#define NUM_XRNIC_DEVS 1
+#define DEVICE_NAME "xrnic"
+#define DRIVER_NAME "xrnic"
+
+int xrnic_open(struct inode *inode, struct file *file);
+int xrnic_release(struct inode *inode, struct file *file);
+long xrnic_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ssize_t xrnic_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos);
+ssize_t xrnic_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos);
+void xrnic_fill_wr(struct xrnic_qp_attr *qp_attr, u32 qp_depth);
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
diff --git a/drivers/staging/xlnx_ernic/xmr.c b/drivers/staging/xlnx_ernic/xmr.c
new file mode 100644
index 000000000000..4959595d48d0
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmr.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory registrations helpers for RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+#include "xhw_config.h"
+
+struct list_head mr_free;
+struct list_head mr_alloc;
+
+atomic_t pd_index = ATOMIC_INIT(0);
+int free_mem_ceil;
+int free_mem_remain;
+void __iomem *mtt_va;
+
+DECLARE_BITMAP(ernic_memtable, XRNIC_HW_MAX_QP_SUPPORT);
+/**
+ * alloc_pool_remove() - remove an entry from alloc pool
+ * @chunk: memory region to be removed from alloc pool.
+ * @return: 0 on success.
+ *
+ * TODO: Need to modify the return value as void and remove return statement.
+ */
+int alloc_pool_remove(struct mr *chunk)
+{
+ struct mr *next, *tmp;
+
+ list_for_each_entry_safe(next, tmp, &mr_alloc, list) {
+ if (next->paddr == chunk->paddr) {
+ __list_del_entry(&next->list);
+ free_mem_remain += chunk->len;
+ }
+ }
+ return 0;
+}
+
+/**
+ * free_pool_insert() - inserts specified memory region in the free pool
+ * @chunk: memory region to be inserted in free pool.
+ * @return: 0 on success. else, returns -ENOMEM.
+ *
+ * Adds the specified memory to the free pool and if possible,
+ * merges it with adjacent regions in free pool.
+ */
+int free_pool_insert(struct mr *chunk)
+{
+ struct mr *next, *dup, *tmp;
+ struct mr *prev = NULL;
+
+ dup = kzalloc(sizeof(*dup), GFP_ATOMIC);
+ if (!dup)
+ return -ENOMEM;
+ memcpy(dup, chunk, sizeof(*dup));
+
+ /* If list is empty, then, add the new region to the free pool */
+ if (list_empty(&mr_free)) {
+ list_add_tail(&dup->list, &mr_free);
+ goto done;
+ }
+
+ /* If the new region size exceeds the free memory limit,
+ * return error.
+ */
+ if (free_mem_ceil < (free_mem_remain + dup->len))
+ return -ENOMEM;
+
+ /* For a non-empty list, add the region at a suitable place
+ * in the free pool.
+ */
+ list_for_each_entry_safe(next, tmp, &mr_free, list) {
+ if (dup->paddr < next->paddr) {
+ prev = list_prev_entry(next, list);
+ list_add(&dup->list, &prev->list);
+ goto merge_free_pool;
+ }
+ }
+ /*
+ * If no suitable position to insert within free pool, then,
+ * append at the tail.
+ */
+ list_add_tail(&dup->list, &mr_free);
+
+ /* If possible, merge the region with previous and next regions. */
+merge_free_pool:
+ if (next && (dup->paddr + dup->len == next->paddr)) {
+ dup->len += next->len;
+ __list_del_entry(&next->list);
+ }
+
+ if (prev && (prev->paddr + prev->len == dup->paddr)) {
+ prev->len += dup->len;
+ __list_del_entry(&dup->list);
+ }
+ /* Except for the physical and virtual addresses, clear all contents
+ * of the region. If this region is in the alloc pool, remove it
+ * from there.
+ */
+done:
+ dup->lkey = 0;
+ dup->rkey = 0;
+ dup->vaddr = 0;
+ dup->access = MR_ACCESS_RESVD;
+ alloc_pool_remove(chunk);
+ return 0;
+}
+EXPORT_SYMBOL(free_pool_insert);
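+
+/*
+ * Illustrative example (not part of the driver logic): if the free pool
+ * already holds [0xC4000000, len 0x1000] and [0xC4002000, len 0x1000],
+ * inserting a chunk [0xC4001000, len 0x1000] places it between the two
+ * and both merge checks above fire, leaving a single region
+ * [0xC4000000, len 0x3000].
+ */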
+
+/**
+ * alloc_pd() - Allocates a Protection Domain
+ * @return: pointer to the new ernic_pd struct, or NULL on allocation failure.
+ *
+ */
+struct ernic_pd *alloc_pd(void)
+{
+ struct ernic_pd *new_pd;
+
+ new_pd = kzalloc(sizeof(*new_pd), GFP_ATOMIC);
+ if (!new_pd)
+ return NULL;
+ atomic_inc(&pd_index);
+ atomic_set(&new_pd->id, atomic_read(&pd_index));
+ return new_pd;
+}
+EXPORT_SYMBOL(alloc_pd);
+
+/**
+ * dealloc_pd() - Deallocates a Protection Domain
+ * @pd: protection domain to be deallocated.
+ *
+ */
+void dealloc_pd(struct ernic_pd *pd)
+{
+ atomic_dec(&pd_index);
+ kfree(pd);
+}
+EXPORT_SYMBOL(dealloc_pd);
+
+/**
+ * dereg_mr() - deregisters the memory region from the Channel adapter.
+ * @mr: memory region to be de-registered.
+ *
+ * dereg_mr() de-registers a memory region with CA and clears the memory region
+ * registered with CA.
+ */
+void dereg_mr(struct mr *mr)
+{
+ int mtt_idx = (mr->rkey & 0xFF);
+
+ //memset(mtt_va + mtt_offset, 0, sizeof(struct ernic_mtt));
+ clear_bit(mtt_idx, ernic_memtable);
+}
+EXPORT_SYMBOL(dereg_mr);
+
+/**
+ * alloc_mem() - Allocates a Memory Region
+ * @pd: Protection domain mapped to the memory region
+ * @len: Length of the memory region required
+ * @return: on success, returns the physical address.
+ * else, returns -ENOMEM.
+ */
+phys_addr_t alloc_mem(struct ernic_pd *pd, int len)
+{
+ struct mr *next, *new_alloc, *new_free, *tmp;
+ int _len;
+
+ _len = round_up(len, 256);
+ new_alloc = kzalloc(sizeof(*new_alloc), GFP_KERNEL);
+ new_free = kzalloc(sizeof(*new_free), GFP_KERNEL);
+ if (!new_alloc || !new_free)
+ goto err;
+
+ /* requested more memory than the free pool capacity? */
+ if (free_mem_remain < _len)
+ goto err;
+
+ list_for_each_entry_safe(next, tmp, &mr_free, list) {
+ if (next->len == _len) {
+ new_alloc->paddr = next->paddr;
+ __list_del_entry(&next->list);
+ /* Exact fit: the split remainder is not needed. */
+ kfree(new_free);
+ goto reg_mr;
+ }
+ if (next->len > _len) {
+ __list_del_entry(&next->list);
+ new_alloc->paddr = next->paddr;
+ new_free->paddr = next->paddr + _len;
+ new_free->len = next->len - _len;
+ free_pool_insert(new_free);
+ goto reg_mr;
+ }
+ }
+
+err:
+ /* No free memory of requested size */
+ kfree(new_alloc);
+ kfree(new_free);
+
+ return -ENOMEM;
+reg_mr:
+ free_mem_remain = free_mem_remain - _len;
+ new_alloc->pd = pd;
+ new_alloc->len = _len;
+ new_alloc->vaddr = (u64)(uintptr_t)ioremap(new_alloc->paddr, _len);
+ list_add_tail(&new_alloc->list, &mr_alloc);
+ return new_alloc->paddr;
+}
+EXPORT_SYMBOL(alloc_mem);
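+
+/*
+ * Usage sketch (illustrative only; the calling code is hypothetical).
+ * alloc_mem() rounds the length up to a multiple of 256 bytes and
+ * ioremaps the carve-out region, so get_virt_addr() can return a
+ * CPU-accessible address for it:
+ *
+ *   struct ernic_pd *pd = alloc_pd();
+ *   phys_addr_t pa = alloc_mem(pd, 4096);
+ *   u64 va = get_virt_addr(pa);
+ *   ...
+ *   free_mem(pa);
+ *   dealloc_pd(pd);
+ */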
+
+u64 get_virt_addr(phys_addr_t phys_addr)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == phys_addr)
+ return next->vaddr;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(get_virt_addr);
+
+/**
+ * free_mem() - inserts a memory region in free pool and
+ * removes from alloc pool
+ * @paddr: physical address to be freed.
+ *
+ */
+void free_mem(phys_addr_t paddr)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == paddr)
+ goto found;
+ }
+ return;
+found:
+ iounmap((void __iomem *)(unsigned long)next->vaddr);
+ free_pool_insert(next);
+}
+EXPORT_SYMBOL(free_mem);
+
+/**
+ * register_mem_to_ca() - Registers a memory region with the Channel Adapter
+ * @mr: memory region to register.
+ * @return: a pointer to struct mr
+ *
+ * register_mem_to_ca() validates the memory region provided and registers
+ * the memory region with the CA and updates the mkey in the registered region.
+ *
+ */
+static struct mr *register_mem_to_ca(struct mr *mr)
+{
+ int bit, mtt_idx;
+ void __iomem *offset;
+ struct ernic_mtt mtt;
+
+ bit = find_first_zero_bit(ernic_memtable, XRNIC_HW_MAX_QP_SUPPORT);
+ set_bit(bit, ernic_memtable);
+ mtt_idx = bit;
+ mtt.pa = mr->paddr;
+ mtt.iova = mr->vaddr;
+ mtt.pd = atomic_read(&mr->pd->id);
+ mr->rkey = (mtt_idx << 8) | bit;
+ mtt.rkey = mr->rkey;
+ mtt.access = mr->access;
+ mtt.len = mr->len;
+ offset = mtt_va + (mtt_idx * 0x100);
+
+ iowrite32(mtt.pd, (void __iomem *)(offset + ERNIC_PD_OFFSET));
+ iowrite32((mtt.iova & 0xFFFFFFFF),
+ (void __iomem *)(offset + ERNIC_IOVA_OFFSET));
+ iowrite32(((mtt.iova >> 32) & 0xFFFFFFFF),
+ (void __iomem *)(offset + ERNIC_IOVA_OFFSET + 4));
+ iowrite32((mtt.pa & 0xFFFFFFFF),
+ (void __iomem *)(offset + ERNIC_PA_OFFSET));
+ iowrite32(((mtt.pa >> 32) & 0xFFFFFFFF),
+ (void __iomem *)(offset + ERNIC_PA_OFFSET + 4));
+ iowrite32((mtt.rkey & 0xFFFF),
+ (void __iomem *)(offset + ERNIC_RKEY_OFFSET));
+ iowrite32(mtt.len, (void __iomem *)(offset + ERNIC_LEN_OFFSET));
+ iowrite32(mtt.access, (void __iomem *)(offset + ERNIC_ACCESS_OFFSET));
+ return mr;
+}
+
+/**
+ * reg_phys_mr() - Registers a physical address with the Channel Adapter
+ * @pd: Protection domain associated with the physical address.
+ * @phys_addr: The physical address to be registered.
+ * @len: length of the buffer to be registered.
+ * @access: access permissions for the registered buffer.
+ * @va_reg_base: Virtual address. Currently, ERNIC doesn't support either
+ * Base Memory Extensions or Zero Based VA. So, this arg is
+ * ignored for now. This is just to satisfy the Verbs signature.
+ * @return: on success, returns a pointer to struct mr.
+ * else, returns a pointer to error.
+ *
+ * reg_phys_mr() validates that the physical address belongs to a region
+ * allocated from the carve-out pool and registers it with the CA.
+ */
+struct mr *reg_phys_mr(struct ernic_pd *pd, phys_addr_t phys_addr,
+ int len, int access, void *va_reg_base)
+{
+ struct mr *phys_mr;
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == phys_addr)
+ goto found;
+ }
+ /* Physical Address of the requested region is invalid */
+ return ERR_PTR(-EINVAL);
+found:
+ phys_mr = kzalloc(sizeof(*phys_mr), GFP_KERNEL);
+ phys_mr->paddr = phys_addr;
+ phys_mr->vaddr = next->vaddr;
+ phys_mr->len = len;
+ phys_mr->access = access;
+ phys_mr->pd = pd;
+
+ return register_mem_to_ca(phys_mr);
+}
+EXPORT_SYMBOL(reg_phys_mr);
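+
+/*
+ * Illustrative usage (hypothetical caller): the physical address must come
+ * from a prior alloc_mem() call on the carve-out pool, otherwise
+ * reg_phys_mr() returns ERR_PTR(-EINVAL):
+ *
+ *   phys_addr_t pa = alloc_mem(pd, len);
+ *   struct mr *mr = reg_phys_mr(pd, pa, len, MR_ACCESS_RDWR, NULL);
+ *   if (IS_ERR(mr))
+ *       goto cleanup;
+ */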
+
+struct mr *query_mr(struct ernic_pd *pd)
+{
+ struct mr *next, *tmp;
+
+ list_for_each_entry_safe(next, tmp, &mr_alloc, list) {
+ if (atomic_read(&next->pd->id) == atomic_read(&pd->id)) {
+ pr_info("Found MR\n");
+ goto ret;
+ }
+ }
+ return ERR_PTR(-EINVAL);
+ret:
+ return next;
+}
+EXPORT_SYMBOL(query_mr);
+
+/**
+ * dump_list() - prints all the regions for the specified list.
+ * @head: HEAD pointer for the list to be printed.
+ *
+ * dump_list() iterates over the specified list HEAD and
+ * prints all the physical address and length at each node in the list.
+ */
+static void dump_list(struct list_head *head)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, head, list) {
+ pr_info("MR [%d:%s] Phys_addr = %#x, vaddr = %llx, len = %d\n",
+ __LINE__, __func__,
+ next->paddr, next->vaddr, next->len);
+ }
+}
+
+/**
+ * dump_free_list() - prints all the regions in the free pool.
+ *
+ * dump_free_list() is a wrapper function for dump_list()
+ * to print free pool data
+ *
+ */
+void dump_free_list(void)
+{
+ dump_list(&mr_free);
+}
+EXPORT_SYMBOL(dump_free_list);
+
+/**
+ * dump_alloc_list() - prints all the regions in the alloc pool.
+ *
+ * dump_alloc_list() is a wrapper function for dump_list()
+ * to print alloc pool data
+ */
+void dump_alloc_list(void)
+{
+ dump_list(&mr_alloc);
+}
+EXPORT_SYMBOL(dump_alloc_list);
+
+/**
+ * init_mr() - Initialization function for memory region.
+ * @addr: Physical Address of the starting memory region.
+ * @length: Length of the region to initialize.
+ * @return: 0 on success.
+ * else, -EINVAL.
+ *
+ * init_mr() initializes a region of free memory
+ *
+ * Note: This should be called only once by the RNIC driver.
+ */
+int init_mr(phys_addr_t addr, int length)
+{
+ struct mr *reg;
+
+ /* Multiple init_mr() calls? */
+ if (free_mem_ceil > 0)
+ return -EINVAL;
+
+ reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mr_free);
+ INIT_LIST_HEAD(&mr_alloc);
+ reg->paddr = addr;
+ reg->len = length;
+ free_pool_insert(reg);
+ free_mem_remain = reg->len;
+ free_mem_ceil = free_mem_remain;
+/* TODO: 0x2000 is the current Protection Domain table length for 255
+ * Protection Domains.
+ * Need to retrieve the number of Protection Domains and the length of
+ * each Protection Domain from the DTS and calculate the overall remap
+ * size for all Protection Domains, instead of using a hard-coded value.
+ * Currently, the length of each Protection Domain is not exported in
+ * the DTS.
+ */
+ mtt_va = ioremap(MTT_BASE, 0x2000);
+ return 0;
+}
diff --git a/drivers/staging/xlnx_ernic/xmr.h b/drivers/staging/xlnx_ernic/xmr.h
new file mode 100644
index 000000000000..7c822b22eff9
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmr.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+struct mr {
+ phys_addr_t paddr;
+ u64 vaddr;
+ int len;
+ unsigned int access;
+ struct ernic_pd *pd;
+ int lkey;
+ int rkey;
+ struct list_head list;
+};
+
+struct ernic_pd {
+ atomic_t id;
+};
+
+struct ernic_mtt {
+ unsigned long pd;
+#define ERNIC_PD_OFFSET 0
+ u64 iova;
+#define ERNIC_IOVA_OFFSET 4
+ u64 pa;
+#define ERNIC_PA_OFFSET 12
+ int rkey;
+#define ERNIC_RKEY_OFFSET 20
+ int len;
+#define ERNIC_LEN_OFFSET 24
+ unsigned int access;
+#define ERNIC_ACCESS_OFFSET 28
+};
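+
+/*
+ * Note: each MTT entry occupies a 0x100-byte window in the ioremapped MTT
+ * space (see register_mem_to_ca() in xmr.c), so a field of entry i is
+ * written at mtt_va + (i * 0x100) + ERNIC_<FIELD>_OFFSET. For example, the
+ * rkey of entry 3 lives at mtt_va + 0x300 + ERNIC_RKEY_OFFSET.
+ */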
+
+phys_addr_t alloc_mem(struct ernic_pd *pd, int len);
+void free_mem(phys_addr_t paddr);
+struct mr *query_mr(struct ernic_pd *pd);
+struct ernic_pd *alloc_pd(void);
+void dealloc_pd(struct ernic_pd *pd);
+void dump_free_list(void);
+void dump_alloc_list(void);
+int init_mr(phys_addr_t addr, int len);
+int free_pool_insert(struct mr *chunk);
+void dereg_mr(struct mr *mr);
+u64 get_virt_addr(phys_addr_t phys_addr);
+struct mr *reg_phys_mr(struct ernic_pd *pd, phys_addr_t phys_addr,
+ int len, int access, void *va_reg_base);
+int alloc_pool_remove(struct mr *chunk);
+
+extern void __iomem *mtt_va;
+/* TODO: Get the Base address and Length from DTS, instead of Macro.
+ * Currently, the design is only for Microblaze with a fixed memory
+ * in the design.
+ *
+ * MEMORY_REGION_BASE is a carve-out memory which will be ioremapped
+ * when required for ERNIC Configuration and Queue Pairs.
+ */
+#define MEMORY_REGION_BASE 0xC4000000
+#define MEMORY_REGION_LEN 0x3BFFFFFF
+/* TODO: Get MTT_BASE from DTS instead of Macro. */
+#define MTT_BASE 0x84000000
+#define MR_ACCESS_READ 0
+#define MR_ACCESS_WRITE 1
+#define MR_ACCESS_RDWR 2
+#define MR_ACCESS_RESVD 3
diff --git a/drivers/staging/xlnx_ernic/xperftest.h b/drivers/staging/xlnx_ernic/xperftest.h
new file mode 100644
index 000000000000..609469450a9f
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xperftest.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ */
+
+#ifndef _PERF_TEST_H
+#define _PERF_TEST_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+struct ernic_bwtest_struct {
+ u64 reserved1;
+ int qp_number;
+ int reserved2;
+ unsigned long long rkey;
+ unsigned long long vaddr;
+ char reserved3[24];
+};
+
+int perftest_parse_addr(struct sockaddr_storage *s_addr, char *buf);
+void rq_handler(u32 rq_count, void *rq_context);
+void sq_handler(u32 rq_count, void *sq_context);
+void perftest_fill_wr(void __iomem *sq_ba);
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _PERF_TEST_H*/
diff --git a/drivers/staging/xlnx_ernic/xqp.c b/drivers/staging/xlnx_ernic/xqp.c
new file mode 100644
index 000000000000..dae21fda5da6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xqp.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+
+#define DISPLAY_REGS_ON_DISCONNECT
+#define EXPERIMENTAL_CODE
+
+struct xrnic_conn_param {
+ const void *private_data;
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u8 srq;
+ u8 qp_num;
+};
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+
+#define cpu_to_be24(x) ((x) << 16)
+
+#define CMA_VERSION 0
+#define QP_STAT_SQ_EMPTY_BIT_POS (9)
+#define QP_STAT_OUTSTANDG_EMPTY_Q_BIT_POS (10)
+
+int in_err_wr_ptr;
+struct list_head cm_id_list;
+
+/**
+ * xrnic_set_qp_state() - Sets the qp state to the desired state
+ * @qp_num: XRNIC QP number
+ * @state: State to set
+ *
+ * @return: XRNIC_SUCCESS in case of success or a error representative value
+ */
+int xrnic_set_qp_state(int qp_num, int state)
+{
+ if (qp_num < 0)
+ return -XRNIC_INVALID_QP_ID;
+
+ if (state != XRNIC_QP_IN_USE && state != XRNIC_QP_FREE)
+ return -XRNIC_INVALID_QP_STATUS;
+
+ xrnic_dev->qp_attr[qp_num].qp_status = state;
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_find_free_qp() - Finds the free qp to use
+ * @return: free QP number, or an error value in case no free QP is available
+ */
+int xrnic_find_free_qp(void)
+{
+ int i;
+
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ /* A QP is free when its status is XRNIC_QP_FREE */
+ if (xrnic_dev->qp_attr[i].qp_status == XRNIC_QP_FREE)
+ return i;
+ }
+ return XRNIC_FAILED;
+}
+
+/**
+ * xrnic_rdma_create_qp() - Allocates a free QP and configures it for the connection
+ * @cm_id: CM ID to associate with the QP
+ * @pd: Protection domain to associate the QP with
+ * @init_attr: QP attributes or config values
+ * @return: XRNIC_SUCCESS if successful, otherwise an error representative code
+ */
+int xrnic_rdma_create_qp(struct xrnic_rdma_cm_id *cm_id, struct ernic_pd *pd,
+ struct xrnic_qp_init_attr *init_attr)
+{
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_qp_info *qp_info;
+ int ret;
+
+ if (init_attr->sq_depth > XRNIC_MAX_SQ_DEPTH ||
+ init_attr->rq_depth > XRNIC_MAX_RQ_DEPTH ||
+ init_attr->send_sge_size > XRNIC_MAX_SEND_SGL_SIZE ||
+ init_attr->send_pkt_size > XRNIC_MAX_SEND_PKT_SIZE) {
+ return -XRNIC_INVALID_QP_INIT_ATTR;
+ }
+
+ qp_info = &cm_id->qp_info;
+
+ qp_info->qp_num = xrnic_find_free_qp();
+ qp_info->qp_num += 2;
+
+ ret = xrnic_set_qp_state((qp_info->qp_num - 2), XRNIC_QP_IN_USE);
+ if (ret < 0)
+ return ret;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+
+ if (qp_info->qp_num < 2 || qp_attr->qp_type != init_attr->qp_type)
+ return -XRNIC_INVALID_QP_ID;
+
+ cm_id->qp_type = init_attr->qp_type;
+ cm_id->local_cm_id = (qp_info->qp_num);
+
+ qp_info->xrnic_rq_event_handler = init_attr->xrnic_rq_event_handler;
+ qp_info->rq_context = init_attr->rq_context;
+ qp_info->xrnic_sq_event_handler = init_attr->xrnic_sq_event_handler;
+ qp_info->sq_context = init_attr->sq_context;
+
+ qp_info->rq_buf_ba_ca = init_attr->rq_buf_ba_ca;
+ qp_info->rq_buf_ba_ca_phys = init_attr->rq_buf_ba_ca_phys;
+ qp_info->sq_ba = init_attr->sq_ba;
+ qp_info->sq_ba_phys = init_attr->sq_ba_phys;
+ qp_info->cq_ba = init_attr->cq_ba;
+ qp_info->cq_ba_phys = init_attr->cq_ba_phys;
+
+ qp_info->sq_depth = init_attr->sq_depth;
+ qp_info->rq_depth = init_attr->rq_depth;
+ qp_info->send_sge_size = init_attr->send_sge_size;
+ qp_info->send_pkt_size = init_attr->send_pkt_size;
+ qp_info->recv_pkt_size = init_attr->recv_pkt_size;
+
+ qp_attr->rq_buf_ba_ca = qp_info->rq_buf_ba_ca;
+ qp_attr->rq_buf_ba_ca_phys = qp_info->rq_buf_ba_ca_phys;
+ qp_attr->sq_ba = qp_info->sq_ba;
+ qp_attr->sq_ba_phys = qp_info->sq_ba_phys;
+ qp_attr->cq_ba = qp_info->cq_ba;
+ qp_attr->cq_ba_phys = qp_info->cq_ba_phys;
+
+ qp_attr->sq_depth = qp_info->sq_depth;
+ qp_attr->rq_depth = qp_info->rq_depth;
+ qp_attr->send_sge_size = qp_info->send_sge_size;
+ qp_attr->send_pkt_size = qp_info->send_pkt_size;
+ qp_attr->recv_pkt_size = qp_info->recv_pkt_size;
+#ifdef ERNIC_MEM_REGISTER
+ if (pd)
+ qp_attr->pd = atomic_read(&pd->id);
+#endif
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_create_qp);
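+
+/*
+ * Caller sketch (illustrative; the ULP code and buffer setup shown here are
+ * hypothetical). The queue buffers are expected to come from the carve-out
+ * allocator, and the depths/sizes must stay within the XRNIC_MAX_* limits
+ * checked above:
+ *
+ *   struct xrnic_qp_init_attr init_attr = { 0 };
+ *
+ *   init_attr.sq_depth = 16;
+ *   init_attr.rq_depth = 16;
+ *   init_attr.qp_type = XRNIC_QPT_RC;
+ *   init_attr.xrnic_rq_event_handler = my_rq_handler;
+ *   init_attr.rq_context = my_ctx;
+ *   ...
+ *   ret = xrnic_rdma_create_qp(cm_id, pd, &init_attr);
+ */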
+
+/**
+ * xrnic_post_recv() - This function receives an incoming packet
+ * @qp_info: QP info on which packet should be received
+ * @rq_count: Number of packets to receive
+ * @return: XRNIC_SUCCESS if the requested number of packets can be received,
+ * otherwise an error representative value
+ */
+int xrnic_post_recv(struct xrnic_qp_info *qp_info, u32 rq_count)
+{
+ struct xrnic_qp_attr *qp_attr;
+ int ret = -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ ret = xrnic_qp_recv_pkt(qp_attr, rq_count);
+
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_post_recv);
+
+/**
+ * xrnic_post_send() - This function posts a SEND WR
+ * @qp_info: QP info to post the request
+ * @sq_count: SEND packet count
+ * @return: XRNIC_SUCCESS if the SEND was posted successfully,
+ * otherwise an error representative value
+ */
+int xrnic_post_send(struct xrnic_qp_info *qp_info, u32 sq_count)
+{
+ struct xrnic_qp_attr *qp_attr;
+ int ret = -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ ret = xrnic_qp_send_pkt(qp_attr, sq_count);
+
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_post_send);
+
+/**
+ * xrnic_destroy_qp() - Destroys the QP and resets the QP info
+ * @qp_info: QP info or config
+ * @return: XRNIC_SUCCESS if successfully destroys the QP,
+ * otherwise error representative value
+ */
+int xrnic_destroy_qp(struct xrnic_qp_info *qp_info)
+{
+ u32 qp_num;
+ struct xrnic_qp_attr *qp_attr;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num >= 2) {
+ qp_num = qp_info->qp_num;
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ xrnic_set_qp_state((qp_num - 2), XRNIC_QP_FREE);
+
+ memset((void *)qp_info, 0, sizeof(struct xrnic_qp_info));
+
+ qp_attr->rq_buf_ba_ca = qp_info->rq_buf_ba_ca;
+ qp_attr->rq_buf_ba_ca_phys = qp_info->rq_buf_ba_ca_phys;
+ qp_attr->sq_ba = qp_info->sq_ba;
+ qp_attr->sq_ba_phys = qp_info->sq_ba_phys;
+ qp_attr->cq_ba = qp_info->cq_ba;
+ qp_attr->cq_ba_phys = qp_info->cq_ba_phys;
+
+ qp_attr->sq_depth = qp_info->sq_depth;
+ qp_attr->rq_depth = qp_info->rq_depth;
+ qp_attr->send_sge_size = qp_info->send_sge_size;
+ qp_attr->send_pkt_size = qp_info->send_pkt_size;
+ qp_attr->recv_pkt_size = qp_info->recv_pkt_size;
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("Received invalid QP ID\n");
+ return -XRNIC_INVALID_QP_ID;
+ }
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_destroy_qp);
+
+/**
+ * xrnic_reset_io_qp() - This function resets the QP configuration
+ * @qp_attr: QP memory map or config
+ */
+void xrnic_reset_io_qp(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct xrnic_reg_map *reg_map;
+ unsigned long timeout;
+ u32 sq_pi_db_val, cq_head_val;
+ u32 rq_ci_db_val, stat_rq_pi_db_val;
+ u32 config_value;
+ int qp_num = qp_attr->qp_num - 2;
+ struct rdma_qp_attr *rdma_qp_attr;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ /* 1. WAIT FOR SQ/OSQ EMPTY TO BE SET */
+ while (!((ioread32(&rdma_qp_attr->qp_status) >> 9) & 0x3))
+ ;
+
+ /* 2 WAIT FOR register values SQ_PI_DB == CQ_HEAD */
+ sq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ cq_head_val = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+
+ timeout = jiffies;
+ while (!(sq_pi_db_val == cq_head_val)) {
+ sq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+ cq_head_val = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+ if (time_after(jiffies, (timeout + 1 * HZ)))
+ break;
+ }
+
+ /* 3. WAIT FOR register values STAT_RQ_PI_DB == RQ_CI_DB */
+ rq_ci_db_val = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ stat_rq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ timeout = jiffies;
+ while (!(rq_ci_db_val == stat_rq_pi_db_val)) {
+ rq_ci_db_val = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+ stat_rq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->stat_rq_pi_db)));
+ if (time_after(jiffies, (timeout + 1 * HZ)))
+ break;
+ }
+ /* 4. SET QP_CONF register HW handshake disable to 1 */
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value | XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_RQ_INTR_EN | XRNIC_QP_CONFIG_CQE_INTR_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ DEBUG_LOG("QP config value is 0x%x\n", config_value);
+
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ config_value = (xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ /* 5. SET QP_CONF register QP ENABLE TO 0 and QP_ADV_CONF register
+ * SW OVERRIDE TO 1
+ */
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value & ~XRNIC_QP_CONFIG_QP_ENABLE;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ /* Enable SW override enable */
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ /* 6. Initialize the QP while it is under reset: */
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_head)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_psn)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->last_rq_req)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_msn)));
+
+ /* 7. Initialize Ethernet-side registers */
+ /* No need, as this is already done during connection initialization */
+
+ /* 8. Set QP_CONF register QP ENABLE TO 1 */
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value | XRNIC_QP_CONFIG_QP_ENABLE;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = ioread32((void *)&rdma_qp_attr->qp_conf);
+ config_value = config_value & ~XRNIC_QP_CONFIG_UNDER_RECOVERY;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* 9.Set QP_ADV_CONF register SW_OVERRIDE SET TO 0 */
+ /* Disable SW override enable */
+ config_value = 0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->sqhd = 0;
+}
+
+/**
+ * xrnic_reset_io_qp_sq_cq_ptr() - This function resets SQ, CQ pointers of QP
+ * @qp_attr: QP config
+ * @hw_hs_info: QP HW handshake config
+ */
+void xrnic_reset_io_qp_sq_cq_ptr(struct xrnic_qp_attr *qp_attr,
+ struct xrnic_hw_handshake_info *hw_hs_info)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_reg_map *reg_map;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ /* Enable SW override enable */
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ if (!hw_hs_info)
+ goto enable_hw_hs;
+
+ config_value = 0;
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_head)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ config_value = hw_hs_info->rq_wrptr_db_add;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = hw_hs_info->sq_cmpl_db_add;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ config_value = ioread32((void *)(&rdma_qp_attr->stat_rq_pi_db));
+
+ config_value = hw_hs_info->cnct_io_conf_l_16b |
+ ((config_value & 0xFFFF) << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->cnct_io_conf)));
+enable_hw_hs:
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(qp_attr->recv_pkt_size);
+
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* Disable SW override enable */
+
+ config_value = 0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ config_value = ioread32(((void *)
+ (&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->sqhd = 0;
+}
+
+/**
+ * xrnic_reset_io_qp_rq_ptr() - This function resets RQ pointers of QP
+ * @qp_attr: QP config
+ */
+void xrnic_reset_io_qp_rq_ptr(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_reg_map *reg_map;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ XRNIC_QP_CONFIG_CQE_INTR_EN | xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(qp_attr->recv_pkt_size) |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_CQE_WRITE_EN;
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ /* Disable SW override enable */
+ config_value = 0x0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->stat_rq_buf_ca)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+}
+
+/**
+ * xrnic_qp_send_pkt() - This function sends packets
+ * @qp_attr: QP config
+ * @sq_pkt_count: Number of packets to send
+ * @return: XRNIC_SUCCESS if successful
+ * otherwise error representative value
+ */
+int xrnic_qp_send_pkt(struct xrnic_qp_attr *qp_attr, u32 sq_pkt_count)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0, sq_pkt_count_tmp;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+
+ config_value = ioread32((char *)xrnic_mmap->sq_cmpl_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (config_value == 0)
+ sq_pkt_count_tmp = qp_attr->sq_depth;
+ else if (qp_attr->sq_cmpl_db_local >= config_value)
+ sq_pkt_count_tmp = (config_value + qp_attr->sq_depth) -
+ qp_attr->sq_cmpl_db_local;
+ else
+ sq_pkt_count_tmp = config_value - qp_attr->sq_cmpl_db_local;
+ if (sq_pkt_count_tmp < sq_pkt_count)
+ return -XRNIC_INVALID_PKT_CNT;
+
+ /* We need to maintain sq_cmpl_db_local as per the hardware
+ * update of the queue-specific sq_cmpl_db_local register.
+ * We also need to maintain this variable in case some packets
+ * are resent.
+ */
+
+ qp_attr->sq_cmpl_db_local = qp_attr->sq_cmpl_db_local + sq_pkt_count;
+ if (qp_attr->sq_cmpl_db_local > qp_attr->sq_depth)
+ qp_attr->sq_cmpl_db_local = qp_attr->sq_cmpl_db_local
+ - qp_attr->sq_depth;
+ config_value = qp_attr->sq_cmpl_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ return XRNIC_SUCCESS;
+}
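+
+/*
+ * Worked example of the free-slot arithmetic above (illustrative numbers):
+ * with sq_depth = 64, sq_cmpl_db_local = 60 and a hardware completion
+ * pointer of 10, the wrapped count is (10 + 64) - 60 = 14 free entries,
+ * so a request to send more than 14 packets is rejected.
+ */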
+
+/**
+ * xrnic_qp_recv_pkt() - This function receives packets
+ * @qp_attr: QP config
+ * @rq_pkt_count: receive packet count
+ * @return: XRNIC_SUCCESS if successful
+ * otherwise error representative value
+ */
+int xrnic_qp_recv_pkt(struct xrnic_qp_attr *qp_attr, u32 rq_pkt_count)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0, rq_pkt_count_tmp;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (config_value == 0)
+ rq_pkt_count_tmp = qp_attr->rq_depth;
+ else if (qp_attr->rq_wrptr_db_local >= config_value)
+ rq_pkt_count_tmp = (config_value + qp_attr->rq_depth) -
+ qp_attr->rq_wrptr_db_local;
+ else
+ rq_pkt_count_tmp = config_value - qp_attr->rq_wrptr_db_local;
+
+ if (rq_pkt_count_tmp < rq_pkt_count)
+ return -XRNIC_INVALID_PKT_CNT;
+ /* We need to maintain rq_wrptr_db_local as per the hardware
+ * update of the queue-specific rq_wrptr_db_local register.
+ * We also need to maintain this variable in case some packets
+ * are resent.
+ */
+
+ qp_attr->rq_wrptr_db_local = qp_attr->rq_wrptr_db_local + rq_pkt_count;
+ if (qp_attr->rq_wrptr_db_local > qp_attr->rq_depth)
+ qp_attr->rq_wrptr_db_local = qp_attr->rq_wrptr_db_local
+ - qp_attr->rq_depth;
+
+ config_value = qp_attr->rq_wrptr_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_qp1_send_mad_pkt() - This function initiates sending a management
+ * datagram packet.
+ * @send_sgl_temp: Scatter gather list
+ * @qp1_attr: QP1 info
+ * @send_pkt_size: Send packet size
+ */
+void xrnic_qp1_send_mad_pkt(void *send_sgl_temp,
+ struct xrnic_qp_attr *qp1_attr, u32 send_pkt_size)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp1_attr *rdma_qp1_attr;
+ u32 config_value = 0;
+ struct wr *sq_wr; /*sq_ba*/
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp1_attr->xrnic_mmap;
+ rdma_qp1_attr = &xrnic_mmap->xrnic_regs->rdma_qp1_attr;
+
+ /* We need to maintain sq_cmpl_db_local as per the hardware
+ * update of the queue-specific sq_cmpl_db_local register.
+ * We also need to maintain this variable in case some packets
+ * are resent.
+ */
+ sq_wr = (struct wr *)qp1_attr->sq_ba + qp1_attr->sq_cmpl_db_local;
+ /* All packets will be 4096 bytes; that is mandatory. */
+ sq_wr->length = send_pkt_size;
+ memcpy((void *)((char *)qp1_attr->send_sgl +
+ (qp1_attr->sq_cmpl_db_local * XRNIC_SEND_SGL_SIZE)),
+ (const void *)send_sgl_temp,
+ XRNIC_SEND_SGL_SIZE);
+ qp1_attr->sq_cmpl_db_local = qp1_attr->sq_cmpl_db_local + 1;
+
+ config_value = qp1_attr->sq_cmpl_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->sq_pi_db)));
+
+ if (qp1_attr->sq_cmpl_db_local == XRNIC_SQ_DEPTH)
+ qp1_attr->sq_cmpl_db_local = 0;
+}
+
+/**
+ * xrnic_qp_pkt_recv() - This function process received data packets
+ * @qp_attr: QP info on which data packet has been received
+ */
+static void xrnic_qp_pkt_recv(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ qp_attr->xrnic_mmap;
+ u32 config_value = 0;
+ unsigned long flag;
+ int rq_pkt_count = 0;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+
+ spin_lock_irqsave(&qp_attr->qp_lock, flag);
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (qp_attr->rq_wrptr_db_local == config_value) {
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+ return;
+ }
+ if (qp_attr->rq_wrptr_db_local > config_value) {
+ rq_pkt_count = (config_value + qp_attr->rq_depth) -
+ qp_attr->rq_wrptr_db_local;
+ } else {
+ rq_pkt_count = config_value - qp_attr->rq_wrptr_db_local;
+ }
+
+ cm_id->qp_info.xrnic_rq_event_handler(rq_pkt_count,
+ cm_id->qp_info.rq_context);
+
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_wqe_completed() - This function process completion interrupts
+ * @qp_attr: QP info for which completion is received
+ */
+static void xrnic_wqe_completed(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ unsigned long flag;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+ int qp_num = qp_attr->qp_num;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+ /* We need to maintain sq_cmpl_db_local as per the hardware
+ * update of the queue-specific sq_cmpl_db_local register.
+ * We also need to maintain this variable in case some packets
+ * are resent.
+ */
+ spin_lock_irqsave(&qp_attr->qp_lock, flag);
+ config_value = ioread32((char *)&rdma_qp_attr->cq_head);
+ cm_id->qp_info.xrnic_sq_event_handler(config_value,
+ cm_id->qp_info.sq_context);
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_wqe_completed_intr_handler() - Interrupt handler for completion
+ * interrupt type
+ * @data: XRNIC device info
+ */
+void xrnic_wqe_completed_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ unsigned long cq_intr = 0, qp_num, i, j;
+ unsigned long flag;
+
+ for (i = 0 ; i < XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED ; i++) {
+ cq_intr = ioread32((void __iomem *)
+ ((&xrnic_ctrl_config->cq_intr_sts_1) +
+ (i * 4)));
+
+ if (!cq_intr)
+ continue;
+
+ for (j = find_first_bit(&cq_intr, XRNIC_REG_WIDTH);
+ j < XRNIC_REG_WIDTH;
+ j = find_next_bit(&cq_intr, XRNIC_REG_WIDTH, j + 1)) {
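+ /* Each CQ interrupt status register covers 32 QPs, so the
+ * QP number is (register index * 32) + bit position.
+ */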
+ qp_num = (i << 5) + j;
+ iowrite32((1 << j), (void __iomem *)
+ ((&xrnic_ctrl_config->cq_intr_sts_1) +
+ (i * 4)));
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ if (qp_attr->cm_id)
+ xrnic_wqe_completed(qp_attr);
+ else
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en |
+ WQE_COMPLETED_INTR_EN;
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_pkt_recv_intr_handler() - Interrupt handler for data
+ * packet interrupt
+ * @data: XRNIC device info
+ */
+void xrnic_qp_pkt_recv_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_memory_map *xrnic_mmap =
+ (struct xrnic_memory_map *)&xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_qp_attr *qp_attr;
+ struct rdma_qp_attr *rdma_qp_attr;
+ struct xrnic_reg_map *regs;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ unsigned long rq_intr = 0, qp_num, i, j, config_value;
+ unsigned long flag;
+
+ for (i = 0 ; i < XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED ; i++) {
+ rq_intr = ioread32((void __iomem *)
+ (&xrnic_ctrl_config->rq_intr_sts_1 + (i * 4)));
+
+ if (!rq_intr)
+ continue;
+
+ for (j = find_first_bit(&rq_intr, XRNIC_REG_WIDTH);
+ j < XRNIC_REG_WIDTH; j = find_next_bit
+ (&rq_intr, XRNIC_REG_WIDTH, j + 1)) {
+ qp_num = (i << 5) + j;
+ /* TODO: Change this to a work request, as other admin QPs
+ * require wait events.
+ */
+ iowrite32((1 << j), ((void __iomem *)
+ (&xrnic_ctrl_config->rq_intr_sts_1) +
+ (i * 4)));
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ regs = xrnic_mmap->xrnic_regs;
+ rdma_qp_attr = &regs->rdma_qp_attr[qp_num - 2];
+ config_value = ioread32((void *)
+ (&rdma_qp_attr->qp_conf));
+ if (qp_attr->cm_id &&
+ (config_value & XRNIC_QP_CONFIG_HW_HNDSHK_DIS)) {
+ xrnic_qp_pkt_recv(qp_attr);
+ } else {
+ if (qp_attr->cm_id)
+ pr_err("HW handshake is enabled\n");
+ else
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ }
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en |
+ QP_PKT_RCVD_INTR_EN;
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_fatal_handler() - Interrupt handler for QP fatal interrupt type
+ * @data: XRNIC device info
+ */
+void xrnic_qp_fatal_handler(unsigned long data)
+{
+ struct xrnic_memory_map *xrnic_mmap =
+ (struct xrnic_memory_map *)&xrnic_dev->xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_conf =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ int i, err_entries;
+ unsigned long timeout;
+ unsigned long config_value, qp_num, qp, sq_pi_db_val, cq_head_val;
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ err_entries = ioread32((void *)&xrnic_conf->in_errsts_q_wrptr);
+ pr_info("No of QPs in Fatal: %d\r\n", err_entries - in_err_wr_ptr);
+ for (i = 0; i < (err_entries - in_err_wr_ptr); i++) {
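+ /* Each incoming-error status queue entry is 8 bytes; the
+ * QP number is carried in the upper 16 bits of the first
+ * word of the entry.
+ */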
+ qp_num = ioread32((char *)xrnic_mmap->in_errsts_q_ba +
+ ((8 * in_err_wr_ptr) + (8 * i)));
+ qp_num = (qp_num & 0xFFFF0000) >> 16;
+ qp = qp_num - 2;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp];
+ if (rdma_qp_attr) {
+ while (!((ioread32(&rdma_qp_attr->qp_status) >> 9) &
+ 0x3))
+ DEBUG_LOG("Fatal wait for SQ/OSQ empty\n");
+
+ /* 2 WAIT FOR register values SQ_PI_DB == CQ_HEAD */
+ sq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->sq_pi_db)));
+
+ cq_head_val = ioread32((void *)&rdma_qp_attr->cq_head);
+
+ timeout = jiffies;
+ while (!(sq_pi_db_val == cq_head_val)) {
+ sq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->sq_pi_db)));
+ cq_head_val = ioread32(((void *)
+ (&rdma_qp_attr->cq_head)));
+ if (time_after(jiffies, (timeout + 1 * HZ))) {
+ pr_info("SQ PI != CQ Head\n");
+ break;
+ }
+ }
+
+ /* Poll and wait for register value
+ * RESP_HNDL_STS.sq_pici_db_check_en == '1'
+ */
+ while (!((ioread32(&xrnic_conf->resp_handler_status)
+ >> 16) & 0x1))
+ DEBUG_LOG("waiting for RESP_HNDL_STS\n");
+
+ config_value = ioread32((void *)
+ &rdma_qp_attr->qp_conf);
+ config_value = config_value &
+ (~XRNIC_QP_CONFIG_QP_ENABLE);
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = ioread32((void *)
+ &rdma_qp_attr->qp_conf);
+ config_value = config_value |
+ XRNIC_QP_CONFIG_UNDER_RECOVERY;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* Calling CM Handler to disconnect QP.*/
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event =
+ XRNIC_DREQ_RCVD;
+ cm_id_info->conn_event_info.status = 1;
+ cm_id_info->conn_event_info.private_data_len =
+ 0;
+ cm_id_info->conn_event_info.private_data =
+ NULL;
+ qp_attr->cm_id->xrnic_cm_handler
+ (qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ in_err_wr_ptr++;
+ }
+}
+
+/**
+ * xrnic_qp1_hw_configuration() - This function configures the QP1 registers
+ * @return: 0 if successfully configures QP1
+ */
+int xrnic_qp1_hw_configuration(void)
+{
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr = (struct xrnic_qp_attr *)
+ &xrnic_dev->qp1_attr;
+ struct rdma_qp1_attr *rdma_qp1_attr;
+ u32 config_value = 0;
+
+ qp1_attr->qp_num = 1;
+ rdma_qp1_attr = &xrnic_dev->xrnic_mmap.xrnic_regs->rdma_qp1_attr;
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE | xrnic_dev->pmtu |
+ XRNIC_QP1_CONFIG_RQ_BUFF_SZ |
+ XRNIC_QP_CONFIG_RQ_INTR_EN |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS;
+
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->qp_conf)));
+
+ config_value = (xrnic_mmap->rq_buf_ba_ca_phys +
+ ((qp1_attr->qp_num - 1) * XRNIC_RECV_PKT_SIZE *
+ XRNIC_RQ_DEPTH)) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->rq_buf_ba_ca)));
+
+ qp1_attr->rq_buf_ba_ca = xrnic_mmap->rq_buf_ba_ca +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_RECV_PKT_SIZE *
+ XRNIC_RQ_DEPTH);
+
+ qp1_attr->rq_buf_ba_ca_phys = config_value;
+
+ config_value = xrnic_mmap->sq_ba_phys + ((qp1_attr->qp_num - 1) *
+ XRNIC_SEND_PKT_SIZE * XRNIC_SQ_DEPTH);
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->sq_ba)));
+
+ qp1_attr->sq_ba = (struct wr *)((void *)xrnic_mmap->sq_ba +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_SEND_PKT_SIZE *
+ XRNIC_SQ_DEPTH));
+ qp1_attr->sq_ba_phys = config_value;
+
+ qp1_attr->send_sgl_phys = xrnic_mmap->send_sgl_phys +
+ (XRNIC_SEND_SGL_SIZE *
+ XRNIC_SQ_DEPTH *
+ (qp1_attr->qp_num - 1));
+ qp1_attr->send_sgl = xrnic_mmap->send_sgl +
+ (XRNIC_SEND_SGL_SIZE *
+ XRNIC_SQ_DEPTH *
+ (qp1_attr->qp_num - 1));
+
+ xrnic_fill_wr(qp1_attr, XRNIC_SQ_DEPTH);
+
+ config_value = xrnic_mmap->cq_ba_phys + ((qp1_attr->qp_num - 1) *
+ XRNIC_SQ_DEPTH * sizeof(struct cqe));
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->cq_ba)));
+
+ qp1_attr->cq_ba = (struct cqe *)(xrnic_mmap->cq_ba +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_SQ_DEPTH *
+ sizeof(struct cqe)));
+ config_value = (xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->sq_cmpl_db_add)));
+
+ config_value = XRNIC_SQ_DEPTH | (XRNIC_RQ_DEPTH << 16);
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->q_depth)));
+
+ config_value = (xrnic_mmap->stat_rq_buf_ca_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->stat_rq_buf_ca)));
+
+ config_value = XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT |
+ XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->timeout_conf)));
+ qp1_attr->qp1_attr = (struct xrnic_qp_attr *)&xrnic_dev->qp1_attr;
+ qp1_attr->rq_wrptr_db_local = 0;
+ qp1_attr->sq_cmpl_db_local = 0;
+ qp1_attr->rq_ci_db_local = 0;
+ qp1_attr->sq_pi_db_local = 0;
+
+ qp1_attr->resend_count = 0;
+ qp1_attr->local_cm_id = htonl(qp1_attr->qp_num);
+ qp1_attr->remote_cm_id = 0;
+
+ qp1_attr->curr_state = XRNIC_LISTEN;
+
+ qp1_attr->sqhd = 0;
+ qp1_attr->qp_type = XRNIC_QPT_UC;
+ qp1_attr->ip_addr_type = 0;
+
+ qp1_attr->xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ spin_lock_init(&qp1_attr->qp_lock);
+ return 0;
+}
+
+/**
+ * xrnic_display_qp_reg() - This function displays qp register info
+ * @qp_num: QP num for which register dump is required
+ */
+void xrnic_display_qp_reg(int qp_num)
+{
+ int i;
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+
+ for (i = 0; i < 45; i++)
+ pr_info("0x%X: 0x%08X\n",
+ (0x84020000 + (0x100 * (qp_num + 1)) + (i * 4)),
+ ioread32((void __iomem *)rdma_qp_attr + (i * 4)));
+}
+
+/**
+ * xrnic_qp_timer() - Timer callback that handles CM protocol timeouts for a QP
+ * @data: timer handle embedded in the QP attribute structure
+ */
+void xrnic_qp_timer(struct timer_list *data)
+{
+ struct xrnic_qp_attr *qp_attr = from_timer(qp_attr, data, qp_timer);
+ struct xrnic_qp_attr *qp1_attr = qp_attr->qp1_attr;
+ enum xrnic_rej_reason reason;
+ enum xrnic_msg_rej msg;
+ unsigned long flag;
+ int qp1_send_pkt_size;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ if (qp_attr->curr_state == XRNIC_REJ_SENT) {
+ DEBUG_LOG("REJ SENT\n");
+ if (qp_attr->resend_count < XRNIC_REJ_RESEND_COUNT) {
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_REJ_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0,
+ XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ }
+ } else if (qp_attr->curr_state == XRNIC_REP_SENT) {
+ DEBUG_LOG("REP SENT\n");
+ if (qp_attr->resend_count < XRNIC_REJ_RESEND_COUNT) {
+ qp_attr->curr_state = XRNIC_RTU_TIMEOUT;
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_REP_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ reason = XRNIC_REJ_TIMEOUT;
+ msg = XRNIC_REJ_REP;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ } else if (qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ DEBUG_LOG("MRA Received\n");
+ qp_attr->curr_state = XRNIC_RTU_TIMEOUT;
+
+ reason = XRNIC_REJ_TIMEOUT;
+ msg = XRNIC_REJ_TIMEOUT;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else if (qp_attr->curr_state == XRNIC_DREQ_SENT) {
+ DEBUG_LOG("Disconnect Req Sent\n");
+ if (qp_attr->resend_count < XRNIC_DREQ_RESEND_COUNT) {
+ qp_attr->curr_state = XRNIC_DREP_TIMEOUT;
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_DREQ_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ } else if (qp_attr->curr_state == XRNIC_TIMEWAIT) {
+ DEBUG_LOG("In time wait state\n");
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+#ifdef DISPLAY_REGS_ON_DISCONNECT
+ xrnic_display_qp_reg(qp_attr->qp_num);
+#endif
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->qp_timer.expires = 0;
+ }
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_app_configuration() - This function programs the QP registers
+ * @qp_num: QP num to configure
+ * @hw_qp_status: value indicating whether to enable or disable the HW QP
+ */
+void xrnic_qp_app_configuration(int qp_num,
+ enum xrnic_hw_qp_status hw_qp_status)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+ u32 config_value = 0;
+ int recv_pkt_size = qp_attr->recv_pkt_size;
+
+ /* Host number will directly map to local cm id.*/
+ if (hw_qp_status == XRNIC_HW_QP_ENABLE) {
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ XRNIC_QP_CONFIG_RQ_INTR_EN |
+ XRNIC_QP_CONFIG_CQE_INTR_EN | xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(recv_pkt_size) |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_CQE_WRITE_EN;
+ } else if (hw_qp_status == XRNIC_HW_QP_DISABLE) {
+ /* Disabling the QP: clear the whole config word. */
+ config_value = 0;
+ } else {
+ DEBUG_LOG("Invalid HW QP status\n");
+ }
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = qp_attr->sq_ba_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_ba)));
+
+ config_value = qp_attr->cq_ba_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_ba)));
+
+ config_value = qp_attr->sq_depth | (qp_attr->rq_depth << 16);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->q_depth)));
+
+ config_value = (qp_attr->starting_psn |
+ (IB_OPCODE_RC_SEND_ONLY << 24));
+ iowrite32(config_value, (void *)&rdma_qp_attr->last_rq_req);
+
+ config_value = be32_to_cpu(qp_attr->ipv4_addr);
+ iowrite32(config_value, (void *)&rdma_qp_attr->ip_dest_addr1);
+ config_value = ((qp_attr->mac_addr[2] << 24) |
+ (qp_attr->mac_addr[3] << 16) |
+ (qp_attr->mac_addr[4] << 8) |
+ qp_attr->mac_addr[5]);
+ iowrite32(config_value, (void *)&rdma_qp_attr->mac_dest_addr_lsb);
+
+ config_value = ((qp_attr->mac_addr[0] << 8) | qp_attr->mac_addr[1]);
+ iowrite32(config_value, (void *)&rdma_qp_attr->mac_dest_addr_msb);
+
+ config_value = qp_attr->remote_qp;
+ iowrite32(config_value, (void *)&rdma_qp_attr->dest_qp_conf);
+
+ iowrite32(qp_attr->rem_starting_psn, (void *)&rdma_qp_attr->sq_psn);
+#ifdef ERNIC_MEM_REGISTER
+ if (qp_attr->pd)
+ iowrite32(qp_attr->pd, ((void *)(&rdma_qp_attr->pd)));
+#endif
+}
+
+/**
+ * xrnic_qp_hw_configuration() - This function configures QP registers
+ * @qp_num: QP num
+ */
+void xrnic_qp_hw_configuration(int qp_num)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp_attr = &xrnic_dev->qp_attr[qp_num];
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ u32 config_value = 0;
+
+ /* qp_num starts from 0, while data QP numbers start from 2 */
+ qp_attr->qp_num = qp_num + 2;
+
+ config_value = XRNIC_QP_ADV_CONFIG_TRAFFIC_CLASS |
+ XRNIC_QP_ADV_CONFIG_TIME_TO_LIVE |
+ XRNIC_QP_ADV_CONFIG_PARTITION_KEY;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_adv_conf)));
+
+ /* DDR addresses for the RQ write-pointer and SQ completion doorbells;
+ * each QP owns one 4-byte slot, indexed by (qp_num - 1).
+ */
+
+ config_value = xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp_attr->qp_num - 1));
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp_attr->qp_num - 1)))
+ & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ config_value = (xrnic_mmap->stat_rq_buf_ca_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_buf_ca)));
+
+ config_value = XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT |
+ XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->timeout_conf)));
+ qp_attr->qp1_attr = (struct xrnic_qp_attr *)&xrnic_dev->qp1_attr;
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->cm_id = NULL;
+ qp_attr->resend_count = 0;
+ qp_attr->local_cm_id = qp_attr->qp_num;
+ qp_attr->remote_cm_id = 0;
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ qp_attr->sqhd = 0;
+ qp_attr->qp_type = XRNIC_QPT_RC;
+
+ qp_attr->curr_state = XRNIC_LISTEN;
+
+ qp_attr->xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ /* State is initialized to XRNIC_LISTEN above; arm the QP timer.
+ * timer_setup() takes timer flags as its third argument, not a data
+ * pointer; the callback recovers qp_attr from the timer_list pointer.
+ */
+ timer_setup(&qp_attr->qp_timer, xrnic_qp_timer, 0);
+
+ spin_lock_init(&qp_attr->qp_lock);
+}
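+
+/*
+ * Illustrative usage sketch only; the real call sites live elsewhere in
+ * this driver, and XRNIC_MAX_QP_SUPPORT is assumed here as the name for
+ * the number of data QPs. A typical sequence configures the static per-QP
+ * registers for every data QP at init time, then reprograms the data-path
+ * registers when a connection is brought up or torn down:
+ *
+ *   for (qp_num = 0; qp_num < XRNIC_MAX_QP_SUPPORT; qp_num++)
+ *           xrnic_qp_hw_configuration(qp_num);
+ *   ...
+ *   xrnic_qp_app_configuration(qp_num, XRNIC_HW_QP_ENABLE);
+ *   ...
+ *   xrnic_qp_app_configuration(qp_num, XRNIC_HW_QP_DISABLE);
+ */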
+
+#ifdef EXPERIMENTAL_CODE
+#define XRNIC_REG_MAP_NODE 0
+#define XRNIC_SEND_SGL_NODE 1
+#define XRNIC_CQ_BA_NODE 1
+#define XRNIC_RQ_BUF_NODE 1
+#define XRNIC_SQ_BA_NODE 1
+#define XRNIC_TX_HDR_BUF_NODE 1
+#define XRNIC_TX_SGL_BUF_NODE 1
+#define XRNIC_BYPASS_BUF_NODE 1
+#define XRNIC_ERRPKT_BUF_NODE 1
+#define XRNIC_OUTERR_STS_NODE 1
+
+#define XRNIC_RQWR_PTR_NODE 1
+#define XRNIC_SQ_CMPL_NODE 2
+#define XRNIC_STAT_XRNIC_RQ_BUF_NODE 3
+#else /* ! EXPERIMENTAL_CODE */
+#define XRNIC_REG_MAP_NODE 0
+#define XRNIC_SEND_SGL_NODE 1
+#define XRNIC_CQ_BA_NODE 2
+#define XRNIC_RQ_BUF_NODE 3
+#define XRNIC_SQ_BA_NODE 4
+#define XRNIC_TX_HDR_BUF_NODE 5
+#define XRNIC_TX_SGL_BUF_NODE 6
+#define XRNIC_BYPASS_BUF_NODE 7
+#define XRNIC_ERRPKT_BUF_NODE 8
+#define XRNIC_OUTERR_STS_NODE 9
+#define XRNIC_INERR_STS_NODE 10
+#define XRNIC_RQWR_PTR_NODE 11
+#define XRNIC_SQ_CMPL_NODE 12
+#define XRNIC_STAT_XRNIC_RQ_BUF_NODE 13
+#define XRNIC_DATA_BUF_BA_NODE 14
+#define XRNIC_RESP_ERR_PKT_BUF_BA 15
+#endif /* EXPERIMENTAL_CODE */
diff --git a/drivers/staging/xlnx_ernic/xqp.h b/drivers/staging/xlnx_ernic/xqp.h
new file mode 100644
index 000000000000..442932f66daf
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xqp.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_QP_H
+#define _XRNIC_QP_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/interrupt.h>
+enum qp_type {
+ XRNIC_NOT_ALLOCATED = 1,
+ XRNIC_DISC_CTRL_QP = 2,
+ XRNIC_NVMEOF_CTRL_QP = 3,
+ XRNIC_NVMEOF_IO_QP = 4,
+};
+
+enum ernic_qp_status {
+ XRNIC_QP_FREE,
+ XRNIC_QP_IN_USE,
+};
+
+struct xrnic_qp_attr {
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr;
+ struct xrnic_rdma_cm_id *cm_id;
+ void *send_sgl;
+ u64 send_sgl_phys;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+ u32 qp_num;
+ u32 local_cm_id;
+ u32 remote_cm_id;
+ u32 remote_qpn;
+ u32 qp_status;
+ u32 starting_psn;
+ u32 rem_starting_psn;
+ u8 send_sgl_temp[XRNIC_QP1_SEND_PKT_SIZE];
+ u32 resend_count;
+ u32 rq_wrptr_db_local;
+ u32 sq_cmpl_db_local;
+ u32 rq_ci_db_local;
+ u32 sq_pi_db_local;
+ u16 ip_addr_type; /* DESTINATION ADDR_FAMILY */
+ u32 ipv4_addr; /* DESTINATION IP addr */
+ u8 ipv6_addr[16];
+ u8 mac_addr[6];
+ u32 source_qp_num;
+ /* remote qpn used in Active CM. source_qp_num is the source
+ * queue pair in deth
+ */
+ u32 remote_qp;
+ enum xrnic_rdma_cm_event_type curr_state;
+ /* DISC or NVMECTRL; maps directly to the host ID (host_no). */
+ enum xrnic_qp_type qp_type;
+ u16 sqhd;
+ /* Direct mapping to the host ID used to access a particular host_no. */
+ u16 nvmeof_cntlid;
+ u32 nvmeof_qp_id;
+ struct timer_list qp_timer;
+ struct tasklet_struct qp_task;
+ /* kernel locking primitive */
+ spinlock_t qp_lock;
+ char irq_name[32];
+ u32 irq_vect;
+ u32 pd;
+};
+
+enum xrnic_hw_qp_status {
+ XRNIC_HW_QP_ENABLE,
+ XRNIC_HW_QP_DISABLE,
+};
+
+void xrnic_display_qp_reg(int qp_num);
+void xrnic_qp_fatal_handler(unsigned long data);
+void xrnic_qp_timer(struct timer_list *data);
+void xrnic_qp_pkt_recv_intr_handler(unsigned long data);
+void xrnic_qp_task_handler(unsigned long data);
+void xrnic_wqe_completed_intr_handler(unsigned long data);
+
+/* QP Specific function templates */
+int xrnic_qp_recv_pkt(struct xrnic_qp_attr *qp_attr, u32 rq_pkt_count);
+int xrnic_qp_send_pkt(struct xrnic_qp_attr *qp_attr, u32 sq_pkt_count);
+void xrnic_reset_io_qp_rq_ptr(struct xrnic_qp_attr *qp_attr);
+void xrnic_reset_io_qp_sq_cq_ptr(struct xrnic_qp_attr *qp_attr,
+ struct xrnic_hw_handshake_info *hw_hs_info);
+void xrnic_qp_hw_configuration(int qp_num);
+int xrnic_qp1_hw_configuration(void);
+void xrnic_qp_app_configuration(int qp_num,
+ enum xrnic_hw_qp_status hw_qp_status);
+int xrnic_find_free_qp(void);
+int xrnic_set_qp_state(int qp_num, int state);
+
+#ifdef __cplusplus
+ }
+#endif
+#endif
diff --git a/drivers/staging/xlnx_ernic/xrocev2.h b/drivers/staging/xlnx_ernic/xrocev2.h
new file mode 100644
index 000000000000..fec90081d094
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xrocev2.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_ROCEV2_H
+#define _XRNIC_ROCEV2_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <rdma/ib_pack.h>
+
+#define XRNIC_REQ_QPN 0x1
+#define XRNIC_RESPONDER_RESOURCES 0x10
+#define XRNIC_INITIATOR_DEPTH 0x10
+#define XRNIC_REQ_LOCAL_CM_RESP_TOUT 0x11
+#define XRNIC_REQ_REMOTE_CM_RESP_TOUT 0x14
+#define XRNIC_REQ_PATH_PKT_PAYLOAD_MTU 92
+#define XRNIC_REQ_RETRY_COUNT 0x7
+#define XRNIC_REQ_RDC_EXISTS 1
+#define XRNIC_REQ_SRQ 0
+
+#define XRNIC_REJ_INFO_LEN 0
+
+#define XRNIC_MRA_SERVICE_TIMEOUT 0x11
+
+#define XRNIC_REP_END_END_FLOW_CONTROL 0x0
+#define XRNIC_REP_FAIL_OVER_ACCEPTED 0x3
+#define XRNIC_REP_TARGET_ACK_DELAY 0x1F
+#define XRNIC_REP_RNR_RETRY_COUNT 0x7
+
+#define XRNIC_CM_TIMEOUT 0x4
+#define XRNIC_CM_TIMER_TIMEOUT 0x11
+
+enum xrnic_wc_opcod {
+ XRNIC_RDMA_WRITE = 0x0,
+ XRNIC_SEND_ONLY = 0x2,
+ XRNIC_RDMA_READ = 0x4
+};
+
+enum xrnic_msg_rej {
+ XRNIC_REJ_REQ = 0x0,
+ XRNIC_REJ_REP = 0x1,
+ XRNIC_REJ_OTHERS = 0x2,
+};
+
+enum xrnic_msg_mra {
+ XRNIC_MRA_REQ = 0x0,
+ XRNIC_MRA_REP = 0x1,
+ XRNIC_MRA_LAP = 0x2,
+};
+
+enum xrnic_rej_reason {
+ XRNIC_REJ_NO_QP_AVAILABLE = 1,
+ XRNIC_REJ_NO_EE_AVAILABLE = 2,
+ XRNIC_REJ_NO_RESOURCE_AVAILABLE = 3,
+ XRNIC_REJ_TIMEOUT = 4,
+ XRNIC_REJ_UNSUPPORTED_REQ = 5,
+ XRNIC_REJ_INVALID_CM_ID = 6,
+ XRNIC_REJ_INVALID_QPN = 7,
+ XRNIC_REJ_RDC_NOT_EXIST = 11,
+ XRNIC_REJ_PRIM_LID_PORT_NOT_EXIST = 13,
+ XRNIC_REJ_INVALID_MTU = 26,
+ XRNIC_REJ_INSUFFICIENT_RESP_RESOURCE = 27,
+ XRNIC_REJ_CONSUMER_REJECT = 28,
+ XRNIC_REJ_DUPLICATE_LOCAL_CM_ID = 30,
+ XRNIC_REJ_UNSUPPORTED_CLASS_VERSION = 31,
+};
+
+/* MAD common status field */
+struct mad_comm_status {
+ __u8 busy:1;
+ __u8 redir_reqd:1;
+ __u8 invalid_field_code:3;
+ __u8 reserved:3;
+ __u8 class_specific;
+} __packed;
+
+#define XRNIC_MAD_BASE_VER 1
+#define XRNIC_MAD_MGMT_CLASS 0x07
+#define XRNIC_MAD_RESP_BIT 0x0
+#define XRNIC_MAD_COMM_SEND 0x3
+#define XRNIC_MAD_RESERVED 0x0
+
+/* Management Datagram (MAD), 256 bytes */
+struct mad {
+ __u8 base_ver;
+ __u8 mgmt_class;
+ __u8 class_version;
+ __u8 resp_bit_method;
+ struct mad_comm_status status; /* 2 bytes */
+ __be16 class_specific;
+ __be64 transaction_id;
+ __be16 attribute_id;
+ __be16 reserved;
+ __be32 attrb_modifier;
+ __be32 data[58];
+} __packed;
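+
+/*
+ * Size check: 4 one-byte header fields + 2 (status) + 2 (class_specific)
+ * + 8 (transaction_id) + 2 + 2 + 4 (attribute fields) + 58 * 4 (data)
+ * = 24 + 232 = 256 bytes, matching the 256-byte MAD noted above.
+ */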
+
+struct req {
+ __u32 local_cm_id;
+ __u32 reserved1;
+ __u8 service_id[8];
+ __u8 local_ca_guid[8];
+ __u32 reserved2;
+ __u32 local_q_key;
+ __u32 local_qpn:24;
+ __u8 responder_resources:8;
+ __u32 local_eecn:24;
+ __u32 initiator_depth:8;
+ __u32 remote_eecn:24;
+
+ __u32 remote_cm_resp_tout:5;
+ __u32 transport_svc_type:2;
+ __u32 e2e_flow_control:1;
+ __u8 start_psn[3];
+ __u8 local_cm_resp_tout:5;
+ __u8 retry_count: 3;
+ __u16 p_key;
+ __u8 path_packet_payload_mtu:4;
+ __u8 rdc_exists:1;
+ __u8 rnr_retry_count:3;
+ __u8 max_cm_retries:4;
+ __u8 srq:1;
+ __u8 reserved3:3;
+ __u16 primary_local_port_lid;
+ __u16 primary_remote_port_lid;
+ __u64 primary_local_port_gid[2];
+ __u64 primary_remote_port_gid[2];
+ __u32 primary_flow_label:20;
+ __u32 reserved4:6;
+ __u32 primary_packet_rate:6;
+ __u32 primary_traffic_class:8;
+ __u32 primary_hop_limit:8;
+ __u32 primary_sl:4;
+ __u32 primary_subnet_local:1;
+ __u32 reserved5:3;
+ __u32 primary_local_ack_tout:5;
+ __u32 reserved6:3;
+ __u32 alternate_local_port_lid:16;
+ __u32 alternate_remote_port_lid:16;
+ __u64 alternate_local_port_gid[2];
+ __u64 alternate_remote_port_gid[2];
+ __u32 alternate_flow_labe:20;
+ __u32 reserved7:6;
+ __u32 alternate_packet_rate:6;
+ __u32 alternate_traffic_class:8;
+ __u32 alternate_hop_limit:8;
+ __u32 alternate_sl:4;
+ __u32 alternate_subnet_local:1;
+ __u32 reserved8:3;
+ __u32 alternate_local_ack_timeout: 5;
+ __u32 reserved9:3;
+ __u8 private_data[92];
+} __packed;
+
+/* MRA (Message Receipt Acknowledgement) message contents */
+struct mra {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 message_mraed:2;
+ __u8 reserved1:6;
+ __u8 service_timeout:5;
+ __u8 reserved2:3;
+ __u8 private_data[222];
+} __packed;
+
+/* REJ Message contents */
+struct rej {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 message_rejected:2;
+ __u8 reserved1:6;
+ __u8 reject_info_length:7;
+ __u8 reserved2:1;
+ __u16 reason;
+ __u8 additional_reject_info[72];
+ __u8 private_data[148];
+} __packed;
+
+/* REP Message contents */
+struct rep {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 local_q_key;
+ __u32 local_qpn:24;
+ __u8 reserved1:8;
+ __u32 local_ee_context:24;
+ __u32 reserved2:8;
+ __u8 start_psn[3];
+ __u8 reserved3;
+ __u8 responder_resources;
+ __u8 initiator_depth;
+ union {
+ __u8 target_fail_end;
+ __u8 target_ack_delay:5;
+ __u8 fail_over_accepted:2;
+ };
+ __u8 end_end_flow_control:1;
+ __u8 rnr_retry_count:3;
+ __u8 sqr:1;
+ __u8 reserved4:4;
+ __u8 local_ca_guid[8];
+ __u8 private_data[196];
+} __packed;
+
+/* RTU indicates that the connection is established,
+ * and that the recipient
+ * may begin transmitting
+ */
+struct rtu {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 private_data[224];
+} __packed;
+
+#define XRNIC_SEND_UD 0x64
+#define XRNIC_SET_SOLICT_EVENT 0x0
+#define XRNIC_RESET_SOLICT_EVENT 0x0
+#define XRNIC_MIGRATION_REQ 0x0
+#define XRNIC_PAD_COUNT 0x0
+#define XRNIC_TRANSPORT_HDR_VER 0x0
+#define XRNIC_DESTINATION_QP 0x1
+#define XRNIC_RESERVED1 0x0
+#define XRNIC_ACK_REQ 0x0
+#define XRNIC_RESERVED2 0x0
+
+struct bth {
+ __u8 opcode;
+ __u8 solicited_event:1;
+ __u8 migration_req:1;
+ __u8 pad_count:2;
+ __u8 transport_hdr_ver:4;
+ __be16 partition_key;
+ __u8 reserved1;
+ __u8 destination_qp[3];
+ __u32 ack_request:1;
+ __u32 reserved2:7;
+ __u32 pkt_seq_num:24;
+} __packed;
+
+#define XRNIC_DETH_RESERVED 0
+struct deth {
+ __be32 q_key;
+ __u8 reserved;
+ __be32 src_qp:24;
+} __packed;
+
+/* DREQ request for communication release*/
+struct dreq {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 remote_qpn_eecn:24;
+ __u32 reserved:8;
+ __u8 private_data[220];
+} __packed;
+
+/* DREP - reply to request for communication release */
+struct drep {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 private_data[228];
+} __packed;
+
+/* LAP - load alternate path */
+struct lap {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 reserved1;
+ __u32 remote_QPN_EECN:24;
+ __u32 remote_cm_response_timeout:5;
+ __u32 reserved2:3;
+ __u32 reserved3;
+ __u32 alt_local_port_id:16;
+ __u32 alt_remote_port_id:16;
+ __u64 alt_local_port_gid[2];
+ __u64 alt_remote_port_gid[2];
+ __u32 alt_flow_label:20;
+ __u32 reserved4:4;
+ __u32 alt_traffic_class:8;
+ __u32 alt_hope_limit:8;
+ __u32 reserved5:2;
+ __u32 alt_pkt_rate:6;
+ __u32 alt_sl:4;
+ __u32 alt_subnet_local:1;
+ __u32 reserved6:3;
+ __u32 alt_local_ack_timeout:5;
+ __u32 reserved7:3;
+ __u8 private_data[168];
+} __packed;
+
+/* APR - alternate path response */
+struct apr {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 additional_info_length;
+ __u8 ap_status;
+ __u8 reserved1[2];
+ __u8 additional_info[72];
+ __u8 private_data[148];
+} __packed;
+
+enum cm_establishment_states {
+ CLASS_PORT_INFO = 0x1,
+ CONNECT_REQUEST = 0x10, /* Request for connection */
+ MSG_RSP_ACK = 0x11, /* Message Response Ack */
+ CONNECT_REJECT = 0x12, /* Connect Reject */
+ CONNECT_REPLY = 0x13, /* Reply for request communication */
+ READY_TO_USE = 0x14, /* Ready to use */
+ DISCONNECT_REQUEST = 0x15, /* Receive Disconnect req */
+ DISCONNECT_REPLY = 0x16, /* Send Disconnect reply */
+ SERVICE_ID_RESOLUTION_REQ = 0x17,
+ SERVICE_ID_RESOLUTION_REQ_REPLY = 0x18,
+ LOAD_ALTERNATE_PATH = 0x19,
+ ALTERNATE_PATH_RESPONSE = 0x1a,
+};
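+
+/*
+ * For reference, a typical passive-side (responder) connection exchange
+ * using the message structures above; this is only a summary of standard
+ * CM semantics, the actual handling lives in the connection manager code:
+ *
+ *   peer sends CONNECT_REQUEST (struct req)
+ *   ERNIC answers with CONNECT_REPLY (struct rep) or CONNECT_REJECT (struct rej)
+ *   peer sends READY_TO_USE (struct rtu), connection established
+ *   peer sends DISCONNECT_REQUEST (struct dreq)
+ *   ERNIC answers with DISCONNECT_REPLY (struct drep)
+ */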
+
+#define XRNIC_ETH_ALEN 6
+#define XRNIC_ETH_P_IP 0x0800
+#define XRNIC_ETH_P_ARP 0x0806
+#define XRNIC_ETH_HLEN 14
+#define XRNIC_ICRC_SIZE 4
+
+//Ethernet header
+struct ethhdr_t {
+ unsigned char h_dest[XRNIC_ETH_ALEN];
+ unsigned char h_source[XRNIC_ETH_ALEN];
+ __be16 eth_type; /* packet type ID field */
+} __packed;
+
+struct ipv4hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ihl:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:4, /* Version */
+ ihl:4; /* Internet Header Length */
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 tos; /* Type of service */
+ __be16 total_length; /* Total length */
+ __be16 id; /* Identification */
+ __be16 frag_off; /* Fragment offset */
+ __u8 time_to_live; /* Time to live */
+ __u8 protocol; /* Protocol */
+ __be16 hdr_chksum; /* Header checksum */
+ __be32 src_addr; /* Source address */
+ __be32 dest_addr; /* Destination address */
+} __packed;
+
+struct qp_cm_pkt {
+ struct ethhdr_t eth; //14 Byte
+ union {
+ struct ipv4hdr ipv4; //20 bytes
+ struct ipv4hdr ipv6; //20 bytes
+ } ip;
+ struct udphdr udp; //8 Byte
+ struct bth bth; //12 Bytes
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+/*
+ * RoCEv2 packet for receiver. Duplicated for ease of code readability.
+ */
+struct qp_cm_pkt_hdr_ipv4 {
+ struct ethhdr_t eth; //14 Byte
+ struct ipv4hdr ipv4;
+ struct udphdr udp; //8 Byte
+ struct bth bth;
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+struct qp_cm_pkt_hdr_ipv6 {
+ struct ethhdr_t eth; //14 Byte
+ struct ipv6hdr ipv6;
+ struct udphdr udp; //8 Byte
+ struct bth bth;
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+/* MAD Packet validation defines */
+#define MAD_BASIC_VER 1
+#define OPCODE_SEND_UD 0x64
+
+#define MAD_SUBNET_CLASS 0x1
+#define MAD_DIRECT_SUBNET_CLASS 0x81
+
+#define MAD_SEND_CM_MSG 0x03
+#define MAD_VERF_FAILED -1
+#define MAD_VERF_SUCCESS 0
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_ROCEV2_H*/
diff --git a/drivers/staging/xlnx_tsmux/Kconfig b/drivers/staging/xlnx_tsmux/Kconfig
new file mode 100644
index 000000000000..0c1d9498e35b
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/Kconfig
@@ -0,0 +1,11 @@
+config XLNX_TSMUX
+ tristate "Xilinx MPEG2 Transport Stream Muxer"
+ select DMA_SHARED_BUFFER
+ help
+ This driver supports the Xilinx MPEG2 transport stream muxer IP.
+ It accepts multimedia streams from a source kernel subsystem,
+ prepares an MPEG2 transport stream and forwards it to the sink
+ kernel subsystem.
+
+ To compile this driver as a module, choose M here.
+ If unsure, choose N.
diff --git a/drivers/staging/xlnx_tsmux/MAINTAINERS b/drivers/staging/xlnx_tsmux/MAINTAINERS
new file mode 100644
index 000000000000..cfab4fa55698
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX MPG2TSMUX DRIVER
+M: Venkateshwar Rao <venkateshwar.rao.gannavarapu@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_tsmux
diff --git a/drivers/staging/xlnx_tsmux/Makefile b/drivers/staging/xlnx_tsmux/Makefile
new file mode 100644
index 000000000000..4437068337e7
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XLNX_TSMUX) += xlnx_mpg2tsmux.o
diff --git a/drivers/staging/xlnx_tsmux/dt-binding.txt b/drivers/staging/xlnx_tsmux/dt-binding.txt
new file mode 100644
index 000000000000..e4a7095d92e1
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/dt-binding.txt
@@ -0,0 +1,28 @@
+The Xilinx mpegtsmux IP reads elementary streams from memory and
+writes the MPEG2 TS (transport stream) back to memory.
+
+The MPEG2 TS muxer follows a DMA-descriptor-based approach. Each DMA
+descriptor carries the buffer properties and buffer address of one
+elementary stream. The IP reads the descriptors one after another,
+generates TS packets from the information in each descriptor, and
+writes the generated TS packets to the output buffer address.
+
+Required properties:
+
+- compatible: must be "xlnx,tsmux-1.0"
+- interrupts: interrupt number
+- interrupt-parent: phandle for interrupt controller
+- reg: base address and size of the IP core
+- clock-names: must contain "ap_clk"
+- clocks: phandle to the AXI Lite interface clock (ap_clk)
+
+Example:
+ ts2mux: ts2mux@a0200000 {
+ compatible = "xlnx,tsmux-1.0";
+ interrupt-parent = <&gic>;
+ interrupts = <0 90 4>;
+ reg = <0x0 0xa0200000 0x0 0x30000>;
+ clock-names = "ap_clk";
+ clocks = <&misc_clk_0>;
+ };
diff --git a/drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c b/drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c
new file mode 100644
index 000000000000..84f4b501570a
--- /dev/null
+++ b/drivers/staging/xlnx_tsmux/xlnx_mpg2tsmux.c
@@ -0,0 +1,1510 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx TS mux driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Venkateshwar Rao G <venkateshwar.rao.gannavarapu@xilinx.com>
+ */
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmapool.h>
+#include <linux/dma-buf.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <uapi/linux/xlnx_mpg2tsmux_interface.h>
+
+#define DRIVER_NAME "mpegtsmux-1.0"
+#define DRIVER_CLASS "mpg2mux_ts_cls"
+#define DRIVER_MAX_DEV (10)
+
+/* Register offsets and bit masks */
+#define XTSMUX_RST_CTRL 0x00
+#define XTSMUX_GLBL_IER 0x04
+#define XTSMUX_IER_STAT 0x08
+#define XTSMUX_ISR_STAT 0x0c
+#define XTSMUX_ERR_STAT 0x10
+#define XTSMUX_LAST_NODE_PROCESSED 0x14
+#define XTSMUX_MUXCONTEXT_ADDR 0x20
+#define XTSMUX_STREAMCONTEXT_ADDR 0x30
+#define XTSMUX_NUM_STREAM_IDTBL 0x48
+#define XTSMUX_NUM_DESC 0x70
+#define XTSMUX_STREAM_IDTBL_ADDR 0x78
+#define XTSMUX_CONTEXT_DATA_SIZE 64
+
+#define XTSMUX_RST_CTRL_START_MASK BIT(0)
+#define XTSMUX_GLBL_IER_ENABLE_MASK BIT(0)
+#define XTSMUX_IER_ENABLE_MASK BIT(0)
+
+/* Number of input/output streams supported */
+#define XTSMUX_MAXIN_STRM 112
+#define XTSMUX_MAXIN_PLSTRM 16
+#define XTSMUX_MAXIN_TLSTRM (XTSMUX_MAXIN_STRM + XTSMUX_MAXIN_PLSTRM)
+#define XTSMUX_MAXOUT_STRM 112
+#define XTSMUX_MAXOUT_PLSTRM 16
+#define XTSMUX_MAXOUT_TLSTRM (XTSMUX_MAXOUT_STRM + XTSMUX_MAXOUT_PLSTRM)
+#define XTSMUX_POOL_SIZE 128
+/* Initial version is tested with 256 align only */
+#define XTSMUX_POOL_ALIGN 256
+#define XTSMUX_STRMBL_FREE 0
+#define XTSMUX_STRMBL_BUSY 1
+
+/**
+ * struct stream_context - struct to enqueue a stream context descriptor
+ * @command: stream context type
+ * @is_pcr_stream: flag for PCR (program clock reference) stream
+ * @stream_id: stream identification number
+ * @extended_stream_id: extended stream id
+ * @reserved1: reserved for hardware alignment
+ * @pid: packet id number
+ * @dmabuf_id: 0 for buf allocated by driver, nonzero for external buf
+ * @size_data_in: size in bytes of input buffer
+ * @pts: presentation time stamp
+ * @dts: decoding time stamp
+ * @in_buf_pointer: physical address of src buf address
+ * @reserved2: reserved for hardware alignment
+ * @insert_pcr: inserting pcr in stream context
+ * @reserved3: reserved for hardware alignment
+ * @pcr_extension: pcr extension number
+ * @pcr_base: pcr base number
+ */
+struct stream_context {
+ enum ts_mux_command command;
+ u8 is_pcr_stream;
+ u8 stream_id;
+ u8 extended_stream_id;
+ u8 reserved1;
+ u16 pid;
+ u16 dmabuf_id;
+ u32 size_data_in;
+ u64 pts;
+ u64 dts;
+ u64 in_buf_pointer;
+ u32 reserved2;
+ u8 insert_pcr;
+ u8 reserved3;
+ u16 pcr_extension;
+ u64 pcr_base;
+};
+
+/**
+ * enum node_status_info - status of stream context
+ * @NOT_FILLED: node not filled
+ * @UPDATED_BY_DRIVER: updated by driver
+ * @READ_BY_IP: read by IP
+ * @USED_BY_IP: used by IP
+ * @NODE_INVALID: invalid node
+ */
+enum node_status_info {
+ NOT_FILLED = 0,
+ UPDATED_BY_DRIVER,
+ READ_BY_IP,
+ USED_BY_IP,
+ NODE_INVALID
+};
+
+/**
+ * enum stream_errors - stream context error type
+ * @NO_ERROR: no error
+ * @PARTIAL_FRAME_WRITTEN: partial frame written
+ * @DESCRIPTOR_NOT_READABLE: descriptor not readable
+ */
+enum stream_errors {
+ NO_ERROR = 0,
+ PARTIAL_FRAME_WRITTEN,
+ DESCRIPTOR_NOT_READABLE
+};
+
+/**
+ * struct stream_context_node - stream node in the stream context linked list
+ * @node_number: node number to handle streams
+ * @node_status: status of stream node
+ * @element: stream context info
+ * @error_code: error codes
+ * @reserved1: reserved bits for hardware align
+ * @tail_pointer: physical address of next stream node in linked list
+ * @strm_phy_addr: physical address of stream context
+ * @node: struct of linked list head
+ * @reserved2: reserved for hardware align
+ */
+struct stream_context_node {
+ u32 node_number;
+ enum node_status_info node_status;
+ struct stream_context element;
+ enum stream_errors error_code;
+ u32 reserved1;
+ u64 tail_pointer;
+ u64 strm_phy_addr;
+ struct list_head node;
+ u64 reserved2;
+};
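+
+/*
+ * Note: stream context nodes are handed to the IP as a singly linked list.
+ * The driver writes the physical address of each newly enqueued node into
+ * the tail_pointer of the previous node (see
+ * xlnx_tsmux_enqueue_stream_context() below) and programs the address of
+ * the first node into XTSMUX_STREAMCONTEXT_ADDR; node_status and
+ * error_code appear to be written back by the hardware as descriptors are
+ * consumed.
+ */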
+
+/**
+ * struct stream_info - streamid node in the streamid table
+ * @pid: identification number of stream
+ * @continuity_counter: counter to maintain packet count for a stream
+ * @usageflag: flag to know free or under use for allocating streamid node
+ * @strmtbl_update: struct to know enqueue or dequeue streamid in table
+ */
+struct stream_info {
+ u16 pid;
+ u8 continuity_counter;
+ u8 usageflag;
+ enum strmtbl_cnxt strmtbl_update;
+};
+
+/* Enum for error handling of mux context */
+enum mux_op_errs {
+ MUXER_NO_ERROR = 0,
+ ERROR_OUTPUT_BUFFER_IS_NOT_ACCESIBLE,
+ ERROR_PARTIAL_PACKET_WRITTEN
+};
+
+/**
+ * struct muxer_context - struct to describe mux node in linked list
+ * @node_status: status of mux node
+ * @reserved: reserved for hardware align
+ * @dst_buf_start_addr: physical address of dst buf
+ * @dst_buf_size: size of the output buffer
+ * @dst_buf_written: size of data written in dst buf
+ * @num_of_pkts_written: number of packets in dst buf
+ * @error_code: error status of mux node updated by IP
+ * @mux_phy_addr: physical address of muxer
+ * @node: struct of linked list head
+ */
+struct muxer_context {
+ enum node_status_info node_status;
+ u32 reserved;
+ u64 dst_buf_start_addr;
+ u32 dst_buf_size;
+ u32 dst_buf_written;
+ u32 num_of_pkts_written;
+ enum mux_op_errs error_code;
+ u64 mux_phy_addr;
+ struct list_head node;
+};
+
+/**
+ * struct xlnx_tsmux_dmabufintl - dma buf internal info
+ * @dbuf: reference to a buffer's dmabuf struct
+ * @attach: attachment to the buffer's dmabuf
+ * @sgt: scatterlist info for the buffer's dmabuf
+ * @dmabuf_addr: buffer physical address
+ * @dmabuf_fd: dma buffer fd
+ * @buf_id: dma buffer reference id
+ */
+struct xlnx_tsmux_dmabufintl {
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ dma_addr_t dmabuf_addr;
+ s32 dmabuf_fd;
+ u16 buf_id;
+};
+
+/**
+ * struct xlnx_tsmux - xilinx mpeg2 TS muxer device
+ * @dev: pointer to struct device instance used by the driver
+ * @iomem: base address of the HW/IP
+ * @chdev: char device handle
+ * @user_count: count of users who have opened the device
+ * @lock: spinlock to protect driver data structures
+ * @waitq: wait queue used by the driver
+ * @irq: irq number
+ * @id: device instance ID
+ * @num_inbuf: number of input buffers allocated using DMA
+ * @num_outbuf: number of output buffers allocated using DMA
+ * @srcbuf_size: size of each source buffer
+ * @dstbuf_size: size of each destination buffer
+ * @strm_node: list containing descriptors of stream context
+ * @mux_node: list containing descriptors of mux context
+ * @stcxt_node_cnt: stream node count used for maintaining the list
+ * @num_strmnodes: number of stream nodes in the streamid table
+ * @intn_stream_count: internal count of streams added to stream context
+ * @outbuf_idx: index number to maintain output buffers
+ * @srcbuf_addrs: physical address of source buffer
+ * @dstbuf_addrs: physical address of destination buffer
+ * @src_kaddrs: kernel VA for source buffer allocated by the driver
+ * @dst_kaddrs: kernel VA for destination buffer allocated by the driver
+ * @strm_ctx_pool: dma pool to allocate stream context buffers
+ * @mux_ctx_pool: dma pool to allocate mux context buffers
+ * @strmtbl_addrs: physical address of streamid table
+ * @strmtbl_kaddrs: kernel VA for streamid table
+ * @intn_strmtbl_addrs: physical address of streamid table for internal
+ * @intn_strmtbl_kaddrs: kernel VA for streamid table for internal
+ * @ap_clk: interface clock
+ * @src_dmabufintl: array of src DMA buf allocated by user
+ * @dst_dmabufintl: array of dst DMA buf allocated by user
+ * @outbuf_written: size in bytes written in output buffer
+ * @stream_count: stream count
+ */
+struct xlnx_tsmux {
+ struct device *dev;
+ void __iomem *iomem;
+ struct cdev chdev;
+ atomic_t user_count;
+ /* lock is used to protect the stream and mux context lists */
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ s32 irq;
+ s32 id;
+ u32 num_inbuf;
+ u32 num_outbuf;
+ size_t srcbuf_size;
+ size_t dstbuf_size;
+ struct list_head strm_node;
+ struct list_head mux_node;
+ u32 stcxt_node_cnt;
+ u32 num_strmnodes;
+ atomic_t intn_stream_count;
+ atomic_t outbuf_idx;
+ dma_addr_t srcbuf_addrs[XTSMUX_MAXIN_TLSTRM];
+ dma_addr_t dstbuf_addrs[XTSMUX_MAXOUT_TLSTRM];
+ void *src_kaddrs[XTSMUX_MAXIN_TLSTRM];
+ void *dst_kaddrs[XTSMUX_MAXOUT_TLSTRM];
+ struct dma_pool *strm_ctx_pool;
+ struct dma_pool *mux_ctx_pool;
+ dma_addr_t strmtbl_addrs;
+ void *strmtbl_kaddrs;
+ dma_addr_t intn_strmtbl_addrs;
+ void *intn_strmtbl_kaddrs;
+ struct clk *ap_clk;
+ struct xlnx_tsmux_dmabufintl src_dmabufintl[XTSMUX_MAXIN_STRM];
+ struct xlnx_tsmux_dmabufintl dst_dmabufintl[XTSMUX_MAXOUT_STRM];
+ s32 outbuf_written;
+ atomic_t stream_count;
+};
+
+static inline u32 xlnx_tsmux_read(const struct xlnx_tsmux *mpgmuxts,
+ const u32 reg)
+{
+ return ioread32(mpgmuxts->iomem + reg);
+}
+
+static inline void xlnx_tsmux_write(const struct xlnx_tsmux *mpgmuxts,
+ const u32 reg, const u32 val)
+{
+ iowrite32(val, (void __iomem *)(mpgmuxts->iomem + reg));
+}
+
+/* TODO: Optimize using iowrite64 call */
+static inline void xlnx_tsmux_write64(const struct xlnx_tsmux *mpgmuxts,
+ const u32 reg, const u64 val)
+{
+ iowrite32(lower_32_bits(val), (void __iomem *)(mpgmuxts->iomem + reg));
+ iowrite32(upper_32_bits(val), (void __iomem *)(mpgmuxts->iomem +
+ reg + 4));
+}
+
+static int xlnx_tsmux_start_muxer(struct xlnx_tsmux *mpgmuxts)
+{
+ struct stream_context_node *new_strm_node;
+ struct muxer_context *new_mux_node;
+
+ new_mux_node = list_first_entry_or_null(&mpgmuxts->mux_node,
+ struct muxer_context, node);
+ if (!new_mux_node)
+ return -ENXIO;
+
+ xlnx_tsmux_write64(mpgmuxts, XTSMUX_MUXCONTEXT_ADDR,
+ new_mux_node->mux_phy_addr);
+
+ new_strm_node = list_first_entry_or_null(&mpgmuxts->strm_node,
+ struct stream_context_node,
+ node);
+ if (!new_strm_node)
+ return -ENXIO;
+
+ xlnx_tsmux_write64(mpgmuxts, XTSMUX_STREAMCONTEXT_ADDR,
+ new_strm_node->strm_phy_addr);
+
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_NUM_DESC,
+ atomic_read(&mpgmuxts->intn_stream_count));
+
+ xlnx_tsmux_write64(mpgmuxts, XTSMUX_STREAM_IDTBL_ADDR,
+ (u64)mpgmuxts->intn_strmtbl_addrs);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_NUM_STREAM_IDTBL, 1);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_GLBL_IER,
+ XTSMUX_GLBL_IER_ENABLE_MASK);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_IER_STAT,
+ XTSMUX_IER_ENABLE_MASK);
+
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_RST_CTRL,
+ XTSMUX_RST_CTRL_START_MASK);
+
+ return 0;
+}
+
+static void xlnx_tsmux_stop_muxer(const struct xlnx_tsmux *mpgmuxts)
+{
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_GLBL_IER, 0);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_IER_STAT, 0);
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_RST_CTRL, 0);
+}
+
+static enum xlnx_tsmux_status
+xlnx_tsmux_get_status(const struct xlnx_tsmux *mpgmuxts)
+{
+ u32 status;
+
+ status = xlnx_tsmux_read(mpgmuxts, XTSMUX_RST_CTRL);
+
+ if (!status)
+ return MPG2MUX_ERROR;
+
+ if (status & XTSMUX_RST_CTRL_START_MASK)
+ return MPG2MUX_BUSY;
+
+ return MPG2MUX_READY;
+}
+
+static struct class *xlnx_tsmux_class;
+static dev_t xlnx_tsmux_devt;
+static atomic_t xlnx_tsmux_ndevs = ATOMIC_INIT(0);
+
+static int xlnx_tsmux_open(struct inode *pin, struct file *fptr)
+{
+ struct xlnx_tsmux *mpgtsmux;
+
+ mpgtsmux = container_of(pin->i_cdev, struct xlnx_tsmux, chdev);
+
+ fptr->private_data = mpgtsmux;
+ atomic_inc(&mpgtsmux->user_count);
+ atomic_set(&mpgtsmux->outbuf_idx, 0);
+ mpgtsmux->stcxt_node_cnt = 0;
+
+ return 0;
+}
+
+static int xlnx_tsmux_release(struct inode *pin, struct file *fptr)
+{
+ struct xlnx_tsmux *mpgtsmux = (struct xlnx_tsmux *)fptr->private_data;
+
+ if (!mpgtsmux)
+ return -EIO;
+
+ return 0;
+}
+
+/* TODO: Optimize buf alloc, dealloc API's to accommodate src, dst, strmtbl */
+static int xlnx_tsmux_ioctl_srcbuf_dealloc(struct xlnx_tsmux *mpgmuxts)
+{
+ unsigned int i;
+
+ for (i = 0; i < mpgmuxts->num_inbuf; i++) {
+ if (!mpgmuxts->src_kaddrs[i] || !mpgmuxts->srcbuf_addrs[i])
+ break;
+ dma_free_coherent(mpgmuxts->dev, mpgmuxts->srcbuf_size,
+ mpgmuxts->src_kaddrs[i],
+ mpgmuxts->srcbuf_addrs[i]);
+ mpgmuxts->src_kaddrs[i] = NULL;
+ }
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_srcbuf_alloc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ unsigned int i;
+ struct strc_bufs_info buf_data;
+
+ ret = copy_from_user(&buf_data, arg, sizeof(struct strc_bufs_info));
+ if (ret) {
+ dev_dbg(mpgmuxts->dev, "Failed to read input buffer info\n");
+ return -EFAULT;
+ }
+
+ if (buf_data.num_buf > XTSMUX_MAXIN_PLSTRM) {
+ dev_dbg(mpgmuxts->dev, "Excessive input payload. supported %d",
+ XTSMUX_MAXIN_PLSTRM);
+ return -EINVAL;
+ }
+
+ mpgmuxts->num_inbuf = buf_data.num_buf;
+ mpgmuxts->srcbuf_size = buf_data.buf_size;
+ /* buf_size & num_buf boundary conditions are handled in application
+ * and initial version of driver tested with 32-bit addressing only
+ */
+ for (i = 0; i < mpgmuxts->num_inbuf; i++) {
+ mpgmuxts->src_kaddrs[i] =
+ dma_alloc_coherent(mpgmuxts->dev,
+ mpgmuxts->srcbuf_size,
+ &mpgmuxts->srcbuf_addrs[i],
+ GFP_KERNEL | GFP_DMA32);
+ if (!mpgmuxts->src_kaddrs[i]) {
+ dev_dbg(mpgmuxts->dev, "dma alloc fail %d buffer", i);
+ goto exit_free;
+ }
+ }
+
+ return 0;
+
+exit_free:
+ xlnx_tsmux_ioctl_srcbuf_dealloc(mpgmuxts);
+
+ return -ENOMEM;
+}
+
+static int xlnx_tsmux_ioctl_dstbuf_dealloc(struct xlnx_tsmux *mpgmuxts)
+{
+ unsigned int i;
+
+ for (i = 0; i < mpgmuxts->num_outbuf; i++) {
+ if (!mpgmuxts->dst_kaddrs[i] || !mpgmuxts->dstbuf_addrs[i])
+ break;
+ dma_free_coherent(mpgmuxts->dev, mpgmuxts->dstbuf_size,
+ mpgmuxts->dst_kaddrs[i],
+ mpgmuxts->dstbuf_addrs[i]);
+ mpgmuxts->dst_kaddrs[i] = NULL;
+ }
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_dstbuf_alloc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ unsigned int i;
+ struct strc_bufs_info buf_data;
+
+ ret = copy_from_user(&buf_data, arg, sizeof(struct strc_bufs_info));
+ if (ret) {
+ dev_dbg(mpgmuxts->dev, "%s: Failed to read output buffer info",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (buf_data.num_buf > XTSMUX_MAXOUT_PLSTRM) {
+ dev_dbg(mpgmuxts->dev, "Excessive output payload supported %d",
+ XTSMUX_MAXOUT_PLSTRM);
+ return -EINVAL;
+ }
+
+ mpgmuxts->num_outbuf = buf_data.num_buf;
+ mpgmuxts->dstbuf_size = buf_data.buf_size;
+ /* buf_size & num_buf boundary conditions are handled in application*/
+ for (i = 0; i < mpgmuxts->num_outbuf; i++) {
+ mpgmuxts->dst_kaddrs[i] =
+ dma_alloc_coherent(mpgmuxts->dev,
+ mpgmuxts->dstbuf_size,
+ &mpgmuxts->dstbuf_addrs[i],
+ GFP_KERNEL | GFP_DMA32);
+ if (!mpgmuxts->dst_kaddrs[i]) {
+ dev_dbg(mpgmuxts->dev, "dmamem alloc fail for %d", i);
+ goto exit_free;
+ }
+ }
+
+ return 0;
+
+exit_free:
+ xlnx_tsmux_ioctl_dstbuf_dealloc(mpgmuxts);
+
+ return -ENOMEM;
+}
+
+static int xlnx_tsmux_ioctl_strmtbl_dealloc(struct xlnx_tsmux *mpgmuxts)
+{
+ u32 buf_size;
+
+ buf_size = sizeof(struct stream_info) * mpgmuxts->num_strmnodes;
+ if (!mpgmuxts->strmtbl_kaddrs || !mpgmuxts->strmtbl_addrs)
+ return 0;
+
+ dma_free_coherent(mpgmuxts->dev, buf_size, mpgmuxts->strmtbl_kaddrs,
+ mpgmuxts->strmtbl_addrs);
+ mpgmuxts->strmtbl_kaddrs = NULL;
+
+ if (!mpgmuxts->intn_strmtbl_kaddrs || !mpgmuxts->intn_strmtbl_addrs)
+ return 0;
+ dma_free_coherent(mpgmuxts->dev, buf_size,
+ mpgmuxts->intn_strmtbl_kaddrs,
+ mpgmuxts->intn_strmtbl_addrs);
+ mpgmuxts->intn_strmtbl_kaddrs = NULL;
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_strmtbl_alloc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret, buf_size;
+ u16 num_nodes;
+
+ ret = copy_from_user(&num_nodes, arg, sizeof(u16));
+ if (ret) {
+ dev_dbg(mpgmuxts->dev, "Failed to read streamid table info");
+ return -EFAULT;
+ }
+ mpgmuxts->num_strmnodes = num_nodes;
+ buf_size = sizeof(struct stream_info) * mpgmuxts->num_strmnodes;
+
+ mpgmuxts->strmtbl_kaddrs =
+ dma_alloc_coherent(mpgmuxts->dev,
+ buf_size, &mpgmuxts->strmtbl_addrs,
+ GFP_KERNEL | GFP_DMA32);
+ if (!mpgmuxts->strmtbl_kaddrs) {
+ dev_dbg(mpgmuxts->dev, "dmamem alloc fail for strm table");
+ return -ENOMEM;
+ }
+
+ /* Allocating memory for internal streamid table */
+ mpgmuxts->intn_strmtbl_kaddrs =
+ dma_alloc_coherent(mpgmuxts->dev,
+ buf_size, &mpgmuxts->intn_strmtbl_addrs,
+ GFP_KERNEL | GFP_DMA32);
+
+ if (!mpgmuxts->intn_strmtbl_kaddrs) {
+ dev_dbg(mpgmuxts->dev, "dmamem alloc fail for intr strm table");
+ goto exist_free;
+ }
+
+ return 0;
+exist_free:
+ xlnx_tsmux_ioctl_strmtbl_dealloc(mpgmuxts);
+
+ return -ENOMEM;
+}
+
+static int xlnx_tsmux_update_strminfo_table(struct xlnx_tsmux *mpgmuxts,
+ struct strc_strminfo new_strm_info)
+{
+ u32 i = 0;
+ struct stream_info *cptr;
+
+ cptr = (struct stream_info *)mpgmuxts->strmtbl_kaddrs;
+
+ if (new_strm_info.strmtbl_ctxt == ADD_TO_TBL) {
+ /* Finding free memory block and writing input data into the block*/
+ for (i = 0; i < mpgmuxts->num_strmnodes; i++, cptr++) {
+ if (!cptr->usageflag) {
+ cptr->pid = new_strm_info.pid;
+ cptr->continuity_counter = 0;
+ cptr->usageflag = XTSMUX_STRMBL_BUSY;
+ break;
+ }
+ }
+ } else if (new_strm_info.strmtbl_ctxt == DEL_FR_TBL) {
+ for (i = 0; i < mpgmuxts->num_strmnodes; i++, cptr++) {
+ if (cptr->pid == new_strm_info.pid) {
+ cptr->usageflag = XTSMUX_STRMBL_FREE;
+ break;
+ }
+ }
+ }
+
+ if (i == mpgmuxts->num_strmnodes)
+ return -EIO;
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_update_strmtbl(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ struct strc_strminfo new_strm_info;
+
+ ret = copy_from_user(&new_strm_info, arg, sizeof(struct strc_strminfo));
+ if (ret) {
+ dev_dbg(mpgmuxts->dev, "Reading strmInfo failed");
+ return -EFAULT;
+ }
+
+ return xlnx_tsmux_update_strminfo_table(mpgmuxts, new_strm_info);
+}
+
+static int xlnx_tsmux_enqueue_stream_context(struct xlnx_tsmux *mpgmuxts,
+ struct stream_context_in *stream_data)
+{
+ struct stream_context_node *new_strm_node, *prev_strm_node;
+ void *kaddr_strm_node;
+ dma_addr_t strm_phy_addr;
+ unsigned long flags;
+ u32 i;
+
+ kaddr_strm_node = dma_pool_alloc(mpgmuxts->strm_ctx_pool,
+ GFP_KERNEL | GFP_DMA32,
+ &strm_phy_addr);
+
+ new_strm_node = (struct stream_context_node *)kaddr_strm_node;
+ if (!new_strm_node)
+ return -ENOMEM;
+
+ /* update the stream context node */
+ wmb();
+ new_strm_node->element.command = stream_data->command;
+ new_strm_node->element.is_pcr_stream = stream_data->is_pcr_stream;
+ new_strm_node->element.stream_id = stream_data->stream_id;
+ new_strm_node->element.extended_stream_id =
+ stream_data->extended_stream_id;
+ new_strm_node->element.pid = stream_data->pid;
+ new_strm_node->element.size_data_in = stream_data->size_data_in;
+ new_strm_node->element.pts = stream_data->pts;
+ new_strm_node->element.dts = stream_data->dts;
+ new_strm_node->element.insert_pcr = stream_data->insert_pcr;
+ new_strm_node->element.pcr_base = stream_data->pcr_base;
+ new_strm_node->element.pcr_extension = stream_data->pcr_extension;
+
+ /* Check for external dma buffer */
+ if (!stream_data->is_dmabuf) {
+ new_strm_node->element.in_buf_pointer =
+ mpgmuxts->srcbuf_addrs[stream_data->srcbuf_id];
+ new_strm_node->element.dmabuf_id = 0;
+ } else {
+ for (i = 0; i < XTSMUX_MAXIN_STRM; i++) {
+ /* Searching dma buf info based on srcbuf_id */
+ if (stream_data->srcbuf_id ==
+ mpgmuxts->src_dmabufintl[i].dmabuf_fd) {
+ new_strm_node->element.in_buf_pointer =
+ mpgmuxts->src_dmabufintl[i].dmabuf_addr;
+ new_strm_node->element.dmabuf_id =
+ mpgmuxts->src_dmabufintl[i].buf_id;
+ break;
+ }
+ }
+
+ /* No dma buf found with srcbuf_id */
+ if (i == XTSMUX_MAXIN_STRM) {
+ dev_err(mpgmuxts->dev, "No DMA buffer with %d",
+ stream_data->srcbuf_id);
+ dma_pool_free(mpgmuxts->strm_ctx_pool, new_strm_node,
+ strm_phy_addr);
+ return -ENOMEM;
+ }
+ }
+
+ new_strm_node->strm_phy_addr = (u64)strm_phy_addr;
+ new_strm_node->node_number = mpgmuxts->stcxt_node_cnt + 1;
+ mpgmuxts->stcxt_node_cnt++;
+ new_strm_node->node_status = UPDATED_BY_DRIVER;
+ new_strm_node->error_code = NO_ERROR;
+ new_strm_node->tail_pointer = 0;
+
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ /* If it is not first stream in stream node linked list find
+ * physical address of current node and add to last node in list
+ */
+ if (!list_empty_careful(&mpgmuxts->strm_node)) {
+ prev_strm_node = list_last_entry(&mpgmuxts->strm_node,
+ struct stream_context_node,
+ node);
+ prev_strm_node->tail_pointer = new_strm_node->strm_phy_addr;
+ }
+ /* update the list and stream count */
+ wmb();
+ list_add_tail(&new_strm_node->node, &mpgmuxts->strm_node);
+ atomic_inc(&mpgmuxts->stream_count);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+
+ return 0;
+}
+
+static int xlnx_tsmux_set_stream_desc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ struct stream_context_in *stream_data;
+ int ret = 0;
+
+ stream_data = kzalloc(sizeof(*stream_data), GFP_KERNEL);
+ if (!stream_data)
+ return -ENOMEM;
+
+ ret = copy_from_user(stream_data, arg,
+ sizeof(struct stream_context_in));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "Failed to copy stream data from user");
+ ret = -EFAULT;
+ goto error_free;
+ }
+
+ ret = xlnx_tsmux_enqueue_stream_context(mpgmuxts, stream_data);
+
+error_free:
+ kfree(stream_data);
+
+ return ret;
+}
+
+static int xlnx_tsmux_ioctl_set_stream_context(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+
+ ret = xlnx_tsmux_set_stream_desc(mpgmuxts, arg);
+ if (ret < 0) {
+ dev_err(mpgmuxts->dev, "Setting stream descripter failed");
+ return ret;
+ }
+
+ return 0;
+}
+
+static enum xlnx_tsmux_status
+xlnx_tsmux_get_device_status(struct xlnx_tsmux *mpgmuxts)
+{
+ enum xlnx_tsmux_status ip_status;
+
+ ip_status = xlnx_tsmux_get_status(mpgmuxts);
+
+ if (ip_status == MPG2MUX_ERROR) {
+ dev_err(mpgmuxts->dev, "Failed to get device status");
+ return -EACCES;
+ }
+
+ if (ip_status == MPG2MUX_BUSY)
+ return -EBUSY;
+
+ return MPG2MUX_READY;
+}
+
+static int xlnx_tsmux_ioctl_start(struct xlnx_tsmux *mpgmuxts)
+{
+ enum xlnx_tsmux_status ip_stat;
+ int cnt;
+
+ /* get IP status */
+ ip_stat = xlnx_tsmux_get_device_status(mpgmuxts);
+ if (ip_stat != MPG2MUX_READY) {
+ dev_err(mpgmuxts->dev, "device is busy");
+ return ip_stat;
+ }
+
+ if (list_empty(&mpgmuxts->mux_node) ||
+ list_empty(&mpgmuxts->strm_node)) {
+ dev_err(mpgmuxts->dev, "No stream or mux to start device");
+ return -EIO;
+ }
+
+ cnt = atomic_read(&mpgmuxts->stream_count);
+ atomic_set(&mpgmuxts->intn_stream_count, cnt);
+
+ return xlnx_tsmux_start_muxer(mpgmuxts);
+}
+
+static void xlnx_tsmux_free_dmalloc(struct xlnx_tsmux *mpgmuxts)
+{
+ dma_pool_destroy(mpgmuxts->strm_ctx_pool);
+ dma_pool_destroy(mpgmuxts->mux_ctx_pool);
+}
+
+static int xlnx_tsmux_ioctl_stop(struct xlnx_tsmux *mpgmuxts)
+{
+ enum xlnx_tsmux_status ip_stat;
+ unsigned long flags;
+
+ ip_stat = xlnx_tsmux_get_device_status(mpgmuxts);
+ if (ip_stat != MPG2MUX_READY) {
+ dev_err(mpgmuxts->dev, "device is busy");
+ return ip_stat;
+ }
+
+ /* Free all driver allocated memory and reset linked list
+ * Reset IP registers
+ */
+ xlnx_tsmux_free_dmalloc(mpgmuxts);
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ INIT_LIST_HEAD(&mpgmuxts->strm_node);
+ INIT_LIST_HEAD(&mpgmuxts->mux_node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+ xlnx_tsmux_stop_muxer(mpgmuxts);
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_get_status(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ enum xlnx_tsmux_status ip_stat;
+
+ ip_stat = xlnx_tsmux_get_device_status(mpgmuxts);
+
+ ret = copy_to_user(arg, (void *)&ip_stat,
+ (unsigned long)(sizeof(enum xlnx_tsmux_status)));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "Unable to copy device status to user");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static int xlnx_tsmux_ioctl_get_outbufinfo(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+ int out_index;
+ struct out_buffer out_info;
+
+ out_info.buf_write = mpgmuxts->outbuf_written;
+ mpgmuxts->outbuf_written = 0;
+ out_index = atomic_read(&mpgmuxts->outbuf_idx);
+ if (out_index)
+ out_info.buf_id = 0;
+ else
+ out_info.buf_id = 1;
+
+ ret = copy_to_user(arg, (void *)&out_info,
+ (unsigned long)(sizeof(struct out_buffer)));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "Unable to copy outbuf info");
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static int xlnx_tsmux_enqueue_mux_context(struct xlnx_tsmux *mpgmuxts,
+ struct muxer_context_in *mux_data)
+{
+ struct muxer_context *new_mux_node;
+ u32 out_index;
+ void *kaddr_mux_node;
+ dma_addr_t mux_phy_addr;
+ unsigned long flags;
+ s32 i;
+
+ kaddr_mux_node = dma_pool_alloc(mpgmuxts->mux_ctx_pool,
+ GFP_KERNEL | GFP_DMA32,
+ &mux_phy_addr);
+
+ new_mux_node = (struct muxer_context *)kaddr_mux_node;
+ if (!new_mux_node)
+ return -EAGAIN;
+
+ new_mux_node->node_status = UPDATED_BY_DRIVER;
+ new_mux_node->mux_phy_addr = (u64)mux_phy_addr;
+
+ /* Check for external dma buffer */
+ if (!mux_data->is_dmabuf) {
+ out_index = 0;
+ new_mux_node->dst_buf_start_addr =
+ (u64)mpgmuxts->dstbuf_addrs[out_index];
+ new_mux_node->dst_buf_size = mpgmuxts->dstbuf_size;
+ if (out_index)
+ atomic_set(&mpgmuxts->outbuf_idx, 0);
+ else
+ atomic_set(&mpgmuxts->outbuf_idx, 1);
+ } else {
+ for (i = 0; i < XTSMUX_MAXOUT_STRM; i++) {
+ if (mux_data->dstbuf_id ==
+ mpgmuxts->dst_dmabufintl[i].dmabuf_fd) {
+ new_mux_node->dst_buf_start_addr =
+ mpgmuxts->dst_dmabufintl[i].dmabuf_addr;
+ break;
+ }
+ }
+ if (i == XTSMUX_MAXOUT_STRM) {
+ dev_err(mpgmuxts->dev, "No DMA buffer with %d",
+ mux_data->dstbuf_id);
+ dma_pool_free(mpgmuxts->mux_ctx_pool, new_mux_node,
+ mux_phy_addr);
+ return -ENOMEM;
+ }
+ new_mux_node->dst_buf_size = mux_data->dmabuf_size;
+ }
+ new_mux_node->error_code = MUXER_NO_ERROR;
+
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ list_add_tail(&new_mux_node->node, &mpgmuxts->mux_node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+
+ return 0;
+}
+
+static int xlnx_tsmux_set_mux_desc(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ struct muxer_context_in *mux_data;
+ int ret = 0;
+
+ mux_data = kzalloc(sizeof(*mux_data), GFP_KERNEL);
+ if (!mux_data)
+ return -ENOMEM;
+
+ ret = copy_from_user(mux_data, arg,
+ sizeof(struct muxer_context_in));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "failed to copy muxer data from user");
+ ret = -EFAULT;
+ goto kmem_free;
+ }
+
+ ret = xlnx_tsmux_enqueue_mux_context(mpgmuxts, mux_data);
+
+kmem_free:
+ kfree(mux_data);
+
+ return ret;
+}
+
+static int xlnx_tsmux_ioctl_set_mux_context(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ int ret;
+
+ ret = xlnx_tsmux_set_mux_desc(mpgmuxts, arg);
+ if (ret < 0)
+ dev_dbg(mpgmuxts->dev, "Setting mux context failed");
+
+ return ret;
+}
+
+static int xlnx_tsmux_ioctl_verify_dmabuf(struct xlnx_tsmux *mpgmuxts,
+ void __user *arg)
+{
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ struct xlnx_tsmux_dmabuf_info *dbuf_info;
+ s32 i;
+ int ret = 0;
+
+ dbuf_info = kzalloc(sizeof(*dbuf_info), GFP_KERNEL);
+ if (!dbuf_info)
+ return -ENOMEM;
+
+ ret = copy_from_user(dbuf_info, arg,
+ sizeof(struct xlnx_tsmux_dmabuf_info));
+ if (ret) {
+ dev_err(mpgmuxts->dev, "Failed to copy from user");
+ ret = -EFAULT;
+ goto dmak_free;
+ }
+ if (dbuf_info->dir != DMA_TO_MPG2MUX &&
+ dbuf_info->dir != DMA_FROM_MPG2MUX) {
+ dev_err(mpgmuxts->dev, "Incorrect DMABUF direction %d",
+ dbuf_info->dir);
+ ret = -EINVAL;
+ goto dmak_free;
+ }
+ dbuf = dma_buf_get(dbuf_info->buf_fd);
+ if (IS_ERR(dbuf)) {
+ dev_err(mpgmuxts->dev, "dma_buf_get fail fd %d direction %d",
+ dbuf_info->buf_fd, dbuf_info->dir);
+ ret = PTR_ERR(dbuf);
+ goto dmak_free;
+ }
+ attach = dma_buf_attach(dbuf, mpgmuxts->dev);
+ if (IS_ERR(attach)) {
+ dev_err(mpgmuxts->dev, "dma_buf_attach fail fd %d dir %d",
+ dbuf_info->buf_fd, dbuf_info->dir);
+ ret = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+ sgt = dma_buf_map_attachment(attach,
+ (enum dma_data_direction)(dbuf_info->dir));
+ if (IS_ERR(sgt)) {
+ dev_err(mpgmuxts->dev, "dma_buf_map_attach fail fd %d dir %d",
+ dbuf_info->buf_fd, dbuf_info->dir);
+ ret = PTR_ERR(sgt);
+ goto err_dmabuf_detach;
+ }
+
+ if (sgt->nents > 1) {
+ ret = -EIO;
+ dev_dbg(mpgmuxts->dev, "Not contig nents %d fd %d direction %d",
+ sgt->nents, dbuf_info->buf_fd, dbuf_info->dir);
+ goto err_dmabuf_unmap_attachment;
+ }
+ dev_dbg(mpgmuxts->dev, "dmabuf %s is physically contiguous",
+ (dbuf_info->dir ==
+ DMA_TO_MPG2MUX ? "Source" : "Destination"));
+
+ if (dbuf_info->dir == DMA_TO_MPG2MUX) {
+ for (i = 0; i < XTSMUX_MAXIN_STRM; i++) {
+ if (!mpgmuxts->src_dmabufintl[i].buf_id) {
+ mpgmuxts->src_dmabufintl[i].dbuf = dbuf;
+ mpgmuxts->src_dmabufintl[i].attach = attach;
+ mpgmuxts->src_dmabufintl[i].sgt = sgt;
+ mpgmuxts->src_dmabufintl[i].dmabuf_addr =
+ sg_dma_address(sgt->sgl);
+ mpgmuxts->src_dmabufintl[i].dmabuf_fd =
+ dbuf_info->buf_fd;
+ mpgmuxts->src_dmabufintl[i].buf_id = i + 1;
+ dev_dbg(mpgmuxts->dev,
+ "%s: phy-addr=0x%llx for src dmabuf=%d",
+ __func__,
+ mpgmuxts->src_dmabufintl[i].dmabuf_addr,
+ mpgmuxts->src_dmabufintl[i].dmabuf_fd);
+ break;
+ }
+ }
+ /* External src streams more than XTSMUX_MAXIN_STRM
+ * can not be handled
+ */
+ if (i == XTSMUX_MAXIN_STRM) {
+ ret = -EIO;
+ dev_dbg(mpgmuxts->dev, "src DMA bufs more than %d",
+ XTSMUX_MAXIN_STRM);
+ goto err_dmabuf_unmap_attachment;
+ }
+ } else {
+ for (i = 0; i < XTSMUX_MAXOUT_STRM; i++) {
+ if (!mpgmuxts->dst_dmabufintl[i].buf_id) {
+ mpgmuxts->dst_dmabufintl[i].dbuf = dbuf;
+ mpgmuxts->dst_dmabufintl[i].attach = attach;
+ mpgmuxts->dst_dmabufintl[i].sgt = sgt;
+ mpgmuxts->dst_dmabufintl[i].dmabuf_addr =
+ sg_dma_address(sgt->sgl);
+ mpgmuxts->dst_dmabufintl[i].dmabuf_fd =
+ dbuf_info->buf_fd;
+ mpgmuxts->dst_dmabufintl[i].buf_id = i + 1;
+ dev_dbg(mpgmuxts->dev,
+ "phy-addr=0x%llx for src dmabuf=%d",
+ mpgmuxts->dst_dmabufintl[i].dmabuf_addr,
+ mpgmuxts->dst_dmabufintl[i].dmabuf_fd);
+ break;
+ }
+ }
+ /* External dst streams more than XTSMUX_MAXOUT_STRM
+ * can not be handled
+ */
+ if (i == XTSMUX_MAXOUT_STRM) {
+ ret = -EIO;
+ dev_dbg(mpgmuxts->dev, "dst DMA bufs more than %d",
+ XTSMUX_MAXOUT_STRM);
+ goto err_dmabuf_unmap_attachment;
+ }
+ }
+
+ return 0;
+
+err_dmabuf_unmap_attachment:
+ dma_buf_unmap_attachment(attach, sgt,
+ (enum dma_data_direction)dbuf_info->dir);
+err_dmabuf_detach:
+ dma_buf_detach(dbuf, attach);
+err_dmabuf_put:
+ dma_buf_put(dbuf);
+dmak_free:
+ kfree(dbuf_info);
+
+ return ret;
+}
+
+static long xlnx_tsmux_ioctl(struct file *fptr,
+ unsigned int cmd, unsigned long data)
+{
+ struct xlnx_tsmux *mpgmuxts;
+ void __user *arg;
+ int ret;
+
+ mpgmuxts = fptr->private_data;
+ if (!mpgmuxts)
+ return -EINVAL;
+
+ arg = (void __user *)data;
+ switch (cmd) {
+ case MPG2MUX_INBUFALLOC:
+ ret = xlnx_tsmux_ioctl_srcbuf_alloc(mpgmuxts, arg);
+ break;
+ case MPG2MUX_INBUFDEALLOC:
+ ret = xlnx_tsmux_ioctl_srcbuf_dealloc(mpgmuxts);
+ break;
+ case MPG2MUX_OUTBUFALLOC:
+ ret = xlnx_tsmux_ioctl_dstbuf_alloc(mpgmuxts, arg);
+ break;
+ case MPG2MUX_OUTBUFDEALLOC:
+ ret = xlnx_tsmux_ioctl_dstbuf_dealloc(mpgmuxts);
+ break;
+ case MPG2MUX_STBLALLOC:
+ ret = xlnx_tsmux_ioctl_strmtbl_alloc(mpgmuxts, arg);
+ break;
+ case MPG2MUX_STBLDEALLOC:
+ ret = xlnx_tsmux_ioctl_strmtbl_dealloc(mpgmuxts);
+ break;
+ case MPG2MUX_TBLUPDATE:
+ ret = xlnx_tsmux_ioctl_update_strmtbl(mpgmuxts, arg);
+ break;
+ case MPG2MUX_SETSTRM:
+ ret = xlnx_tsmux_ioctl_set_stream_context(mpgmuxts, arg);
+ break;
+ case MPG2MUX_START:
+ ret = xlnx_tsmux_ioctl_start(mpgmuxts);
+ break;
+ case MPG2MUX_STOP:
+ ret = xlnx_tsmux_ioctl_stop(mpgmuxts);
+ break;
+ case MPG2MUX_STATUS:
+ ret = xlnx_tsmux_ioctl_get_status(mpgmuxts, arg);
+ break;
+ case MPG2MUX_GETOUTBUF:
+ ret = xlnx_tsmux_ioctl_get_outbufinfo(mpgmuxts, arg);
+ break;
+ case MPG2MUX_SETMUX:
+ ret = xlnx_tsmux_ioctl_set_mux_context(mpgmuxts, arg);
+ break;
+ case MPG2MUX_VDBUF:
+ ret = xlnx_tsmux_ioctl_verify_dmabuf(mpgmuxts, arg);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret < 0)
+ dev_err(mpgmuxts->dev, "ioctl %d failed\n", cmd);
+
+ return ret;
+}
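+
+/*
+ * Minimal user-space usage sketch for the ioctl interface above. This is
+ * only an illustration: the structure types come from
+ * uapi/linux/xlnx_mpg2tsmux_interface.h and the variable names are made up
+ * for the example.
+ *
+ *   struct strc_bufs_info bufs = { .num_buf = 2, .buf_size = 1024 * 1024 };
+ *   u16 num_nodes = 4;
+ *
+ *   ioctl(fd, MPG2MUX_INBUFALLOC, &bufs);      allocate source buffers
+ *   ioctl(fd, MPG2MUX_OUTBUFALLOC, &bufs);     allocate destination buffers
+ *   ioctl(fd, MPG2MUX_STBLALLOC, &num_nodes);  allocate the stream-id table
+ *   ioctl(fd, MPG2MUX_TBLUPDATE, &strm_info);  add a PID to the table
+ *   ioctl(fd, MPG2MUX_SETSTRM, &stream_ctx);   queue a stream descriptor
+ *   ioctl(fd, MPG2MUX_SETMUX, &mux_ctx);       queue an output descriptor
+ *   ioctl(fd, MPG2MUX_START, 0);               start the muxer
+ *   poll(&pfd, 1, -1);                         wait for the done interrupt
+ *   ioctl(fd, MPG2MUX_GETOUTBUF, &out_info);   bytes written and buffer id
+ */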
+
+static int xlnx_tsmux_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct xlnx_tsmux *mpgmuxts = fp->private_data;
+ int ret, buf_id;
+
+ if (!mpgmuxts)
+ return -ENODEV;
+
+ buf_id = vma->vm_pgoff;
+
+ if (buf_id < mpgmuxts->num_inbuf) {
+ if (!mpgmuxts->srcbuf_addrs[buf_id]) {
+ dev_err(mpgmuxts->dev, "Mem not allocated for src %d",
+ buf_id);
+ return -EINVAL;
+ }
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ ret = remap_pfn_range(vma, vma->vm_start,
+ mpgmuxts->srcbuf_addrs[buf_id] >>
+ PAGE_SHIFT, vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ if (ret) {
+ dev_err(mpgmuxts->dev, "mmap fail bufid = %d", buf_id);
+ return -EINVAL;
+ }
+ } else if (buf_id < (mpgmuxts->num_inbuf + mpgmuxts->num_outbuf)) {
+ buf_id -= mpgmuxts->num_inbuf;
+ if (!mpgmuxts->dstbuf_addrs[buf_id]) {
+ dev_err(mpgmuxts->dev, "Mem not allocated fordst %d",
+ buf_id);
+ return -EINVAL;
+ }
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ ret =
+ remap_pfn_range(vma, vma->vm_start,
+ mpgmuxts->dstbuf_addrs[buf_id] >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+ if (ret) {
+ dev_err(mpgmuxts->dev, "mmap fail buf_id = %d", buf_id);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(mpgmuxts->dev, "Wrong buffer id -> %d buf", buf_id);
+ return -EINVAL;
+ }
+ fp->private_data = mpgmuxts;
+ return 0;
+}
+
+static __poll_t xlnx_tsmux_poll(struct file *fptr, poll_table *wait)
+{
+ struct xlnx_tsmux *mpgmuxts = fptr->private_data;
+
+ poll_wait(fptr, &mpgmuxts->waitq, wait);
+
+ if (xlnx_tsmux_read(mpgmuxts, XTSMUX_LAST_NODE_PROCESSED))
+ return EPOLLIN | EPOLLPRI;
+
+ return 0;
+}
+
+static const struct file_operations mpg2mux_fops = {
+ .open = xlnx_tsmux_open,
+ .release = xlnx_tsmux_release,
+ .unlocked_ioctl = xlnx_tsmux_ioctl,
+ .mmap = xlnx_tsmux_mmap,
+ .poll = xlnx_tsmux_poll,
+};
+
+static void xlnx_tsmux_free_dmabufintl(struct xlnx_tsmux_dmabufintl
+ *intl_dmabuf, u16 dmabuf_id,
+ enum xlnx_tsmux_dma_dir dir)
+{
+ unsigned int i = dmabuf_id - 1;
+
+ if (intl_dmabuf[i].dmabuf_fd) {
+ dma_buf_unmap_attachment(intl_dmabuf[i].attach,
+ intl_dmabuf[i].sgt,
+ (enum dma_data_direction)dir);
+ dma_buf_detach(intl_dmabuf[i].dbuf, intl_dmabuf[i].attach);
+ dma_buf_put(intl_dmabuf[i].dbuf);
+ intl_dmabuf[i].dmabuf_fd = 0;
+ intl_dmabuf[i].buf_id = 0;
+ }
+}
+
+static int xlnx_tsmux_update_complete(struct xlnx_tsmux *mpgmuxts)
+{
+ struct stream_context_node *tstrm_node;
+ struct muxer_context *temp_mux;
+ u32 num_strm_node, i;
+ u32 num_strms;
+ unsigned long flags;
+
+ num_strm_node = xlnx_tsmux_read(mpgmuxts, XTSMUX_LAST_NODE_PROCESSED);
+ if (num_strm_node == 0)
+ return -1;
+
+ /* Removing completed stream nodes from the list */
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ num_strms = atomic_read(&mpgmuxts->intn_stream_count);
+ for (i = 0; i < num_strms; i++) {
+ tstrm_node =
+ list_first_entry(&mpgmuxts->strm_node,
+ struct stream_context_node, node);
+ list_del(&tstrm_node->node);
+ atomic_dec(&mpgmuxts->stream_count);
+ if (tstrm_node->element.dmabuf_id)
+ xlnx_tsmux_free_dmabufintl
+ (mpgmuxts->src_dmabufintl,
+ tstrm_node->element.dmabuf_id,
+ DMA_TO_MPG2MUX);
+ if (tstrm_node->node_number == num_strm_node) {
+ dma_pool_free(mpgmuxts->strm_ctx_pool, tstrm_node,
+ tstrm_node->strm_phy_addr);
+ break;
+ }
+ }
+
+ /* Removing completed mux nodes from the list */
+ temp_mux = list_first_entry(&mpgmuxts->mux_node, struct muxer_context,
+ node);
+ mpgmuxts->outbuf_written = temp_mux->dst_buf_written;
+
+ list_del(&temp_mux->node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+
+ return 0;
+}
+
+static irqreturn_t xlnx_tsmux_intr_handler(int irq, void *ctx)
+{
+ u32 status;
+ struct xlnx_tsmux *mpgmuxts = (struct xlnx_tsmux *)ctx;
+
+ status = xlnx_tsmux_read(mpgmuxts, XTSMUX_ISR_STAT);
+ status &= XTSMUX_IER_ENABLE_MASK;
+
+ if (status) {
+ xlnx_tsmux_write(mpgmuxts, XTSMUX_ISR_STAT, status);
+ xlnx_tsmux_update_complete(mpgmuxts);
+ if (mpgmuxts->outbuf_written)
+ wake_up_interruptible(&mpgmuxts->waitq);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int xlnx_tsmux_probe(struct platform_device *pdev)
+{
+ struct xlnx_tsmux *mpgmuxts;
+ struct device *dev = &pdev->dev;
+ struct device *dev_crt;
+ struct resource *dev_resrc;
+ int ret = -1;
+ unsigned long flags;
+
+ /* DRIVER_MAX_DEV is to limit the number of instances, but
+ * Initial version is tested with single instance only.
+ * TODO: replace atomic_read with ida_simple_get
+ */
+ if (atomic_read(&xlnx_tsmux_ndevs) >= DRIVER_MAX_DEV) {
+ dev_err(&pdev->dev, "Limit of %d number of device is reached",
+ DRIVER_MAX_DEV);
+ return -EIO;
+ }
+
+ mpgmuxts = devm_kzalloc(&pdev->dev, sizeof(struct xlnx_tsmux),
+ GFP_KERNEL);
+ if (!mpgmuxts)
+ return -ENOMEM;
+ mpgmuxts->dev = &pdev->dev;
+ dev_resrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mpgmuxts->iomem = devm_ioremap_resource(mpgmuxts->dev, dev_resrc);
+ if (IS_ERR(mpgmuxts->iomem))
+ return PTR_ERR(mpgmuxts->iomem);
+
+ mpgmuxts->irq = irq_of_parse_and_map(mpgmuxts->dev->of_node, 0);
+ if (!mpgmuxts->irq) {
+ dev_err(mpgmuxts->dev, "Unable to get IRQ");
+ return -EINVAL;
+ }
+
+ mpgmuxts->ap_clk = devm_clk_get(dev, "ap_clk");
+ if (IS_ERR(mpgmuxts->ap_clk)) {
+ ret = PTR_ERR(mpgmuxts->ap_clk);
+ dev_err(dev, "failed to get ap clk %d\n", ret);
+ return ret;
+ }
+ ret = clk_prepare_enable(mpgmuxts->ap_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ap clk %d\n", ret);
+ return ret;
+ }
+
+ /* Initializing variables used in Muxer */
+ spin_lock_init(&mpgmuxts->lock);
+ spin_lock_irqsave(&mpgmuxts->lock, flags);
+ INIT_LIST_HEAD(&mpgmuxts->strm_node);
+ INIT_LIST_HEAD(&mpgmuxts->mux_node);
+ spin_unlock_irqrestore(&mpgmuxts->lock, flags);
+ mpgmuxts->strm_ctx_pool = dma_pool_create("strcxt_pool", mpgmuxts->dev,
+ XTSMUX_POOL_SIZE,
+ XTSMUX_POOL_ALIGN,
+ XTSMUX_POOL_SIZE *
+ XTSMUX_MAXIN_TLSTRM);
+ if (!mpgmuxts->strm_ctx_pool) {
+ dev_err(mpgmuxts->dev, "Allocation fail for strm ctx pool");
+ ret = -ENOMEM;
+ goto err_disable_ap_clk;
+ }
+
+ mpgmuxts->mux_ctx_pool = dma_pool_create("muxcxt_pool", mpgmuxts->dev,
+ XTSMUX_POOL_SIZE,
+ XTSMUX_POOL_SIZE,
+ XTSMUX_POOL_SIZE *
+ XTSMUX_MAXIN_TLSTRM);
+
+ if (!mpgmuxts->mux_ctx_pool) {
+ dev_err(mpgmuxts->dev, "Allocation fail for mux ctx pool");
+ ret = -ENOMEM;
+ goto err_strm_pool;
+ }
+
+ init_waitqueue_head(&mpgmuxts->waitq);
+
+ ret = devm_request_irq(mpgmuxts->dev, mpgmuxts->irq,
+ xlnx_tsmux_intr_handler, IRQF_SHARED,
+ DRIVER_NAME, mpgmuxts);
+
+ if (ret < 0) {
+ dev_err(mpgmuxts->dev, "Unable to register IRQ");
+ goto err_mux_pool;
+ }
+
+ cdev_init(&mpgmuxts->chdev, &mpg2mux_fops);
+ mpgmuxts->chdev.owner = THIS_MODULE;
+ mpgmuxts->id = atomic_read(&xlnx_tsmux_ndevs);
+ ret = cdev_add(&mpgmuxts->chdev, MKDEV(MAJOR(xlnx_tsmux_devt),
+ mpgmuxts->id), 1);
+
+ if (ret < 0) {
+ dev_err(mpgmuxts->dev, "cdev_add failed");
+ goto err_mux_pool;
+ }
+
+ dev_crt = device_create(xlnx_tsmux_class, mpgmuxts->dev,
+ MKDEV(MAJOR(xlnx_tsmux_devt), mpgmuxts->id),
+ mpgmuxts, "mpgmuxts%d", mpgmuxts->id);
+
+ if (IS_ERR(dev_crt)) {
+ ret = PTR_ERR(dev_crt);
+ dev_err(mpgmuxts->dev, "Unable to create device");
+ goto err_cdev_del;
+ }
+
+ platform_set_drvdata(pdev, mpgmuxts);
+
+ dev_info(mpgmuxts->dev,
+ "Xilinx mpeg2 TS muxer device probe completed");
+
+ atomic_inc(&xlnx_tsmux_ndevs);
+
+ return 0;
+
+err_del_cdev:
+ cdev_del(&mpgmuxts->chdev);
+err_free_mux_pool:
+ dma_pool_destroy(mpgmuxts->mux_ctx_pool);
+err_free_strm_pool:
+ dma_pool_destroy(mpgmuxts->strm_ctx_pool);
+err_disable_clk:
+ clk_disable_unprepare(mpgmuxts->ap_clk);
+
+ return ret;
+}
+
+static int xlnx_tsmux_remove(struct platform_device *pdev)
+{
+ struct xlnx_tsmux *mpgmuxts;
+
+ mpgmuxts = platform_get_drvdata(pdev);
+ if (!mpgmuxts || !xlnx_tsmux_class)
+ return -EIO;
+ device_destroy(xlnx_tsmux_class, MKDEV(MAJOR(xlnx_tsmux_devt),
+ mpgmuxts->id));
+ cdev_del(&mpgmuxts->chdev);
+
+ dma_pool_destroy(mpgmuxts->mux_ctx_pool);
+ dma_pool_destroy(mpgmuxts->strm_ctx_pool);
+ atomic_dec(&xlnx_tsmux_ndevs);
+ clk_disable_unprepare(mpgmuxts->ap_clk);
+
+ return 0;
+}
+
+static const struct of_device_id xlnx_tsmux_of_match[] = {
+ { .compatible = "xlnx,tsmux-1.0", },
+ { }
+};
+
+static struct platform_driver xlnx_tsmux_driver = {
+ .probe = xlnx_tsmux_probe,
+ .remove = xlnx_tsmux_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xlnx_tsmux_of_match,
+ },
+};
+
+static int __init xlnx_tsmux_mod_init(void)
+{
+ int err;
+
+ xlnx_tsmux_class = class_create(THIS_MODULE, DRIVER_NAME);
+ if (IS_ERR(xlnx_tsmux_class)) {
+ pr_err("%s : Unable to create driver class", __func__);
+ return PTR_ERR(xlnx_tsmux_class);
+ }
+
+ err = alloc_chrdev_region(&xlnx_tsmux_devt, 0, DRIVER_MAX_DEV,
+ DRIVER_NAME);
+ if (err < 0) {
+ pr_err("%s : Unable to get major number", __func__);
+ goto err_class;
+ }
+
+ err = platform_driver_register(&xlnx_tsmux_driver);
+ if (err < 0) {
+ pr_err("%s : Unable to register %s driver", __func__,
+ DRIVER_NAME);
+ goto err_driver;
+ }
+
+ return 0;
+
+err_driver:
+ unregister_chrdev_region(xlnx_tsmux_devt, DRIVER_MAX_DEV);
+err_class:
+ class_destroy(xlnx_tsmux_class);
+
+ return err;
+}
+
+static void __exit xlnx_tsmux_mod_exit(void)
+{
+ platform_driver_unregister(&xlnx_tsmux_driver);
+ unregister_chrdev_region(xlnx_tsmux_devt, DRIVER_MAX_DEV);
+ class_destroy(xlnx_tsmux_class);
+ xlnx_tsmux_class = NULL;
+}
+
+module_init(xlnx_tsmux_mod_init);
+module_exit(xlnx_tsmux_mod_exit);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx mpeg2 transport stream muxer IP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/xlnxsync/Kconfig b/drivers/staging/xlnxsync/Kconfig
new file mode 100644
index 000000000000..08e73384dc94
--- /dev/null
+++ b/drivers/staging/xlnxsync/Kconfig
@@ -0,0 +1,11 @@
+config XLNX_SYNC
+ tristate "Xilinx Synchronizer"
+ depends on ARCH_ZYNQMP
+ help
+ This driver is developed for the Xilinx Synchronizer IP. It monitors
+ the AXI addresses written by the producer and triggers the consumer
+ to start earlier, thereby reducing the latency to process the data.
+
+ To compile this driver as a module, choose M here.
+ If unsure, choose N.
diff --git a/drivers/staging/xlnxsync/MAINTAINERS b/drivers/staging/xlnxsync/MAINTAINERS
new file mode 100644
index 000000000000..e2d720419783
--- /dev/null
+++ b/drivers/staging/xlnxsync/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX SYNCHRONIZER DRIVER
+M: Vishal Sagar <vishal.sagar@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnxsync
diff --git a/drivers/staging/xlnxsync/Makefile b/drivers/staging/xlnxsync/Makefile
new file mode 100644
index 000000000000..b126a36da37c
--- /dev/null
+++ b/drivers/staging/xlnxsync/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XLNX_SYNC) += xlnxsync.o
diff --git a/drivers/staging/xlnxsync/dt-binding.txt b/drivers/staging/xlnxsync/dt-binding.txt
new file mode 100644
index 000000000000..f1ed9d724de8
--- /dev/null
+++ b/drivers/staging/xlnxsync/dt-binding.txt
@@ -0,0 +1,34 @@
+Xilinx Synchronizer
+-------------------
+
+The Xilinx Synchronizer is used for buffer synchronization between
+producer and consumer blocks. It does so by tapping onto the bus on which
+the producer block writes frame data to memory and the consumer block
+reads the frame data from memory.
+
+It can work on the encode path with a maximum of 4 channels or on the
+decode path with a maximum of 2 channels.
+
+Required properties:
+- compatible : Must contain "xlnx,sync-ip-1.0"
+- reg: Physical base address and length of the registers set for the device.
+- interrupts: Contains the interrupt line number.
+- interrupt-parent: phandle to interrupt controller.
+- clock-names: The input clock names for axilite, producer and consumer clock.
+- clocks: Reference to the clock that drives the axi interface, producer and consumer.
+- xlnx,num-chan: Range from 1 to 2 for decode.
+ Range from 1 to 4 for encode.
+
+Optional properties:
+- xlnx,encode: Present if IP configured for encoding path, else absent.
+
+Example:
+
+v_sync_vcu: subframe_sync_vcu@a00e0000 {
+ compatible = "xlnx,sync-ip-1.0";
+ reg = <0x0 0xa00e0000 0x0 0x10000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 96 4>;
+ clock-names = "s_axi_ctrl_aclk", "s_axi_mm_p_aclk", "s_axi_mm_aclk";
+ clocks = <&vid_s_axi_clk>, <&vid_stream_clk>, <&vid_stream_clk>;
+ xlnx,num-chan = <4>;
+ xlnx,encode;
+};
diff --git a/drivers/staging/xlnxsync/xlnxsync.c b/drivers/staging/xlnxsync/xlnxsync.c
new file mode 100644
index 000000000000..2de6714fe33c
--- /dev/null
+++ b/drivers/staging/xlnxsync/xlnxsync.c
@@ -0,0 +1,1290 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Synchronizer IP driver
+ *
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Author: Vishal Sagar <vishal.sagar@xilinx.com>
+ *
+ * This driver is used to control the Xilinx Synchronizer IP
+ * to achieve sub frame latency for encode and decode with VCU.
+ * This is done by monitoring the address lines for specific values.
+ */
+
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/xlnxsync.h>
+
+/* Register offsets and bit masks */
+#define XLNXSYNC_CTRL_REG 0x00
+#define XLNXSYNC_ISR_REG 0x04
+/* Producer Luma/Chroma Start/End Address */
+#define XLNXSYNC_PL_START_LO_REG 0x08
+#define XLNXSYNC_PL_START_HI_REG 0x0C
+#define XLNXSYNC_PC_START_LO_REG 0x20
+#define XLNXSYNC_PC_START_HI_REG 0x24
+#define XLNXSYNC_PL_END_LO_REG 0x38
+#define XLNXSYNC_PL_END_HI_REG 0x3C
+#define XLNXSYNC_PC_END_LO_REG 0x50
+#define XLNXSYNC_PC_END_HI_REG 0x54
+#define XLNXSYNC_L_MARGIN_REG 0x68
+#define XLNXSYNC_C_MARGIN_REG 0x74
+#define XLNXSYNC_IER_REG 0x80
+#define XLNXSYNC_DBG_REG 0x84
+/* Consumer Luma/Chroma Start/End Address */
+#define XLNXSYNC_CL_START_LO_REG 0x88
+#define XLNXSYNC_CL_START_HI_REG 0x8C
+#define XLNXSYNC_CC_START_LO_REG 0xA0
+#define XLNXSYNC_CC_START_HI_REG 0xA4
+#define XLNXSYNC_CL_END_LO_REG 0xB8
+#define XLNXSYNC_CL_END_HI_REG 0xBC
+#define XLNXSYNC_CC_END_LO_REG 0xD0
+#define XLNXSYNC_CC_END_HI_REG 0xD4
+
+/* Luma/Chroma Core offset registers */
+#define XLNXSYNC_LCOREOFF_REG 0x400
+#define XLNXSYNC_CCOREOFF_REG 0x410
+#define XLNXSYNC_COREOFF_NEXT 0x4
+
+#define XLNXSYNC_CTRL_ENCDEC_MASK BIT(0)
+#define XLNXSYNC_CTRL_ENABLE_MASK BIT(1)
+#define XLNXSYNC_CTRL_INTR_EN_MASK BIT(2)
+#define XLNXSYNC_CTRL_SOFTRESET BIT(3)
+
+#define XLNXSYNC_ISR_SYNC_FAIL_MASK BIT(0)
+#define XLNXSYNC_ISR_WDG_ERR_MASK BIT(1)
+/* Producer related */
+#define XLNXSYNC_ISR_PLDONE_SHIFT (2)
+#define XLNXSYNC_ISR_PLDONE_MASK GENMASK(3, 2)
+#define XLNXSYNC_ISR_PLSKIP_MASK BIT(4)
+#define XLNXSYNC_ISR_PLVALID_MASK BIT(5)
+#define XLNXSYNC_ISR_PCDONE_SHIFT (6)
+#define XLNXSYNC_ISR_PCDONE_MASK GENMASK(7, 6)
+#define XLNXSYNC_ISR_PCSKIP_MASK BIT(8)
+#define XLNXSYNC_ISR_PCVALID_MASK BIT(9)
+/* Consumer related */
+#define XLNXSYNC_ISR_CLDONE_SHIFT (10)
+#define XLNXSYNC_ISR_CLDONE_MASK GENMASK(11, 10)
+#define XLNXSYNC_ISR_CLSKIP_MASK BIT(12)
+#define XLNXSYNC_ISR_CLVALID_MASK BIT(13)
+#define XLNXSYNC_ISR_CCDONE_SHIFT (14)
+#define XLNXSYNC_ISR_CCDONE_MASK GENMASK(15, 14)
+#define XLNXSYNC_ISR_CCSKIP_MASK BIT(16)
+#define XLNXSYNC_ISR_CCVALID_MASK BIT(17)
+
+#define XLNXSYNC_ISR_LDIFF BIT(18)
+#define XLNXSYNC_ISR_CDIFF BIT(19)
+
+/* bit 44 of start address */
+#define XLNXSYNC_FB_VALID_MASK BIT(12)
+#define XLNXSYNC_FB_HI_ADDR_MASK GENMASK(11, 0)
+
+#define XLNXSYNC_IER_SYNC_FAIL_MASK BIT(0)
+#define XLNXSYNC_IER_WDG_ERR_MASK BIT(1)
+/* Producer */
+#define XLNXSYNC_IER_PLVALID_MASK BIT(5)
+#define XLNXSYNC_IER_PCVALID_MASK BIT(9)
+/* Consumer */
+#define XLNXSYNC_IER_CLVALID_MASK BIT(13)
+#define XLNXSYNC_IER_CCVALID_MASK BIT(17)
+/* Diff */
+#define XLNXSYNC_IER_LDIFF BIT(18)
+#define XLNXSYNC_IER_CDIFF BIT(19)
+
+#define XLNXSYNC_IER_ALL_MASK (XLNXSYNC_IER_SYNC_FAIL_MASK |\
+ XLNXSYNC_IER_WDG_ERR_MASK |\
+ XLNXSYNC_IER_PLVALID_MASK |\
+ XLNXSYNC_IER_PCVALID_MASK |\
+ XLNXSYNC_IER_CLVALID_MASK |\
+ XLNXSYNC_IER_CCVALID_MASK |\
+ XLNXSYNC_IER_LDIFF |\
+ XLNXSYNC_IER_CDIFF)
+
+/* Other macros */
+#define XLNXSYNC_CHAN_OFFSET 0x100
+
+#define XLNXSYNC_DEVNAME_LEN (32)
+
+#define XLNXSYNC_DRIVER_NAME "xlnxsync"
+#define XLNXSYNC_DRIVER_VERSION "0.1"
+
+#define XLNXSYNC_DEV_MAX 256
+
+/* Module Parameters */
+static struct class *xlnxsync_class;
+static dev_t xlnxsync_devt;
+/* Used to keep track of sync devices */
+static DEFINE_IDA(xs_ida);
+
+/**
+ * struct xlnxsync_device - Xilinx Synchronizer struct
+ * @chdev: Character device driver struct
+ * @dev: Pointer to device
+ * @iomem: Pointer to the register space
+ * @sync_mutex: Mutex used to serialize ioctl calls
+ * @wq_fbdone: wait queue for frame buffer done events
+ * @wq_error: wait queue for error events
+ * @l_done: Luma done result array
+ * @c_done: Chroma done result array
+ * @sync_err: Capture synchronization error per channel
+ * @wdg_err: Capture watchdog error per channel
+ * @ldiff_err: Luma buffer diff > 1
+ * @cdiff_err: Chroma buffer diff > 1
+ * @axi_clk: Pointer to clock structure for axilite clock
+ * @p_clk: Pointer to clock structure for producer clock
+ * @c_clk: Pointer to clock structure for consumer clock
+ * @user_count: Usage count
+ * @reserved: Channel reserved status
+ * @irq: IRQ number
+ * @irq_lock: Spinlock used to protect access to sync and watchdog error
+ * @minor: device id count
+ * @config: IP config struct
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xlnxsync_device {
+ struct cdev chdev;
+ struct device *dev;
+ void __iomem *iomem;
+ /* sync_mutex is used to serialize ioctl calls */
+ struct mutex sync_mutex;
+ wait_queue_head_t wq_fbdone;
+ wait_queue_head_t wq_error;
+ bool l_done[XLNXSYNC_MAX_ENC_CHAN][XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+ bool c_done[XLNXSYNC_MAX_ENC_CHAN][XLNXSYNC_BUF_PER_CHAN][XLNXSYNC_IO];
+ bool sync_err[XLNXSYNC_MAX_ENC_CHAN];
+ bool wdg_err[XLNXSYNC_MAX_ENC_CHAN];
+ bool ldiff_err[XLNXSYNC_MAX_ENC_CHAN];
+ bool cdiff_err[XLNXSYNC_MAX_ENC_CHAN];
+ struct clk *axi_clk;
+ struct clk *p_clk;
+ struct clk *c_clk;
+ atomic_t user_count;
+ bool reserved[XLNXSYNC_MAX_ENC_CHAN];
+ int irq;
+ /* irq_lock is used to protect access to sync_err and wdg_err */
+ spinlock_t irq_lock;
+ int minor;
+ struct xlnxsync_config config;
+};
+
+/**
+ * struct xlnxsync_ctx - Synchronizer context struct
+ * @dev: Xilinx synchronizer device struct
+ * @chan_id: Channel id
+ *
+ * This structure contains the device driver related parameters
+ */
+struct xlnxsync_ctx {
+ struct xlnxsync_device *dev;
+ u32 chan_id;
+};
+
+static inline u32 xlnxsync_read(struct xlnxsync_device *dev, u32 chan, u32 reg)
+{
+ return ioread32(dev->iomem + (chan * XLNXSYNC_CHAN_OFFSET) + reg);
+}
+
+static inline void xlnxsync_write(struct xlnxsync_device *dev, u32 chan,
+ u32 reg, u32 val)
+{
+ iowrite32(val, dev->iomem + (chan * XLNXSYNC_CHAN_OFFSET) + reg);
+}
+
+static inline void xlnxsync_clr(struct xlnxsync_device *dev, u32 chan, u32 reg,
+ u32 clr)
+{
+ xlnxsync_write(dev, chan, reg, xlnxsync_read(dev, chan, reg) & ~clr);
+}
+
+static inline void xlnxsync_set(struct xlnxsync_device *dev, u32 chan, u32 reg,
+ u32 set)
+{
+ xlnxsync_write(dev, chan, reg, xlnxsync_read(dev, chan, reg) | set);
+}
+
+static bool xlnxsync_is_buf_done(struct xlnxsync_device *dev,
+ u32 channel, u32 buf, u32 io)
+{
+ u32 luma_valid, chroma_valid;
+ u32 reg_laddr, reg_caddr;
+
+ switch (io) {
+ case XLNXSYNC_PROD:
+ reg_laddr = XLNXSYNC_PL_START_HI_REG;
+ reg_caddr = XLNXSYNC_PC_START_HI_REG;
+ break;
+ case XLNXSYNC_CONS:
+ reg_laddr = XLNXSYNC_CL_START_HI_REG;
+ reg_caddr = XLNXSYNC_CC_START_HI_REG;
+ break;
+ default:
+ return false;
+ }
+
+ luma_valid = xlnxsync_read(dev, channel, reg_laddr + (buf << 3)) &
+ XLNXSYNC_FB_VALID_MASK;
+ chroma_valid = xlnxsync_read(dev, channel, reg_caddr + (buf << 3)) &
+ XLNXSYNC_FB_VALID_MASK;
+ if (!luma_valid && !chroma_valid)
+ return true;
+
+ return false;
+}
+
+static void xlnxsync_reset_chan(struct xlnxsync_device *dev, u32 chan)
+{
+ u8 num_retries = 50;
+
+ xlnxsync_set(dev, chan, XLNXSYNC_CTRL_REG, XLNXSYNC_CTRL_SOFTRESET);
+ /* Wait for a maximum of ~100ms to flush pending transactions */
+ while (num_retries--) {
+ if (!(xlnxsync_read(dev, chan, XLNXSYNC_CTRL_REG) &
+ XLNXSYNC_CTRL_SOFTRESET))
+ break;
+ usleep_range(2000, 2100);
+ }
+}
+
+static void xlnxsync_reset(struct xlnxsync_device *dev)
+{
+ u32 i;
+
+ for (i = 0; i < dev->config.max_channels; i++)
+ xlnxsync_reset_chan(dev, i);
+}
+
+static dma_addr_t xlnxsync_get_phy_addr(struct xlnxsync_device *dev,
+ u32 fd)
+{
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ dma_addr_t phy_addr = 0;
+
+ dbuf = dma_buf_get(fd);
+ if (IS_ERR(dbuf)) {
+ dev_err(dev->dev, "%s : Failed to get dma buf\n", __func__);
+ goto get_phy_addr_err;
+ }
+
+ attach = dma_buf_attach(dbuf, dev->dev);
+ if (IS_ERR(attach)) {
+ dev_err(dev->dev, "%s : Failed to attach buf\n", __func__);
+ goto fail_attach;
+ }
+
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ dev_err(dev->dev, "%s : Failed to attach map\n", __func__);
+ goto fail_map;
+ }
+
+ phy_addr = sg_dma_address(sgt->sgl);
+ dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+
+fail_map:
+ dma_buf_detach(dbuf, attach);
+fail_attach:
+ dma_buf_put(dbuf);
+get_phy_addr_err:
+ return phy_addr;
+}
+
+static int xlnxsync_config_channel(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_chan_config cfg;
+ int ret, i = 0, j;
+ dma_addr_t phy_start_address;
+ u64 luma_start_address[XLNXSYNC_IO];
+ u64 chroma_start_address[XLNXSYNC_IO];
+ u64 luma_end_address[XLNXSYNC_IO];
+ u64 chroma_end_address[XLNXSYNC_IO];
+
+ ret = copy_from_user(&cfg, arg, sizeof(cfg));
+ if (ret) {
+ dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+ return -EFAULT;
+ }
+
+ if (cfg.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "%s : ioctl version mismatch\n", __func__);
+ dev_err(dev->dev,
+ "ioctl ver = 0x%llx expected ver = 0x%llx\n",
+ cfg.hdr_ver, (u64)XLNXSYNC_IOCTL_HDR_VER);
+ return -EINVAL;
+ }
+
+ /* Calculate luma/chroma physical addresses */
+ phy_start_address = xlnxsync_get_phy_addr(dev, cfg.dma_fd);
+ if (!phy_start_address) {
+ dev_err(dev->dev, "%s : Failed to obtain physical address\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ luma_start_address[XLNXSYNC_PROD] =
+ cfg.luma_start_offset[XLNXSYNC_PROD] + phy_start_address;
+ luma_start_address[XLNXSYNC_CONS] =
+ cfg.luma_start_offset[XLNXSYNC_CONS] + phy_start_address;
+ chroma_start_address[XLNXSYNC_PROD] =
+ cfg.chroma_start_offset[XLNXSYNC_PROD] + phy_start_address;
+ chroma_start_address[XLNXSYNC_CONS] =
+ cfg.chroma_start_offset[XLNXSYNC_CONS] + phy_start_address;
+ luma_end_address[XLNXSYNC_PROD] =
+ cfg.luma_end_offset[XLNXSYNC_PROD] + phy_start_address;
+ luma_end_address[XLNXSYNC_CONS] =
+ cfg.luma_end_offset[XLNXSYNC_CONS] + phy_start_address;
+ chroma_end_address[XLNXSYNC_PROD] =
+ cfg.chroma_end_offset[XLNXSYNC_PROD] + phy_start_address;
+ chroma_end_address[XLNXSYNC_CONS] =
+ cfg.chroma_end_offset[XLNXSYNC_CONS] + phy_start_address;
+
+ if (cfg.channel_id >= dev->config.max_channels) {
+ dev_err(dev->dev, "%s : Incorrect channel id %d\n",
+ __func__, cfg.channel_id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev->dev, "Channel id = %d", cfg.channel_id);
+ dev_dbg(dev->dev, "Producer address\n");
+ dev_dbg(dev->dev, "Luma Start Addr = 0x%llx End Addr = 0x%llx Margin = 0x%08x\n",
+ luma_start_address[XLNXSYNC_PROD],
+ luma_end_address[XLNXSYNC_PROD], cfg.luma_margin);
+ dev_dbg(dev->dev, "Chroma Start Addr = 0x%llx End Addr = 0x%llx Margin = 0x%08x\n",
+ chroma_start_address[XLNXSYNC_PROD],
+ chroma_end_address[XLNXSYNC_PROD], cfg.chroma_margin);
+ dev_dbg(dev->dev, "FB id = %d IsMono = %d\n",
+ cfg.fb_id[XLNXSYNC_PROD], cfg.ismono[XLNXSYNC_PROD]);
+ dev_dbg(dev->dev, "Consumer address\n");
+ dev_dbg(dev->dev, "Luma Start Addr = 0x%llx End Addr = 0x%llx\n",
+ luma_start_address[XLNXSYNC_CONS],
+ luma_end_address[XLNXSYNC_CONS]);
+ dev_dbg(dev->dev, "Chroma Start Addr = 0x%llx End Addr = 0x%llx\n",
+ chroma_start_address[XLNXSYNC_CONS],
+ chroma_end_address[XLNXSYNC_CONS]);
+ dev_dbg(dev->dev, "FB id = %d IsMono = %d\n",
+ cfg.fb_id[XLNXSYNC_CONS], cfg.ismono[XLNXSYNC_CONS]);
+
+ for (j = 0; j < XLNXSYNC_IO; j++) {
+ u32 l_start_reg, l_end_reg, c_start_reg, c_end_reg;
+
+ if (cfg.fb_id[j] == XLNXSYNC_AUTO_SEARCH) {
+ /*
+ * When fb_id is 0xFF auto search for free fb
+ * in a channel
+ */
+ dev_dbg(dev->dev, "%s : auto search free fb\n",
+ __func__);
+ for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++) {
+ if (xlnxsync_is_buf_done(dev, cfg.channel_id, i,
+ j))
+ break;
+ dev_dbg(dev->dev, "Channel %d %s FB %d is busy\n",
+ cfg.channel_id, j ? "prod" : "cons", i);
+ }
+
+ if (i == XLNXSYNC_BUF_PER_CHAN)
+ return -EBUSY;
+
+ } else if (cfg.fb_id[j] >= 0 &&
+ cfg.fb_id[j] < XLNXSYNC_BUF_PER_CHAN) {
+ /* If fb_id is specified, check its availability */
+ if (!(xlnxsync_is_buf_done(dev, cfg.channel_id,
+ cfg.fb_id[j], j))) {
+ dev_dbg(dev->dev,
+ "%s : %s FB %d in channel %d is busy!\n",
+ __func__, j ? "prod" : "cons",
+ cfg.fb_id[j], cfg.channel_id);
+ return -EBUSY;
+ }
+ /* Use the requested fb as the buffer index */
+ i = cfg.fb_id[j];
+ dev_dbg(dev->dev, "%s : Configure fb %d\n",
+ __func__, i);
+ } else {
+ /* Invalid fb_id passed */
+ dev_err(dev->dev, "Invalid FB id %d for configuration!\n",
+ cfg.fb_id[j]);
+ return -EINVAL;
+ }
+
+ if (j == XLNXSYNC_PROD) {
+ l_start_reg = XLNXSYNC_PL_START_LO_REG;
+ l_end_reg = XLNXSYNC_PL_END_LO_REG;
+ c_start_reg = XLNXSYNC_PC_START_LO_REG;
+ c_end_reg = XLNXSYNC_PC_END_LO_REG;
+ } else {
+ l_start_reg = XLNXSYNC_CL_START_LO_REG;
+ l_end_reg = XLNXSYNC_CL_END_LO_REG;
+ c_start_reg = XLNXSYNC_CC_START_LO_REG;
+ c_end_reg = XLNXSYNC_CC_END_LO_REG;
+ }
+
+ /* Start Address */
+ xlnxsync_write(dev, cfg.channel_id, l_start_reg + (i << 3),
+ lower_32_bits(luma_start_address[j]));
+
+ xlnxsync_write(dev, cfg.channel_id,
+ (l_start_reg + 4) + (i << 3),
+ upper_32_bits(luma_start_address[j]) &
+ XLNXSYNC_FB_HI_ADDR_MASK);
+
+ /* End Address */
+ xlnxsync_write(dev, cfg.channel_id, l_end_reg + (i << 3),
+ lower_32_bits(luma_end_address[j]));
+ xlnxsync_write(dev, cfg.channel_id, l_end_reg + 4 + (i << 3),
+ upper_32_bits(luma_end_address[j]));
+
+ /* Set margin */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_L_MARGIN_REG + (i << 2),
+ cfg.luma_margin);
+
+ if (!cfg.ismono[j]) {
+ dev_dbg(dev->dev, "%s : Not monochrome. Program Chroma\n",
+ __func__);
+
+ /* Chroma Start Address */
+ xlnxsync_write(dev, cfg.channel_id,
+ c_start_reg + (i << 3),
+ lower_32_bits(chroma_start_address[j]));
+
+ xlnxsync_write(dev, cfg.channel_id,
+ c_start_reg + 4 + (i << 3),
+ upper_32_bits(chroma_start_address[j]) &
+ XLNXSYNC_FB_HI_ADDR_MASK);
+
+ /* Chroma End Address */
+ xlnxsync_write(dev, cfg.channel_id,
+ c_end_reg + (i << 3),
+ lower_32_bits(chroma_end_address[j]));
+
+ xlnxsync_write(dev, cfg.channel_id,
+ c_end_reg + 4 + (i << 3),
+ upper_32_bits(chroma_end_address[j]));
+
+ /* Chroma Margin */
+ xlnxsync_write(dev, cfg.channel_id,
+ XLNXSYNC_C_MARGIN_REG + (i << 2),
+ cfg.chroma_margin);
+
+ /* Set the Valid bit */
+ xlnxsync_set(dev, cfg.channel_id,
+ c_start_reg + 4 + (i << 3),
+ XLNXSYNC_FB_VALID_MASK);
+ }
+
+ /* Set the Valid bit */
+ xlnxsync_set(dev, cfg.channel_id, l_start_reg + 4 + (i << 3),
+ XLNXSYNC_FB_VALID_MASK);
+ }
+
+ for (i = 0; i < XLNXSYNC_MAX_CORES; i++) {
+ iowrite32(cfg.luma_core_offset[i],
+ dev->iomem + XLNXSYNC_LCOREOFF_REG +
+ (i * XLNXSYNC_COREOFF_NEXT));
+
+ iowrite32(cfg.chroma_core_offset[i],
+ dev->iomem + XLNXSYNC_CCOREOFF_REG +
+ (i * XLNXSYNC_COREOFF_NEXT));
+ }
+
+ return 0;
+}
+
+static int xlnxsync_get_channel_status(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ int ret;
+ u32 i, j, k;
+ unsigned long flags;
+ struct xlnxsync_stat status = {};
+
+ for (i = 0; i < dev->config.max_channels; i++) {
+ /* Update Buffers status */
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++) {
+ for (k = 0; k < XLNXSYNC_IO; k++) {
+ if (xlnxsync_is_buf_done(dev, i, j, k))
+ status.fbdone[i][j][k] = true;
+ else
+ status.fbdone[i][j][k] = false;
+ }
+ }
+
+ /* Update channel enable status */
+ if (xlnxsync_read(dev, i, XLNXSYNC_CTRL_REG) &
+ XLNXSYNC_CTRL_ENABLE_MASK)
+ status.enable[i] = true;
+
+ /* Update channel error status */
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ status.sync_err[i] = dev->sync_err[i];
+ status.wdg_err[i] = dev->wdg_err[i];
+ status.ldiff_err[i] = dev->ldiff_err[i];
+ status.cdiff_err[i] = dev->cdiff_err[i];
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ }
+
+ status.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+
+ ret = copy_to_user(arg, &status, sizeof(status));
+ if (ret) {
+ dev_err(dev->dev, "%s: failed to copy result data to user\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int xlnxsync_enable(struct xlnxsync_device *dev, u32 channel,
+ bool enable)
+{
+ if (dev->config.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "ioctl not supported!\n");
+ return -EINVAL;
+ }
+
+ /* check channel v/s max from dt */
+ if (channel >= dev->config.max_channels) {
+ dev_err(dev->dev, "Invalid channel %d. Max channels = %d!\n",
+ channel, dev->config.max_channels);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ dev_dbg(dev->dev, "Enabling %d channel\n", channel);
+ xlnxsync_set(dev, channel, XLNXSYNC_IER_REG,
+ XLNXSYNC_IER_ALL_MASK);
+ xlnxsync_set(dev, channel, XLNXSYNC_CTRL_REG,
+ XLNXSYNC_CTRL_ENABLE_MASK |
+ XLNXSYNC_CTRL_INTR_EN_MASK);
+ } else {
+ dev_dbg(dev->dev, "Disabling %d channel\n", channel);
+ xlnxsync_reset_chan(dev, channel);
+ xlnxsync_clr(dev, channel, XLNXSYNC_CTRL_REG,
+ XLNXSYNC_CTRL_ENABLE_MASK |
+ XLNXSYNC_CTRL_INTR_EN_MASK);
+ xlnxsync_clr(dev, channel, XLNXSYNC_IER_REG,
+ XLNXSYNC_IER_ALL_MASK);
+ dev->reserved[channel] = false;
+ }
+
+ return 0;
+}
+
+static int xlnxsync_get_config(struct xlnxsync_device *dev, void __user *arg)
+{
+ struct xlnxsync_config cfg;
+ int ret;
+
+ cfg.encode = dev->config.encode;
+ cfg.max_channels = dev->config.max_channels;
+ cfg.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+
+ dev_dbg(dev->dev, "IP Config : encode = %d max_channels = %d\n",
+ cfg.encode, cfg.max_channels);
+ dev_dbg(dev->dev, "ioctl version = 0x%llx\n", cfg.hdr_ver);
+ ret = copy_to_user(arg, &cfg, sizeof(cfg));
+ if (ret) {
+ dev_err(dev->dev, "%s: failed to copy result data to user\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int xlnxsync_clr_chan_err(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_clr_err errcfg;
+ int ret;
+ unsigned long flags;
+
+ ret = copy_from_user(&errcfg, arg, sizeof(errcfg));
+ if (ret) {
+ dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+ return -EFAULT;
+ }
+
+ if (errcfg.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "%s : ioctl version mismatch\n", __func__);
+ dev_err(dev->dev,
+ "ioctl ver = 0x%llx expected ver = 0x%llx\n",
+ errcfg.hdr_ver, (u64)XLNXSYNC_IOCTL_HDR_VER);
+ return -EINVAL;
+ }
+
+ if (errcfg.channel_id >= dev->config.max_channels) {
+ dev_err(dev->dev, "%s : Incorrect channel id %d\n",
+ __func__, errcfg.channel_id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev->dev, "%s : Clearing %d channel errors\n",
+ __func__, errcfg.channel_id);
+ /* Clear channel error status */
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ if (dev->sync_err[errcfg.channel_id])
+ dev->sync_err[errcfg.channel_id] = false;
+
+ if (dev->wdg_err[errcfg.channel_id])
+ dev->wdg_err[errcfg.channel_id] = false;
+
+ if (dev->ldiff_err[errcfg.channel_id])
+ dev->ldiff_err[errcfg.channel_id] = false;
+
+ if (dev->cdiff_err[errcfg.channel_id])
+ dev->cdiff_err[errcfg.channel_id] = false;
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return 0;
+}
+
+static int xlnxsync_get_fbdone_status(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_fbdone fbdone_stat = {};
+ int ret, i, j, k;
+
+ fbdone_stat.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+
+ for (i = 0; i < dev->config.max_channels; i++)
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++)
+ for (k = 0; k < XLNXSYNC_IO; k++)
+ if (dev->l_done[i][j][k] &&
+ dev->c_done[i][j][k])
+ fbdone_stat.status[i][j][k] = true;
+
+ ret = copy_to_user(arg, &fbdone_stat, sizeof(fbdone_stat));
+ if (ret) {
+ dev_err(dev->dev, "%s: failed to copy result data to user\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int xlnxsync_clr_fbdone_status(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ struct xlnxsync_fbdone fbd;
+ int ret, i, j, k;
+ unsigned long flags;
+
+ ret = copy_from_user(&fbd, arg, sizeof(fbd));
+ if (ret) {
+ dev_err(dev->dev, "%s : Failed to copy from user\n", __func__);
+ return -EFAULT;
+ }
+
+ if (fbd.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "%s : ioctl version mismatch\n", __func__);
+ dev_err(dev->dev,
+ "ioctl ver = 0x%llx expected ver = 0x%llx\n",
+ fbd.hdr_ver, (u64)XLNXSYNC_IOCTL_HDR_VER);
+ return -EINVAL;
+ }
+
+ /* Clear channel error status */
+ spin_lock_irqsave(&dev->irq_lock, flags);
+
+ for (i = 0; i < dev->config.max_channels; i++) {
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++) {
+ for (k = 0; k < XLNXSYNC_IO; k++) {
+ fbd.status[i][j][k] = false;
+ dev->l_done[i][j][k] = false;
+ dev->c_done[i][j][k] = false;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+
+ return 0;
+}
+
+static int xlnxsync_reserve_get_channel(struct xlnxsync_device *dev,
+ void __user *arg)
+{
+ int ret;
+ u8 i;
+
+ if (dev->config.hdr_ver != XLNXSYNC_IOCTL_HDR_VER) {
+ dev_err(dev->dev, "ioctl not supported!\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->config.max_channels; i++) {
+ if (!dev->reserved[i])
+ break;
+ }
+
+ if (i == dev->config.max_channels) {
+ ret = -EBUSY;
+ dev_dbg(dev->dev, "No channel is free!\n");
+ return ret;
+ }
+
+ dev_dbg(dev->dev, "Reserving channel %d\n", i);
+ dev->reserved[i] = true;
+ ret = copy_to_user(arg, &i, sizeof(i));
+ if (ret) {
+ dev_err(dev->dev, "%s: failed to copy result data to user\n",
+ __func__);
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
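+/*
+ * A typical user-space sequence (illustrative sketch only; the device node
+ * name follows the "xlnxsync%d" pattern used by device_create() in probe) is:
+ *
+ * fd = open("/dev/xlnxsync0", O_RDWR);
+ * ioctl(fd, XLNXSYNC_GET_CFG, &config);
+ * ioctl(fd, XLNXSYNC_RESERVE_GET_CHAN_ID, &chan_id);
+ * ioctl(fd, XLNXSYNC_SET_CHAN_CONFIG, &chan_config);
+ * ioctl(fd, XLNXSYNC_CHAN_ENABLE, chan_id);
+ * ... poll() for EPOLLIN (frame done) or EPOLLPRI (error) ...
+ * ioctl(fd, XLNXSYNC_CHAN_DISABLE, chan_id);
+ * close(fd);
+ */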
+static long xlnxsync_ioctl(struct file *fptr, unsigned int cmd,
+ unsigned long data)
+{
+ int ret = -EINVAL;
+ u32 channel = data;
+ void __user *arg = (void __user *)data;
+ struct xlnxsync_ctx *ctx = fptr->private_data;
+ struct xlnxsync_device *xlnxsync_dev;
+
+ xlnxsync_dev = ctx->dev;
+ if (!xlnxsync_dev) {
+ pr_err("%s: File op error\n", __func__);
+ return -EIO;
+ }
+
+ dev_dbg(xlnxsync_dev->dev, "ioctl = 0x%08x\n", cmd);
+
+ mutex_lock(&xlnxsync_dev->sync_mutex);
+
+ switch (cmd) {
+ case XLNXSYNC_GET_CFG:
+ ret = xlnxsync_get_config(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_GET_CHAN_STATUS:
+ ret = xlnxsync_get_channel_status(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_SET_CHAN_CONFIG:
+ ret = xlnxsync_config_channel(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_CHAN_ENABLE:
+ ctx->chan_id = channel;
+ ret = xlnxsync_enable(xlnxsync_dev, channel, true);
+ break;
+ case XLNXSYNC_CHAN_DISABLE:
+ ret = xlnxsync_enable(xlnxsync_dev, channel, false);
+ break;
+ case XLNXSYNC_CLR_CHAN_ERR:
+ ret = xlnxsync_clr_chan_err(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_GET_CHAN_FBDONE_STAT:
+ ret = xlnxsync_get_fbdone_status(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_CLR_CHAN_FBDONE_STAT:
+ ret = xlnxsync_clr_fbdone_status(xlnxsync_dev, arg);
+ break;
+ case XLNXSYNC_RESERVE_GET_CHAN_ID:
+ ret = xlnxsync_reserve_get_channel(xlnxsync_dev, arg);
+ break;
+ }
+
+ mutex_unlock(&xlnxsync_dev->sync_mutex);
+
+ return ret;
+}
+
+static __poll_t xlnxsync_poll(struct file *fptr, poll_table *wait)
+{
+ u32 j, k;
+ bool err_event, framedone_event;
+ __poll_t ret = 0, req_events = poll_requested_events(wait);
+ unsigned long flags;
+ struct xlnxsync_ctx *ctx = fptr->private_data;
+ struct xlnxsync_device *dev;
+
+ dev = ctx->dev;
+ if (!dev) {
+ pr_err("%s: File op error\n", __func__);
+ return EPOLLERR;
+ }
+
+ dev_dbg_ratelimited(dev->dev, "%s : entered req_events = 0x%x!\n",
+ __func__, req_events);
+
+ if (!(req_events & (POLLPRI | POLLIN)))
+ return 0;
+
+ if (req_events & EPOLLPRI) {
+ poll_wait(fptr, &dev->wq_error, wait);
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ err_event = false;
+ if (dev->sync_err[ctx->chan_id] || dev->wdg_err[ctx->chan_id] ||
+ dev->ldiff_err[ctx->chan_id] ||
+ dev->cdiff_err[ctx->chan_id])
+ err_event = true;
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ dev_dbg_ratelimited(dev->dev, "%s : error event occurred!\n",
+ __func__);
+ if (err_event)
+ ret |= POLLPRI;
+ }
+
+ if (req_events & EPOLLIN) {
+ poll_wait(fptr, &dev->wq_fbdone, wait);
+ spin_lock_irqsave(&dev->irq_lock, flags);
+ framedone_event = false;
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++) {
+ for (k = 0; k < XLNXSYNC_IO; k++) {
+ if (dev->l_done[ctx->chan_id][j][k] &&
+ dev->c_done[ctx->chan_id][j][k])
+ framedone_event = true;
+ }
+ }
+ spin_unlock_irqrestore(&dev->irq_lock, flags);
+ dev_dbg_ratelimited(dev->dev, "%s : framedone event occurred!\n",
+ __func__);
+ if (framedone_event)
+ ret |= POLLIN;
+ }
+
+ return ret;
+}
+
+static int xlnxsync_open(struct inode *iptr, struct file *fptr)
+{
+ struct xlnxsync_device *xlnxsync;
+ struct xlnxsync_ctx *ctx;
+
+ xlnxsync = container_of(iptr->i_cdev, struct xlnxsync_device, chdev);
+ if (!xlnxsync) {
+ pr_err("%s: failed to get xlnxsync driver handle\n", __func__);
+ return -EAGAIN;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = xlnxsync;
+ fptr->private_data = ctx;
+
+ atomic_inc(&xlnxsync->user_count);
+ dev_dbg(xlnxsync->dev, "%s: tid=%d Opened with user count = %d\n",
+ __func__, current->pid, atomic_read(&xlnxsync->user_count));
+
+ return 0;
+}
+
+static int xlnxsync_release(struct inode *iptr, struct file *fptr)
+{
+ struct xlnxsync_device *xlnxsync;
+ struct xlnxsync_ctx *ctx = fptr->private_data;
+ unsigned int i, j;
+
+ xlnxsync = container_of(iptr->i_cdev, struct xlnxsync_device, chdev);
+ if (!xlnxsync) {
+ pr_err("%s: failed to get xlnxsync driver handle", __func__);
+ return -EAGAIN;
+ }
+
+ dev_dbg(xlnxsync->dev, "%s: tid=%d user count = %d chan_id = %d\n",
+ __func__, current->pid, atomic_read(&xlnxsync->user_count),
+ ctx->chan_id);
+
+ if (xlnxsync_read(xlnxsync, ctx->chan_id, XLNXSYNC_CTRL_REG) &
+ XLNXSYNC_CTRL_ENABLE_MASK) {
+ dev_dbg(xlnxsync->dev, "Disabling %d channel\n", ctx->chan_id);
+ xlnxsync_reset_chan(xlnxsync, ctx->chan_id);
+ xlnxsync_clr(xlnxsync, ctx->chan_id, XLNXSYNC_CTRL_REG,
+ XLNXSYNC_CTRL_ENABLE_MASK |
+ XLNXSYNC_CTRL_INTR_EN_MASK);
+ xlnxsync_clr(xlnxsync, ctx->chan_id, XLNXSYNC_IER_REG,
+ XLNXSYNC_IER_ALL_MASK);
+ }
+
+ xlnxsync->reserved[ctx->chan_id] = false;
+ xlnxsync->sync_err[ctx->chan_id] = false;
+ xlnxsync->wdg_err[ctx->chan_id] = false;
+ xlnxsync->ldiff_err[ctx->chan_id] = false;
+ xlnxsync->cdiff_err[ctx->chan_id] = false;
+ for (i = 0; i < XLNXSYNC_BUF_PER_CHAN; i++) {
+ for (j = 0; j < XLNXSYNC_IO; j++) {
+ xlnxsync->l_done[ctx->chan_id][i][j] = false;
+ xlnxsync->c_done[ctx->chan_id][i][j] = false;
+ }
+ }
+
+ if (atomic_dec_and_test(&xlnxsync->user_count)) {
+ xlnxsync_reset(xlnxsync);
+ dev_dbg(xlnxsync->dev,
+ "%s: tid=%d Stopping and clearing device",
+ __func__, current->pid);
+ }
+
+ kfree(ctx);
+ return 0;
+}
+
+static const struct file_operations xlnxsync_fops = {
+ .open = xlnxsync_open,
+ .release = xlnxsync_release,
+ .unlocked_ioctl = xlnxsync_ioctl,
+ .poll = xlnxsync_poll,
+};
+
+static irqreturn_t xlnxsync_irq_handler(int irq, void *data)
+{
+ struct xlnxsync_device *xlnxsync = (struct xlnxsync_device *)data;
+ u32 val, i;
+ bool err_event;
+ bool framedone_event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xlnxsync->irq_lock, flags);
+ err_event = false;
+ framedone_event = false;
+ for (i = 0; i < xlnxsync->config.max_channels; i++) {
+ u32 j, k;
+
+ val = xlnxsync_read(xlnxsync, i, XLNXSYNC_ISR_REG);
+ xlnxsync_write(xlnxsync, i, XLNXSYNC_ISR_REG, val);
+
+ if (val & XLNXSYNC_ISR_SYNC_FAIL_MASK)
+ xlnxsync->sync_err[i] = true;
+ if (val & XLNXSYNC_ISR_WDG_ERR_MASK)
+ xlnxsync->wdg_err[i] = true;
+ if (val & XLNXSYNC_ISR_LDIFF)
+ xlnxsync->ldiff_err[i] = true;
+ if (val & XLNXSYNC_ISR_CDIFF)
+ xlnxsync->cdiff_err[i] = true;
+ if (xlnxsync->sync_err[i] || xlnxsync->wdg_err[i] ||
+ xlnxsync->ldiff_err[i] || xlnxsync->cdiff_err[i])
+ err_event = true;
+
+ if (val & XLNXSYNC_ISR_PLDONE_MASK) {
+ j = (val & XLNXSYNC_ISR_PLDONE_MASK) >>
+ XLNXSYNC_ISR_PLDONE_SHIFT;
+
+ xlnxsync->l_done[i][j][XLNXSYNC_PROD] = true;
+ }
+
+ if (val & XLNXSYNC_ISR_PCDONE_MASK) {
+ j = (val & XLNXSYNC_ISR_PCDONE_MASK) >>
+ XLNXSYNC_ISR_PCDONE_SHIFT;
+
+ xlnxsync->c_done[i][j][XLNXSYNC_PROD] = true;
+ }
+
+ if (val & XLNXSYNC_ISR_CLDONE_MASK) {
+ j = (val & XLNXSYNC_ISR_CLDONE_MASK) >>
+ XLNXSYNC_ISR_CLDONE_SHIFT;
+
+ xlnxsync->l_done[i][j][XLNXSYNC_CONS] = true;
+ }
+
+ if (val & XLNXSYNC_ISR_CCDONE_MASK) {
+ j = (val & XLNXSYNC_ISR_CCDONE_MASK) >>
+ XLNXSYNC_ISR_CCDONE_SHIFT;
+
+ xlnxsync->c_done[i][j][XLNXSYNC_CONS] = true;
+ }
+
+ for (j = 0; j < XLNXSYNC_BUF_PER_CHAN; j++) {
+ for (k = 0; k < XLNXSYNC_IO; k++) {
+ if (xlnxsync->l_done[i][j][k] &&
+ xlnxsync->c_done[i][j][k])
+ framedone_event = true;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&xlnxsync->irq_lock, flags);
+
+ if (err_event) {
+ dev_dbg_ratelimited(xlnxsync->dev, "%s : error occurred\n",
+ __func__);
+ wake_up_interruptible(&xlnxsync->wq_error);
+ }
+
+ if (framedone_event) {
+ dev_dbg_ratelimited(xlnxsync->dev, "%s : framedone occurred\n",
+ __func__);
+ wake_up_interruptible(&xlnxsync->wq_fbdone);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xlnxsync_parse_dt_prop(struct xlnxsync_device *xlnxsync)
+{
+ struct device_node *node = xlnxsync->dev->of_node;
+ int ret;
+
+ xlnxsync->config.encode = of_property_read_bool(node, "xlnx,encode");
+ dev_dbg(xlnxsync->dev, "synchronizer type = %s\n",
+ xlnxsync->config.encode ? "encode" : "decode");
+
+ ret = of_property_read_u32(node, "xlnx,num-chan",
+ (u32 *)&xlnxsync->config.max_channels);
+ if (ret)
+ return ret;
+
+ dev_dbg(xlnxsync->dev, "max channels = %d\n",
+ xlnxsync->config.max_channels);
+
+ if (xlnxsync->config.max_channels == 0 ||
+ xlnxsync->config.max_channels > XLNXSYNC_MAX_ENC_CHAN) {
+ dev_err(xlnxsync->dev, "Number of channels should be 1 to 4.\n");
+ dev_err(xlnxsync->dev, "Invalid number of channels : %d\n",
+ xlnxsync->config.max_channels);
+ return -EINVAL;
+ }
+
+ if (!xlnxsync->config.encode &&
+ xlnxsync->config.max_channels > XLNXSYNC_MAX_DEC_CHAN) {
+ dev_err(xlnxsync->dev, "Decode can't have more than 2 channels.\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int xlnxsync_clk_setup(struct xlnxsync_device *xlnxsync)
+{
+ int ret;
+
+ xlnxsync->axi_clk = devm_clk_get(xlnxsync->dev, "s_axi_ctrl_aclk");
+ if (IS_ERR(xlnxsync->axi_clk)) {
+ ret = PTR_ERR(xlnxsync->axi_clk);
+ dev_err(xlnxsync->dev, "failed to get axi_aclk (%d)\n", ret);
+ return ret;
+ }
+
+ xlnxsync->p_clk = devm_clk_get(xlnxsync->dev, "s_axi_mm_p_aclk");
+ if (IS_ERR(xlnxsync->p_clk)) {
+ ret = PTR_ERR(xlnxsync->p_clk);
+ dev_err(xlnxsync->dev, "failed to get p_aclk (%d)\n", ret);
+ return ret;
+ }
+
+ xlnxsync->c_clk = devm_clk_get(xlnxsync->dev, "s_axi_mm_aclk");
+ if (IS_ERR(xlnxsync->c_clk)) {
+ ret = PTR_ERR(xlnxsync->c_clk);
+ dev_err(xlnxsync->dev, "failed to get axi_mm (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xlnxsync->axi_clk);
+ if (ret) {
+ dev_err(xlnxsync->dev, "failed to enable axi_clk (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xlnxsync->p_clk);
+ if (ret) {
+ dev_err(xlnxsync->dev, "failed to enable p_clk (%d)\n", ret);
+ goto err_pclk;
+ }
+
+ ret = clk_prepare_enable(xlnxsync->c_clk);
+ if (ret) {
+ dev_err(xlnxsync->dev, "failed to enable axi_mm (%d)\n", ret);
+ goto err_cclk;
+ }
+
+ return ret;
+
+err_cclk:
+ clk_disable_unprepare(xlnxsync->p_clk);
+err_pclk:
+ clk_disable_unprepare(xlnxsync->axi_clk);
+
+ return ret;
+}
+
+static int xlnxsync_probe(struct platform_device *pdev)
+{
+ struct xlnxsync_device *xlnxsync;
+ struct device *dc;
+ struct resource *res;
+ int ret;
+
+ xlnxsync = devm_kzalloc(&pdev->dev, sizeof(*xlnxsync), GFP_KERNEL);
+ if (!xlnxsync)
+ return -ENOMEM;
+
+ xlnxsync->minor = ida_simple_get(&xs_ida, 0, XLNXSYNC_DEV_MAX,
+ GFP_KERNEL);
+ if (xlnxsync->minor < 0)
+ return xlnxsync->minor;
+
+ xlnxsync->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get resource.\n");
+ return -ENODEV;
+ }
+
+ xlnxsync->iomem = devm_ioremap_nocache(xlnxsync->dev, res->start,
+ resource_size(res));
+ if (!xlnxsync->iomem) {
+ dev_err(&pdev->dev, "ip register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ ret = xlnxsync_parse_dt_prop(xlnxsync);
+ if (ret < 0)
+ return ret;
+
+ xlnxsync->config.hdr_ver = XLNXSYNC_IOCTL_HDR_VER;
+ dev_info(xlnxsync->dev, "ioctl header version = 0x%llx\n",
+ xlnxsync->config.hdr_ver);
+
+ xlnxsync->irq = irq_of_parse_and_map(xlnxsync->dev->of_node, 0);
+ if (!xlnxsync->irq) {
+ dev_err(xlnxsync->dev, "Unable to parse and get irq.\n");
+ return -EINVAL;
+ }
+ ret = devm_request_threaded_irq(xlnxsync->dev, xlnxsync->irq, NULL,
+ xlnxsync_irq_handler, IRQF_ONESHOT,
+ dev_name(xlnxsync->dev), xlnxsync);
+
+ if (ret) {
+ dev_err(xlnxsync->dev, "Err = %d Interrupt handler reg failed!\n",
+ ret);
+ return ret;
+ }
+
+ ret = xlnxsync_clk_setup(xlnxsync);
+ if (ret) {
+ dev_err(xlnxsync->dev, "clock setup failed!\n");
+ return ret;
+ }
+
+ init_waitqueue_head(&xlnxsync->wq_fbdone);
+ init_waitqueue_head(&xlnxsync->wq_error);
+ spin_lock_init(&xlnxsync->irq_lock);
+ mutex_init(&xlnxsync->sync_mutex);
+
+ cdev_init(&xlnxsync->chdev, &xlnxsync_fops);
+ xlnxsync->chdev.owner = THIS_MODULE;
+ ret = cdev_add(&xlnxsync->chdev,
+ MKDEV(MAJOR(xlnxsync_devt), xlnxsync->minor), 1);
+ if (ret < 0) {
+ dev_err(xlnxsync->dev, "cdev_add failed");
+ goto clk_err;
+ }
+
+ if (!xlnxsync_class) {
+ dev_err(xlnxsync->dev, "xlnxsync device class not created");
+ ret = -EIO;
+ goto cdev_err;
+ }
+ dc = device_create(xlnxsync_class, xlnxsync->dev,
+ MKDEV(MAJOR(xlnxsync_devt), xlnxsync->minor),
+ xlnxsync, "xlnxsync%d", xlnxsync->minor);
+ if (IS_ERR(dc)) {
+ ret = PTR_ERR(dc);
+ dev_err(xlnxsync->dev, "Unable to create device");
+ goto cdev_err;
+ }
+
+ platform_set_drvdata(pdev, xlnxsync);
+ dev_info(xlnxsync->dev, "Xilinx Synchronizer probe successful!\n");
+
+ return 0;
+
+cdev_err:
+ cdev_del(&xlnxsync->chdev);
+clk_err:
+ clk_disable_unprepare(xlnxsync->c_clk);
+ clk_disable_unprepare(xlnxsync->p_clk);
+ clk_disable_unprepare(xlnxsync->axi_clk);
+ ida_simple_remove(&xs_ida, xlnxsync->minor);
+
+ return ret;
+}
+
+static int xlnxsync_remove(struct platform_device *pdev)
+{
+ struct xlnxsync_device *xlnxsync = platform_get_drvdata(pdev);
+
+ if (!xlnxsync || !xlnxsync_class)
+ return -EIO;
+
+ cdev_del(&xlnxsync->chdev);
+ clk_disable_unprepare(xlnxsync->c_clk);
+ clk_disable_unprepare(xlnxsync->p_clk);
+ clk_disable_unprepare(xlnxsync->axi_clk);
+ ida_simple_remove(&xs_ida, xlnxsync->minor);
+
+ return 0;
+}
+
+static const struct of_device_id xlnxsync_of_match[] = {
+ { .compatible = "xlnx,sync-ip-1.0", },
+ { /* end of table*/ }
+};
+MODULE_DEVICE_TABLE(of, xlnxsync_of_match);
+
+static struct platform_driver xlnxsync_driver = {
+ .driver = {
+ .name = XLNXSYNC_DRIVER_NAME,
+ .of_match_table = xlnxsync_of_match,
+ },
+ .probe = xlnxsync_probe,
+ .remove = xlnxsync_remove,
+};
+
+static int __init xlnxsync_init_mod(void)
+{
+ int err;
+
+ xlnxsync_class = class_create(THIS_MODULE, XLNXSYNC_DRIVER_NAME);
+ if (IS_ERR(xlnxsync_class)) {
+ pr_err("%s : Unable to create xlnxsync class", __func__);
+ return PTR_ERR(xlnxsync_class);
+ }
+ err = alloc_chrdev_region(&xlnxsync_devt, 0,
+ XLNXSYNC_DEV_MAX, XLNXSYNC_DRIVER_NAME);
+ if (err < 0) {
+ pr_err("%s: Unable to get major number for xlnxsync", __func__);
+ goto err_class;
+ }
+ err = platform_driver_register(&xlnxsync_driver);
+ if (err < 0) {
+ pr_err("%s: Unable to register %s driver",
+ __func__, XLNXSYNC_DRIVER_NAME);
+ goto err_pdrv;
+ }
+ return 0;
+err_pdrv:
+ unregister_chrdev_region(xlnxsync_devt, XLNXSYNC_DEV_MAX);
+err_class:
+ class_destroy(xlnxsync_class);
+ return err;
+}
+
+static void __exit xlnxsync_cleanup_mod(void)
+{
+ platform_driver_unregister(&xlnxsync_driver);
+ unregister_chrdev_region(xlnxsync_devt, XLNXSYNC_DEV_MAX);
+ class_destroy(xlnxsync_class);
+ xlnxsync_class = NULL;
+}
+module_init(xlnxsync_init_mod);
+module_exit(xlnxsync_cleanup_mod);
+
+MODULE_AUTHOR("Vishal Sagar");
+MODULE_DESCRIPTION("Xilinx Synchronizer IP Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(XLNXSYNC_DRIVER_VERSION);
diff --git a/drivers/staging/xroeframer/Kconfig b/drivers/staging/xroeframer/Kconfig
new file mode 100644
index 000000000000..16aa1f2c6a78
--- /dev/null
+++ b/drivers/staging/xroeframer/Kconfig
@@ -0,0 +1,18 @@
+#
+# Xilinx Radio over Ethernet Framer driver
+#
+
+config XROE_FRAMER
+ tristate "Xilinx Radio over Ethernet Framer driver"
+ ---help---
+ The "Radio Over Ethernet Framer" IP (roe_framer) ingests/generates
+ Ethernet packet data, (de-)multiplexes packets based on protocol
+ into/from various Radio Antenna data streams.
+
+ It has 2 main, independent, data paths:
+
+ - Downlink, from the BaseBand to the Phone, Ethernet to Antenna,
+ we call this the De-Framer path, or defm on all related IP signals.
+
+ - Uplink, from the Phone to the BaseBand, Antenna to Ethernet,
+ we call this the Framer path, or fram on all related IP signals.
diff --git a/drivers/staging/xroeframer/Makefile b/drivers/staging/xroeframer/Makefile
new file mode 100644
index 000000000000..f7bf07e98243
--- /dev/null
+++ b/drivers/staging/xroeframer/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Radio over Ethernet Framer driver
+#
+obj-$(CONFIG_XROE_FRAMER) := framer.o
+
+framer-objs := xroe_framer.o \
+ sysfs_xroe.o \
+ sysfs_xroe_framer_ipv4.o \
+ sysfs_xroe_framer_ipv6.o \
+ sysfs_xroe_framer_udp.o \
+ sysfs_xroe_framer_stats.o
diff --git a/drivers/staging/xroeframer/README b/drivers/staging/xroeframer/README
new file mode 100644
index 000000000000..505a46c2cf62
--- /dev/null
+++ b/drivers/staging/xroeframer/README
@@ -0,0 +1,47 @@
+Xilinx Radio over Ethernet Framer driver
+=========================================
+
+About the RoE Framer
+
+The "Radio Over Ethernet Framer" IP (roe_framer) ingests/generates Ethernet
+packet data, (de-)multiplexes packets based on protocol into/from various
+Radio Antenna data streams.
+
+It has 2 main, independent, data paths:
+
+- Downlink, from the BaseBand to the Phone, Ethernet to Antenna,
+we call this the De-Framer path, or defm on all related IP signals.
+
+- Uplink, from the Phone to the BaseBand, Antenna to Ethernet,
+we call this the Framer path, or fram on all related IP signals.
+
+Key points:
+
+- Apart from the AXI4-Lite configuration port and a handful of strobe/control
+signals, all data interfaces are AXI Stream (AXIS).
+- The IP does not contain an Ethernet MAC IP; rather, it routes or creates
+packets based on the direction through the roe_framer.
+- Currently designed to work with:
+ - 1, 2 or 4 10G Ethernet AXIS stream ports to/from 1, 2, 4, 8, 16,
+ or 32 antenna ports
+ Note: each Ethernet port is 64 bit data @ 156.25MHz
+ - 1 or 2 25G Ethernet AXIS stream ports to/from 1, 2, 4, 8, 16,
+ or 32 antenna ports
+ Note: each Ethernet port is 64 bit data @ 390.25MHz
+- Contains a filter so that all non-protocol packets, or non-hardware-IP
+processed packets, can be forwarded to another block for processing. In general
+this is a microprocessor, specifically the Zynq ARM in our case. This filter
+function can move into the optional switch when TSN is used.
+
+About the Linux Driver
+
+The RoE Framer Linux driver provides sysfs access to the framer controls. The
+driver is bound to the hardware through its Device Tree binding
+(see "dt-binding.txt" for more information). When the driver is loaded, the
+general controls (such as framing mode, enable, restart etc.) are exposed
+under /sys/kernel/xroe. Furthermore, specific controls can be found under
+/sys/kernel/xroe/framer. These include protocol-specific settings for
+IPv4, IPv6 & UDP.
+
+The framer's register map can also be accessed directly through ioctl calls,
+for both reading and writing (where permitted).
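+
+As an illustration only (the attribute name below is hypothetical; the actual
+entries are created by the sysfs_xroe*.c files of this driver), a control can
+be read from user space like any other sysfs attribute:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          char value[32];
+          FILE *f = fopen("/sys/kernel/xroe/framing", "r");
+
+          if (!f)
+                  return 1;
+          if (fgets(value, sizeof(value), f))
+                  printf("framing control: %s", value);
+          fclose(f);
+          return 0;
+  }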
diff --git a/drivers/staging/xroeframer/roe_framer_ctrl.h b/drivers/staging/xroeframer/roe_framer_ctrl.h
new file mode 100644
index 000000000000..162c49a9bc3b
--- /dev/null
+++ b/drivers/staging/xroeframer/roe_framer_ctrl.h
@@ -0,0 +1,1088 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+/*-----------------------------------------------------------------------------
+ * C Header bank BASE definitions
+ *-----------------------------------------------------------------------------
+ */
+#define ROE_FRAMER_V1_0_CFG_BASE_ADDR 0x0 /* 0 */
+#define ROE_FRAMER_V1_0_FRAM_BASE_ADDR 0x2000 /* 8192 */
+#define ROE_FRAMER_V1_0_FRAM_DRP_BASE_ADDR 0x4000 /* 16384 */
+#define ROE_FRAMER_V1_0_DEFM_BASE_ADDR 0x6000 /* 24576 */
+#define ROE_FRAMER_V1_0_DEFM_DRP_BASE_ADDR 0x8000 /* 32768 */
+#define ROE_FRAMER_V1_0_ETH_BASE_ADDR 0xa000 /* 40960 */
+#define ROE_FRAMER_V1_0_STATS_BASE_ADDR 0xc000 /* 49152 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_cfg
+ * with prefix cfg_ @ address 0x0
+ *-----------------------------------------------------------------------------
+ */
+/* Type = roInt */
+#define CFG_MAJOR_REVISION_ADDR 0x0 /* 0 */
+#define CFG_MAJOR_REVISION_MASK 0xff000000 /* 4278190080 */
+#define CFG_MAJOR_REVISION_OFFSET 0x18 /* 24 */
+#define CFG_MAJOR_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_MAJOR_REVISION_DEFAULT 0x1 /* 1 */
+
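+/*
+ * Every field in this header is described by an _ADDR/_MASK/_OFFSET/_WIDTH/
+ * _DEFAULT macro group. As an illustrative sketch only ("regs" being a
+ * hypothetical pointer to the mapped register space of the framer), a field
+ * is typically extracted as:
+ *
+ * val = (ioread32(regs + CFG_MAJOR_REVISION_ADDR) &
+ * CFG_MAJOR_REVISION_MASK) >> CFG_MAJOR_REVISION_OFFSET;
+ */
+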
+/* Type = roInt */
+#define CFG_MINOR_REVISION_ADDR 0x0 /* 0 */
+#define CFG_MINOR_REVISION_MASK 0xff0000 /* 16711680 */
+#define CFG_MINOR_REVISION_OFFSET 0x10 /* 16 */
+#define CFG_MINOR_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_MINOR_REVISION_DEFAULT 0x0 /* 0 */
+
+/* Type = roInt */
+#define CFG_VERSION_REVISION_ADDR 0x0 /* 0 */
+#define CFG_VERSION_REVISION_MASK 0xff00 /* 65280 */
+#define CFG_VERSION_REVISION_OFFSET 0x8 /* 8 */
+#define CFG_VERSION_REVISION_WIDTH 0x8 /* 8 */
+#define CFG_VERSION_REVISION_DEFAULT 0x0 /* 0 */
+
+/* Type = roInt */
+#define CFG_INTERNAL_REVISION_ADDR 0x4 /* 4 */
+#define CFG_INTERNAL_REVISION_MASK 0xffffffff /* 4294967295 */
+#define CFG_INTERNAL_REVISION_OFFSET 0x0 /* 0 */
+#define CFG_INTERNAL_REVISION_WIDTH 0x20 /* 32 */
+#define CFG_INTERNAL_REVISION_DEFAULT 0x12345678 /* 305419896 */
+
+/* Type = rw */
+#define CFG_TIMEOUT_VALUE_ADDR 0x8 /* 8 */
+#define CFG_TIMEOUT_VALUE_MASK 0xfff /* 4095 */
+#define CFG_TIMEOUT_VALUE_OFFSET 0x0 /* 0 */
+#define CFG_TIMEOUT_VALUE_WIDTH 0xc /* 12 */
+#define CFG_TIMEOUT_VALUE_DEFAULT 0x80 /* 128 */
+
+/* Type = rw */
+#define CFG_USER_RW_OUT_ADDR 0xc /* 12 */
+#define CFG_USER_RW_OUT_MASK 0xff /* 255 */
+#define CFG_USER_RW_OUT_OFFSET 0x0 /* 0 */
+#define CFG_USER_RW_OUT_WIDTH 0x8 /* 8 */
+#define CFG_USER_RW_OUT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_USER_RO_IN_ADDR 0xc /* 12 */
+#define CFG_USER_RO_IN_MASK 0xff0000 /* 16711680 */
+#define CFG_USER_RO_IN_OFFSET 0x10 /* 16 */
+#define CFG_USER_RO_IN_WIDTH 0x8 /* 8 */
+#define CFG_USER_RO_IN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_MASTER_INT_ENABLE_ADDR 0x10 /* 16 */
+#define CFG_MASTER_INT_ENABLE_MASK 0x1 /* 1 */
+#define CFG_MASTER_INT_ENABLE_OFFSET 0x0 /* 0 */
+#define CFG_MASTER_INT_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_MASTER_INT_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_FRAM_FIFO_OF_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_FRAM_FIFO_OF_ENABLE_MASK 0x1 /* 1 */
+#define CFG_FRAM_FIFO_OF_ENABLE_OFFSET 0x0 /* 0 */
+#define CFG_FRAM_FIFO_OF_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_FIFO_OF_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_FRAM_FIFO_UF_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_FRAM_FIFO_UF_ENABLE_MASK 0x2 /* 2 */
+#define CFG_FRAM_FIFO_UF_ENABLE_OFFSET 0x1 /* 1 */
+#define CFG_FRAM_FIFO_UF_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_FIFO_UF_ENABLE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define CFG_AXI_TIMEOUT_ENABLE_ADDR 0x14 /* 20 */
+#define CFG_AXI_TIMEOUT_ENABLE_MASK 0x80000000 /* 2147483648 */
+#define CFG_AXI_TIMEOUT_ENABLE_OFFSET 0x1f /* 31 */
+#define CFG_AXI_TIMEOUT_ENABLE_WIDTH 0x1 /* 1 */
+#define CFG_AXI_TIMEOUT_ENABLE_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define CFG_INTERRUPT_STATUS_SAMPLE_ADDR 0x1c /* 28 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_MASK 0x1 /* 1 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_OFFSET 0x0 /* 0 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_WIDTH 0x1 /* 1 */
+#define CFG_INTERRUPT_STATUS_SAMPLE_DEFAULT 0x1 /* 1 */
+
+/* Type = roSig */
+#define CFG_FRAM_RESET_STATUS_ADDR 0x18 /* 24 */
+#define CFG_FRAM_RESET_STATUS_MASK 0x1 /* 1 */
+#define CFG_FRAM_RESET_STATUS_OFFSET 0x0 /* 0 */
+#define CFG_FRAM_RESET_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_RESET_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_DEFM_RESET_STATUS_ADDR 0x18 /* 24 */
+#define CFG_DEFM_RESET_STATUS_MASK 0x2 /* 2 */
+#define CFG_DEFM_RESET_STATUS_OFFSET 0x1 /* 1 */
+#define CFG_DEFM_RESET_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_DEFM_RESET_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ANT_OF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_MASK 0x100 /* 256 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_OFFSET 0x8 /* 8 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ANT_OF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ETH_OF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_MASK 0x200 /* 512 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_OFFSET 0x9 /* 9 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ETH_OF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ANT_UF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_MASK 0x400 /* 1024 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_OFFSET 0xa /* 10 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ANT_UF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_FRAM_ETH_UF_INTERRUPT_ADDR 0x18 /* 24 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_MASK 0x800 /* 2048 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_OFFSET 0xb /* 11 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_WIDTH 0x1 /* 1 */
+#define CFG_FRAM_ETH_UF_INTERRUPT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_AXI_TIMEOUT_STATUS_ADDR 0x18 /* 24 */
+#define CFG_AXI_TIMEOUT_STATUS_MASK 0x80000000 /* 2147483648 */
+#define CFG_AXI_TIMEOUT_STATUS_OFFSET 0x1f /* 31 */
+#define CFG_AXI_TIMEOUT_STATUS_WIDTH 0x1 /* 1 */
+#define CFG_AXI_TIMEOUT_STATUS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_ADDR 0x20 /* 32 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_MASK 0xffff /* 65535 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_OFFSET 0x0 /* 0 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_WIDTH 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_FRAM_ANTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_ADDR 0x20 /* 32 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_MASK 0xffff0000 /* 4294901760 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_OFFSET 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_WIDTH 0x10 /* 16 */
+#define CFG_CONFIG_NO_OF_DEFM_ANTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_ADDR 0x24 /* 36 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_MASK 0x3ff /* 1023 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_OFFSET 0x0 /* 0 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_WIDTH 0xa /* 10 */
+#define CFG_CONFIG_NO_OF_ETH_PORTS_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define CFG_CONFIG_ETH_SPEED_ADDR 0x24 /* 36 */
+#define CFG_CONFIG_ETH_SPEED_MASK 0x3ff0000 /* 67043328 */
+#define CFG_CONFIG_ETH_SPEED_OFFSET 0x10 /* 16 */
+#define CFG_CONFIG_ETH_SPEED_WIDTH 0xa /* 10 */
+#define CFG_CONFIG_ETH_SPEED_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_fram
+ * with prefix fram_ @ address 0x2000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define FRAM_DISABLE_ADDR 0x2000 /* 8192 */
+#define FRAM_DISABLE_MASK 0x1 /* 1 */
+#define FRAM_DISABLE_OFFSET 0x0 /* 0 */
+#define FRAM_DISABLE_WIDTH 0x1 /* 1 */
+#define FRAM_DISABLE_DEFAULT 0x1 /* 1 */
+
+/* Type = roSig */
+#define FRAM_READY_ADDR 0x2000 /* 8192 */
+#define FRAM_READY_MASK 0x2 /* 2 */
+#define FRAM_READY_OFFSET 0x1 /* 1 */
+#define FRAM_READY_WIDTH 0x1 /* 1 */
+#define FRAM_READY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define FRAM_FIFO_FULL_INDICATOR_ADDR 0x2004 /* 8196 */
+#define FRAM_FIFO_FULL_INDICATOR_MASK 0xffffffff /* 4294967295 */
+#define FRAM_FIFO_FULL_INDICATOR_OFFSET 0x0 /* 0 */
+#define FRAM_FIFO_FULL_INDICATOR_WIDTH 0x20 /* 32 */
+#define FRAM_FIFO_FULL_INDICATOR_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_MIN_ADDR 0x2020 /* 8224 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_MAX_ADDR 0x2024 /* 8228 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_ADDR 0x2028 /* 8232 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_INITVAL_DEFAULT 0x75 /* 117 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_ADDR 0x202c /* 8236 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_ADDR 0x2030 /* 8240 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_ADDR 0x2034 /* 8244 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_ADDR 0x2038 /* 8248 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_INITVAL_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_ADDR 0x203c /* 8252 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_DATA_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_ADDR 0x2050 /* 8272 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_ADDR 0x2054 /* 8276 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_ADDR 0x2058 /* 8280 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_INITVAL_DEFAULT 0x75 /* 117 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_ADDR 0x205c /* 8284 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_ADDR 0x2060 /* 8288 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_ADDR 0x2064 /* 8292 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_ADDR 0x2068 /* 8296 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_INITVAL_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_ADDR 0x206c /* 8300 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define FRAM_SN_CTRL_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define FRAM_PROTOCOL_DEFINITION_ADDR 0x2200 /* 8704 */
+#define FRAM_PROTOCOL_DEFINITION_MASK 0xf /* 15 */
+#define FRAM_PROTOCOL_DEFINITION_OFFSET 0x0 /* 0 */
+#define FRAM_PROTOCOL_DEFINITION_WIDTH 0x4 /* 4 */
+#define FRAM_PROTOCOL_DEFINITION_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_GEN_VLAN_TAG_ADDR 0x2200 /* 8704 */
+#define FRAM_GEN_VLAN_TAG_MASK 0x10 /* 16 */
+#define FRAM_GEN_VLAN_TAG_OFFSET 0x4 /* 4 */
+#define FRAM_GEN_VLAN_TAG_WIDTH 0x1 /* 1 */
+#define FRAM_GEN_VLAN_TAG_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_ADDR 0x2200 /* 8704 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_MASK 0x60 /* 96 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_OFFSET 0x5 /* 5 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_WIDTH 0x2 /* 2 */
+#define FRAM_SEL_IPV_ADDRESS_TYPE_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_fram_drp
+ * with prefix fram_drp @ address 0x4000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_PC_ID_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_PC_ID_MASK 0xffff /* 65535 */
+#define FRAM_DRPFRAM_DATA_PC_ID_OFFSET 0x0 /* 0 */
+#define FRAM_DRPFRAM_DATA_PC_ID_WIDTH 0x10 /* 16 */
+#define FRAM_DRPFRAM_DATA_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_MASK 0xff0000 /* 16711680 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_OFFSET 0x10 /* 16 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_DATA_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_ADDR 0x4000 /* 16384 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_MASK 0xff000000 /* 4278190080 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_OFFSET 0x18 /* 24 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_DATA_ETHERNET_PORT_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_PC_ID_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_MASK 0xffff /* 65535 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_OFFSET 0x0 /* 0 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_WIDTH 0x10 /* 16 */
+#define FRAM_DRPFRAM_CTRL_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_MASK 0xff0000 /* 16711680 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_OFFSET 0x10 /* 16 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_CTRL_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_ADDR 0x4400 /* 17408 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_MASK 0xff000000 /* 4278190080 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_OFFSET 0x18 /* 24 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_WIDTH 0x8 /* 8 */
+#define FRAM_DRPFRAM_CTRL_ETHERNET_PORT_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_defm
+ * with prefix defm_ @ address 0x6000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define DEFM_RESTART_ADDR 0x6000 /* 24576 */
+#define DEFM_RESTART_MASK 0x1 /* 1 */
+#define DEFM_RESTART_OFFSET 0x0 /* 0 */
+#define DEFM_RESTART_WIDTH 0x1 /* 1 */
+#define DEFM_RESTART_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_READY_ADDR 0x6000 /* 24576 */
+#define DEFM_READY_MASK 0x2 /* 2 */
+#define DEFM_READY_OFFSET 0x1 /* 1 */
+#define DEFM_READY_WIDTH 0x1 /* 1 */
+#define DEFM_READY_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_ERR_PACKET_FILTER_ADDR 0x6004 /* 24580 */
+#define DEFM_ERR_PACKET_FILTER_MASK 0x3 /* 3 */
+#define DEFM_ERR_PACKET_FILTER_OFFSET 0x0 /* 0 */
+#define DEFM_ERR_PACKET_FILTER_WIDTH 0x2 /* 2 */
+#define DEFM_ERR_PACKET_FILTER_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_ADDR 0x6008 /* 24584 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_MASK 0xff /* 255 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET 0x0 /* 0 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define DEFM_DATA_PKT_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_ADDR 0x600c /* 24588 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_MASK 0xff /* 255 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_OFFSET 0x0 /* 0 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_WIDTH 0x8 /* 8 */
+#define DEFM_CTRL_PKT_MESSAGE_TYPE_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_MIN_ADDR 0x6020 /* 24608 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_MAX_ADDR 0x6024 /* 24612 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_ADDR 0x602c /* 24620 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_ADDR 0x6030 /* 24624 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_ADDR 0x6034 /* 24628 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_ADDR 0x603c /* 24636 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_DATA_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_ADDR 0x6050 /* 24656 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_MIN_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_ADDR 0x6054 /* 24660 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_MAX_DEFAULT 0x78 /* 120 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_ADDR 0x605c /* 24668 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_LOW_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_ADDR 0x6060 /* 24672 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_MIN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_ADDR 0x6064 /* 24676 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_MAX_DEFAULT 0x4f /* 79 */
+
+/* Type = rw */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_ADDR 0x606c /* 24684 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_MASK 0xffffffff /* 4294967295 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_OFFSET 0x0 /* 0 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_WIDTH 0x20 /* 32 */
+#define DEFM_SN_CTRL_HIGH_CNT_INCVAL_DEFAULT 0x1 /* 1 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_31_0_ADDR 0x6100 /* 24832 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_63_32_ADDR 0x6104 /* 24836 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_95_64_ADDR 0x6108 /* 24840 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_127_96_ADDR 0x610c /* 24844 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W0_127_96_DEFAULT 0xfffffeae /* 4294966958 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W0_MASK_ADDR 0x6110 /* 24848 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W0_MASK_DEFAULT 0xcfff /* 53247 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_31_0_ADDR 0x6120 /* 24864 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_63_32_ADDR 0x6124 /* 24868 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_95_64_ADDR 0x6128 /* 24872 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_127_96_ADDR 0x612c /* 24876 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W1_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W1_MASK_ADDR 0x6130 /* 24880 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W1_MASK_DEFAULT 0xffff /* 65535 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_31_0_ADDR 0x6140 /* 24896 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_63_32_ADDR 0x6144 /* 24900 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_95_64_ADDR 0x6148 /* 24904 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_127_96_ADDR 0x614c /* 24908 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W2_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W2_MASK_ADDR 0x6150 /* 24912 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W2_MASK_DEFAULT 0xffff /* 65535 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_31_0_ADDR 0x6160 /* 24928 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_31_0_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_63_32_ADDR 0x6164 /* 24932 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_63_32_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_95_64_ADDR 0x6168 /* 24936 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_95_64_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_127_96_ADDR 0x616c /* 24940 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_MASK 0xffffffff /* 4294967295 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_WIDTH 0x20 /* 32 */
+#define DEFM_USER_DATA_FILTER_W3_127_96_DEFAULT 0xffffffff /* 4294967295 */
+
+/* Type = rwpdef */
+#define DEFM_USER_DATA_FILTER_W3_MASK_ADDR 0x6170 /* 24944 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_MASK 0xffff /* 65535 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_OFFSET 0x0 /* 0 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_WIDTH 0x10 /* 16 */
+#define DEFM_USER_DATA_FILTER_W3_MASK_DEFAULT 0xffff /* 65535 */
+
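The DEFM_USER_DATA_FILTER_Wn registers above each expose a 128-bit filter word
(four 32-bit slices) plus a 16-bit mask register. As a minimal, hedged sketch of
how such a filter word could be programmed through the generated _ADDR/_MASK
macros (the matching semantics are not documented in this header, and `base` is
an assumed ioremapped register base rather than a symbol from this patch):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Program filter word 0: four 32-bit slices, low word first, then the mask */
    static void xroe_set_filter_w0(void __iomem *base, const u32 pattern[4], u16 mask)
    {
            iowrite32(pattern[0], base + DEFM_USER_DATA_FILTER_W0_31_0_ADDR);
            iowrite32(pattern[1], base + DEFM_USER_DATA_FILTER_W0_63_32_ADDR);
            iowrite32(pattern[2], base + DEFM_USER_DATA_FILTER_W0_95_64_ADDR);
            iowrite32(pattern[3], base + DEFM_USER_DATA_FILTER_W0_127_96_ADDR);
            iowrite32(mask & DEFM_USER_DATA_FILTER_W0_MASK_MASK,
                      base + DEFM_USER_DATA_FILTER_W0_MASK_ADDR);
    }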
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_defm_drp
+ * with prefix defm_drp @ address 0x8000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rw */
+#define DEFM_DRPDEFM_DATA_PC_ID_ADDR 0x8000 /* 32768 */
+#define DEFM_DRPDEFM_DATA_PC_ID_MASK 0xffff /* 65535 */
+#define DEFM_DRPDEFM_DATA_PC_ID_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_DATA_PC_ID_WIDTH 0x10 /* 16 */
+#define DEFM_DRPDEFM_DATA_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define DEFM_DRPDEFM_CTRL_PC_ID_ADDR 0x8400 /* 33792 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_MASK 0xffff /* 65535 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_WIDTH 0x10 /* 16 */
+#define DEFM_DRPDEFM_CTRL_PC_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_MASK 0xffffff /* 16777215 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_WIDTH 0x18 /* 24 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_LATENCY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_MASK 0x1000000 /* 16777216 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_OFFSET 0x18 /* 24 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_ALIGNMENT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_MASK 0x2000000 /* 33554432 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_OFFSET 0x19 /* 25 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_OVERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_MASK 0x4000000 /* 67108864 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_OFFSET 0x1a /* 26 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_UNDERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_MASK 0x8000000 /* 134217728 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_OFFSET 0x1b /* 27 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_REGULAR_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_ADDR 0x8800 /* 34816 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_MASK 0xf0000000 /* 4026531840 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_OFFSET 0x1c /* 28 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_WIDTH 0x4 /* 4 */
+#define DEFM_DRPDEFM_DATA_BUFFER_STATE_RWIN_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_MASK 0xffffff /* 16777215 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_OFFSET 0x0 /* 0 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_WIDTH 0x18 /* 24 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_LATENCY_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_MASK 0x1000000 /* 16777216 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_OFFSET 0x18 /* 24 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_ALIGNMENT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_MASK 0x2000000 /* 33554432 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_OFFSET 0x19 /* 25 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_OVERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_MASK 0x4000000 /* 67108864 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_OFFSET 0x1a /* 26 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_UNDERFLOW_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_MASK 0x8000000 /* 134217728 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_OFFSET 0x1b /* 27 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_WIDTH 0x1 /* 1 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_REGULAR_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_ADDR 0x9800 /* 38912 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_MASK 0xf0000000 /* 4026531840 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_OFFSET 0x1c /* 28 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_WIDTH 0x4 /* 4 */
+#define DEFM_DRPDEFM_CTRL_BUFFER_STATE_RWIN_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_eth
+ * with prefix eth_ @ address 0xa000
+ *------------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define ETH_DEST_ADDR_31_0_ADDR 0xa000 /* 40960 */
+#define ETH_DEST_ADDR_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_DEST_ADDR_31_0_OFFSET 0x0 /* 0 */
+#define ETH_DEST_ADDR_31_0_WIDTH 0x20 /* 32 */
+#define ETH_DEST_ADDR_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_DEST_ADDR_47_32_ADDR 0xa004 /* 40964 */
+#define ETH_DEST_ADDR_47_32_MASK 0xffff /* 65535 */
+#define ETH_DEST_ADDR_47_32_OFFSET 0x0 /* 0 */
+#define ETH_DEST_ADDR_47_32_WIDTH 0x10 /* 16 */
+#define ETH_DEST_ADDR_47_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_SRC_ADDR_31_0_ADDR 0xa008 /* 40968 */
+#define ETH_SRC_ADDR_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_SRC_ADDR_31_0_OFFSET 0x0 /* 0 */
+#define ETH_SRC_ADDR_31_0_WIDTH 0x20 /* 32 */
+#define ETH_SRC_ADDR_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_SRC_ADDR_47_32_ADDR 0xa00c /* 40972 */
+#define ETH_SRC_ADDR_47_32_MASK 0xffff /* 65535 */
+#define ETH_SRC_ADDR_47_32_OFFSET 0x0 /* 0 */
+#define ETH_SRC_ADDR_47_32_WIDTH 0x10 /* 16 */
+#define ETH_SRC_ADDR_47_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_VLAN_ID_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_ID_MASK 0xfff /* 4095 */
+#define ETH_VLAN_ID_OFFSET 0x0 /* 0 */
+#define ETH_VLAN_ID_WIDTH 0xc /* 12 */
+#define ETH_VLAN_ID_DEFAULT 0x1 /* 1 */
+
+/* Type = rwpdef */
+#define ETH_VLAN_DEI_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_DEI_MASK 0x1000 /* 4096 */
+#define ETH_VLAN_DEI_OFFSET 0xc /* 12 */
+#define ETH_VLAN_DEI_WIDTH 0x1 /* 1 */
+#define ETH_VLAN_DEI_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_VLAN_PCP_ADDR 0xa010 /* 40976 */
+#define ETH_VLAN_PCP_MASK 0xe000 /* 57344 */
+#define ETH_VLAN_PCP_OFFSET 0xd /* 13 */
+#define ETH_VLAN_PCP_WIDTH 0x3 /* 3 */
+#define ETH_VLAN_PCP_DEFAULT 0x7 /* 7 */
+
+/* Type = rw */
+#define ETH_IPV4_VERSION_ADDR 0xa030 /* 41008 */
+#define ETH_IPV4_VERSION_MASK 0xf /* 15 */
+#define ETH_IPV4_VERSION_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_VERSION_WIDTH 0x4 /* 4 */
+#define ETH_IPV4_VERSION_DEFAULT 0x4 /* 4 */
+
+/* Type = rw */
+#define ETH_IPV4_IHL_ADDR 0xa030 /* 41008 */
+#define ETH_IPV4_IHL_MASK 0xf0 /* 240 */
+#define ETH_IPV4_IHL_OFFSET 0x4 /* 4 */
+#define ETH_IPV4_IHL_WIDTH 0x4 /* 4 */
+#define ETH_IPV4_IHL_DEFAULT 0x5 /* 5 */
+
+/* Type = rw */
+#define ETH_IPV4_DSCP_ADDR 0xa034 /* 41012 */
+#define ETH_IPV4_DSCP_MASK 0x3f /* 63 */
+#define ETH_IPV4_DSCP_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_DSCP_WIDTH 0x6 /* 6 */
+#define ETH_IPV4_DSCP_DEFAULT 0x2e /* 46 */
+
+/* Type = rw */
+#define ETH_IPV4_ECN_ADDR 0xa034 /* 41012 */
+#define ETH_IPV4_ECN_MASK 0xc0 /* 192 */
+#define ETH_IPV4_ECN_OFFSET 0x6 /* 6 */
+#define ETH_IPV4_ECN_WIDTH 0x2 /* 2 */
+#define ETH_IPV4_ECN_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV4_ID_ADDR 0xa038 /* 41016 */
+#define ETH_IPV4_ID_MASK 0xffff /* 65535 */
+#define ETH_IPV4_ID_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_ID_WIDTH 0x10 /* 16 */
+#define ETH_IPV4_ID_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV4_FLAGS_ADDR 0xa03c /* 41020 */
+#define ETH_IPV4_FLAGS_MASK 0x7 /* 7 */
+#define ETH_IPV4_FLAGS_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_FLAGS_WIDTH 0x3 /* 3 */
+#define ETH_IPV4_FLAGS_DEFAULT 0x2 /* 2 */
+
+/* Type = rw */
+#define ETH_IPV4_FRAGMENT_OFFSET_ADDR 0xa03c /* 41020 */
+#define ETH_IPV4_FRAGMENT_OFFSET_MASK 0x1fff8 /* 131064 */
+#define ETH_IPV4_FRAGMENT_OFFSET_OFFSET 0x3 /* 3 */
+#define ETH_IPV4_FRAGMENT_OFFSET_WIDTH 0xe /* 14 */
+#define ETH_IPV4_FRAGMENT_OFFSET_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV4_TIME_TO_LIVE_ADDR 0xa040 /* 41024 */
+#define ETH_IPV4_TIME_TO_LIVE_MASK 0xff /* 255 */
+#define ETH_IPV4_TIME_TO_LIVE_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_TIME_TO_LIVE_WIDTH 0x8 /* 8 */
+#define ETH_IPV4_TIME_TO_LIVE_DEFAULT 0x40 /* 64 */
+
+/* Type = rw */
+#define ETH_IPV4_PROTOCOL_ADDR 0xa044 /* 41028 */
+#define ETH_IPV4_PROTOCOL_MASK 0xff /* 255 */
+#define ETH_IPV4_PROTOCOL_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_PROTOCOL_WIDTH 0x8 /* 8 */
+#define ETH_IPV4_PROTOCOL_DEFAULT 0x11 /* 17 */
+
+/* Type = rwpdef */
+#define ETH_IPV4_SOURCE_ADD_ADDR 0xa048 /* 41032 */
+#define ETH_IPV4_SOURCE_ADD_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV4_SOURCE_ADD_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_SOURCE_ADD_WIDTH 0x20 /* 32 */
+#define ETH_IPV4_SOURCE_ADD_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV4_DESTINATION_ADD_ADDR 0xa04c /* 41036 */
+#define ETH_IPV4_DESTINATION_ADD_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV4_DESTINATION_ADD_OFFSET 0x0 /* 0 */
+#define ETH_IPV4_DESTINATION_ADD_WIDTH 0x20 /* 32 */
+#define ETH_IPV4_DESTINATION_ADD_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_UDP_SOURCE_PORT_ADDR 0xa050 /* 41040 */
+#define ETH_UDP_SOURCE_PORT_MASK 0xffff /* 65535 */
+#define ETH_UDP_SOURCE_PORT_OFFSET 0x0 /* 0 */
+#define ETH_UDP_SOURCE_PORT_WIDTH 0x10 /* 16 */
+#define ETH_UDP_SOURCE_PORT_DEFAULT 0x8000 /* 32768 */
+
+/* Type = rw */
+#define ETH_UDP_DESTINATION_PORT_ADDR 0xa050 /* 41040 */
+#define ETH_UDP_DESTINATION_PORT_MASK 0xffff0000 /* 4294901760 */
+#define ETH_UDP_DESTINATION_PORT_OFFSET 0x10 /* 16 */
+#define ETH_UDP_DESTINATION_PORT_WIDTH 0x10 /* 16 */
+#define ETH_UDP_DESTINATION_PORT_DEFAULT 0xc000 /* 49152 */
+
+/* Type = rw */
+#define ETH_IPV6_V_ADDR 0xa080 /* 41088 */
+#define ETH_IPV6_V_MASK 0xf /* 15 */
+#define ETH_IPV6_V_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_V_WIDTH 0x4 /* 4 */
+#define ETH_IPV6_V_DEFAULT 0x6 /* 6 */
+
+/* Type = rw */
+#define ETH_IPV6_TRAFFIC_CLASS_ADDR 0xa084 /* 41092 */
+#define ETH_IPV6_TRAFFIC_CLASS_MASK 0xff /* 255 */
+#define ETH_IPV6_TRAFFIC_CLASS_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_TRAFFIC_CLASS_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_TRAFFIC_CLASS_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV6_FLOW_LABEL_ADDR 0xa088 /* 41096 */
+#define ETH_IPV6_FLOW_LABEL_MASK 0xfffff /* 1048575 */
+#define ETH_IPV6_FLOW_LABEL_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_FLOW_LABEL_WIDTH 0x14 /* 20 */
+#define ETH_IPV6_FLOW_LABEL_DEFAULT 0x0 /* 0 */
+
+/* Type = rw */
+#define ETH_IPV6_NEXT_HEADER_ADDR 0xa08c /* 41100 */
+#define ETH_IPV6_NEXT_HEADER_MASK 0xff /* 255 */
+#define ETH_IPV6_NEXT_HEADER_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_NEXT_HEADER_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_NEXT_HEADER_DEFAULT 0x11 /* 17 */
+
+/* Type = rw */
+#define ETH_IPV6_HOP_LIMIT_ADDR 0xa090 /* 41104 */
+#define ETH_IPV6_HOP_LIMIT_MASK 0xff /* 255 */
+#define ETH_IPV6_HOP_LIMIT_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_HOP_LIMIT_WIDTH 0x8 /* 8 */
+#define ETH_IPV6_HOP_LIMIT_DEFAULT 0x40 /* 64 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_31_0_ADDR 0xa094 /* 41108 */
+#define ETH_IPV6_SOURCE_ADD_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_31_0_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_31_0_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_63_32_ADDR 0xa098 /* 41112 */
+#define ETH_IPV6_SOURCE_ADD_63_32_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_63_32_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_63_32_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_63_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_95_64_ADDR 0xa09c /* 41116 */
+#define ETH_IPV6_SOURCE_ADD_95_64_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_95_64_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_95_64_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_95_64_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_SOURCE_ADD_127_96_ADDR 0xa0a0 /* 41120 */
+#define ETH_IPV6_SOURCE_ADD_127_96_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_SOURCE_ADD_127_96_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_SOURCE_ADD_127_96_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_SOURCE_ADD_127_96_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_31_0_ADDR 0xa0a4 /* 41124 */
+#define ETH_IPV6_DEST_ADD_31_0_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_31_0_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_31_0_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_31_0_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_63_32_ADDR 0xa0a8 /* 41128 */
+#define ETH_IPV6_DEST_ADD_63_32_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_63_32_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_63_32_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_63_32_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_95_64_ADDR 0xa0ac /* 41132 */
+#define ETH_IPV6_DEST_ADD_95_64_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_95_64_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_95_64_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_95_64_DEFAULT 0x0 /* 0 */
+
+/* Type = rwpdef */
+#define ETH_IPV6_DEST_ADD_127_96_ADDR 0xa0b0 /* 41136 */
+#define ETH_IPV6_DEST_ADD_127_96_MASK 0xffffffff /* 4294967295 */
+#define ETH_IPV6_DEST_ADD_127_96_OFFSET 0x0 /* 0 */
+#define ETH_IPV6_DEST_ADD_127_96_WIDTH 0x20 /* 32 */
+#define ETH_IPV6_DEST_ADD_127_96_DEFAULT 0x0 /* 0 */
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_framer_v1_0_stats
+ * with prefix stats_ @ address 0xc000
+ *------------------------------------------------------------------------------
+ */
+/* Type = roSig */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_ADDR 0xc000 /* 49152 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_ADDR 0xc004 /* 49156 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_ADDR 0xc008 /* 49160 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_TOTAL_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_PACKETS_CNT_ADDR 0xc00c /* 49164 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_PACKETS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_ADDR 0xc010 /* 49168 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_ADDR 0xc014 /* 49172 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_ADDR 0xc018 /* 49176 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_ADDR 0xc01c /* 49180 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_PACKETS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_ADDR 0xc020 /* 49184 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_GOOD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_ADDR 0xc024 /* 49188 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_BAD_PKT_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_ADDR 0xc028 /* 49192 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_BAD_FCS_CNT_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_DATA_RX_PKTS_RATE_ADDR 0xc02c /* 49196 */
+#define STATS_USER_DATA_RX_PKTS_RATE_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_DATA_RX_PKTS_RATE_OFFSET 0x0 /* 0 */
+#define STATS_USER_DATA_RX_PKTS_RATE_WIDTH 0x20 /* 32 */
+#define STATS_USER_DATA_RX_PKTS_RATE_DEFAULT 0x0 /* 0 */
+
+/* Type = roSig */
+#define STATS_USER_CTRL_RX_PKTS_RATE_ADDR 0xc030 /* 49200 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_MASK 0xffffffff /* 4294967295 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_OFFSET 0x0 /* 0 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_WIDTH 0x20 /* 32 */
+#define STATS_USER_CTRL_RX_PKTS_RATE_DEFAULT 0x0 /* 0 */
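Every field in the banks above follows the same _ADDR/_MASK/_OFFSET/_WIDTH
convention, so reading a field is a 32-bit read followed by a mask and shift.
A minimal sketch, assuming an ioremapped register base `base` (not code taken
from this patch):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Poll the deframer "ready" flag: bit 1 of the register at 0x6000 */
    static bool xroe_defm_ready(void __iomem *base)
    {
            u32 reg = ioread32(base + DEFM_READY_ADDR);

            return !!((reg & DEFM_READY_MASK) >> DEFM_READY_OFFSET);
    }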
diff --git a/drivers/staging/xroeframer/sysfs_xroe.c b/drivers/staging/xroeframer/sysfs_xroe.c
new file mode 100644
index 000000000000..9caf5e50b02f
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe.c
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+/**
+ * version_show - Returns the block's revision number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the revision string
+ *
+ * Returns the block's major, minor & version revision numbers
+ * in a %d.%d.%d format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t version_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 major_rev;
+ u32 minor_rev;
+ u32 version_rev;
+
+ major_rev = utils_sysfs_show_wrapper(CFG_MAJOR_REVISION_ADDR,
+ CFG_MAJOR_REVISION_OFFSET,
+ CFG_MAJOR_REVISION_MASK, kobj);
+ minor_rev = utils_sysfs_show_wrapper(CFG_MINOR_REVISION_ADDR,
+ CFG_MINOR_REVISION_OFFSET,
+ CFG_MINOR_REVISION_MASK, kobj);
+ version_rev = utils_sysfs_show_wrapper(CFG_VERSION_REVISION_ADDR,
+ CFG_VERSION_REVISION_OFFSET,
+ CFG_VERSION_REVISION_MASK, kobj);
+ sprintf(buff, "%d.%d.%d\n", major_rev, minor_rev, version_rev);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * version_store - Writes to the framer version sysfs entry (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the revision string
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the framer version sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t version_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ return 0;
+}
+
+/**
+ * enable_show - Returns the framer's enable status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the enable status
+ *
+ * Reads the framer's enable status and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t enable_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 enable;
+
+ enable = utils_sysfs_show_wrapper(CFG_MASTER_INT_ENABLE_ADDR,
+ CFG_MASTER_INT_ENABLE_OFFSET,
+ CFG_MASTER_INT_ENABLE_MASK, kobj);
+ if (enable)
+ sprintf(buff, "true\n");
+ else
+ sprintf(buff, "false\n");
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * enable_store - Writes to the framer's enable status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the enable status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and writes the framer's enable status to the
+ * relevant register accordingly
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t enable_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 enable = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0)
+ enable = 1;
+ else if (strncmp(xroe_tmp, "false", xroe_size) == 0)
+ enable = 0;
+ utils_sysfs_store_wrapper(CFG_MASTER_INT_ENABLE_ADDR,
+ CFG_MASTER_INT_ENABLE_OFFSET,
+ CFG_MASTER_INT_ENABLE_MASK, enable, kobj);
+ return xroe_size;
+}
+
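The attribute group defined below is registered directly under kernel_kobj in
xroe_sysfs_init(), so these entries are expected to appear under
/sys/kernel/xroe/ (path assumed from the kobject names, not verified here).
A hedged userspace sketch of driving enable_store() through that entry:

    #include <fcntl.h>
    #include <unistd.h>

    /* Write one of the literal strings enable_store() compares against */
    static int xroe_set_enable(int on)
    {
            const char *val = on ? "true" : "false";
            int fd = open("/sys/kernel/xroe/enable", O_WRONLY);

            if (fd < 0)
                    return -1;
            if (write(fd, val, on ? 4 : 5) < 0) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }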
+/**
+ * framer_restart_show - Returns the framer's restart status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ *
+ * Reads the framer's restart status and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t framer_restart_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 restart;
+
+ restart = utils_sysfs_show_wrapper(FRAM_DISABLE_ADDR,
+ FRAM_DISABLE_OFFSET,
+ FRAM_DISABLE_MASK, kobj);
+ if (restart)
+ sprintf(buff, "true\n");
+
+ else
+ sprintf(buff, "false\n");
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * framer_restart_store - Writes to the framer's restart status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and writes the framer's restart status to the
+ * relevant register accordingly
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t framer_restart_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 restart = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0)
+ restart = 0x01;
+ else if (strncmp(xroe_tmp, "false", xroe_size) == 0)
+ restart = 0x00;
+ utils_sysfs_store_wrapper(FRAM_DISABLE_ADDR, FRAM_DISABLE_OFFSET,
+ FRAM_DISABLE_MASK, restart, kobj);
+ return xroe_size;
+}
+
+/**
+ * deframer_restart_show - Returns the deframer's restart status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ *
+ * Reads the deframer's restart status and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t deframer_restart_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 offset = DEFM_RESTART_OFFSET;
+ u32 mask = DEFM_RESTART_MASK;
+ u32 buffer = 0;
+ u32 restart = 0;
+ void __iomem *working_address = ((u8 *)lp->base_addr
+ + DEFM_RESTART_ADDR);
+
+ buffer = ioread32(working_address);
+ restart = (buffer & mask) >> offset;
+
+ if (restart)
+ sprintf(buff, "true\n");
+
+ else
+ sprintf(buff, "false\n");
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * deframer_restart_store - Writes to the deframer's restart status register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the restart status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and writes the deframer's restart status to the
+ * relevant register accordingly
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t deframer_restart_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = DEFM_RESTART_OFFSET;
+ u32 mask = DEFM_RESTART_MASK;
+ void __iomem *working_address = ((u8 *)lp->base_addr
+ + DEFM_RESTART_ADDR);
+ u32 restart = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0) {
+ restart = 0x01;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ } else if (strncmp(xroe_tmp, "false", xroe_size) == 0) {
+ restart = 0x00;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ }
+
+ return xroe_size;
+}
+
+/**
+ * xxv_reset_show - Returns the XXV's reset status
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ *
+ * Reads the XXV's reset status and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t xxv_reset_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 offset = CFG_USER_RW_OUT_OFFSET;
+ u32 mask = CFG_USER_RW_OUT_MASK;
+ u32 buffer = 0;
+ u32 restart = 0;
+ void __iomem *working_address = ((u8 *)lp->base_addr +
+ CFG_USER_RW_OUT_ADDR);
+
+ buffer = ioread32(working_address);
+ restart = (buffer & mask) >> offset;
+ if (restart)
+ sprintf(buff, "true\n");
+ else
+ sprintf(buff, "false\n");
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * xxv_reset_store - Writes to the XXV's reset register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and writes the XXV's reset status to the
+ * relevant register accordingly
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t xxv_reset_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = CFG_USER_RW_OUT_OFFSET;
+ u32 mask = CFG_USER_RW_OUT_MASK;
+ void __iomem *working_address = ((u8 *)lp->base_addr +
+ CFG_USER_RW_OUT_ADDR);
+ u32 restart = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+
+ if (strncmp(xroe_tmp, "true", xroe_size) == 0) {
+ restart = 0x01;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ } else if (strncmp(xroe_tmp, "false", xroe_size) == 0) {
+ restart = 0x00;
+ utils_write32withmask(working_address, restart,
+ mask, offset);
+ }
+ return xroe_size;
+}
+
+/**
+ * framing_show - Returns the current framing
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ *
+ * Reads the current framing type and writes it to the sysfs entry
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t framing_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 offset = (DEFM_DATA_PKT_MESSAGE_TYPE_ADDR +
+ DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET);
+ u8 buffer = 0;
+ u8 framing = 0xff;
+ void __iomem *working_address = ((u8 *)lp->base_addr + offset);
+
+ buffer = ioread8(working_address);
+ framing = buffer;
+ if (framing == 0)
+ sprintf(buff, "eCPRI\n");
+ else if (framing == 1)
+ sprintf(buff, "1914.3\n");
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * framing_store - Writes to the current framing register
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the reset status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and writes the current framing type to the
+ * relevant register accordingly
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t framing_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ u32 offset = (DEFM_DATA_PKT_MESSAGE_TYPE_ADDR +
+ DEFM_DATA_PKT_MESSAGE_TYPE_OFFSET);
+ void __iomem *working_address = ((u8 *)lp->base_addr + offset);
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (strncmp(xroe_tmp, "eCPRI", xroe_size) == 0)
+ iowrite8(0, working_address);
+ else if (strncmp(xroe_tmp, "1914.3", xroe_size) == 0)
+ iowrite8(1, working_address);
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, version_show, version_store);
+
+static struct kobj_attribute enable_attribute =
+ __ATTR(enable, 0660, enable_show, enable_store);
+
+static struct kobj_attribute framer_restart =
+ __ATTR(framer_restart, 0660, framer_restart_show, framer_restart_store);
+
+static struct kobj_attribute deframer_restart =
+ __ATTR(deframer_restart, 0660, deframer_restart_show,
+ deframer_restart_store);
+
+static struct kobj_attribute xxv_reset =
+ __ATTR(xxv_reset, 0660, xxv_reset_show, xxv_reset_store);
+
+static struct kobj_attribute framing_attribute =
+ __ATTR(framing, 0660, framing_show, framing_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &enable_attribute.attr,
+ &framer_restart.attr,
+ &deframer_restart.attr,
+ &xxv_reset.attr,
+ &framing_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *root_xroe_kobj;
+
+/**
+ * xroe_sysfs_init - Creates the xroe sysfs directory and entries
+ *
+ * Creates the xroe sysfs directory and entries, as well as the
+ * subdirectories for IPv4, IPv6 & UDP
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ */
+int xroe_sysfs_init(void)
+{
+ int ret;
+
+ root_xroe_kobj = kobject_create_and_add("xroe", kernel_kobj);
+ if (!root_xroe_kobj)
+ return -ENOMEM;
+ ret = sysfs_create_group(root_xroe_kobj, &attr_group);
+ if (ret) {
+ kobject_put(root_xroe_kobj);
+ return ret;
+ }
+ ret = xroe_sysfs_ipv4_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_ipv6_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_udp_init();
+ if (ret)
+ return ret;
+ ret = xroe_sysfs_stats_init();
+ return ret;
+}
+
+/**
+ * xroe_sysfs_exit - Deletes the xroe sysfs directory and entries
+ *
+ * Deletes the xroe sysfs directory and entries, as well as the
+ * subdirectories for IPv4, IPv6 & UDP
+ *
+ */
+void xroe_sysfs_exit(void)
+{
+ int i;
+
+ xroe_sysfs_ipv4_exit();
+ xroe_sysfs_ipv6_exit();
+ xroe_sysfs_udp_exit();
+ xroe_sysfs_stats_exit();
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_eth_ports[i]);
+ kobject_put(kobj_framer);
+ kobject_put(root_xroe_kobj);
+}
+
+/**
+ * utils_write32withmask - Writes a masked 32-bit value
+ * @working_address: The address of the register to write
+ * @value: The value to be written
+ * @mask: The mask to be used
+ * @offset: The bit offset of the field within the register
+ *
+ * Writes a 32-bit value to the provided address with the input mask
+ *
+ * Return: 0 on success
+ */
+int utils_write32withmask(void __iomem *working_address, u32 value,
+ u32 mask, u32 offset)
+{
+ u32 read_register_value = 0;
+ u32 register_value_to_write = 0;
+ u32 delta = 0, buffer = 0;
+
+ read_register_value = ioread32(working_address);
+ buffer = (value << offset);
+ register_value_to_write = read_register_value & ~mask;
+ delta = buffer & mask;
+ register_value_to_write |= delta;
+ iowrite32(register_value_to_write, working_address);
+ return 0;
+}
+
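For example, the helper above can update a sub-field in place using the
generated register macros; the value is passed unshifted and only the bits
under the mask change. A sketch with an arbitrary (hypothetical) field value,
reusing the driver's lp->base_addr pointer:

    /* Set the 4-bit protocol_definition field at 0x2200 to 1; the VLAN-tag
     * and IP-address-type bits sharing the register are left untouched.
     */
    void __iomem *addr = (u8 *)lp->base_addr + FRAM_PROTOCOL_DEFINITION_ADDR;

    utils_write32withmask(addr, 0x1, FRAM_PROTOCOL_DEFINITION_MASK,
                          FRAM_PROTOCOL_DEFINITION_OFFSET);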
+/**
+ * utils_sysfs_path_to_eth_port_num - Get the current ethernet port
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Extracts the number of the current ethernet port instance
+ *
+ * Return: The Ethernet port number parsed from the sysfs path, or 0 when
+ * no port number is present in the path
+ */
+static int utils_sysfs_path_to_eth_port_num(struct kobject *kobj)
+{
+ char *current_path = NULL;
+ int port;
+ int ret;
+
+ current_path = kobject_get_path(kobj, GFP_KERNEL);
+ ret = sscanf(current_path, "/kernel/xroe/framer/eth_port_%d/", &port);
+ /* if sscanf() returns 0, no fields were assigned, therefore no
+ * adjustments will be made for port number
+ */
+ if (ret == 0)
+ port = 0;
+ kfree(current_path);
+ return port;
+}
+
+/**
+ * utils_sysfs_store_wrapper - Wraps the storing function for sysfs entries
+ * @address: The address of the register to be written
+ * @offset: The bit offset of the field within the register
+ * @mask: The mask to be used on the value to be written
+ * @value: The value to be written to the register
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Wraps the core functionality of all "store" functions of sysfs entries.
+ * After calculating the ethernet port number (in N/A cases, it's 0), the value
+ * is written to the designated register
+ *
+ */
+void utils_sysfs_store_wrapper(u32 address, u32 offset, u32 mask, u32 value,
+ struct kobject *kobj)
+{
+ int port;
+ void __iomem *working_address;
+
+ port = utils_sysfs_path_to_eth_port_num(kobj);
+ working_address = (void __iomem *)(lp->base_addr +
+ (address + (0x100 * port)));
+ utils_write32withmask(working_address, value, mask, offset);
+}
+
+/**
+ * utils_sysfs_show_wrapper - Wraps the show function for sysfs entries
+ * @address: The address of the register to be read
+ * @offset: The bit offset of the field within the register
+ * @mask: The mask to be used on the value to be read
+ * @kobj: The kobject of the entry calling the function
+ *
+ * Wraps the core functionality of all "show" functions of sysfs entries.
+ * After calculating the ethernet port number (in N/A cases, it's 0), the value
+ * is read from the designated register and returned.
+ *
+ * Return: The value designated by the address, offset and mask
+ */
+u32 utils_sysfs_show_wrapper(u32 address, u32 offset, u32 mask,
+ struct kobject *kobj)
+{
+ int port;
+ void __iomem *working_address;
+ u32 buffer;
+
+ port = utils_sysfs_path_to_eth_port_num(kobj);
+ working_address = (void __iomem *)(lp->base_addr +
+ (address + (0x100 * port)));
+ buffer = ioread32(working_address);
+ return (buffer & mask) >> offset;
+}
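Both wrappers add 0x100 to the register address for every Ethernet port, so one
set of macros serves each eth_port_N sysfs directory. As a worked sketch (not
driver code), reading the VLAN ID for port 2 resolves to 0xa010 + 0x200:

    /* Equivalent of utils_sysfs_show_wrapper(ETH_VLAN_ID_ADDR, ...) for port 2 */
    void __iomem *addr = (void __iomem *)(lp->base_addr + ETH_VLAN_ID_ADDR + 0x100 * 2);
    u32 vlan_id = (ioread32(addr) & ETH_VLAN_ID_MASK) >> ETH_VLAN_ID_OFFSET;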
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c
new file mode 100644
index 000000000000..aaaefb10c597
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv4.c
@@ -0,0 +1,718 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+static void utils_ipv4addr_hextochar(u32 ip, unsigned char *bytes);
+static int utils_ipv4addr_chartohex(char *ip_addr, uint32_t *p_ip_addr);
+
+/**
+ * ipv4_version_show - Returns the IPv4 version number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 version number
+ *
+ * Returns the IPv4 version number
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 version;
+
+ version = utils_sysfs_show_wrapper(ETH_IPV4_VERSION_ADDR,
+ ETH_IPV4_VERSION_OFFSET,
+ ETH_IPV4_VERSION_MASK, kobj);
+ sprintf(buff, "%d\n", version);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_version_store - Writes to the IPv4 version number sysfs entry
+ * (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 version
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 version number sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t ipv4_version_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ return 0;
+}
+
+/**
+ * ipv4_ihl_show - Returns the IPv4 IHL
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 IHL
+ *
+ * Returns the IPv4 IHL
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_ihl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 ihl;
+
+ ihl = utils_sysfs_show_wrapper(ETH_IPV4_IHL_ADDR, ETH_IPV4_IHL_OFFSET,
+ ETH_IPV4_IHL_MASK, kobj);
+ sprintf(buff, "%d\n", ihl);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_ihl_store - Writes to the IPv4 IHL sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 IHL
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 IHL sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_ihl_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ihl;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ihl);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_IHL_ADDR, ETH_IPV4_IHL_OFFSET,
+ ETH_IPV4_IHL_MASK, ihl, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_dscp_show - Returns the IPv4 DSCP
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 DSCP
+ *
+ * Returns the IPv4 DSCP
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_dscp_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 dscp;
+
+ dscp = utils_sysfs_show_wrapper(ETH_IPV4_DSCP_ADDR,
+ ETH_IPV4_DSCP_OFFSET,
+ ETH_IPV4_DSCP_MASK, kobj);
+ sprintf(buff, "%d\n", dscp);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_dscp_store - Writes to the IPv4 DSCP sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 DSCP
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 DSCP sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_dscp_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 dscp;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &dscp);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_DSCP_ADDR, ETH_IPV4_DSCP_OFFSET,
+ ETH_IPV4_DSCP_MASK, dscp, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_ecn_show - Returns the IPv4 ECN
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ECN
+ *
+ * Returns the IPv4 ECN
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_ecn_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 ecn;
+
+ ecn = utils_sysfs_show_wrapper(ETH_IPV4_ECN_ADDR, ETH_IPV4_ECN_OFFSET,
+ ETH_IPV4_ECN_MASK, kobj);
+ sprintf(buff, "%d\n", ecn);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_ecn_store - Writes to the IPv4 ECN sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ECN
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 ECN sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_ecn_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ecn;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ecn);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_ECN_ADDR, ETH_IPV4_ECN_OFFSET,
+ ETH_IPV4_ECN_MASK, ecn, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_id_show - Returns the IPv4 ID
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ID
+ *
+ * Returns the IPv4 ID
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_id_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 id;
+
+ id = utils_sysfs_show_wrapper(ETH_IPV4_ID_ADDR, ETH_IPV4_ID_OFFSET,
+ ETH_IPV4_ID_MASK, kobj);
+ sprintf(buff, "%d\n", id);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_id_store - Writes to the IPv4 ID sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 ID
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 ID sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_id_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 id;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &id);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_ID_ADDR, ETH_IPV4_ID_OFFSET,
+ ETH_IPV4_ID_MASK, id, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_flags_show - Returns the IPv4 flags
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 flags
+ *
+ * Returns the IPv4 flags
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_flags_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 flags;
+
+ flags = utils_sysfs_show_wrapper(ETH_IPV4_FLAGS_ADDR,
+ ETH_IPV4_FLAGS_OFFSET,
+ ETH_IPV4_FLAGS_MASK, kobj);
+ sprintf(buff, "%d\n", flags);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_flags_store - Writes to the IPv4 flags sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 flags
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 flags sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_flags_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 flags;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &flags);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_FLAGS_ADDR, ETH_IPV4_FLAGS_OFFSET,
+ ETH_IPV4_FLAGS_MASK, flags, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_fragment_offset_show - Returns the IPv4 fragment offset
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 fragment offset
+ *
+ * Returns the IPv4 fragment offset
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_fragment_offset_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 fragment;
+
+ fragment = utils_sysfs_show_wrapper(ETH_IPV4_FRAGMENT_OFFSET_ADDR,
+ ETH_IPV4_FRAGMENT_OFFSET_OFFSET,
+ ETH_IPV4_FRAGMENT_OFFSET_MASK,
+ kobj);
+ sprintf(buff, "%d\n", fragment);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_fragment_offset_store - Writes to the IPv4 fragment offset sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 fragment offset
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 fragment offset sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_fragment_offset_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ int ret;
+ u32 fragment;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &fragment);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_FRAGMENT_OFFSET_ADDR,
+ ETH_IPV4_FRAGMENT_OFFSET_OFFSET,
+ ETH_IPV4_FRAGMENT_OFFSET_MASK, fragment,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_ttl_show - Returns the IPv4 TTL
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 TTL
+ *
+ * Returns the IPv4 TTL
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_ttl_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 ttl;
+
+ ttl = utils_sysfs_show_wrapper(ETH_IPV4_TIME_TO_LIVE_ADDR,
+ ETH_IPV4_TIME_TO_LIVE_OFFSET,
+ ETH_IPV4_TIME_TO_LIVE_MASK, kobj);
+ sprintf(buff, "%d\n", ttl);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_ttl_store - Writes to the IPv4 TTL sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 TTL
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 TTL sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_ttl_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff,
+ size_t count)
+{
+ int ret;
+ u32 ttl;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &ttl);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_TIME_TO_LIVE_ADDR,
+ ETH_IPV4_TIME_TO_LIVE_OFFSET,
+ ETH_IPV4_TIME_TO_LIVE_MASK, ttl, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_protocol_show - Returns the IPv4 protocol
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 protocol
+ *
+ * Returns the IPv4 protocol
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_protocol_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 protocol;
+
+ protocol = utils_sysfs_show_wrapper(ETH_IPV4_PROTOCOL_ADDR,
+ ETH_IPV4_PROTOCOL_OFFSET,
+ ETH_IPV4_PROTOCOL_MASK, kobj);
+ sprintf(buff, "%d\n", protocol);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_protocol_store - Writes to the IPv4 protocol sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 protocol
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 protocol sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_protocol_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 protocol;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &protocol);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV4_PROTOCOL_ADDR,
+ ETH_IPV4_PROTOCOL_OFFSET,
+ ETH_IPV4_PROTOCOL_MASK, protocol, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_source_address_show - Returns the IPv4 source address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 source address
+ *
+ * Returns the IPv4 source address in x.x.x.x format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_source_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 source_add = 0;
+ unsigned char ip_addr_char[4];
+
+ source_add = utils_sysfs_show_wrapper(ETH_IPV4_SOURCE_ADD_ADDR,
+ ETH_IPV4_SOURCE_ADD_OFFSET,
+ ETH_IPV4_SOURCE_ADD_MASK, kobj);
+ utils_ipv4addr_hextochar(source_add, ip_addr_char);
+ sprintf(buff, "%d.%d.%d.%d\n", ip_addr_char[3], ip_addr_char[2],
+ ip_addr_char[1], ip_addr_char[0]);
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_source_address_store - Writes to the IPv4 source address sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 source address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 source address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_source_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 source_add = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (utils_ipv4addr_chartohex(xroe_tmp, &source_add) == 4)
+ utils_sysfs_store_wrapper(ETH_IPV4_SOURCE_ADD_ADDR,
+ ETH_IPV4_SOURCE_ADD_OFFSET,
+ ETH_IPV4_SOURCE_ADD_MASK, source_add,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv4_destination_address_show - Returns the IPv4 destination address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 destination address
+ *
+ * Returns the IPv4 destination address in x.x.x.x format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv4_destination_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 dest_add = 0;
+ unsigned char ip_addr_char[4];
+
+ dest_add = utils_sysfs_show_wrapper(ETH_IPV4_DESTINATION_ADD_ADDR,
+ ETH_IPV4_DESTINATION_ADD_OFFSET,
+ ETH_IPV4_DESTINATION_ADD_MASK,
+ kobj);
+ utils_ipv4addr_hextochar(dest_add, ip_addr_char);
+ sprintf(buff, "%d.%d.%d.%d\n", ip_addr_char[3], ip_addr_char[2],
+ ip_addr_char[1], ip_addr_char[0]);
+
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv4_destination_address_store - Writes to the IPv4 destination address
+ * sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv4 destination address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv4 destination address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv4_destination_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 dest_add = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (utils_ipv4addr_chartohex(xroe_tmp, &dest_add) == 4)
+ utils_sysfs_store_wrapper(ETH_IPV4_DESTINATION_ADD_ADDR,
+ ETH_IPV4_DESTINATION_ADD_OFFSET,
+ ETH_IPV4_DESTINATION_ADD_MASK,
+ dest_add, kobj);
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, ipv4_version_show, ipv4_version_store);
+static struct kobj_attribute ihl_attribute =
+ __ATTR(ihl, 0660, ipv4_ihl_show, ipv4_ihl_store);
+static struct kobj_attribute dscp_attribute =
+ __ATTR(dscp, 0660, ipv4_dscp_show, ipv4_dscp_store);
+static struct kobj_attribute ecn_attribute =
+ __ATTR(ecn, 0660, ipv4_ecn_show, ipv4_ecn_store);
+static struct kobj_attribute id_attribute =
+ __ATTR(id, 0660, ipv4_id_show, ipv4_id_store);
+static struct kobj_attribute flags_attribute =
+ __ATTR(flags, 0660, ipv4_flags_show, ipv4_flags_store);
+static struct kobj_attribute fragment_offset_attribute =
+ __ATTR(fragment_offset, 0660, ipv4_fragment_offset_show,
+ ipv4_fragment_offset_store);
+static struct kobj_attribute ttl_attribute =
+ __ATTR(ttl, 0660, ipv4_ttl_show, ipv4_ttl_store);
+static struct kobj_attribute protocol_attribute =
+ __ATTR(protocol, 0660, ipv4_protocol_show, ipv4_protocol_store);
+static struct kobj_attribute source_add_attribute =
+ __ATTR(source_add, 0660, ipv4_source_address_show,
+ ipv4_source_address_store);
+static struct kobj_attribute destination_add_attribute =
+ __ATTR(dest_add, 0660, ipv4_destination_address_show,
+ ipv4_destination_address_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &ihl_attribute.attr,
+ &dscp_attribute.attr,
+ &ecn_attribute.attr,
+ &id_attribute.attr,
+ &flags_attribute.attr,
+ &fragment_offset_attribute.attr,
+ &ttl_attribute.attr,
+ &protocol_attribute.attr,
+ &source_add_attribute.attr,
+ &destination_add_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *kobj_framer;
+static struct kobject *kobj_ipv4[MAX_NUM_ETH_PORTS];
+struct kobject *kobj_eth_ports[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_ipv4_init - Creates the xroe sysfs "ipv4" subdirectory & entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "ipv4" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_ipv4_init(void)
+{
+ int ret;
+ int i;
+ char eth_port_dir_name[11];
+
+ kobj_framer = kobject_create_and_add("framer", root_xroe_kobj);
+ if (!kobj_framer)
+ return -ENOMEM;
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+ snprintf(eth_port_dir_name, sizeof(eth_port_dir_name),
+ "eth_port_%d", i);
+ kobj_eth_ports[i] = kobject_create_and_add(eth_port_dir_name,
+ kobj_framer);
+ if (!kobj_eth_ports[i])
+ return -ENOMEM;
+ kobj_ipv4[i] = kobject_create_and_add("ipv4",
+ kobj_eth_ports[i]);
+ if (!kobj_ipv4[i])
+ return -ENOMEM;
+ ret = sysfs_create_group(kobj_ipv4[i], &attr_group);
+ if (ret)
+ kobject_put(kobj_ipv4[i]);
+ }
+ return ret;
+}
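+
+/*
+ * The kobjects created above yield a sysfs hierarchy of the form below
+ * (assuming root_xroe_kobj is the top-level "xroe" kobject created by
+ * xroe_sysfs_init()):
+ *
+ * xroe/framer/eth_port_<N>/ipv4/{version,ihl,dscp,ecn,id,flags,
+ * fragment_offset,ttl,protocol,source_add,dest_add}
+ */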
+
+/**
+ * xroe_sysfs_ipv4_exit - Deletes the xroe sysfs "ipv4" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "ipv4" subdirectory and entries,
+ * under the "xroe" entry
+ */
+void xroe_sysfs_ipv4_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_ipv4[i]);
+}
+
+/**
+ * utils_ipv4addr_hextochar - Integer to char array for IPv4 addresses
+ * @ip: The IP address in integer format
+ * @bytes: The IP address in a 4-byte array
+ *
+ * Converts an IPv4 address given in unsigned integer format to a character
+ * array
+ */
+static void utils_ipv4addr_hextochar(u32 ip, unsigned char *bytes)
+{
+ bytes[0] = ip & 0xFF;
+ bytes[1] = (ip >> 8) & 0xFF;
+ bytes[2] = (ip >> 16) & 0xFF;
+ bytes[3] = (ip >> 24) & 0xFF;
+}
+
+/**
+ * utils_ipv4addr_chartohex - Character array to integer for IPv4 addresses
+ * @ip_addr: The character array containing the IP address
+ * @p_ip_addr: The converted IPv4 address
+ *
+ * Converts an IPv4 address given as a character array to integer format
+ *
+ * Return: 4 on success (the number of parsed address octets);
+ * any other value indicates invalid input
+ */
+static int utils_ipv4addr_chartohex(char *ip_addr, uint32_t *p_ip_addr)
+{
+ int count = 0, ret = -1;
+ char *string;
+ unsigned char *found;
+ u32 byte_array[4];
+ u32 byte = 0;
+
+ string = ip_addr;
+ while ((found = (unsigned char *)strsep(&string, ".")) != NULL) {
+ if (count <= 4) {
+ ret = kstrtouint(found, 10, &byte);
+ if (ret)
+ return ret;
+ byte_array[count] = byte;
+ } else {
+ break;
+ }
+ count++;
+ }
+
+ if (count == 4) {
+ ret = count;
+ *p_ip_addr = byte_array[3] | (byte_array[2] << 8)
+ | (byte_array[1] << 16) | (byte_array[0] << 24);
+ }
+ return ret;
+}
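+
+/*
+ * Worked example (illustrative only): the input string "192.168.10.1" is
+ * parsed into byte_array[] = {192, 168, 10, 1} and packed as
+ * byte_array[3] | (byte_array[2] << 8) | (byte_array[1] << 16) |
+ * (byte_array[0] << 24) = 0xC0A80A01, which is the byte order that
+ * utils_ipv4addr_hextochar() reverses in the _show handlers.
+ */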
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c
new file mode 100644
index 000000000000..c26eae426cc1
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_ipv6.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 60 };
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+static void utils_ipv6addr_32to16(u32 *ip32, uint16_t *ip16);
+static int utils_ipv6addr_chartohex(char *ip_addr, uint32_t *p_ip_addr);
+
+/**
+ * ipv6_version_show - Returns the IPv6 version number
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 version number
+ *
+ * Returns the IPv6 version number
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 version;
+
+ version = utils_sysfs_show_wrapper(ETH_IPV6_V_ADDR, ETH_IPV6_V_OFFSET,
+ ETH_IPV6_V_MASK, kobj);
+ sprintf(buff, "%d\n", version);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_version_store - Writes to the IPv6 version number sysfs entry
+ * (not permitted)
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 version
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 version number sysfs entry (not permitted)
+ *
+ * Return: 0
+ */
+static ssize_t ipv6_version_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ return 0;
+}
+
+/**
+ * ipv6_traffic_class_show - Returns the IPv6 traffic class
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 traffic class
+ *
+ * Returns the IPv6 traffic class
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_traffic_class_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 traffic_class;
+
+ traffic_class = utils_sysfs_show_wrapper(ETH_IPV6_TRAFFIC_CLASS_ADDR,
+ ETH_IPV6_TRAFFIC_CLASS_OFFSET,
+ ETH_IPV6_TRAFFIC_CLASS_MASK,
+ kobj);
+ sprintf(buff, "%d\n", traffic_class);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_traffic_class_store - Writes to the IPv6 traffic class
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 traffic class
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 traffic class sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_traffic_class_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 traffic_class;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &traffic_class);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_TRAFFIC_CLASS_ADDR,
+ ETH_IPV6_TRAFFIC_CLASS_OFFSET,
+ ETH_IPV6_TRAFFIC_CLASS_MASK, traffic_class,
+ kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_flow_label_show - Returns the IPv6 flow label
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 flow label
+ *
+ * Returns the IPv6 flow label
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_flow_label_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 flow_label;
+
+ flow_label = utils_sysfs_show_wrapper(ETH_IPV6_FLOW_LABEL_ADDR,
+ ETH_IPV6_FLOW_LABEL_OFFSET,
+ ETH_IPV6_FLOW_LABEL_MASK, kobj);
+ sprintf(buff, "%d\n", flow_label);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_flow_label_store - Writes to the IPv6 flow label
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 flow label
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 flow label sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_flow_label_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 flow_label;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &flow_label);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_FLOW_LABEL_ADDR,
+ ETH_IPV6_FLOW_LABEL_OFFSET,
+ ETH_IPV6_FLOW_LABEL_MASK, flow_label, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_next_header_show - Returns the IPv6 next header
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 next header
+ *
+ * Returns the IPv6 next header
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_next_header_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 next_header;
+
+ next_header = utils_sysfs_show_wrapper(ETH_IPV6_NEXT_HEADER_ADDR,
+ ETH_IPV6_NEXT_HEADER_OFFSET,
+ ETH_IPV6_NEXT_HEADER_MASK, kobj);
+ sprintf(buff, "%d\n", next_header);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_next_header_store - Writes to the IPv6 next header
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 next header
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 next header sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_next_header_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 next_header;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &next_header);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_NEXT_HEADER_ADDR,
+ ETH_IPV6_NEXT_HEADER_OFFSET,
+ ETH_IPV6_NEXT_HEADER_MASK, next_header, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_hop_limit_show - Returns the IPv6 hop limit
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 hop limit
+ *
+ * Returns the IPv6 hop limit
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_hop_limit_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 hop_limit;
+
+ hop_limit = utils_sysfs_show_wrapper(ETH_IPV6_HOP_LIMIT_ADDR,
+ ETH_IPV6_HOP_LIMIT_OFFSET,
+ ETH_IPV6_HOP_LIMIT_MASK, kobj);
+ sprintf(buff, "%d\n", hop_limit);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_hop_limit_store - Writes to the IPv6 hop limit
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 hop limit
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 hop limit sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_hop_limit_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ int ret;
+ u32 hop_limit;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &hop_limit);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_IPV6_HOP_LIMIT_ADDR,
+ ETH_IPV6_HOP_LIMIT_OFFSET,
+ ETH_IPV6_HOP_LIMIT_MASK, hop_limit, kobj);
+ return xroe_size;
+}
+
+/**
+ * ipv6_source_address_show - Returns the IPv6 source address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 source address
+ *
+ * Returns the IPv6 source address in xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx
+ * format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_source_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 source[4];
+ u16 source_add16[8];
+
+ source[0] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_31_0_ADDR,
+ ETH_IPV6_SOURCE_ADD_31_0_OFFSET,
+ ETH_IPV6_SOURCE_ADD_31_0_MASK,
+ kobj);
+ source[1] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_63_32_ADDR,
+ ETH_IPV6_SOURCE_ADD_63_32_OFFSET,
+ ETH_IPV6_SOURCE_ADD_63_32_MASK,
+ kobj);
+ source[2] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_95_64_ADDR,
+ ETH_IPV6_SOURCE_ADD_95_64_OFFSET,
+ ETH_IPV6_SOURCE_ADD_95_64_MASK,
+ kobj);
+ source[3] = utils_sysfs_show_wrapper(ETH_IPV6_SOURCE_ADD_127_96_ADDR,
+ ETH_IPV6_SOURCE_ADD_127_96_OFFSET,
+ ETH_IPV6_SOURCE_ADD_127_96_MASK,
+ kobj);
+
+ utils_ipv6addr_32to16(source, source_add16);
+ sprintf(buff, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ source_add16[0], source_add16[1], source_add16[2],
+ source_add16[3],
+ source_add16[4], source_add16[5], source_add16[6],
+ source_add16[7]);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_source_address_store - Writes to the IPv6 source address sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 source address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 source address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_source_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 source_add[4];
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (utils_ipv6addr_chartohex(xroe_tmp, source_add) == 8) {
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_31_0_ADDR,
+ ETH_IPV6_SOURCE_ADD_31_0_OFFSET,
+ ETH_IPV6_SOURCE_ADD_31_0_MASK,
+ source_add[0], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_63_32_ADDR,
+ ETH_IPV6_SOURCE_ADD_63_32_OFFSET,
+ ETH_IPV6_SOURCE_ADD_63_32_MASK,
+ source_add[1], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_95_64_ADDR,
+ ETH_IPV6_SOURCE_ADD_95_64_OFFSET,
+ ETH_IPV6_SOURCE_ADD_95_64_MASK,
+ source_add[2], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_SOURCE_ADD_127_96_ADDR,
+ ETH_IPV6_SOURCE_ADD_127_96_OFFSET,
+ ETH_IPV6_SOURCE_ADD_127_96_MASK,
+ source_add[3], kobj);
+ }
+ return xroe_size;
+}
+
+/**
+ * ipv6_destination_address_show - Returns the IPv6 destination address
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 destination address
+ *
+ * Returns the IPv6 destination address in
+ * xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx format
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t ipv6_destination_address_show
+(struct kobject *kobj, struct kobj_attribute *attr, char *buff)
+{
+ u32 dest[4];
+ u16 dest_add16[8];
+
+ dest[0] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_31_0_ADDR,
+ ETH_IPV6_DEST_ADD_31_0_OFFSET,
+ ETH_IPV6_DEST_ADD_31_0_MASK,
+ kobj);
+ dest[1] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_63_32_ADDR,
+ ETH_IPV6_DEST_ADD_63_32_OFFSET,
+ ETH_IPV6_DEST_ADD_63_32_MASK,
+ kobj);
+ dest[2] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_95_64_ADDR,
+ ETH_IPV6_DEST_ADD_95_64_OFFSET,
+ ETH_IPV6_DEST_ADD_95_64_MASK,
+ kobj);
+ dest[3] = utils_sysfs_show_wrapper(ETH_IPV6_DEST_ADD_127_96_ADDR,
+ ETH_IPV6_DEST_ADD_127_96_OFFSET,
+ ETH_IPV6_DEST_ADD_127_96_MASK,
+ kobj);
+
+ utils_ipv6addr_32to16(dest, dest_add16);
+ sprintf(buff, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+ dest_add16[0], dest_add16[1], dest_add16[2],
+ dest_add16[3],
+ dest_add16[4], dest_add16[5], dest_add16[6],
+ dest_add16[7]);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * ipv6_destination_address_store - Writes to the IPv6 destination address
+ * sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the IPv6 destination address
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the IPv6 destination address sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t ipv6_destination_address_store
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buff,
+size_t count)
+{
+ u32 dest_add[4];
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ strncpy(xroe_tmp, buff, xroe_size);
+ if (utils_ipv6addr_chartohex(xroe_tmp, dest_add) == 8) {
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_31_0_ADDR,
+ ETH_IPV6_DEST_ADD_31_0_OFFSET,
+ ETH_IPV6_DEST_ADD_31_0_MASK,
+ dest_add[0], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_63_32_ADDR,
+ ETH_IPV6_DEST_ADD_63_32_OFFSET,
+ ETH_IPV6_DEST_ADD_63_32_MASK,
+ dest_add[1], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_95_64_ADDR,
+ ETH_IPV6_DEST_ADD_95_64_OFFSET,
+ ETH_IPV6_DEST_ADD_95_64_MASK,
+ dest_add[2], kobj);
+ utils_sysfs_store_wrapper(ETH_IPV6_DEST_ADD_127_96_ADDR,
+ ETH_IPV6_DEST_ADD_127_96_OFFSET,
+ ETH_IPV6_DEST_ADD_127_96_MASK,
+ dest_add[3], kobj);
+ }
+ return xroe_size;
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute version_attribute =
+ __ATTR(version, 0444, ipv6_version_show, ipv6_version_store);
+static struct kobj_attribute traffic_class =
+ __ATTR(traffic_class, 0660, ipv6_traffic_class_show,
+ ipv6_traffic_class_store);
+static struct kobj_attribute flow_label =
+ __ATTR(flow_label, 0660, ipv6_flow_label_show, ipv6_flow_label_store);
+static struct kobj_attribute next_header =
+ __ATTR(next_header, 0660, ipv6_next_header_show,
+ ipv6_next_header_store);
+static struct kobj_attribute hop_limit =
+ __ATTR(hop_limit, 0660, ipv6_hop_limit_show, ipv6_hop_limit_store);
+static struct kobj_attribute source_add_attribute =
+ __ATTR(source_add, 0660, ipv6_source_address_show,
+ ipv6_source_address_store);
+static struct kobj_attribute dest_add_attribute =
+ __ATTR(dest_add, 0660, ipv6_destination_address_show,
+ ipv6_destination_address_store);
+
+static struct attribute *attrs[] = {
+ &version_attribute.attr,
+ &traffic_class.attr,
+ &flow_label.attr,
+ &next_header.attr,
+ &hop_limit.attr,
+ &source_add_attribute.attr,
+ &dest_add_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *kobj_ipv6[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_ipv6_init - Creates the xroe sysfs "ipv6" subdirectory & entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "ipv6" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_ipv6_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+ kobj_ipv6[i] = kobject_create_and_add("ipv6",
+ kobj_eth_ports[i]);
+ if (!kobj_ipv6[i])
+ return -ENOMEM;
+ ret = sysfs_create_group(kobj_ipv6[i], &attr_group);
+ if (ret)
+ kobject_put(kobj_ipv6[i]);
+ }
+ return ret;
+}
+
+/**
+ * xroe_sysfs_ipv6_exit - Deletes the xroe sysfs "ipv6" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "ipv6" subdirectory and entries,
+ * under the "xroe" entry
+ *
+ */
+void xroe_sysfs_ipv6_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_ipv6[i]);
+}
+
+/**
+ * utils_ipv6addr_32to16 - uint32_t to uint16_t for IPv6 addresses
+ * @ip32: The IPv6 address in uint32_t format
+ * @ip16: The IPv6 address in uint16_t format
+ *
+ * Converts an IPv6 address given in uint32_t format to uint16_t
+ */
+static void utils_ipv6addr_32to16(u32 *ip32, uint16_t *ip16)
+{
+ ip16[0] = ip32[0] >> 16;
+ ip16[1] = ip32[0] & 0x0000FFFF;
+ ip16[2] = ip32[1] >> 16;
+ ip16[3] = ip32[1] & 0x0000FFFF;
+ ip16[4] = ip32[2] >> 16;
+ ip16[5] = ip32[2] & 0x0000FFFF;
+ ip16[6] = ip32[3] >> 16;
+ ip16[7] = ip32[3] & 0x0000FFFF;
+}
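+
+/*
+ * Example (illustrative only): ip32[0] = 0x20010DB8 splits into
+ * ip16[0] = 0x2001 and ip16[1] = 0x0DB8, so the address is printed
+ * most significant 16 bits of each word first, e.g. "2001:0db8:...".
+ */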
+
+/**
+ * utils_ipv6addr_chartohex - Character array to integer for IPv6 addresses
+ * @ip_addr: The character array containing the IP address
+ * @p_ip_addr: The converted IPv6 address
+ *
+ * Converts an IPv6 address given as a character array to integer format
+ *
+ * Return: 8 on success (the number of parsed 16-bit fields);
+ * any other value indicates invalid input
+ */
+static int utils_ipv6addr_chartohex(char *ip_addr, uint32_t *p_ip_addr)
+{
+ int ret;
+ int count;
+ char *string;
+ unsigned char *found;
+ u16 ip_array_16[8];
+ u32 field;
+
+ ret = -1;
+ count = 0;
+ string = ip_addr;
+ while ((found = (unsigned char *)strsep(&string, ":")) != NULL) {
+ if (count <= 8) {
+ ret = kstrtouint(found, 16, &field);
+ if (ret)
+ return ret;
+ ip_array_16[count] = (uint16_t)field;
+ } else {
+ break;
+ }
+ count++;
+ }
+ if (count == 8) {
+ p_ip_addr[0] = ip_array_16[1] | (ip_array_16[0] << 16);
+ p_ip_addr[1] = ip_array_16[3] | (ip_array_16[2] << 16);
+ p_ip_addr[2] = ip_array_16[5] | (ip_array_16[4] << 16);
+ p_ip_addr[3] = ip_array_16[7] | (ip_array_16[6] << 16);
+ ret = count;
+ }
+ return ret;
+}
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c b/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c
new file mode 100644
index 000000000000..063664bb987a
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_stats.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+/**
+ * total_rx_good_pkt_show - Returns the total good rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_GOOD_PKT_CNT_ADDR,
+ STATS_TOTAL_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_TOTAL_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_pkt_show - Returns the total bad rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_BAD_PKT_CNT_ADDR,
+ STATS_TOTAL_RX_BAD_PKT_CNT_OFFSET,
+ STATS_TOTAL_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_fcs_show - Returns the total bad fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_TOTAL_RX_BAD_FCS_CNT_ADDR,
+ STATS_TOTAL_RX_BAD_FCS_CNT_OFFSET,
+ STATS_TOTAL_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_user_pkt_show - Returns the total user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_PACKETS_CNT_ADDR,
+ STATS_USER_DATA_RX_PACKETS_CNT_OFFSET,
+ STATS_USER_DATA_RX_PACKETS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_good_user_pkt_show - Returns the total good user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_GOOD_PKT_CNT_ADDR,
+ STATS_USER_DATA_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_USER_DATA_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_pkt_show - Returns the total bad user rx packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user rx packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_BAD_PKT_CNT_ADDR,
+ STATS_USER_DATA_RX_BAD_PKT_CNT_OFFSET,
+ STATS_USER_DATA_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_fcs_show - Returns the total bad user rx fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_BAD_FCS_CNT_ADDR,
+ STATS_USER_DATA_RX_BAD_FCS_CNT_OFFSET,
+ STATS_USER_DATA_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_user_ctrl_pkt_show - Returns the total user rx control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_PACKETS_CNT_ADDR,
+ STATS_USER_CTRL_RX_PACKETS_CNT_OFFSET,
+ STATS_USER_CTRL_RX_PACKETS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_good_user_ctrl_pkt_show - Returns the total good user rx
+ * control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total good user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_good_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_GOOD_PKT_CNT_ADDR,
+ STATS_USER_CTRL_RX_GOOD_PKT_CNT_OFFSET,
+ STATS_USER_CTRL_RX_GOOD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_ctrl_pkt_show - Returns the total bad user rx
+ * control packet count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user rx control packet count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_ctrl_pkt_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_BAD_PKT_CNT_ADDR,
+ STATS_USER_CTRL_RX_BAD_PKT_CNT_OFFSET,
+ STATS_USER_CTRL_RX_BAD_PKT_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * total_rx_bad_user_ctrl_fcs_show - Returns the total bad user rx
+ * control fcs count
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the total bad user control frame check sequences count
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t total_rx_bad_user_ctrl_fcs_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 count;
+
+ count = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_BAD_FCS_CNT_ADDR,
+ STATS_USER_CTRL_RX_BAD_FCS_CNT_OFFSET,
+ STATS_USER_CTRL_RX_BAD_FCS_CNT_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", count);
+}
+
+/**
+ * rx_user_pkt_rate_show - Returns the rate of user packets
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the user data rx packet rate
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t rx_user_pkt_rate_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ u32 rate;
+
+ rate = utils_sysfs_show_wrapper(STATS_USER_DATA_RX_PKTS_RATE_ADDR,
+ STATS_USER_DATA_RX_PKTS_RATE_OFFSET,
+ STATS_USER_DATA_RX_PKTS_RATE_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", rate);
+}
+
+/**
+ * rx_user_ctrl_pkt_rate_show - Returns the rate of user control packets
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer the value will be written to
+ *
+ * Returns the user control rx packet rate
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t rx_user_ctrl_pkt_rate_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 rate;
+
+ rate = utils_sysfs_show_wrapper(STATS_USER_CTRL_RX_PKTS_RATE_ADDR,
+ STATS_USER_CTRL_RX_PKTS_RATE_OFFSET,
+ STATS_USER_CTRL_RX_PKTS_RATE_MASK,
+ kobj);
+ return sprintf(buff, "%d\n", rate);
+}
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+static struct kobj_attribute total_rx_good_pkt_attribute =
+ __ATTR(total_rx_good_pkt, 0444, total_rx_good_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_pkt_attribute =
+ __ATTR(total_rx_bad_pkt, 0444, total_rx_bad_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_fcs_attribute =
+ __ATTR(total_rx_bad_fcs, 0444, total_rx_bad_fcs_show, NULL);
+static struct kobj_attribute total_rx_user_pkt_attribute =
+ __ATTR(total_rx_user_pkt, 0444, total_rx_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_good_user_pkt_attribute =
+ __ATTR(total_rx_good_user_pkt, 0444, total_rx_good_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_pkt_attribute =
+ __ATTR(total_rx_bad_user_pkt, 0444, total_rx_bad_user_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_fcs_attribute =
+ __ATTR(total_rx_bad_user_fcs, 0444, total_rx_bad_user_fcs_show, NULL);
+static struct kobj_attribute total_rx_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_user_ctrl_pkt, 0444, total_rx_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_good_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_good_user_ctrl_pkt, 0444,
+ total_rx_good_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_ctrl_pkt_attribute =
+ __ATTR(total_rx_bad_user_ctrl_pkt, 0444,
+ total_rx_bad_user_ctrl_pkt_show, NULL);
+static struct kobj_attribute total_rx_bad_user_ctrl_fcs_attribute =
+ __ATTR(total_rx_bad_user_ctrl_fcs, 0444,
+ total_rx_bad_user_ctrl_fcs_show, NULL);
+static struct kobj_attribute rx_user_pkt_rate_attribute =
+ __ATTR(rx_user_pkt_rate, 0444, rx_user_pkt_rate_show, NULL);
+static struct kobj_attribute rx_user_ctrl_pkt_rate_attribute =
+ __ATTR(rx_user_ctrl_pkt_rate, 0444, rx_user_ctrl_pkt_rate_show, NULL);
+
+static struct attribute *attrs[] = {
+ &total_rx_good_pkt_attribute.attr,
+ &total_rx_bad_pkt_attribute.attr,
+ &total_rx_bad_fcs_attribute.attr,
+ &total_rx_user_pkt_attribute.attr,
+ &total_rx_good_user_pkt_attribute.attr,
+ &total_rx_bad_user_pkt_attribute.attr,
+ &total_rx_bad_user_fcs_attribute.attr,
+ &total_rx_user_ctrl_pkt_attribute.attr,
+ &total_rx_good_user_ctrl_pkt_attribute.attr,
+ &total_rx_bad_user_ctrl_pkt_attribute.attr,
+ &total_rx_bad_user_ctrl_fcs_attribute.attr,
+ &rx_user_pkt_rate_attribute.attr,
+ &rx_user_ctrl_pkt_rate_attribute.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+struct kobject *kobj_stats;
+
+/**
+ * xroe_sysfs_stats_init - Creates the xroe sysfs "stats" subdirectory & entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "stats" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_stats_init(void)
+{
+ int ret;
+
+ kobj_stats = kobject_create_and_add("stats", root_xroe_kobj);
+ if (!kobj_stats)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(kobj_stats, &attr_group);
+ if (ret)
+ kobject_put(kobj_stats);
+
+ return ret;
+}
+
+/**
+ * xroe_sysfs_stats_exit - Deletes the xroe sysfs "ipv4" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "stats" subdirectory and entries,
+ * under the "xroe" entry
+ */
+void xroe_sysfs_stats_exit(void)
+{
+ kobject_put(kobj_stats);
+}
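+
+/*
+ * Minimal userspace sketch for reading one of the counters above. The path
+ * is an assumption (it depends on where root_xroe_kobj is created); headers
+ * and error handling are omitted for brevity:
+ *
+ *	int fd = open("/sys/kernel/xroe/stats/total_rx_good_pkt", O_RDONLY);
+ *	char buf[16] = { 0 };
+ *
+ *	if (fd >= 0) {
+ *		if (read(fd, buf, sizeof(buf) - 1) > 0)
+ *			printf("good rx packets: %s", buf);
+ *		close(fd);
+ *	}
+ */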
diff --git a/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c b/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c
new file mode 100644
index 000000000000..8f8a77b25da7
--- /dev/null
+++ b/drivers/staging/xroeframer/sysfs_xroe_framer_udp.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "xroe_framer.h"
+
+enum { XROE_SIZE_MAX = 15 };
+static int xroe_size;
+
+/**
+ * udp_source_port_show - Returns the UDP source port
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP source port
+ *
+ * Returns the UDP source port
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t udp_source_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 source_port;
+
+ source_port = utils_sysfs_show_wrapper(ETH_UDP_SOURCE_PORT_ADDR,
+ ETH_UDP_SOURCE_PORT_OFFSET,
+ ETH_UDP_SOURCE_PORT_MASK, kobj);
+ sprintf(buff, "%d\n", source_port);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * udp_source_port_store - Writes to the UDP source port sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP source port
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the UDP source port sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t udp_source_port_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 source_port;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &source_port);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_UDP_SOURCE_PORT_ADDR,
+ ETH_UDP_SOURCE_PORT_OFFSET,
+ ETH_UDP_SOURCE_PORT_MASK, source_port, kobj);
+ return xroe_size;
+}
+
+/**
+ * udp_destination_port_show - Returns the UDP destination port
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP destination port
+ *
+ * Returns the UDP destination port
+ *
+ * Return: XROE_SIZE_MAX on success
+ */
+static ssize_t udp_destination_port_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ u32 dest_port;
+
+ dest_port = utils_sysfs_show_wrapper(ETH_UDP_DESTINATION_PORT_ADDR,
+ ETH_UDP_DESTINATION_PORT_OFFSET,
+ ETH_UDP_DESTINATION_PORT_MASK,
+ kobj);
+ sprintf(buff, "%d\n", dest_port);
+ return XROE_SIZE_MAX;
+}
+
+/**
+ * udp_destination_port_store - Writes to the UDP destination port sysfs entry
+ * @kobj: The kernel object of the entry
+ * @attr: The attributes of the kernel object
+ * @buff: The buffer containing the UDP destination port
+ * @count: The number of characters typed by the user
+ *
+ * Writes to the UDP destination port sysfs entry
+ *
+ * Return: XROE_SIZE_MAX or the value of "count", if that's lesser, on success
+ */
+static ssize_t udp_destination_port_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ int ret;
+ u32 dest_port;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX);
+ ret = kstrtouint(buff, 10, &dest_port);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(ETH_UDP_DESTINATION_PORT_ADDR,
+ ETH_UDP_DESTINATION_PORT_OFFSET,
+ ETH_UDP_DESTINATION_PORT_MASK, dest_port,
+ kobj);
+ return xroe_size;
+}
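+
+/*
+ * Illustrative usage sketch (the sysfs path is an assumption; it depends on
+ * where root_xroe_kobj is created): the written string is parsed with
+ * kstrtouint() above, so a plain decimal value is expected, e.g.:
+ *
+ *	int fd = open("/sys/kernel/xroe/framer/eth_port_0/udp/dest_port",
+ *		      O_WRONLY);
+ *
+ *	if (fd >= 0) {
+ *		dprintf(fd, "%u\n", 49152);
+ *		close(fd);
+ *	}
+ */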
+
+/* TODO Use DEVICE_ATTR/_RW/_RO macros */
+
+static struct kobj_attribute source_port =
+ __ATTR(source_port, 0660, udp_source_port_show,
+ udp_source_port_store);
+static struct kobj_attribute dest_port =
+ __ATTR(dest_port, 0660, udp_destination_port_show,
+ udp_destination_port_store);
+
+static struct attribute *attrs[] = {
+ &source_port.attr,
+ &dest_port.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+static struct kobject *kobj_udp[MAX_NUM_ETH_PORTS];
+
+/**
+ * xroe_sysfs_udp_init - Creates the xroe sysfs "udp" subdirectory and entries
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroe sysfs "udp" subdirectory and entries under "xroe"
+ */
+int xroe_sysfs_udp_init(void)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++) {
+ kobj_udp[i] = kobject_create_and_add("udp", kobj_eth_ports[i]);
+ if (!kobj_udp[i])
+ return -ENOMEM;
+ ret = sysfs_create_group(kobj_udp[i], &attr_group);
+ if (ret)
+ kobject_put(kobj_udp[i]);
+ }
+ return ret;
+}
+
+/**
+ * xroe_sysfs_ipv6_exit - Deletes the xroe sysfs "udp" subdirectory & entries
+ *
+ * Deletes the xroe sysfs "udp" subdirectory and entries,
+ * under the "xroe" entry
+ *
+ */
+void xroe_sysfs_udp_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_NUM_ETH_PORTS; i++)
+ kobject_put(kobj_udp[i]);
+}
diff --git a/drivers/staging/xroeframer/xroe_framer.c b/drivers/staging/xroeframer/xroe_framer.c
new file mode 100644
index 000000000000..dba7c69b010f
--- /dev/null
+++ b/drivers/staging/xroeframer/xroe_framer.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "xroe_framer.h"
+
+#define DRIVER_NAME "framer"
+
+/*
+ * TODO: to be made static as well, so that multiple instances can be used. As
+ * of now, the "lp" structure is shared among the multiple source files
+ */
+struct framer_local *lp;
+static struct platform_driver framer_driver;
+/*
+ * TODO: placeholder for the IRQ once it's been implemented
+ * in the framer block
+ */
+static irqreturn_t framer_irq(int irq, void *lp)
+{
+ return IRQ_HANDLED;
+}
+
+/**
+ * framer_probe - Probes the device tree to locate the framer block
+ * @pdev: The structure containing the device's details
+ *
+ * Probes the device tree to locate the framer block and maps it to
+ * the kernel virtual memory space
+ *
+ * Return: 0 on success or a negative errno on error.
+ */
+static int framer_probe(struct platform_device *pdev)
+{
+ struct resource *r_mem; /* IO mem resources */
+ struct resource *r_irq;
+ struct device *dev = &pdev->dev;
+ int rc = 0;
+
+ dev_dbg(dev, "Device Tree Probing\n");
+ lp = devm_kzalloc(&pdev->dev, sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ /* Get iospace for the device */
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->base_addr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
+
+ dev_set_drvdata(dev, lp);
+ xroe_sysfs_init();
+ /* Get IRQ for the device */
+ /*
+ * TODO: No IRQ *yet* in the DT from the framer block, as it's still
+ * under development. To be added once it's in the block, and also
+ * replace with platform_get_irq_byname()
+ */
+ r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r_irq) {
+ dev_info(dev, "no IRQ found\n");
+ /*
+ * TODO: Return non-zero (error) code on no IRQ found.
+ * To be implemented once the IRQ is in the block
+ */
+ return 0;
+ }
+ lp->irq = r_irq->start;
+ rc = devm_request_irq(dev, lp->irq, &framer_irq, 0, DRIVER_NAME, lp);
+ if (rc) {
+ dev_err(dev, "testmodule: Could not allocate interrupt %d.\n",
+ lp->irq);
+ /*
+ * TODO: Return non-zero (error) code on IRQ request failure.
+ * To be implemented once the IRQ is in the block
+ */
+ return 0;
+ }
+
+ return rc;
+}
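+
+/*
+ * Illustrative device tree node matched by this driver (the node name, unit
+ * address and reg values are placeholders; only the compatible string is
+ * defined by framer_of_match below):
+ *
+ *	roe_framer@a0000000 {
+ *		compatible = "xlnx,roe-framer-1.0";
+ *		reg = <0x0 0xa0000000 0x0 0x10000>;
+ *	};
+ */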
+
+/**
+ * framer_init - Registers the driver
+ *
+ * Return: 0 on success or a negative errno if driver registration fails
+ *
+ * Registers the framer driver and creates character device drivers
+ * for the whole block, as well as separate ones for stats and
+ * radio control.
+ */
+static int __init framer_init(void)
+{
+ int ret;
+
+ pr_debug("XROE framer driver init\n");
+
+ ret = platform_driver_register(&framer_driver);
+
+ return ret;
+}
+
+/**
+ * framer_exit - Destroys the driver
+ *
+ * Unregisters the framer driver and destroys the character
+ * device driver for the whole block, as well as the separate ones
+ * for stats and radio control.
+ */
+static void __exit framer_exit(void)
+{
+ xroe_sysfs_exit();
+ platform_driver_unregister(&framer_driver);
+ pr_info("XROE Framer exit\n");
+}
+
+module_init(framer_init);
+module_exit(framer_exit);
+
+static const struct of_device_id framer_of_match[] = {
+ { .compatible = "xlnx,roe-framer-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, framer_of_match);
+
+static struct platform_driver framer_driver = {
+ .driver = {
+ /*
+ * TODO: .name shouldn't be necessary, though removing
+ * it results in kernel panic. To investigate further
+ */
+ .name = DRIVER_NAME,
+ .of_match_table = framer_of_match,
+ },
+ .probe = framer_probe,
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("framer - Xilinx Radio over Ethernet Framer driver");
diff --git a/drivers/staging/xroeframer/xroe_framer.h b/drivers/staging/xroeframer/xroe_framer.h
new file mode 100644
index 000000000000..03b8bb39095c
--- /dev/null
+++ b/drivers/staging/xroeframer/xroe_framer.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+#include "roe_framer_ctrl.h"
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/stat.h> /* S_IRUSR, S_IWUSR */
+
+/* TODO: Remove hardcoded value of number of Ethernet ports and read the value
+ * from the device tree.
+ */
+#define MAX_NUM_ETH_PORTS 0x4
+/* TODO: to be made static as well, so that multiple instances can be used. As
+ * of now, the following 3 structures are shared among the multiple
+ * source files
+ */
+extern struct framer_local *lp;
+extern struct kobject *root_xroe_kobj;
+extern struct kobject *kobj_framer;
+extern struct kobject *kobj_eth_ports[MAX_NUM_ETH_PORTS];
+struct framer_local {
+ int irq;
+ unsigned long mem_start;
+ unsigned long mem_end;
+ void __iomem *base_addr;
+};
+
+int xroe_sysfs_init(void);
+int xroe_sysfs_ipv4_init(void);
+int xroe_sysfs_ipv6_init(void);
+int xroe_sysfs_udp_init(void);
+int xroe_sysfs_stats_init(void);
+void xroe_sysfs_exit(void);
+void xroe_sysfs_ipv4_exit(void);
+void xroe_sysfs_ipv6_exit(void);
+void xroe_sysfs_udp_exit(void);
+void xroe_sysfs_stats_exit(void);
+int utils_write32withmask(void __iomem *working_address, u32 value,
+ u32 mask, u32 offset);
+int utils_check_address_offset(u32 offset, size_t device_size);
+void utils_sysfs_store_wrapper(u32 address, u32 offset, u32 mask, u32 value,
+ struct kobject *kobj);
+u32 utils_sysfs_show_wrapper(u32 address, u32 offset, u32 mask,
+ struct kobject *kobj);
diff --git a/drivers/staging/xroetrafficgen/Kconfig b/drivers/staging/xroetrafficgen/Kconfig
new file mode 100644
index 000000000000..d2ead1483408
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Xilinx Radio over Ethernet Traffic Generator driver
+#
+
+config XROE_TRAFFIC_GEN
+ tristate "Xilinx Radio over Ethernet Traffic Generator driver"
+ help
+ The Traffic Generator is used for testing other RoE IP blocks
+ (currently the XRoE Framer) and simulates a radio antenna interface.
+ It generates rolling ramp data for eCPRI antenna paths. Each path is
+ tagged with the antenna number. The sink locks to this ramp data, then
+ checks that each subsequent value is as expected.
diff --git a/drivers/staging/xroetrafficgen/Makefile b/drivers/staging/xroetrafficgen/Makefile
new file mode 100644
index 000000000000..e180a9bbc589
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Radio over Ethernet Traffic Generator driver
+#
+obj-$(CONFIG_XROE_TRAFFIC_GEN) += xroe_traffic_gen.o
+
+xroe_traffic_gen-objs := xroe-traffic-gen.o \
+ xroe-traffic-gen-sysfs.o
diff --git a/drivers/staging/xroetrafficgen/README b/drivers/staging/xroetrafficgen/README
new file mode 100644
index 000000000000..1828426af847
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/README
@@ -0,0 +1,19 @@
+Xilinx Radio over Ethernet Traffic Generator driver
+===================================================
+
+About the RoE Framer Traffic Generator
+
+The Traffic Generator is used for testing other RoE IP blocks (currently
+the XRoE Framer) and simulates a radio antenna interface. It generates
+rolling ramp data for eCPRI antenna paths. Each path is tagged with the
+antenna number. The sink locks to this ramp data, then checks that each
+subsequent value is as expected.
+
+
+About the Linux Driver
+
+The RoE Traffic Generator Linux Driver provides sysfs access to control a
+simulated radio antenna interface.
+The driver is bound to the hardware through its Device Tree node (see
+"dt-binding.txt" for more information). When the driver is loaded, the
+general controls (such as sink lock, enable, loopback, etc.) are exposed
+under /sys/kernel/xroetrafficgen.
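+
+Example usage (illustrative only; the attribute names below follow the driver
+sources, but the exact sysfs path depends on how the device is probed and
+named):
+
+  echo 1 > /sys/kernel/xroetrafficgen/radio_enable
+  echo 1 > /sys/kernel/xroetrafficgen/radio_sink_enable
+  cat /sys/kernel/xroetrafficgen/antenna_status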
diff --git a/drivers/staging/xroetrafficgen/roe_radio_ctrl.h b/drivers/staging/xroetrafficgen/roe_radio_ctrl.h
new file mode 100644
index 000000000000..e093386f3e94
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/roe_radio_ctrl.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+/*-----------------------------------------------------------------------------
+ * C Header bank BASE definitions
+ *-----------------------------------------------------------------------------
+ */
+#define ROE_RADIO_CFG_BASE_ADDR 0x0
+#define ROE_RADIO_SOURCE_BASE_ADDR 0x1000
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_radio_cfg
+ * with prefix radio_ @ address 0x0
+ *-----------------------------------------------------------------------------
+ */
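+/*
+ * Each register field below is described by an _ADDR/_MASK/_OFFSET triplet
+ * (plus its width and reset default). A field is read back as
+ * (ioread32(base + _ADDR) & _MASK) >> _OFFSET, which is how the sysfs
+ * show/store wrappers in xroe-traffic-gen-sysfs.c access it.
+ */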
+/* Type = roInt */
+#define RADIO_ID_ADDR 0x0
+#define RADIO_ID_MASK 0xffffffff
+#define RADIO_ID_OFFSET 0x0
+#define RADIO_ID_WIDTH 0x20
+#define RADIO_ID_DEFAULT 0x120001
+
+/* Type = rw */
+#define RADIO_TIMEOUT_ENABLE_ADDR 0x4
+#define RADIO_TIMEOUT_ENABLE_MASK 0x1
+#define RADIO_TIMEOUT_ENABLE_OFFSET 0x0
+#define RADIO_TIMEOUT_ENABLE_WIDTH 0x1
+#define RADIO_TIMEOUT_ENABLE_DEFAULT 0x0
+
+/* Type = ro */
+#define RADIO_TIMEOUT_STATUS_ADDR 0x8
+#define RADIO_TIMEOUT_STATUS_MASK 0x1
+#define RADIO_TIMEOUT_STATUS_OFFSET 0x0
+#define RADIO_TIMEOUT_STATUS_WIDTH 0x1
+#define RADIO_TIMEOUT_STATUS_DEFAULT 0x1
+
+/* Type = rw */
+#define RADIO_TIMEOUT_VALUE_ADDR 0xc
+#define RADIO_TIMEOUT_VALUE_MASK 0xfff
+#define RADIO_TIMEOUT_VALUE_OFFSET 0x0
+#define RADIO_TIMEOUT_VALUE_WIDTH 0xc
+#define RADIO_TIMEOUT_VALUE_DEFAULT 0x80
+
+/* Type = rw */
+#define RADIO_GPIO_CDC_LEDMODE2_ADDR 0x10
+#define RADIO_GPIO_CDC_LEDMODE2_MASK 0x1
+#define RADIO_GPIO_CDC_LEDMODE2_OFFSET 0x0
+#define RADIO_GPIO_CDC_LEDMODE2_WIDTH 0x1
+#define RADIO_GPIO_CDC_LEDMODE2_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_GPIO_CDC_LEDGPIO_ADDR 0x10
+#define RADIO_GPIO_CDC_LEDGPIO_MASK 0x30
+#define RADIO_GPIO_CDC_LEDGPIO_OFFSET 0x4
+#define RADIO_GPIO_CDC_LEDGPIO_WIDTH 0x2
+#define RADIO_GPIO_CDC_LEDGPIO_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_GPIO_CDC_DIPSTATUS_ADDR 0x14
+#define RADIO_GPIO_CDC_DIPSTATUS_MASK 0xff
+#define RADIO_GPIO_CDC_DIPSTATUS_OFFSET 0x0
+#define RADIO_GPIO_CDC_DIPSTATUS_WIDTH 0x8
+#define RADIO_GPIO_CDC_DIPSTATUS_DEFAULT 0x0
+
+/* Type = wPlsH */
+#define RADIO_SW_TRIGGER_ADDR 0x20
+#define RADIO_SW_TRIGGER_MASK 0x1
+#define RADIO_SW_TRIGGER_OFFSET 0x0
+#define RADIO_SW_TRIGGER_WIDTH 0x1
+#define RADIO_SW_TRIGGER_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_CDC_ENABLE_ADDR 0x24
+#define RADIO_CDC_ENABLE_MASK 0x1
+#define RADIO_CDC_ENABLE_OFFSET 0x0
+#define RADIO_CDC_ENABLE_WIDTH 0x1
+#define RADIO_CDC_ENABLE_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_ADDR 0x24
+#define RADIO_CDC_ERROR_MASK 0x2
+#define RADIO_CDC_ERROR_OFFSET 0x1
+#define RADIO_CDC_ERROR_WIDTH 0x1
+#define RADIO_CDC_ERROR_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_ADDR 0x24
+#define RADIO_CDC_STATUS_MASK 0x4
+#define RADIO_CDC_STATUS_OFFSET 0x2
+#define RADIO_CDC_STATUS_WIDTH 0x1
+#define RADIO_CDC_STATUS_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_CDC_LOOPBACK_ADDR 0x28
+#define RADIO_CDC_LOOPBACK_MASK 0x1
+#define RADIO_CDC_LOOPBACK_OFFSET 0x0
+#define RADIO_CDC_LOOPBACK_WIDTH 0x1
+#define RADIO_CDC_LOOPBACK_DEFAULT 0x0
+
+/* Type = rw */
+#define RADIO_SINK_ENABLE_ADDR 0x2c
+#define RADIO_SINK_ENABLE_MASK 0x1
+#define RADIO_SINK_ENABLE_OFFSET 0x0
+#define RADIO_SINK_ENABLE_WIDTH 0x1
+#define RADIO_SINK_ENABLE_DEFAULT 0x1
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_31_0_ADDR 0x30
+#define RADIO_CDC_ERROR_31_0_MASK 0xffffffff
+#define RADIO_CDC_ERROR_31_0_OFFSET 0x0
+#define RADIO_CDC_ERROR_31_0_WIDTH 0x20
+#define RADIO_CDC_ERROR_31_0_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_63_32_ADDR 0x34
+#define RADIO_CDC_ERROR_63_32_MASK 0xffffffff
+#define RADIO_CDC_ERROR_63_32_OFFSET 0x0
+#define RADIO_CDC_ERROR_63_32_WIDTH 0x20
+#define RADIO_CDC_ERROR_63_32_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_95_64_ADDR 0x38
+#define RADIO_CDC_ERROR_95_64_MASK 0xffffffff
+#define RADIO_CDC_ERROR_95_64_OFFSET 0x0
+#define RADIO_CDC_ERROR_95_64_WIDTH 0x20
+#define RADIO_CDC_ERROR_95_64_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_ERROR_127_96_ADDR 0x3c
+#define RADIO_CDC_ERROR_127_96_MASK 0xffffffff
+#define RADIO_CDC_ERROR_127_96_OFFSET 0x0
+#define RADIO_CDC_ERROR_127_96_WIDTH 0x20
+#define RADIO_CDC_ERROR_127_96_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_31_0_ADDR 0x40
+#define RADIO_CDC_STATUS_31_0_MASK 0xffffffff
+#define RADIO_CDC_STATUS_31_0_OFFSET 0x0
+#define RADIO_CDC_STATUS_31_0_WIDTH 0x20
+#define RADIO_CDC_STATUS_31_0_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_63_32_ADDR 0x44
+#define RADIO_CDC_STATUS_63_32_MASK 0xffffffff
+#define RADIO_CDC_STATUS_63_32_OFFSET 0x0
+#define RADIO_CDC_STATUS_63_32_WIDTH 0x20
+#define RADIO_CDC_STATUS_63_32_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_95_64_ADDR 0x48
+#define RADIO_CDC_STATUS_95_64_MASK 0xffffffff
+#define RADIO_CDC_STATUS_95_64_OFFSET 0x0
+#define RADIO_CDC_STATUS_95_64_WIDTH 0x20
+#define RADIO_CDC_STATUS_95_64_DEFAULT 0x0
+
+/* Type = roSig */
+#define RADIO_CDC_STATUS_127_96_ADDR 0x4c
+#define RADIO_CDC_STATUS_127_96_MASK 0xffffffff
+#define RADIO_CDC_STATUS_127_96_OFFSET 0x0
+#define RADIO_CDC_STATUS_127_96_WIDTH 0x20
+#define RADIO_CDC_STATUS_127_96_DEFAULT 0x0
+
+/*-----------------------------------------------------------------------------
+ * C Header bank register definitions for bank roe_radio_source
+ * with prefix fram_ @ address 0x1000
+ *-----------------------------------------------------------------------------
+ */
+/* Type = rwpdef */
+#define FRAM_PACKET_DATA_SIZE_ADDR 0x1000
+#define FRAM_PACKET_DATA_SIZE_MASK 0x7f
+#define FRAM_PACKET_DATA_SIZE_OFFSET 0x0
+#define FRAM_PACKET_DATA_SIZE_WIDTH 0x7
+#define FRAM_PACKET_DATA_SIZE_DEFAULT 0x0
+
+/* Type = rwpdef */
+#define FRAM_PAUSE_DATA_SIZE_ADDR 0x1004
+#define FRAM_PAUSE_DATA_SIZE_MASK 0x7f
+#define FRAM_PAUSE_DATA_SIZE_OFFSET 0x0
+#define FRAM_PAUSE_DATA_SIZE_WIDTH 0x7
+#define FRAM_PAUSE_DATA_SIZE_DEFAULT 0x0
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c b/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c
new file mode 100644
index 000000000000..c9b05866fd78
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen-sysfs.c
@@ -0,0 +1,824 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include "roe_radio_ctrl.h"
+#include "xroe-traffic-gen.h"
+
+static int xroe_size;
+static char xroe_tmp[XROE_SIZE_MAX];
+
+/**
+ * utils_sysfs_store_wrapper - Wraps the storing function for sysfs entries
+ * @dev: The structure containing the device's information
+ * @address: The address of the register to be written
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be written
+ * @value: The value to be written to the register
+ *
+ * Wraps the core functionality of all "store" functions of sysfs entries.
+ */
+static void utils_sysfs_store_wrapper(struct device *dev, u32 address,
+ u32 offset, u32 mask, u32 value)
+{
+ void __iomem *working_address;
+ u32 read_register_value = 0;
+ u32 register_value_to_write = 0;
+ u32 delta = 0;
+ u32 buffer = 0;
+ struct xroe_traffic_gen_local *lp = dev_get_drvdata(dev);
+
+ working_address = (void __iomem *)(lp->base_addr + address);
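+ /*
+ * Read-modify-write of the masked field: clear the field in the
+ * current register value, then OR in the new value shifted to the
+ * field's bit offset.
+ */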
+ read_register_value = ioread32(working_address);
+ buffer = (value << offset);
+ register_value_to_write = read_register_value & ~mask;
+ delta = buffer & mask;
+ register_value_to_write |= delta;
+ iowrite32(register_value_to_write, working_address);
+}
+
+/**
+ * utils_sysfs_show_wrapper - Wraps the "show" function for sysfs entries
+ * @dev: The structure containing the device's information
+ * @address: The address of the register to be read
+ * @offset: The offset from the address of the register
+ * @mask: The mask to be used on the value to be read
+ *
+ * Wraps the core functionality of all "show" functions of sysfs entries.
+ *
+ * Return: The value designated by the address, offset and mask
+ */
+static u32 utils_sysfs_show_wrapper(struct device *dev, u32 address, u32 offset,
+ u32 mask)
+{
+ void __iomem *working_address;
+ u32 buffer;
+ struct xroe_traffic_gen_local *lp = dev_get_drvdata(dev);
+
+ working_address = (void __iomem *)(lp->base_addr + address);
+ buffer = ioread32(working_address);
+ return (buffer & mask) >> offset;
+}
+
+/**
+ * radio_id_show - Returns the block's ID number
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the ID number string
+ *
+ * Returns the traffic gen's ID (0x120001, i.e. 1179649, by default)
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_id;
+
+ radio_id = utils_sysfs_show_wrapper(dev, RADIO_ID_ADDR,
+ RADIO_ID_OFFSET,
+ RADIO_ID_MASK);
+ return sprintf(buf, "%d\n", radio_id);
+}
+static DEVICE_ATTR_RO(radio_id);
+
+/**
+ * timeout_enable_show - Returns the traffic gen's timeout enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's timeout enable status to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout_enable;
+
+ timeout_enable = utils_sysfs_show_wrapper(dev,
+ RADIO_TIMEOUT_ENABLE_ADDR,
+ RADIO_TIMEOUT_ENABLE_OFFSET,
+ RADIO_TIMEOUT_ENABLE_MASK);
+ return sprintf(buf, "%d\n", timeout_enable);
+}
+
+/**
+ * timeout_enable_store - Writes to the traffic gen's timeout enable
+ * status register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's timeout enable
+ * status to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t timeout_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 enable = 0;
+
+ xroe_size = min_t(size_t, count, (size_t)XROE_SIZE_MAX - 1);
+ strncpy(xroe_tmp, buf, xroe_size);
+ xroe_tmp[xroe_size] = '\0';
+ if (sysfs_streq(xroe_tmp, "true"))
+ enable = 1;
+ else if (sysfs_streq(xroe_tmp, "false"))
+ enable = 0;
+ utils_sysfs_store_wrapper(dev, RADIO_TIMEOUT_ENABLE_ADDR,
+ RADIO_TIMEOUT_ENABLE_OFFSET,
+ RADIO_TIMEOUT_ENABLE_MASK, enable);
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_enable);
+
+/**
+ * timeout_status_show - Returns the timeout status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the ID number string
+ *
+ * Returns the traffic gen's timeout status (0x1 by default)
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout;
+
+ timeout = utils_sysfs_show_wrapper(dev, RADIO_TIMEOUT_STATUS_ADDR,
+ RADIO_TIMEOUT_STATUS_OFFSET,
+ RADIO_TIMEOUT_STATUS_MASK);
+ return sprintf(buf, "%d\n", timeout);
+}
+static DEVICE_ATTR_RO(timeout_status);
+
+/**
+ * timeout_value_show - Returns the traffic gen's timeout value
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's timeout value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t timeout_value_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 timeout_value;
+
+ timeout_value = utils_sysfs_show_wrapper(dev, RADIO_TIMEOUT_VALUE_ADDR,
+ RADIO_TIMEOUT_VALUE_OFFSET,
+ RADIO_TIMEOUT_VALUE_MASK);
+ return sprintf(buf, "%d\n", timeout_value);
+}
+
+/**
+ * timeout_value_store - Writes to the traffic gen's timeout value
+ * register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's timeout value
+ * to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t timeout_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 timeout_value;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &timeout_value);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_TIMEOUT_VALUE_ADDR,
+ RADIO_TIMEOUT_VALUE_OFFSET,
+ RADIO_TIMEOUT_VALUE_MASK, timeout_value);
+ return count;
+}
+static DEVICE_ATTR_RW(timeout_value);
+
+/**
+ * ledmode_show - Returns the current LED mode
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's LED mode value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ledmode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 ledmode;
+
+ ledmode = utils_sysfs_show_wrapper(dev, RADIO_GPIO_CDC_LEDMODE2_ADDR,
+ RADIO_GPIO_CDC_LEDMODE2_OFFSET,
+ RADIO_GPIO_CDC_LEDMODE2_MASK);
+ return sprintf(buf, "%d\n", ledmode);
+}
+
+/**
+ * ledmode_store - Writes to the current LED mode register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's LED mode value
+ * to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t ledmode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 ledmode;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &ledmode);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_GPIO_CDC_LEDMODE2_ADDR,
+ RADIO_GPIO_CDC_LEDMODE2_OFFSET,
+ RADIO_GPIO_CDC_LEDMODE2_MASK, ledmode);
+ return count;
+}
+static DEVICE_ATTR_RW(ledmode);
+
+/**
+ * ledgpio_show - Returns the current LED gpio
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's LED gpio value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t ledgpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ u32 ledgpio;
+
+ ledgpio = utils_sysfs_show_wrapper(dev, RADIO_GPIO_CDC_LEDGPIO_ADDR,
+ RADIO_GPIO_CDC_LEDGPIO_OFFSET,
+ RADIO_GPIO_CDC_LEDGPIO_MASK);
+ return sprintf(buf, "%d\n", ledgpio);
+}
+
+/**
+ * ledgpio_store - Writes to the current LED gpio register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's LED gpio value
+ * to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t ledgpio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 ledgpio;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &ledgpio);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_GPIO_CDC_LEDGPIO_ADDR,
+ RADIO_GPIO_CDC_LEDGPIO_OFFSET,
+ RADIO_GPIO_CDC_LEDGPIO_MASK, ledgpio);
+ return count;
+}
+static DEVICE_ATTR_RW(ledgpio);
+
+/**
+ * dip_status_show - Returns the current DIP switch value
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the GPIO DIP switch value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t dip_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 dip_status;
+
+ dip_status = utils_sysfs_show_wrapper(dev, RADIO_GPIO_CDC_DIPSTATUS_ADDR,
+ RADIO_GPIO_CDC_DIPSTATUS_OFFSET,
+ RADIO_GPIO_CDC_DIPSTATUS_MASK);
+ return sprintf(buf, "0x%08x\n", dip_status);
+}
+static DEVICE_ATTR_RO(dip_status);
+
+/**
+ * sw_trigger_show - Returns the current SW trigger status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's SW trigger status value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t sw_trigger_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 sw_trigger;
+
+ sw_trigger = utils_sysfs_show_wrapper(dev, RADIO_SW_TRIGGER_ADDR,
+ RADIO_SW_TRIGGER_OFFSET,
+ RADIO_SW_TRIGGER_MASK);
+ return sprintf(buf, "%d\n", sw_trigger);
+}
+
+/**
+ * sw_trigger_store - Writes to the SW trigger status register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's SW trigger
+ * value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t sw_trigger_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 sw_trigger;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &sw_trigger);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_SW_TRIGGER_ADDR,
+ RADIO_SW_TRIGGER_OFFSET,
+ RADIO_SW_TRIGGER_MASK, sw_trigger);
+ return count;
+}
+static DEVICE_ATTR_RW(sw_trigger);
+
+/**
+ * radio_enable_show - Returns the current radio enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the enable status
+ *
+ * Reads and writes the traffic gen's radio enable value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_enable;
+
+ radio_enable = utils_sysfs_show_wrapper(dev, RADIO_CDC_ENABLE_ADDR,
+ RADIO_CDC_ENABLE_OFFSET,
+ RADIO_CDC_ENABLE_MASK);
+ return sprintf(buf, "%d\n", radio_enable);
+}
+
+/**
+ * radio_enable_store - Writes to the radio enable register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio enable
+ * value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 radio_enable;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &radio_enable);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_CDC_ENABLE_ADDR,
+ RADIO_CDC_ENABLE_OFFSET,
+ RADIO_CDC_ENABLE_MASK,
+ radio_enable);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_enable);
+
+/**
+ * radio_error_show - Returns the current radio error status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the error status
+ *
+ * Reads and writes the traffic gen's radio error value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_error;
+
+ radio_error = utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_ADDR,
+ RADIO_CDC_ERROR_OFFSET,
+ RADIO_CDC_ERROR_MASK);
+ return sprintf(buf, "%d\n", radio_error);
+}
+static DEVICE_ATTR_RO(radio_error);
+
+/**
+ * radio_status_show - Returns the current radio status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the status
+ *
+ * Reads and writes the traffic gen's radio status value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_status;
+
+ radio_status = utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_ADDR,
+ RADIO_CDC_STATUS_OFFSET,
+ RADIO_CDC_STATUS_MASK);
+ return sprintf(buf, "%d\n", radio_status);
+}
+static DEVICE_ATTR_RO(radio_status);
+
+/**
+ * radio_loopback_show - Returns the current radio loopback status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the loopback status
+ *
+ * Reads and writes the traffic gen's radio loopback value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_loopback_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 radio_loopback;
+
+ radio_loopback = utils_sysfs_show_wrapper(dev,
+ RADIO_CDC_LOOPBACK_ADDR,
+ RADIO_CDC_LOOPBACK_OFFSET,
+ RADIO_CDC_LOOPBACK_MASK);
+ return sprintf(buf, "%d\n", radio_loopback);
+}
+
+/**
+ * radio_loopback_store - Writes to the radio loopback register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio loopback
+ * value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_loopback_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 radio_loopback;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &radio_loopback);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_CDC_LOOPBACK_ADDR,
+ RADIO_CDC_LOOPBACK_OFFSET,
+ RADIO_CDC_LOOPBACK_MASK, radio_loopback);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_loopback);
+
+/**
+ * radio_sink_enable_show - Returns the current radio sink enable status
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the loopback status
+ *
+ * Reads and writes the traffic gen's radio sink enable value to the sysfs entry
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t radio_sink_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 sink_enable;
+
+ sink_enable = utils_sysfs_show_wrapper(dev, RADIO_SINK_ENABLE_ADDR,
+ RADIO_SINK_ENABLE_OFFSET,
+ RADIO_SINK_ENABLE_MASK);
+ return sprintf(buf, "%d\n", sink_enable);
+}
+
+/**
+ * radio_sink_enable_store - Writes to the radio sink enable register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's radio sink
+ * enable value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t radio_sink_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 sink_enable;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &sink_enable);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, RADIO_SINK_ENABLE_ADDR,
+ RADIO_SINK_ENABLE_OFFSET,
+ RADIO_SINK_ENABLE_MASK, sink_enable);
+ return count;
+}
+static DEVICE_ATTR_RW(radio_sink_enable);
+
+/**
+ * antenna_status_show - Returns the status for all antennas
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the ID number string
+ *
+ * Returns the traffic gen's status for all antennas
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t antenna_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 status_0_31;
+ u32 status_63_32;
+ u32 status_95_64;
+ u32 status_127_96;
+
+ status_0_31 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_31_0_ADDR,
+ RADIO_CDC_STATUS_31_0_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_31_0_MASK));
+ status_63_32 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_63_32_ADDR,
+ RADIO_CDC_STATUS_63_32_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_63_32_MASK));
+ status_95_64 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_95_64_ADDR,
+ RADIO_CDC_STATUS_95_64_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_95_64_MASK));
+ status_127_96 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_STATUS_127_96_ADDR,
+ RADIO_CDC_STATUS_127_96_OFFSET,
+ lower_32_bits(RADIO_CDC_STATUS_127_96_MASK));
+
+ return sprintf(buf, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ status_0_31, status_63_32, status_95_64, status_127_96);
+}
+static DEVICE_ATTR_RO(antenna_status);
+
+/**
+ * antenna_error_show - Returns the error for all antennas
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the ID number string
+ *
+ * Returns the traffic gen's error for all antennas
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t antenna_error_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 error_0_31;
+ u32 error_63_32;
+ u32 error_95_64;
+ u32 error_127_96;
+
+ error_0_31 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_31_0_ADDR,
+ RADIO_CDC_ERROR_31_0_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_31_0_MASK));
+ error_63_32 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_63_32_ADDR,
+ RADIO_CDC_ERROR_63_32_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_63_32_MASK));
+ error_95_64 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_95_64_ADDR,
+ RADIO_CDC_ERROR_95_64_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_95_64_MASK));
+ error_127_96 =
+ utils_sysfs_show_wrapper(dev, RADIO_CDC_ERROR_127_96_ADDR,
+ RADIO_CDC_ERROR_127_96_OFFSET,
+ lower_32_bits(RADIO_CDC_ERROR_127_96_MASK));
+
+ return sprintf(buf, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ error_0_31, error_63_32, error_95_64, error_127_96);
+}
+static DEVICE_ATTR_RO(antenna_error);
+
+/**
+ * framer_packet_size_show - Returns the size of the framer's packet
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the loopback status
+ *
+ * Reads and writes the traffic gen's framer packet size value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t framer_packet_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 packet_size;
+
+ packet_size = utils_sysfs_show_wrapper(dev, FRAM_PACKET_DATA_SIZE_ADDR,
+ FRAM_PACKET_DATA_SIZE_OFFSET,
+ FRAM_PACKET_DATA_SIZE_MASK);
+ return sprintf(buf, "%d\n", packet_size);
+}
+
+/**
+ * framer_packet_size_store - Writes to the framer's packet size register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's framer packet
+ * size value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t framer_packet_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 packet_size;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &packet_size);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, FRAM_PACKET_DATA_SIZE_ADDR,
+ FRAM_PACKET_DATA_SIZE_OFFSET,
+ FRAM_PACKET_DATA_SIZE_MASK, packet_size);
+ return count;
+}
+static DEVICE_ATTR_RW(framer_packet_size);
+
+/**
+ * framer_pause_size_show - Returns the size of the framer's pause
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the loopback status
+ *
+ * Reads and writes the traffic gen's framer pause size value
+ *
+ * Return: The number of characters printed on success
+ */
+static ssize_t framer_pause_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u32 pause_size;
+
+ pause_size = utils_sysfs_show_wrapper(dev, FRAM_PAUSE_DATA_SIZE_ADDR,
+ FRAM_PAUSE_DATA_SIZE_OFFSET,
+ FRAM_PAUSE_DATA_SIZE_MASK);
+ return sprintf(buf, "%d\n", pause_size);
+}
+
+/**
+ * framer_pause_size_store - Writes to the framer's pause size register
+ * @dev: The device's structure
+ * @attr: The attributes of the kernel object
+ * @buf: The buffer containing the timeout value
+ * @count: The number of characters typed by the user
+ *
+ * Reads the user input and accordingly writes the traffic gen's framer pause
+ * size value to the sysfs entry
+ *
+ * Return: The number of characters of the entry (count) on success
+ */
+static ssize_t framer_pause_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 pause_size;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &pause_size);
+ if (ret)
+ return ret;
+ utils_sysfs_store_wrapper(dev, FRAM_PAUSE_DATA_SIZE_ADDR,
+ FRAM_PAUSE_DATA_SIZE_OFFSET,
+ FRAM_PAUSE_DATA_SIZE_MASK, pause_size);
+ return count;
+}
+static DEVICE_ATTR_RW(framer_pause_size);
+
+static struct attribute *xroe_traffic_gen_attrs[] = {
+ &dev_attr_radio_id.attr,
+ &dev_attr_timeout_enable.attr,
+ &dev_attr_timeout_status.attr,
+ &dev_attr_timeout_value.attr,
+ &dev_attr_ledmode.attr,
+ &dev_attr_ledgpio.attr,
+ &dev_attr_dip_status.attr,
+ &dev_attr_sw_trigger.attr,
+ &dev_attr_radio_enable.attr,
+ &dev_attr_radio_error.attr,
+ &dev_attr_radio_status.attr,
+ &dev_attr_radio_loopback.attr,
+ &dev_attr_radio_sink_enable.attr,
+ &dev_attr_antenna_status.attr,
+ &dev_attr_antenna_error.attr,
+ &dev_attr_framer_packet_size.attr,
+ &dev_attr_framer_pause_size.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(xroe_traffic_gen);
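+/*
+ * ATTRIBUTE_GROUPS() generates the xroe_traffic_gen_group attribute group
+ * and the NULL-terminated xroe_traffic_gen_groups[] array dereferenced by
+ * the sysfs init/exit helpers below.
+ */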
+
+/**
+ * xroe_traffic_gen_sysfs_init - Creates the xroe sysfs directory and entries
+ * @dev: The device's structure
+ *
+ * Return: 0 on success, negative value in case of failure to
+ * create the sysfs group
+ *
+ * Creates the xroetrafficgen sysfs directory and entries
+ */
+int xroe_traffic_gen_sysfs_init(struct device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&dev->kobj, *xroe_traffic_gen_groups);
+ if (ret)
+ dev_err(dev, "sysfs creation failed\n");
+
+ return ret;
+}
+
+/**
+ * xroe_traffic_gen_sysfs_exit - Deletes the xroe sysfs directory and entries
+ * @dev: The device's structure
+ *
+ * Deletes the xroetrafficgen sysfs directory and entries
+ */
+void xroe_traffic_gen_sysfs_exit(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, *xroe_traffic_gen_groups);
+}
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen.c b/drivers/staging/xroetrafficgen/xroe-traffic-gen.c
new file mode 100644
index 000000000000..1ed6e488d38d
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "xroe-traffic-gen.h"
+
+#define DRIVER_NAME "xroe_traffic_gen"
+
+static struct platform_driver xroe_traffic_gen_driver;
+
+/**
+ * xroe_traffic_gen_probe - Probes the device tree to locate the traffic gen
+ * block
+ * @pdev: The structure containing the device's details
+ *
+ * Probes the device tree to locate the traffic gen block and maps it to
+ * the kernel virtual memory space
+ *
+ * Return: 0 on success or a negative errno on error.
+ */
+static int xroe_traffic_gen_probe(struct platform_device *pdev)
+{
+ struct xroe_traffic_gen_local *lp;
+ struct resource *r_mem; /* IO mem resources */
+ struct device *dev = &pdev->dev;
+
+ lp = devm_kzalloc(&pdev->dev, sizeof(*lp), GFP_KERNEL);
+ if (!lp)
+ return -ENOMEM;
+
+ /* Get iospace for the device */
+ /*
+ * TODO: Use platform_get_resource_byname() instead when the DT entry
+ * of the traffic gen block has been finalised (when it gets out of
+ * the development stage).
+ */
+ r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->base_addr = devm_ioremap_resource(&pdev->dev, r_mem);
+ if (IS_ERR(lp->base_addr))
+ return PTR_ERR(lp->base_addr);
+
+ dev_set_drvdata(dev, lp);
+ return xroe_traffic_gen_sysfs_init(dev);
+}
+
+/**
+ * xroe_traffic_gen_remove - Removes the sysfs entries created by the driver
+ * @pdev: The structure containing the device's details
+ *
+ * Removes the sysfs entries created by the driver
+ *
+ * Return: 0
+ */
+static int xroe_traffic_gen_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ xroe_traffic_gen_sysfs_exit(dev);
+ return 0;
+}
+
+/**
+ * xroe_traffic_gen_init - Registers the driver
+ *
+ * Return: 0 on success or a negative errno on registration error
+ *
+ * Registers the traffic gen driver and creates the sysfs entries related
+ * to it
+ */
+static int __init xroe_traffic_gen_init(void)
+{
+ int ret;
+
+ pr_info("XROE traffic generator driver init\n");
+ ret = platform_driver_register(&xroe_traffic_gen_driver);
+ return ret;
+}
+
+/**
+ * xroe_traffic_gen_exit - Destroys the driver
+ *
+ * Unregisters the traffic gen driver
+ */
+static void __exit xroe_traffic_gen_exit(void)
+{
+ platform_driver_unregister(&xroe_traffic_gen_driver);
+ pr_debug("XROE traffic generator driver exit\n");
+}
+
+static const struct of_device_id xroe_traffic_gen_of_match[] = {
+ { .compatible = "xlnx,roe-traffic-gen-1.0", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xroe_traffic_gen_of_match);
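+/*
+ * Illustrative Device Tree node matched by the table above. The node name,
+ * unit address and register size are placeholders rather than values taken
+ * from the binding document:
+ *
+ *	xroe-traffic-gen@a0060000 {
+ *		compatible = "xlnx,roe-traffic-gen-1.0";
+ *		reg = <0x0 0xa0060000 0x0 0x1000>;
+ *	};
+ */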
+
+static struct platform_driver xroe_traffic_gen_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xroe_traffic_gen_of_match,
+ },
+ .probe = xroe_traffic_gen_probe,
+ .remove = xroe_traffic_gen_remove,
+};
+
+module_init(xroe_traffic_gen_init);
+module_exit(xroe_traffic_gen_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Xilinx Radio over Ethernet Traffic Generator driver");
diff --git a/drivers/staging/xroetrafficgen/xroe-traffic-gen.h b/drivers/staging/xroetrafficgen/xroe-traffic-gen.h
new file mode 100644
index 000000000000..55d968d89e10
--- /dev/null
+++ b/drivers/staging/xroetrafficgen/xroe-traffic-gen.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Vasileios Bimpikas <vasileios.bimpikas@xilinx.com>
+ */
+
+struct xroe_traffic_gen_local {
+ void __iomem *base_addr;
+};
+
+enum { XROE_SIZE_MAX = 15 };
+
+int xroe_traffic_gen_sysfs_init(struct device *dev);
+void xroe_traffic_gen_sysfs_exit(struct device *dev);