Diffstat (limited to 'drivers/staging/xlnx_ernic')
-rw-r--r--  drivers/staging/xlnx_ernic/Kconfig              4
-rw-r--r--  drivers/staging/xlnx_ernic/MAINTAINERS          4
-rw-r--r--  drivers/staging/xlnx_ernic/Makefile             7
-rw-r--r--  drivers/staging/xlnx_ernic/dt-binding.txt      29
-rw-r--r--  drivers/staging/xlnx_ernic/xcm.c             1962
-rw-r--r--  drivers/staging/xlnx_ernic/xcm.h              170
-rw-r--r--  drivers/staging/xlnx_ernic/xcommon.h           73
-rw-r--r--  drivers/staging/xlnx_ernic/xernic_bw_test.c   482
-rw-r--r--  drivers/staging/xlnx_ernic/xhw_config.h        26
-rw-r--r--  drivers/staging/xlnx_ernic/xhw_def.h          641
-rw-r--r--  drivers/staging/xlnx_ernic/xif.h              239
-rw-r--r--  drivers/staging/xlnx_ernic/xioctl.h            24
-rw-r--r--  drivers/staging/xlnx_ernic/xmain.c           1592
-rw-r--r--  drivers/staging/xlnx_ernic/xmain.h             33
-rw-r--r--  drivers/staging/xlnx_ernic/xmr.c              413
-rw-r--r--  drivers/staging/xlnx_ernic/xmr.h               68
-rw-r--r--  drivers/staging/xlnx_ernic/xperftest.h         33
-rw-r--r--  drivers/staging/xlnx_ernic/xqp.c             1310
-rw-r--r--  drivers/staging/xlnx_ernic/xqp.h              114
-rw-r--r--  drivers/staging/xlnx_ernic/xrocev2.h          409
20 files changed, 7633 insertions, 0 deletions
diff --git a/drivers/staging/xlnx_ernic/Kconfig b/drivers/staging/xlnx_ernic/Kconfig
new file mode 100644
index 000000000000..2d83fea0f3b9
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/Kconfig
@@ -0,0 +1,4 @@
+config ERNIC
+ tristate "Xilinx ERNIC driver"
+ help
+ Driver for the Xilinx Embedded Remote DMA (RDMA) Enabled NIC.
diff --git a/drivers/staging/xlnx_ernic/MAINTAINERS b/drivers/staging/xlnx_ernic/MAINTAINERS
new file mode 100644
index 000000000000..0355f5d3320f
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/MAINTAINERS
@@ -0,0 +1,4 @@
+XILINX EMBEDDED REMOTE DMA ENABLED NIC
+M: Sandeep Dhanvada <sandeep.dhanvada@xilinx.com>
+S: Maintained
+F: drivers/staging/xlnx_ernic
diff --git a/drivers/staging/xlnx_ernic/Makefile b/drivers/staging/xlnx_ernic/Makefile
new file mode 100644
index 000000000000..564933fa42d7
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/Makefile
@@ -0,0 +1,7 @@
+#TODO: Need to remove these flags and fix compilation warnings.
+ccflags-y := -Wno-incompatible-pointer-types -Wno-packed-bitfield-compat
+
+obj-m += xernic.o
+obj-m += xernic_bw_test.o
+
+xernic-objs := xmain.o xcm.o xqp.o xmr.o
diff --git a/drivers/staging/xlnx_ernic/dt-binding.txt b/drivers/staging/xlnx_ernic/dt-binding.txt
new file mode 100644
index 000000000000..2a9d098125b7
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/dt-binding.txt
@@ -0,0 +1,29 @@
+Xilinx Embedded RDMA NIC (ERNIC)
+--------------------------------
+
+The Xilinx Embedded Remote DMA (RDMA) NIC is an implementation of
+RDMA over Converged Ethernet (RoCEv2) enabled NIC functionality.
+
+Features supported by ERNIC are:
+1. Both IPv4 and IPv6.
+2. 100 Gb/s data path.
+3. Incoming and outgoing RDMA READ, RDMA WRITE and RDMA SEND.
+
+Required properties:
+- compatible : Must contain "xlnx,ernic-1.0".
+- interrupts: Contains the interrupt line numbers.
+- reg: Physical base address and length of the registers set for the device.
+
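+Example:
+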
+ernic_0: ernic@84000000 {
+ compatible = "xlnx,ernic-1.0";
+ interrupts = <4 2
+ 5 2
+ 6 2
+ 7 2
+ 8 2
+ 9 2
+ 10 2
+ 11 2
+ 12 2>;
+ reg = <0x84000000 0x40000>;
+};
diff --git a/drivers/staging/xlnx_ernic/xcm.c b/drivers/staging/xlnx_ernic/xcm.c
new file mode 100644
index 000000000000..64d102e540b4
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcm.c
@@ -0,0 +1,1962 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+
+unsigned int psn_num;
+unsigned int mad_tid = 0x11223344;
+/*****************************************************************************/
+
+/**
+ * xrnic_cm_prepare_mra() - Prepares Message Receipt Acknowledgment packet
+ * @qp_attr: qp info for which mra packet is prepared
+ * @msg: message being MRAed. 0x0-REQ, 0x1-REP, 0x2-LAP
+ * @rq_buf: Buffer to store the message
+ */
+static void xrnic_cm_prepare_mra(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_msg_mra msg, void *rq_buf)
+{
+ struct mra *mra;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ mra = (struct mra *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(MSG_RSP_ACK);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ mra = (struct mra *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(MSG_RSP_ACK);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ mra->local_cm_id = qp_attr->local_cm_id;
+ mra->remote_comm_id = qp_attr->remote_cm_id;
+ pr_info("[%d %s] remote_comm_id 0%x\n", __LINE__, __func__,
+ mra->remote_comm_id);
+ mra->message_mraed = msg;
+ mra->service_timeout = XRNIC_MRA_SERVICE_TIMEOUT;
+ /* 4.096 µs * 2 Service Timeout */
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_rep() - Prepares Reply packet
+ * @qp_attr: qp info for which reply packet is prepared
+ * @rq_buf: Buffer to store the data indicating the acceptance
+ */
+static void xrnic_cm_prepare_rep(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct rdma_qp_attr *rdma_qp_attr = (struct rdma_qp_attr *)
+ &((struct xrnic_reg_map *)xrnic_dev->xrnic_mmap.xrnic_regs)
+ ->rdma_qp_attr[qp_attr->qp_num - 2];
+ struct ethhdr_t *eth_hdr;
+ struct ipv4hdr *ipv4 = NULL;
+ struct ipv6hdr *ipv6 = NULL;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4 = NULL;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6 = NULL;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+ struct rep *rep;
+ struct req *req;
+ unsigned short temp;
+ unsigned char rq_opcode;
+ unsigned int config_value, start_psn_value;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ rep = (struct rep *)&send_sgl_temp_ipv4->mad.data;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv4);
+ ipv4 = (struct ipv4hdr *)
+ ((char *)recv_qp_pkt_ipv4 + XRNIC_ETH_HLEN);
+ req = (struct req *)&recv_qp_pkt_ipv4->mad.data;
+ temp = htons(CONNECT_REPLY);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ rep = (struct rep *)&send_sgl_temp_ipv6->mad.data;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv6);
+ ipv6 = (struct ipv6hdr *)
+ ((char *)recv_qp_pkt_ipv6 + XRNIC_ETH_HLEN);
+ req = (struct req *)&recv_qp_pkt_ipv6->mad.data;
+ temp = htons(CONNECT_REPLY);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ DEBUG_LOG("qp_num:%x\n", qp_attr->qp_num);
+
+ rep->local_cm_id = qp_attr->local_cm_id;
+ rep->remote_comm_id = qp_attr->remote_cm_id;
+
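+ /* The 24-bit local QP number is carried in the REP payload in
+ * big-endian (network) byte order, so swap its three bytes here.
+ */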
+ rep->local_qpn = ((qp_attr->qp_num >> 16) & 0xFF) |
+ (((qp_attr->qp_num >> 8) & 0xFF) << 8) |
+ ((qp_attr->qp_num & 0xFF) << 16);
+ DEBUG_LOG("local_qpn %d qp_num %d\n",
+ rep->local_qpn, qp_attr->qp_num);
+
+ memcpy((void *)rep->private_data,
+ (void *)&cm_id->conn_param.private_data,
+ cm_id->conn_param.private_data_len);
+
+ DEBUG_LOG("cm_id->conn_param.private_data_len %d\n",
+ cm_id->conn_param.private_data_len);
+ DEBUG_LOG("cm_id->conn_param.responder_resources %d\n",
+ cm_id->conn_param.responder_resources);
+ DEBUG_LOG("cm_id->conn_param.initiator_depth %d\n",
+ cm_id->conn_param.initiator_depth);
+ DEBUG_LOG("cm_id->conn_param.flow_control %d\n",
+ cm_id->conn_param.flow_control);
+ DEBUG_LOG("cm_id->conn_param.retry_count %d\n",
+ cm_id->conn_param.retry_count);
+ DEBUG_LOG("cm_id->conn_param.rnr_retry_count %d\n",
+ cm_id->conn_param.rnr_retry_count);
+
+ /* Initiator depth not required for target. */
+ rep->initiator_depth = cm_id->conn_param.initiator_depth;
+ rep->responder_resources = cm_id->conn_param.responder_resources;
+ rep->end_end_flow_control = cm_id->conn_param.flow_control;
+ rep->rnr_retry_count = cm_id->conn_param.rnr_retry_count;
+ rep->target_ack_delay = XRNIC_REP_TARGET_ACK_DELAY;
+ rep->fail_over_accepted = XRNIC_REP_FAIL_OVER_ACCEPTED;
+
+ DEBUG_LOG("req->initiator_depth %x\n", rep->initiator_depth);
+ DEBUG_LOG("rep->responder_resources %x\n", rep->responder_resources);
+
+ rep->sqr = XRNIC_REQ_SRQ;
+ rep->local_ca_guid[0] = 0x7c;
+ rep->local_ca_guid[1] = 0xfe;
+ rep->local_ca_guid[2] = 0x90;
+ rep->local_ca_guid[3] = 0x03;
+ rep->local_ca_guid[4] = 0x00;
+ rep->local_ca_guid[5] = 0xb8;
+ rep->local_ca_guid[6] = 0x57;
+ rep->local_ca_guid[7] = 0x70;
+
+ qp_attr->remote_qpn = req->local_qpn;
+
+ DEBUG_LOG("local_qpn [0x%x] [%d]\n", req->local_qpn,
+ ntohl(req->local_qpn));
+ config_value = ((req->local_qpn & 0xFF) << 16)
+ | (((req->local_qpn >> 8) & 0xFF) << 8)
+ | ((req->local_qpn >> 16) & 0xFF);
+
+ pr_info("config_value:%d req->local_qpn %d qp_attr->remote_qpn %d\n",
+ config_value, req->local_qpn, qp_attr->remote_qpn);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->dest_qp_conf)));
+
+ /* Set the MAC address */
+ config_value = eth_hdr->h_source[5] | (eth_hdr->h_source[4] << 8) |
+ (eth_hdr->h_source[3] << 16) |
+ (eth_hdr->h_source[2] << 24);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->mac_dest_addr_lsb)));
+ DEBUG_LOG("mac_xrnic_src_addr_lsb->0x%x\n", config_value);
+
+ config_value = eth_hdr->h_source[1] | (eth_hdr->h_source[0] << 8);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->mac_dest_addr_msb)));
+ DEBUG_LOG("mac_xrnic_src_addr_msb->0x%x\n", config_value);
+
+ config_value = 0;
+ DEBUG_LOG("req->start_psn:%x %x %x\n", req->start_psn[0],
+ req->start_psn[1], req->start_psn[2]);
+ config_value = (req->start_psn[2] | (req->start_psn[1] << 8) |
+ (req->start_psn[0] << 16));
+ DEBUG_LOG("req->start psn 0x%x\n", config_value);
+ start_psn_value = config_value;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_psn)));
+ memcpy(rep->start_psn, req->start_psn, 3);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ config_value = ipv4->src_addr;
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr1)));
+ config_value = ioread32((void *)&rdma_qp_attr->ip_dest_addr1);
+ DEBUG_LOG("read ipaddress:%x\n", config_value);
+ } else {
+ config_value = ipv6->saddr.in6_u.u6_addr32[3];
+ DEBUG_LOG("ipaddress1:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr1)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[2];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr2)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[1];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr3)));
+ config_value = ipv6->saddr.in6_u.u6_addr32[0];
+ DEBUG_LOG("ipaddress:%x\n", config_value);
+ iowrite32(htonl(config_value),
+ ((void *)(&rdma_qp_attr->ip_dest_addr4)));
+ config_value = ioread32((void *)&rdma_qp_attr->qp_conf);
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ DEBUG_LOG("read ipaddress:%x\n", config_value);
+ }
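+ /* Seed the hardware last_rq_req register with a PSN one behind the
+ * initiator's starting PSN and the RDMA READ opcode in bits 31:24.
+ */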
+ rq_opcode = XRNIC_RDMA_READ;
+ config_value = ((start_psn_value - 1) | (rq_opcode << 24));
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->last_rq_req)));
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_rej() - Prepares Reject packet
+ * @qp_attr: qp info for which reply packet is prepared
+ * @reason: reason for the rejection
+ * @msg: message whose contents cause sender to reject communication
+ * 0x0-REQ, 0x1-REP, 0x2-No message
+ */
+void xrnic_cm_prepare_rej(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_rej_reason reason, enum xrnic_msg_rej msg)
+{
+ struct rej *rej;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ rej = (struct rej *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(CONNECT_REJECT);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ rej = (struct rej *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(CONNECT_REJECT);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ pr_info("Sending rej\n");
+
+ rej->local_cm_id = qp_attr->local_cm_id;
+ rej->remote_comm_id = qp_attr->remote_cm_id;
+ rej->message_rejected = msg;
+ rej->reason = htons(reason);
+ rej->reject_info_length = XRNIC_REJ_INFO_LEN;
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_prepare_initial_headers() - Retrieves information from the response
+ * @qp_attr: qp info on which the response is sent
+ * @rq_buf: receive queue buffer
+ */
+void xrnic_prepare_initial_headers(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct mad *mad;
+ unsigned char temp;
+ struct ethhdr_t *eth_hdr;
+ struct ipv4hdr *ipv4;
+ struct ipv6hdr *ipv6;
+ struct udphdr *udp;
+ struct bth *bthp;
+ struct deth *dethp;
+ unsigned short *ipv4_hdr_ptr;
+ unsigned int ipv4_hdr_chksum;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ int i;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv4);
+ ipv4 = (struct ipv4hdr *)
+ ((char *)recv_qp_pkt_ipv4 + XRNIC_ETH_HLEN);
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ /* In the ethernet header swap source and destination MAC */
+ memcpy(send_sgl_temp_ipv4->eth.h_source,
+ eth_hdr->h_dest, XRNIC_ETH_ALEN);
+ memcpy(send_sgl_temp_ipv4->eth.h_dest,
+ eth_hdr->h_source, XRNIC_ETH_ALEN);
+ /* Copy the ethernet type field */
+ send_sgl_temp_ipv4->eth.eth_type = eth_hdr->eth_type;
+
+ /* In the IP header swap source IP and destination IP */
+ memcpy(&send_sgl_temp_ipv4->ipv4, ipv4,
+ sizeof(struct ipv4hdr));
+ send_sgl_temp_ipv4->ipv4.dest_addr = ipv4->src_addr;
+ send_sgl_temp_ipv4->ipv4.src_addr = ipv4->dest_addr;
+ ipv4->total_length = (sizeof(struct ipv4hdr) +
+ sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad)) + 4;
+ DEBUG_LOG("ipv4->total_length:%d\n", ipv4->total_length);
+ DEBUG_LOG("ipv4 length:%d\n", sizeof(struct ipv4hdr));
+ DEBUG_LOG("udp length:%d\n", sizeof(struct udphdr));
+ DEBUG_LOG("ethhdr length:%d\n", sizeof(struct ethhdr_t));
+ DEBUG_LOG("bth length:%d\n", sizeof(struct bth));
+ DEBUG_LOG("deth length:%d\n", sizeof(struct deth));
+
+ send_sgl_temp_ipv4->ipv4.total_length =
+ htons(ipv4->total_length);
+ send_sgl_temp_ipv4->ipv4.hdr_chksum = 0;
+ send_sgl_temp_ipv4->ipv4.id = ipv4->id;
+
+ ipv4_hdr_ptr = (unsigned short *)
+ (&send_sgl_temp_ipv4->ipv4);
+ ipv4_hdr_chksum = 0;
+
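+ /* Standard IPv4 header checksum: sum the ten 16-bit words of the
+ * 20-byte header, fold the carry back in and take the one's
+ * complement.
+ */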
+ for (i = 0; i < 10; i++) {
+ ipv4_hdr_chksum += *ipv4_hdr_ptr;
+ ipv4_hdr_ptr++;
+ }
+
+ ipv4_hdr_chksum = ~((ipv4_hdr_chksum & 0x0000FFFF) +
+ (ipv4_hdr_chksum >> 16));
+ send_sgl_temp_ipv4->ipv4.hdr_chksum = ipv4_hdr_chksum;
+ DEBUG_LOG("check sum :%x\n", ipv4_hdr_chksum);
+ udp = (struct udphdr *)((char *)recv_qp_pkt_ipv4 +
+ XRNIC_ETH_HLEN + sizeof(struct ipv4hdr));
+ /* Copy the UDP packets and update length field */
+ send_sgl_temp_ipv4->udp.source = udp->source;
+ send_sgl_temp_ipv4->udp.dest = udp->dest;
+ udp->len = sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad) +
+ XRNIC_ICRC_SIZE;
+ DEBUG_LOG("udp total_length:%x\n", udp->len);
+ DEBUG_LOG("mad size:%d\n", sizeof(struct mad));
+ send_sgl_temp_ipv4->udp.len = htons(udp->len);
+ udp->check = 0;
+ send_sgl_temp_ipv4->udp.check = htons(udp->check);
+
+ /* Base Transport header settings */
+ bthp = (struct bth *)((char *)udp + sizeof(struct udphdr));
+
+ /* Fill bth fields */
+ send_sgl_temp_ipv4->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ send_sgl_temp_ipv4->bth.solicited_event =
+ XRNIC_SET_SOLICT_EVENT;
+ send_sgl_temp_ipv4->bth.migration_req =
+ XRNIC_MIGRATION_REQ;
+ send_sgl_temp_ipv4->bth.pad_count = XRNIC_PAD_COUNT;
+ send_sgl_temp_ipv4->bth.transport_hdr_ver =
+ XRNIC_TRANSPORT_HDR_VER;
+ DEBUG_LOG("bth transport hdr ver:%x\n",
+ bthp->transport_hdr_ver);
+ send_sgl_temp_ipv4->bth.transport_hdr_ver =
+ bthp->transport_hdr_ver;
+ send_sgl_temp_ipv4->bth.destination_qp[0] = 0;
+ send_sgl_temp_ipv4->bth.destination_qp[1] = 0;
+ send_sgl_temp_ipv4->bth.destination_qp[2] =
+ XRNIC_DESTINATION_QP;
+ send_sgl_temp_ipv4->bth.reserved1 = XRNIC_RESERVED1;
+ send_sgl_temp_ipv4->bth.ack_request = XRNIC_ACK_REQ;
+ send_sgl_temp_ipv4->bth.reserved2 = XRNIC_RESERVED2;
+ send_sgl_temp_ipv4->bth.pkt_seq_num = 1;
+ send_sgl_temp_ipv4->bth.partition_key = 65535;
+
+ /* DETH settings */
+ dethp = (struct deth *)((char *)bthp + sizeof(struct bth));
+ send_sgl_temp_ipv4->deth.q_key = dethp->q_key;
+ send_sgl_temp_ipv4->deth.reserved = XRNIC_DETH_RESERVED;
+ send_sgl_temp_ipv4->deth.src_qp = dethp->src_qp;
+
+ /* MAD settings */
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ send_sgl_temp_ipv4->mad.base_ver = XRNIC_MAD_BASE_VER;
+ send_sgl_temp_ipv4->mad.class_version = 2;
+ DEBUG_LOG("class:%x\n", send_sgl_temp_ipv4->mad.class_version);
+ send_sgl_temp_ipv4->mad.mgmt_class = XRNIC_MAD_MGMT_CLASS;
+ temp = (XRNIC_MAD_RESP_BIT << 7) | XRNIC_MAD_COMM_SEND;
+ send_sgl_temp_ipv4->mad.resp_bit_method = temp;
+ DEBUG_LOG("mad method:%x\n",
+ send_sgl_temp_ipv4->mad.resp_bit_method);
+ send_sgl_temp_ipv4->mad.reserved = XRNIC_MAD_RESERVED;
+ send_sgl_temp_ipv4->mad.transaction_id = mad->transaction_id;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ eth_hdr = (struct ethhdr_t *)(recv_qp_pkt_ipv6);
+ ipv6 = (struct ipv6hdr *)
+ ((char *)recv_qp_pkt_ipv6 + XRNIC_ETH_HLEN);
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ /* In the ethernet header swap source and destination MAC */
+ memcpy(send_sgl_temp_ipv6->eth.h_source,
+ eth_hdr->h_dest, XRNIC_ETH_ALEN);
+ memcpy(send_sgl_temp_ipv6->eth.h_dest,
+ eth_hdr->h_source, XRNIC_ETH_ALEN);
+ send_sgl_temp_ipv6->eth.eth_type = eth_hdr->eth_type;
+ memcpy(&send_sgl_temp_ipv6->ipv6, ipv6,
+ sizeof(struct ipv6hdr));
+ /* In the IPv6 header swap source IP and destination IP */
+ memcpy(&send_sgl_temp_ipv6->ipv6.daddr, &ipv6->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&send_sgl_temp_ipv6->ipv6.saddr, &ipv6->daddr,
+ sizeof(struct in6_addr));
+ udp = (struct udphdr *)((char *)recv_qp_pkt_ipv6 +
+ XRNIC_ETH_HLEN + sizeof(struct ipv6hdr));
+ /* Copy the UDP packets and update length field */
+ send_sgl_temp_ipv6->udp.source = udp->source;
+ send_sgl_temp_ipv6->udp.dest = udp->dest;
+ udp->len = sizeof(struct udphdr) + sizeof(struct bth) +
+ sizeof(struct deth) + sizeof(struct mad) +
+ XRNIC_ICRC_SIZE;
+ DEBUG_LOG("udp total_length:%x\n", udp->len);
+ DEBUG_LOG("mad size:%d\n", sizeof(struct mad));
+ send_sgl_temp_ipv6->udp.len = htons(udp->len);
+ udp->check = 0;
+ send_sgl_temp_ipv6->udp.check = htons(udp->check);
+
+ /* Base Transport header settings */
+ bthp = (struct bth *)((char *)udp + sizeof(struct udphdr));
+
+ /* Fill bth fields */
+ send_sgl_temp_ipv6->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ send_sgl_temp_ipv6->bth.solicited_event =
+ XRNIC_SET_SOLICT_EVENT;
+ send_sgl_temp_ipv6->bth.migration_req = XRNIC_MIGRATION_REQ;
+ send_sgl_temp_ipv6->bth.pad_count = XRNIC_PAD_COUNT;
+ send_sgl_temp_ipv6->bth.transport_hdr_ver =
+ XRNIC_TRANSPORT_HDR_VER;
+ DEBUG_LOG("bth transport_hdr_ver:%x\n",
+ bthp->transport_hdr_ver);
+ send_sgl_temp_ipv6->bth.transport_hdr_ver =
+ bthp->transport_hdr_ver;
+ send_sgl_temp_ipv6->bth.destination_qp[0] = 0;
+ send_sgl_temp_ipv6->bth.destination_qp[1] = 0;
+ send_sgl_temp_ipv6->bth.destination_qp[2] =
+ XRNIC_DESTINATION_QP;
+ send_sgl_temp_ipv6->bth.reserved1 = XRNIC_RESERVED1;
+ send_sgl_temp_ipv6->bth.ack_request = XRNIC_ACK_REQ;
+ send_sgl_temp_ipv6->bth.reserved2 = XRNIC_RESERVED2;
+ send_sgl_temp_ipv6->bth.pkt_seq_num = 1;
+ send_sgl_temp_ipv6->bth.partition_key = 65535;
+
+ /* DETH settings */
+ dethp = (struct deth *)((char *)bthp + sizeof(struct bth));
+ send_sgl_temp_ipv6->deth.q_key = dethp->q_key;
+ send_sgl_temp_ipv6->deth.reserved = XRNIC_DETH_RESERVED;
+ send_sgl_temp_ipv6->deth.src_qp = dethp->src_qp;
+
+ /* MAD settings */
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ send_sgl_temp_ipv6->mad.base_ver = XRNIC_MAD_BASE_VER;
+ send_sgl_temp_ipv6->mad.class_version = 2;
+ DEBUG_LOG("class:%x\n", send_sgl_temp_ipv6->mad.class_version);
+ send_sgl_temp_ipv6->mad.mgmt_class = XRNIC_MAD_MGMT_CLASS;
+ temp = (XRNIC_MAD_RESP_BIT << 7) | XRNIC_MAD_COMM_SEND;
+ send_sgl_temp_ipv6->mad.resp_bit_method = temp;
+ DEBUG_LOG("mad method:%x\n",
+ send_sgl_temp_ipv6->mad.resp_bit_method);
+ send_sgl_temp_ipv6->mad.reserved = XRNIC_MAD_RESERVED;
+ send_sgl_temp_ipv6->mad.transaction_id = mad->transaction_id;
+ }
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_dreq() - Prepares Disconnection Request Packet
+ * @qp_attr: qp info to be released
+ */
+static void xrnic_cm_prepare_dreq(struct xrnic_qp_attr *qp_attr)
+{
+ struct dreq *dreq;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ dreq = (struct dreq *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(DISCONNECT_REQUEST);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ dreq = (struct dreq *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(DISCONNECT_REQUEST);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ dreq->local_cm_id = qp_attr->local_cm_id;
+ dreq->remote_comm_id = qp_attr->remote_cm_id;
+ dreq->remote_qpn_eecn = qp_attr->remote_qpn;
+
+ DEBUG_LOG("Exiting %s %d %d\n",
+ __func__, qp_attr->remote_qpn, dreq->remote_qpn_eecn);
+}
+
+/**
+ * xrnic_cm_disconnect_send_handler() - Sends Disconnection Request and frees
+ * all the attributes related to the qp
+ * @qp_attr: qp info to be released by dreq
+ */
+void xrnic_cm_disconnect_send_handler(struct xrnic_qp_attr *qp_attr)
+{
+ int qp1_send_pkt_size;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+
+ xrnic_cm_prepare_dreq(qp_attr);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_DREQ_SENT;
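+ /* Arm the CM timer: expiry is XRNIC_CM_TIMEOUT scaled by
+ * 2^XRNIC_CM_TIMER_TIMEOUT microseconds from now.
+ */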
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_prepare_drep() - Prepares disconnect reply packet
+ * @qp_attr: qp info for which drep packet is prepared
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_prepare_drep(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct drep *drep;
+ unsigned short temp;
+ struct qp_cm_pkt_hdr_ipv4 *send_sgl_temp_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *send_sgl_temp_ipv6;
+
+ DEBUG_LOG("Enteing %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ send_sgl_temp_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)
+ &qp_attr->send_sgl_temp;
+ drep = (struct drep *)&send_sgl_temp_ipv4->mad.data;
+ temp = htons(DISCONNECT_REPLY);
+ send_sgl_temp_ipv4->mad.attribute_id = temp;
+ } else {
+ send_sgl_temp_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)
+ &qp_attr->send_sgl_temp;
+ drep = (struct drep *)&send_sgl_temp_ipv6->mad.data;
+ temp = htons(DISCONNECT_REPLY);
+ send_sgl_temp_ipv6->mad.attribute_id = temp;
+ }
+ drep->local_cm_id = qp_attr->local_cm_id;
+ drep->remote_comm_id = qp_attr->remote_cm_id;
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_disconnect_request_handler() - Handles Disconnection Request.
+ * @qp_attr: qp info on which the reply is to be sent
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_disconnect_request_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ int qp1_send_pkt_size;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s qp_num %d\n", __func__, qp_attr->qp_num);
+ if (qp_attr->cm_id) {
+ DEBUG_LOG("cm id is not clean qp_num %d\n", qp_attr->qp_num);
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_DREQ_RCVD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("CM ID is NULL\n");
+ }
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ qp_attr->curr_state = XRNIC_DREQ_RCVD;
+ xrnic_cm_prepare_drep(qp_attr, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->resend_count = 0;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_disconnect_reply_handler() - Handles disconnect reply packets.
+ * @qp_attr: qp info of which qp to be destroyed
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_disconnect_reply_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->curr_state = XRNIC_DREQ_RCVD;
+ /*Call back to nvmeof. */
+
+ /* TBD: Need to change state while handling with timer. */
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->resend_count = 0;
+
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_connect_reject_handler() - Handles connect reject packets.
+ * @qp_attr: qp info
+ * @rq_buf: receive queue buffer
+ */
+static void xrnic_cm_connect_reject_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct rej *rej;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ rej = (struct rej *)&mad->data;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ rej = (struct rej *)&mad->data;
+ }
+
+ if (rej->message_rejected == XRNIC_REJ_REP ||
+ rej->message_rejected == XRNIC_REJ_REQ ||
+ rej->message_rejected == XRNIC_REJ_OTHERS) {
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+ qp_attr->cm_id = NULL;
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_REJ_RECV;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ }
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_msg_rsp_ack_handler() - Handles message response packets.
+ * @qp_attr: qp info
+ * @rq_buf: receive queue buffer
+ */
+void xrnic_cm_msg_rsp_ack_handler(struct xrnic_qp_attr *qp_attr, void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct mra *mra;
+
+ DEBUG_LOG("Enter ing %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ mra = (struct mra *)&mad->data;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ mra = (struct mra *)&mad->data;
+ }
+
+ if (mra->message_mraed == XRNIC_MRA_REP) {
+ qp_attr->curr_state = XRNIC_MRA_RCVD;
+ qp_attr->resend_count = 0;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_connect_rep_handler() - handles connect reply packets
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_connect_rep_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REP_RCVD;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_REP_RCVD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ pr_info("Connection Established Local QPn=%#x\n", qp_attr->qp_num);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_cm_ready_to_use_handler() - handles ready to use packets
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_ready_to_use_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_ESTABLISHD;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event = XRNIC_ESTABLISHD;
+ cm_id_info->conn_event_info.status = 0;
+ cm_id_info->conn_event_info.private_data_len = 0;
+ cm_id_info->conn_event_info.private_data = NULL;
+ qp_attr->cm_id->xrnic_cm_handler(qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ } else {
+ pr_err("%s CM_ID is NULL\n", __func__);
+ }
+ pr_info("Connection Established Local QPn=%x\n", qp_attr->qp_num);
+ DEBUG_LOG("Exiting %s\n", __func__);
+}
+
+/**
+ * xrnic_create_child_cm() - creates child cm.
+ * @cm_id_info : to update child cm info after creation
+ */
+static void xrnic_create_child_cm(struct xrnic_rdma_cm_id_info *cm_id_info)
+{
+ struct xrnic_rdma_cm_id *ch_cm;
+
+ ch_cm = kzalloc(sizeof(*ch_cm), GFP_ATOMIC);
+ cm_id_info->child_cm_id = ch_cm;
+}
+
+/**
+ * xrnic_cm_connect_request_handler() - handles connect request packets.
+ * @qp_attr : qp info
+ * @rq_buf : receive queue buffer
+ */
+static void xrnic_cm_connect_request_handler(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4 = NULL;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6 = NULL;
+ struct mad *mad = NULL;
+ struct req *req = NULL;
+ int qp1_send_pkt_size, child_qp_num, status;
+ enum xrnic_rej_reason reason = XRNIC_REJ_CONSUMER_REJECT;
+ enum xrnic_msg_rej msg_rej;
+ enum xrnic_msg_mra msg_mra;
+ u16 port_num;
+ void *temp;
+ struct xrnic_rdma_cm_id *child_cm_id;
+ struct xrnic_rdma_cm_id *parent_cm_id;
+ struct xrnic_rdma_cm_id_info *child_cm_id_info;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+ if (qp_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ }
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REQ_RCVD;
+
+ DEBUG_LOG("req-> local_cm_resp_tout:%x.\n", req->local_cm_resp_tout);
+ DEBUG_LOG("req-> path_packet_payload_mtu:%x.\n",
+ req->path_packet_payload_mtu);
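+ /* If the initiator advertised a CM response timeout shorter than we
+ * need, send an MRA so that it waits longer before retrying the REQ.
+ */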
+ if (req->remote_cm_resp_tout < XRNIC_REQ_REMOTE_CM_RESP_TOUT) {
+ pr_info("remote_cm_resp_tout:%x", req->remote_cm_resp_tout);
+
+ msg_mra = XRNIC_MRA_REQ;
+ xrnic_cm_prepare_mra(qp_attr, msg_mra, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+ qp_attr->curr_state = XRNIC_MRA_SENT;
+ }
+
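+ /* Skip the 36-byte RDMA CM (CMA) header at the start of the REQ
+ * private data; what follows is the consumer's private data.
+ */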
+ temp = (char *)&req->private_data;
+ temp += 36;
+ port_num = htons(req->service_id[6] | req->service_id[7] << 8);
+ DEBUG_LOG("req-> service_id[0]:%x.\n", req->service_id[0]);
+ DEBUG_LOG("req-> service_id[1]:%x.\n", req->service_id[1]);
+ DEBUG_LOG("req-> service_id[2]:%x.\n", req->service_id[2]);
+ DEBUG_LOG("req-> service_id[3]:%x.\n", req->service_id[3]);
+ DEBUG_LOG("req-> service_id[4]:%x.\n", req->service_id[4]);
+ DEBUG_LOG("req-> service_id[5]:%x.\n", req->service_id[5]);
+ DEBUG_LOG("req-> service_id[6]:%x.\n", req->service_id[6]);
+ DEBUG_LOG("req-> service_id[7]:%x.\n", req->service_id[7]);
+ DEBUG_LOG("req->port_num:%d,%x\n", port_num, port_num);
+
+ if (xrnic_dev->port_status[port_num - 1] == XRNIC_PORT_QP_FREE ||
+ port_num < 1 || port_num > XRNIC_MAX_PORT_SUPPORT) {
+ /*We need to validate that.*/
+ pr_err("PORT number is not correct sending rej.\n");
+ reason = XRNIC_REJ_PRIM_LID_PORT_NOT_EXIST;
+ msg_rej = XRNIC_REJ_REQ;
+ goto send_rep_rej;
+ }
+
+ xrnic_create_child_cm(xrnic_dev->cm_id_info[port_num - 1]);
+ child_qp_num =
+ xrnic_dev->cm_id_info[port_num - 1]->parent_cm_id.child_qp_num++;
+ child_cm_id = xrnic_dev->cm_id_info[port_num - 1]->child_cm_id;
+ parent_cm_id = &xrnic_dev->cm_id_info[port_num - 1]->parent_cm_id;
+ child_cm_id->cm_id_info = xrnic_dev->cm_id_info[port_num - 1];
+ child_cm_id->cm_context = parent_cm_id->cm_context;
+ child_cm_id->ps = parent_cm_id->ps;
+ child_cm_id->xrnic_cm_handler = parent_cm_id->xrnic_cm_handler;
+ child_cm_id->local_cm_id = qp_attr->local_cm_id;
+ child_cm_id->port_num = port_num;
+ child_cm_id->child_qp_num = child_qp_num + 1;
+ child_cm_id->qp_info.qp_num = qp_attr->qp_num;
+ child_cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ child_cm_id_info = child_cm_id->cm_id_info;
+ child_cm_id_info->conn_event_info.cm_event = XRNIC_REQ_RCVD;
+ child_cm_id_info->conn_event_info.status = 0;
+ child_cm_id_info->conn_event_info.private_data = (void *)temp;
+ child_cm_id_info->conn_event_info.private_data_len = 32;
+ list_add_tail(&child_cm_id->list, &cm_id_list);
+ status = parent_cm_id->xrnic_cm_handler(child_cm_id,
+ &child_cm_id_info->conn_event_info);
+ if (status) {
+ pr_err("xrnic_cm_handler failed sending rej.\n");
+ reason = XRNIC_REJ_CONSUMER_REJECT;
+ msg_rej = XRNIC_REJ_REQ;
+ goto send_rep_rej;
+ }
+
+ qp_attr->remote_cm_id = req->local_cm_id;
+ qp_attr->cm_id = child_cm_id;
+
+ if (qp_attr->ip_addr_type == AF_INET) {
+ qp_attr->ipv4_addr = recv_qp_pkt_ipv4->ipv4.src_addr;
+ memcpy(&qp_attr->mac_addr,
+ &recv_qp_pkt_ipv4->eth.h_source, XRNIC_ETH_ALEN);
+ qp_attr->source_qp_num = recv_qp_pkt_ipv4->deth.src_qp;
+ } else {
+ memcpy(&qp_attr->ipv6_addr,
+ &recv_qp_pkt_ipv6->ipv6.saddr,
+ sizeof(struct in6_addr));
+ memcpy(&qp_attr->mac_addr,
+ &recv_qp_pkt_ipv6->eth.h_source, XRNIC_ETH_ALEN);
+ qp_attr->source_qp_num = recv_qp_pkt_ipv6->deth.src_qp;
+ }
+
+ xrnic_cm_prepare_rep(qp_attr, rq_buf);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REP_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s\n", __func__);
+ return;
+send_rep_rej:
+
+ qp_attr->remote_cm_id = req->local_cm_id;
+
+ xrnic_cm_prepare_rej(qp_attr, reason, msg_rej);
+ /* Reject code added end */
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr, qp1_send_pkt_size);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp, qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_REJ_SENT;
+ if (timer_pending(&qp_attr->qp_timer))
+ del_timer_sync(&qp_attr->qp_timer);
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ DEBUG_LOG("Exiting %s with reject reason [%d]\n", __func__, reason);
+}
+
+/**
+ * fill_cm_rtu_data() - Fills rtu data to send rtu packet.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ * @return: send_sgl_qp1 data pointer
+ */
+static char *fill_cm_rtu_data(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct cma_rtu *rtu_data;
+
+ SET_CM_HDR(send_sgl_qp1);
+ rtu_data = (struct cma_rtu *)send_sgl_qp1;
+ memset(rtu_data, 0, sizeof(*rtu_data));
+ rtu_data->local_comm_id = cm_id->local_cm_id;
+ rtu_data->remote_comm_id = cm_id->remote_cm_id;
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_cm_req_data() - Fills request data to send in request packet.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ * @return: send_sgl_qp1 data pointer
+ */
+static char *fill_cm_req_data(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct ernic_cm_req *cm_req;
+ struct cma_hdr data;
+ int val;
+ int sgid, dgid;
+ unsigned int psn;
+ struct sockaddr_in *sin4, *din4;
+
+ sin4 = (struct sockaddr_in *)&cm_id->route.s_addr;
+ din4 = (struct sockaddr_in *)&cm_id->route.d_addr;
+
+ SET_CM_HDR(send_sgl_qp1);
+ cm_req = (struct ernic_cm_req *)send_sgl_qp1;
+ memset(cm_req, 0, sizeof(*cm_req));
+
+ cm_req->local_comm_id = cpu_to_be32(cm_id->local_cm_id);
+ cm_req->service_id = cpu_to_be64((cm_id->ps << 16) |
+ be16_to_cpu(din4->sin_port));
+ ether_addr_copy(&cm_req->local_ca_guid, &cm_id->route.smac);
+ cm_req->local_qkey = 0;
+ cm_req->offset32 = cpu_to_be32((cm_id->local_cm_id << 8) |
+ cm_id->conn_param.responder_resources);
+ cm_req->offset36 = cpu_to_be32 (cm_id->conn_param.initiator_depth);
+
+ val = (XRNIC_REQ_LOCAL_CM_RESP_TOUT | (XRNIC_SVC_TYPE_UC << 5) |
+ (cm_id->conn_param.flow_control << 7));
+ cm_req->offset40 = cpu_to_be32(val);
+ get_random_bytes(&psn, sizeof(psn));
+ psn &= 0xFFFFFF;
+ val = ((psn << 8) | XRNIC_REQ_REMOTE_CM_RESP_TOUT |
+ (cm_id->conn_param.retry_count << 5));
+ cm_req->offset44 = cpu_to_be32(val);
+ cm_id->qp_info.starting_psn = psn;
+
+ cm_req->pkey = 0xFFFF;
+ cm_req->offset50 = ((1 << 4) |
+ (cm_id->conn_param.rnr_retry_count << 5));
+ cm_req->offset51 = (1 << 4);
+ cm_req->local_lid = cpu_to_be16(0xFFFF);
+ cm_req->remote_lid = cpu_to_be16(0xFFFF);
+ sgid = sin4->sin_addr.s_addr;
+ dgid = din4->sin_addr.s_addr;
+ val = cpu_to_be32(0xFFFF);
+ memcpy(cm_req->local_gid.raw + 8, &val, 4);
+ memcpy(cm_req->local_gid.raw + 12, &sgid, 4);
+ memcpy(cm_req->remote_gid.raw + 8, &val, 4);
+ memcpy(cm_req->remote_gid.raw + 12, &dgid, 4);
+ cm_req->offset88 = cpu_to_be32(1 << 2);
+ cm_req->traffic_class = 0;
+ cm_req->hop_limit = 0x40;
+ cm_req->offset94 = 0;
+ cm_req->offset95 = 0x18;
+
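+ /* RDMA CM (CMA) private-data header: CMA version, IP version (4) in
+ * the upper nibble, destination port and the IPv4 addresses.
+ */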
+ data.cma_version = CMA_VERSION;
+ data.ip_version = (4 << 4);
+ data.port = din4->sin_port;
+ data.src_addr.ip4.addr = sin4->sin_addr.s_addr;
+ data.dst_addr.ip4.addr = din4->sin_addr.s_addr;
+ memcpy(cm_req->private_data, &data, sizeof(data));
+
+ return send_sgl_qp1;
+}
+
+/**
+ * fill_ipv4_cm_req() - fills cm request data for rdma connect.
+ * @cm_id : CM ID
+ * @send_sgl_qp1 : data pointer
+ * @cm_req_size : total header size
+ */
+void fill_ipv4_cm_req(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ send_sgl_qp1 = fill_ipv4_headers(cm_id, send_sgl_qp1, cm_req_size);
+ send_sgl_qp1 = fill_mad_common_header(cm_id, send_sgl_qp1,
+ cm_req_size, CM_REQ_ATTR_ID);
+ send_sgl_qp1 = fill_cm_req_data(cm_id, send_sgl_qp1, cm_req_size);
+}
+
+/**
+ * xrnic_cm_send_rtu() - Sends Ready to use packet.
+ * @cm_id : CM ID
+ * @cm_rep : IPV4 mad data
+ */
+static void xrnic_cm_send_rtu(struct xrnic_rdma_cm_id *cm_id,
+ struct rep *cm_rep)
+{
+ int cm_req_size;
+ char *send_sgl_qp1, *head;
+
+ cm_req_size = sizeof(struct ethhdr) + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + IB_BTH_BYTES + IB_DETH_BYTES +
+ sizeof(struct ib_mad_hdr) + sizeof(struct cma_rtu) +
+ EXTRA_PKT_LEN;
+
+ head = kmalloc(cm_req_size, GFP_ATOMIC);
+ send_sgl_qp1 = head;
+ send_sgl_qp1 = fill_ipv4_headers(cm_id, send_sgl_qp1, cm_req_size);
+ send_sgl_qp1 = fill_mad_common_header(cm_id, send_sgl_qp1,
+ cm_req_size, CM_RTU_ATTR_ID);
+ send_sgl_qp1 = fill_cm_rtu_data(cm_id, send_sgl_qp1, cm_req_size);
+ xrnic_send_mad(head, cm_req_size - EXTRA_PKT_LEN);
+}
+
+/*
+ * xrnic_rdma_accept() - This function implements incoming connect request
+ * accept functionality.
+ * @cm_id : CM ID of the incoming connect request
+ * @conn_param : Connection parameters
+ * @return: XRNIC_SUCCESS if successfully accepts the connection,
+ * otherwise error representative value
+ */
+int xrnic_rdma_accept(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param)
+{
+ struct xrnic_qp_info *qp_info;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1] !=
+ XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_info = &cm_id->qp_info;
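+ /* QP numbers below 2 are reserved (QP 1 carries the CM MAD traffic),
+ * so usable data QPs start at 2.
+ */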
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->sq_depth > XRNIC_MAX_SQ_DEPTH ||
+ qp_info->rq_depth > XRNIC_MAX_RQ_DEPTH ||
+ qp_info->send_sge_size > XRNIC_MAX_SEND_SGL_SIZE ||
+ qp_info->send_pkt_size > XRNIC_MAX_SEND_PKT_SIZE)
+ return -XRNIC_INVALID_QP_INIT_ATTR;
+
+ /*Return Error if wrong conn_param is coming.*/
+ if (conn_param->private_data_len > XRNIC_CM_PRVATE_DATA_LENGTH ||
+ conn_param->responder_resources > XRNIC_RESPONDER_RESOURCES ||
+ conn_param->initiator_depth > XRNIC_INITIATOR_DEPTH ||
+ conn_param->flow_control > 1 ||
+ conn_param->retry_count > XRNIC_REQ_RETRY_COUNT ||
+ conn_param->rnr_retry_count > XRNIC_REP_RNR_RETRY_COUNT)
+ return -XRNIC_INVALID_QP_CONN_PARAM;
+
+ memcpy((void *)&cm_id->conn_param.private_data,
+ (void *)&conn_param->private_data,
+ conn_param->private_data_len);
+ cm_id->conn_param.private_data_len = conn_param->private_data_len;
+ cm_id->conn_param.responder_resources =
+ conn_param->responder_resources;
+ cm_id->conn_param.initiator_depth = conn_param->initiator_depth;
+ cm_id->conn_param.flow_control = conn_param->flow_control;
+ cm_id->conn_param.retry_count = conn_param->retry_count;
+ cm_id->conn_param.rnr_retry_count = conn_param->rnr_retry_count;
+
+ xrnic_qp_app_configuration(qp_info->qp_num, XRNIC_HW_QP_ENABLE);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_accept);
+
+/*
+ * xrnic_rdma_disconnect() - This function implements RDMA disconnect.
+ * @cm_id : CM ID to destroy or disconnect
+ * @return: XRNIC_SUCCESS if successfully disconnects
+ * otherwise error representative value
+ */
+int xrnic_rdma_disconnect(struct xrnic_rdma_cm_id *cm_id)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ int i;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1]) {
+ if (cm_id->local_cm_id >= 2) {
+ if (cm_id->child_qp_num < 1)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_info.qp_num) {
+ pr_err("CM ID of QP is not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ if (cm_id->qp_status == XRNIC_PORT_QP_FREE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ pr_info("Free local cm id[%d] ", cm_id->local_cm_id);
+ pr_info("Child qp number [%d] ", cm_id->child_qp_num);
+ pr_info("qp_num [%d]\n", cm_id->qp_info.qp_num);
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ } else if (cm_id->local_cm_id == 1) {
+ if (cm_id->qp_status == XRNIC_PORT_QP_FREE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ cm_id_info = (struct xrnic_rdma_cm_id_info *)
+ cm_id->cm_id_info;
+ for (i = 0; i < cm_id_info->num_child; i++) {
+ if (cm_id_info->child_cm_id[i].qp_status ==
+ XRNIC_PORT_QP_IN_USE){
+ pr_err("child CM IDs not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ }
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+ } else {
+ pr_err("Received invalid CM ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ } else {
+ pr_err("Received invalid Port ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_disconnect);
+
+/*
+ * xrnic_rdma_destroy_id() - Function destroys CM ID of the channel.
+ * @cm_id : CM ID of the incoming connect request
+ * @flag : Flag to indicate disconnect send
+ * @return: XRNIC_SUCCESS if successfully,
+ * otherwise error representative value
+ */
+int xrnic_rdma_destroy_id(struct xrnic_rdma_cm_id *cm_id, int flag)
+{
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ int i;
+ u32 local_cm_id = cm_id->local_cm_id;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1]) {
+ if (local_cm_id >= 2) {
+ if (cm_id->child_qp_num < 1)
+ return -XRNIC_INVALID_CM_ID;
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE) {
+ pr_err("CM ID is not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ if (flag)
+ xrnic_cm_disconnect_send_handler
+ (&xrnic_dev->qp_attr[local_cm_id - 2]);
+
+ pr_info("Free local cm id[%d] ", cm_id->local_cm_id);
+ pr_info("Child qp number [%d] ", cm_id->child_qp_num);
+ pr_info("qp_num [%d]\n", cm_id->qp_info.qp_num);
+
+ cm_id_info =
+ xrnic_dev->cm_id_info[cm_id->port_num - 1];
+ cm_id_info->parent_cm_id.child_qp_num--;
+ __list_del_entry(&cm_id->list);
+ kfree(cm_id);
+ } else if (local_cm_id == 1) {
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE) {
+ pr_err("CM ID is already destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+
+ cm_id_info = (struct xrnic_rdma_cm_id_info *)
+ cm_id->cm_id_info;
+ for (i = 0; i < cm_id_info->num_child; i++) {
+ if (cm_id_info->child_cm_id[i].qp_status ==
+ XRNIC_PORT_QP_IN_USE) {
+ pr_err("child CM IDs not destroyed\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ }
+ xrnic_dev->io_qp_count = xrnic_dev->io_qp_count +
+ cm_id_info->num_child;
+ xrnic_dev->cm_id_info[cm_id->port_num - 1] = NULL;
+ xrnic_dev->port_status[cm_id->port_num - 1] =
+ XRNIC_PORT_QP_FREE;
+ __list_del_entry(&cm_id->list);
+ kfree(cm_id_info->child_cm_id);
+ kfree(cm_id_info);
+ } else {
+ pr_err("Received invalid CM ID\n");
+ return -XRNIC_INVALID_CM_ID;
+ }
+ } else {
+ return -XRNIC_INVALID_CM_ID;
+ }
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_destroy_id);
+
+/*
+ * xrnic_send_mad() - This function initiates sending a management packet on
+ * QP1.
+ * @send_buf : Input buffer to fill
+ * @size : Size of the send buffer
+ */
+void xrnic_send_mad(void *send_buf, u32 size)
+{
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+
+ xrnic_qp1_send_mad_pkt(send_buf, qp1_attr, size);
+}
+EXPORT_SYMBOL(xrnic_send_mad);
+
+/*
+ * xrnic_identify_remote_host() - This function searches internal data
+ * structures for remote info.
+ * @rq_buf : received data buffer from other end
+ * @qp_num : QP number on which packet has been received
+ * @return: XRNIC_SUCCESS if remote end info is available,
+ * XRNIC_FAILED otherwise
+ */
+int xrnic_identify_remote_host(void *rq_buf, int qp_num)
+{
+ /* First find out which IP version the packet came in with and
+ * accordingly compare the IP address as either AF_INET or AF_INET6.
+ */
+ /* There are two possible failure conditions: either we simply bypass
+ * this CONNECT_REQUEST because we already have it, or there is
+ * no QP free at all.
+ */
+ struct mad *mad;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ }
+
+ if (htons(mad->attribute_id) == CONNECT_REQUEST) {
+ if (qp1_attr->ip_addr_type == AF_INET6) {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ (!memcmp(&xrnic_dev->qp1_attr.ipv6_addr,
+ &xrnic_dev->qp_attr[qp_num].ipv6_addr,
+ sizeof(struct in6_addr))))
+ return XRNIC_SUCCESS;
+ } else {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ xrnic_dev->qp1_attr.ipv4_addr ==
+ xrnic_dev->qp_attr[qp_num].ipv4_addr)
+ return XRNIC_SUCCESS;
+ }
+ } else {
+ /* Need to Compare udp->source_port,ethernet->source_mac,
+ * ip->source_ip, deth->source_qp == 1, local_cm_id is le
+ */
+
+ if (qp1_attr->ip_addr_type == AF_INET6) {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ mad->data[1] ==
+ xrnic_dev->qp_attr[qp_num].local_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ (!memcmp(&xrnic_dev->qp1_attr.ipv6_addr,
+ &xrnic_dev->qp_attr[qp_num].ipv6_addr,
+ sizeof(struct in6_addr))))
+
+ return XRNIC_SUCCESS;
+ } else {
+ if (mad->data[0] ==
+ xrnic_dev->qp_attr[qp_num].remote_cm_id &&
+ mad->data[1] ==
+ xrnic_dev->qp_attr[qp_num].local_cm_id &&
+ xrnic_dev->qp1_attr.source_qp_num ==
+ xrnic_dev->qp_attr[qp_num].source_qp_num &&
+ (strcmp(xrnic_dev->qp1_attr.mac_addr,
+ xrnic_dev->qp_attr[qp_num].mac_addr)
+ == 0) &&
+ xrnic_dev->qp1_attr.ipv4_addr ==
+ xrnic_dev->qp_attr[qp_num].ipv4_addr)
+
+ return XRNIC_SUCCESS;
+ }
+ }
+ return XRNIC_FAILED;
+}
+
+/*
+ * xrnic_rdma_resolve_addr() - This function looks for a destination
+ * address and initiates ARP if required.
+ * @cm_id : CM channel ID which is being used for connection set up
+ * @src_addr : IPV4/IPV6 address of the source
+ * @dst_addr : IPV4/IPV6 address of the destination
+ * @timeout : Address resolve timeout
+ * @return: SUCCESS value if route resolved or error representative value
+ * otherwise
+ */
+int xrnic_rdma_resolve_addr(struct xrnic_rdma_cm_id *cm_id,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int timeout)
+{
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct neighbour *n;
+ int arp_retry = 3;
+ int ret = 0;
+ struct sockaddr_in sin4, *din4;
+ struct net_device *net_dev;
+ struct xrnic_rdma_cm_event_info event;
+
+ net_dev = dev_get_by_name(&init_net, "eth0");
+ memset(&fl4, 0, sizeof(fl4));
+ din4 = (struct sockaddr_in *)dst_addr;
+ fl4.daddr = din4->sin_addr.s_addr;
+ rt = ip_route_output_key(&init_net, &fl4);
+ if (IS_ERR(rt)) {
+ event.cm_event = XRNIC_CM_EVENT_ADDR_ERROR;
+ event.status = PTR_ERR(rt);
+ cm_id->xrnic_cm_handler(cm_id, &event);
+ ret = PTR_ERR(rt);
+ goto err;
+ }
+
+ event.cm_event = XRNIC_CM_EVENT_ADDR_RESOLVED;
+ event.status = 0;
+ cm_id->xrnic_cm_handler(cm_id, &event);
+
+ sin4.sin_addr.s_addr = fl4.saddr;
+ sin4.sin_port = cpu_to_be16(ERNIC_UDP_SRC_PORT);
+ sin4.sin_family = dst_addr->sa_family;
+
+ /* HACK: ARP is not resolved for the first time, retries are needed */
+ do {
+ n = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
+ } while (arp_retry-- > 0);
+
+ if (IS_ERR(n))
+ pr_info("ERNIC neigh lookup failed\n");
+
+ memcpy(&cm_id->route.s_addr, &sin4, sizeof(sin4));
+ memcpy(&cm_id->route.d_addr, dst_addr, sizeof(*dst_addr));
+ ether_addr_copy(cm_id->route.smac, net_dev->dev_addr);
+ ether_addr_copy(cm_id->route.dmac, n->ha);
+ event.cm_event = XRNIC_CM_EVENT_ROUTE_RESOLVED;
+ event.status = 0;
+ cm_id->xrnic_cm_handler(cm_id, &event);
+err:
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_rdma_resolve_addr);
+
+/*
+ * fill_ipv4_headers() - This function fills the IPv4 headers for an
+ * outgoing packet.
+ * @cm_id : CM ID info for addresses
+ * @send_sgl_qp1 : SGL info
+ * @cm_req_size : request size
+ * @return: pointer to SGL info
+ */
+char *fill_ipv4_headers(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size)
+{
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *udph;
+ struct sockaddr_in *sin4, *din4;
+
+ sin4 = (struct sockaddr_in *)&cm_id->route.s_addr;
+ din4 = (struct sockaddr_in *)&cm_id->route.d_addr;
+
+ SET_ETH_HDR(send_sgl_qp1);
+ eth = (struct ethhdr *)send_sgl_qp1;
+ ether_addr_copy(&eth->h_dest, &cm_id->route.dmac);
+ ether_addr_copy(&eth->h_source, &cm_id->route.smac);
+ eth->h_proto = cpu_to_be16(ETH_P_IP);
+
+ SET_IP_HDR(send_sgl_qp1);
+ iph = (struct iphdr *)send_sgl_qp1;
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->ttl = 32;
+ iph->tos = 0;
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = sin4->sin_addr.s_addr;
+ iph->daddr = din4->sin_addr.s_addr;
+ iph->id = 0;
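+ /* 0x2 << 13 sets the Don't Fragment bit in the flags/fragment-offset field. */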
+ iph->frag_off = cpu_to_be16(0x2 << 13);
+ iph->tot_len = cpu_to_be16(cm_req_size - ETH_HLEN);
+
+ ip_send_check(iph);
+
+ SET_NET_HDR(send_sgl_qp1);
+ udph = (struct udphdr *)send_sgl_qp1;
+ udph->source = sin4->sin_port;
+ udph->dest = din4->sin_port;
+ udph->len = cpu_to_be16(cm_req_size - ETH_HLEN - (iph->ihl * 4));
+ udph->check = 0;
+
+ return send_sgl_qp1;
+}
+
+/*
+ * fill_mad_common_header() - This function fills the MAD headers.
+ * @cm_id : CM ID info
+ * @send_sgl_qp1 : SGL info
+ * @cm_req_size : request size
+ * @cm_attr : cm attribute ID
+ * @return: pointer to SGL info
+ */
+char *fill_mad_common_header(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size,
+ int cm_attr)
+{
+ struct ib_bth *bth;
+ struct ib_deth *deth;
+ struct ib_mad_hdr *madh;
+ int val;
+
+ SET_BTH_HDR(send_sgl_qp1);
+ bth = (struct ib_bth *)send_sgl_qp1;
+ memset(bth, 0, sizeof(*bth));
+ val = (BTH_SET(OPCODE, IB_OPCODE_UD_SEND_ONLY) |
+ BTH_SET(SE, XRNIC_SET_SOLICT_EVENT) |
+ BTH_SET(MIG, XRNIC_MIGRATION_REQ) |
+ BTH_SET(PAD, XRNIC_PAD_COUNT) |
+ BTH_SET(TVER, XRNIC_TRANSPORT_HDR_VER) |
+ BTH_SET(PKEY, 65535));
+ bth->offset0 = cpu_to_be32(val);
+ bth->offset4 = cpu_to_be32(BTH_SET(DEST_QP, 1));
+ bth->offset8 = cpu_to_be32(BTH_SET(PSN, psn_num++));
+
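+ /* DETH: the Q_Key in offset0, the source QP number (2) in offset4. */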
+ SET_DETH_HDR(send_sgl_qp1);
+ deth = (struct ib_deth *)send_sgl_qp1;
+ deth->offset0 = cpu_to_be32 (IB_ENFORCED_QEY);
+ deth->offset4 = cpu_to_be32 (DETH_SET(SQP, 2));
+
+ SET_MAD_HDR(send_sgl_qp1);
+ madh = (struct ib_mad_hdr *)send_sgl_qp1;
+ memset(madh, 0, sizeof(*madh));
+ madh->base_version = IB_MGMT_BASE_VERSION;
+ madh->mgmt_class = IB_MGMT_CLASS_CM;
+ madh->class_version = IB_CM_CLASS_VER;
+ madh->method = IB_MGMT_METHOD_SEND;
+ madh->attr_id = cm_attr;
+ madh->tid = cpu_to_be64(mad_tid++);
+ madh->status = 0;
+ madh->class_specific = 0;
+ madh->attr_mod = 0;
+
+ return send_sgl_qp1;
+}
+
+/*
+ * xrnic_rdma_connect() - This function initiates the connection process.
+ * @cm_id : CM ID info
+ * @conn_param : Connection parameters for the new connection
+ * @return: XRNIC_SUCCESS
+ */
+int xrnic_rdma_connect(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param)
+{
+ int cm_req_size;
+ char *send_sgl_qp1, *head;
+
+ cm_req_size = sizeof(struct ethhdr) + sizeof(struct iphdr) +
+ sizeof(struct udphdr) + IB_BTH_BYTES + IB_DETH_BYTES +
+ sizeof(struct ib_mad_hdr) +
+ sizeof(struct ernic_cm_req) + EXTRA_PKT_LEN;
+
+ head = kmalloc(cm_req_size, GFP_ATOMIC);
+ send_sgl_qp1 = head;
+ memcpy(&cm_id->conn_param, conn_param, sizeof(*conn_param));
+ fill_ipv4_cm_req(cm_id, send_sgl_qp1, cm_req_size);
+ xrnic_send_mad(head, cm_req_size - EXTRA_PKT_LEN);
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_connect);
+
+/*
+ * xrnic_process_mad_pkt() - This function processes a received MAD packet.
+ * @rq_buf : receive queue pointer
+ * @return: XRNIC_SUCCESS if successfully processed the MAD packet otherwise
+ * XRNIC_FAILED
+ */
+static int xrnic_process_mad_pkt(void *rq_buf)
+{
+ int ret = 0;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct deth *deth;
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ deth = (struct deth *)&recv_qp_pkt_ipv4->deth;
+ qp1_attr->ipv4_addr = recv_qp_pkt_ipv4->ipv4.src_addr;
+ memcpy(&qp1_attr->mac_addr,
+ &recv_qp_pkt_ipv4->eth.h_source, XRNIC_ETH_ALEN);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ deth = (struct deth *)&recv_qp_pkt_ipv6->deth;
+ memcpy(&qp1_attr->ipv6_addr,
+ &recv_qp_pkt_ipv6->ipv6.saddr,
+ sizeof(struct in6_addr));
+ memcpy(&qp1_attr->mac_addr,
+ &recv_qp_pkt_ipv6->eth.h_source,
+ XRNIC_ETH_ALEN);
+ }
+ qp1_attr->source_qp_num = deth->src_qp;
+
+ ret = xrnic_cm_establishment_handler(rq_buf);
+ if (ret) {
+ pr_err("cm establishment failed with ret code %d\n", ret);
+ return XRNIC_FAILED;
+ }
+
+ return XRNIC_SUCCESS;
+}
+
+/*
+ * xrnic_mad_pkt_recv_intr_handler() - Interrupt handler for MAD packet
+ * interrupt type
+ * @data : XRNIC device info
+ */
+void xrnic_mad_pkt_recv_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ qp1_attr->xrnic_mmap;
+ struct rdma_qp1_attr *rdma_qp1_attr = (struct rdma_qp1_attr *)
+ &xrnic_mmap->xrnic_regs->rdma_qp1_attr;
+ u32 config_value = 0;
+ u8 rq_buf[XRNIC_RECV_PKT_SIZE];
+ void *rq_buf_temp, *rq_buf_unaligned;
+ int ret = 0, j, rq_pkt_num = 0, rq_pkt_count = 0;
+ struct ethhdr_t *ethhdr;
+ unsigned long flag;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ rq_buf_unaligned = (void *)rq_buf;
+
+ /* We need to maintain sq_cmpl_db_local as per hardware updates
+ * for the queue-specific sq_cmpl_db_local register.
+ * It also has to be maintained in case some packets need to be
+ * resent.
+ */
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp1_attr->qp_num - 1)));
+ pr_info("config_value = %d, db_local = %d\n",
+ config_value, qp1_attr->rq_wrptr_db_local);
+ if (qp1_attr->rq_wrptr_db_local == config_value) {
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+ return;
+ }
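+ /* The pending packet count is computed modulo XRNIC_RQ_DEPTH: if
+ * the local pointer has moved past the value read from hardware,
+ * the hardware write pointer has wrapped, so add the ring depth.
+ */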
+
+ if (qp1_attr->rq_wrptr_db_local > config_value)
+ rq_pkt_count = (config_value + XRNIC_RQ_DEPTH) -
+ qp1_attr->rq_wrptr_db_local;
+ else
+ rq_pkt_count = config_value - qp1_attr->rq_wrptr_db_local;
+
+ DEBUG_LOG("rx pkt count = 0x%x\n", rq_pkt_count);
+ for (j = 0 ; j < rq_pkt_count ; j++) {
+ config_value = ioread32((char *)xrnic_mmap->sq_cmpl_db_add +
+ (4 * (qp1_attr->qp_num - 1)));
+
+ rq_pkt_num = qp1_attr->rq_wrptr_db_local;
+ if (rq_pkt_num >= XRNIC_RQ_DEPTH)
+ rq_pkt_num = rq_pkt_num - XRNIC_RQ_DEPTH;
+
+ ethhdr = (struct ethhdr_t *)((char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE));
+
+ if (ethhdr->eth_type == htons(XRNIC_ETH_P_IP)) {
+ rq_buf_temp = (char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE);
+ memcpy((char *)rq_buf_unaligned,
+ (char *)rq_buf_temp, XRNIC_RECV_PKT_SIZE);
+ qp1_attr->ip_addr_type = AF_INET;
+ } else {
+ rq_buf_temp = (char *)qp1_attr->rq_buf_ba_ca +
+ (rq_pkt_num * XRNIC_RECV_PKT_SIZE);
+ memcpy((char *)rq_buf_unaligned,
+ (char *)rq_buf_temp, XRNIC_RECV_PKT_SIZE);
+ qp1_attr->ip_addr_type = AF_INET6;
+ }
+ ret = xrnic_process_mad_pkt(rq_buf_unaligned);
+
+ if (ret) {
+ DEBUG_LOG("MAD pkt processing failed for pkt num %d\n",
+ rq_pkt_num);
+ }
+
+ qp1_attr->rq_wrptr_db_local = qp1_attr->rq_wrptr_db_local + 1;
+ config_value = qp1_attr->rq_wrptr_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->rq_ci_db)));
+
+ if (qp1_attr->rq_wrptr_db_local == XRNIC_RQ_DEPTH)
+ qp1_attr->rq_wrptr_db_local = 0;
+ }
+
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_cm_establishment_handler() - handles the CM MAD packets received
+ * on QP1 and drives the connection establishment state machine.
+ * @rq_buf : receive queue buffer
+ * @return: XRNIC_SUCCESS on success, XRNIC_FAILED in case of failure
+ */
+int xrnic_cm_establishment_handler(void *rq_buf)
+{
+ struct qp_cm_pkt_hdr_ipv4 *recv_qp_pkt_ipv4;
+ struct qp_cm_pkt_hdr_ipv6 *recv_qp_pkt_ipv6;
+ struct mad *mad;
+ struct req *req;
+ struct rep *rep;
+ struct deth *deth;
+ struct xrnic_qp_attr *qp_attr;
+ int i = 0, ret;
+ enum xrnic_rej_reason reason;
+ enum xrnic_msg_rej msg;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ int qp1_send_pkt_size;
+ struct xrnic_rdma_cm_id *cm_id, *tmp;
+ struct sockaddr_in *din4;
+
+ DEBUG_LOG("Entering %s\n", __func__);
+
+ if (qp1_attr->ip_addr_type == AF_INET) {
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv4->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ } else {
+ recv_qp_pkt_ipv6 = (struct qp_cm_pkt_hdr_ipv6 *)rq_buf;
+ mad = (struct mad *)&recv_qp_pkt_ipv6->mad;
+ req = (struct req *)&mad->data;
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
+ }
+ switch (htons(mad->attribute_id)) {
+ case CONNECT_REQUEST:
+ DEBUG_LOG("Connect request recevied\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ ret = xrnic_find_free_qp();
+ DEBUG_LOG("Q pair no:%x, i = %d\n", ret, i);
+ if (ret < 0) {
+ qp_attr = qp1_attr;
+ qp_attr->ip_addr_type = qp1_attr->ip_addr_type;
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ pr_err("no QP is free for connection.\n");
+ reason = XRNIC_REJ_NO_QP_AVAILABLE;
+ msg = XRNIC_REJ_REQ;
+ qp_attr->remote_cm_id = req->local_cm_id;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ return XRNIC_FAILED;
+ }
+ i = ret;
+ }
+
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_LISTEN ||
+ qp_attr->curr_state == XRNIC_MRA_SENT ||
+ qp_attr->curr_state == XRNIC_REJ_SENT ||
+ qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_ESTABLISHD) {
+ qp_attr->ip_addr_type = qp1_attr->ip_addr_type;
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_connect_request_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state for Connect Request\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case READY_TO_USE:
+ DEBUG_LOG("RTU received\n");
+
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection. in RTU\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_ready_to_use_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve RTU\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case MSG_RSP_ACK:
+ DEBUG_LOG("Message received Ack interrupt\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_REP_SENT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_msg_rsp_ack_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve MSG RSP ACK\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case CONNECT_REPLY:
+ DEBUG_LOG("Connect reply received\n");
+ recv_qp_pkt_ipv4 = (struct qp_cm_pkt_hdr_ipv4 *)rq_buf;
+ rep = (struct rep *)&recv_qp_pkt_ipv4->mad.data;
+ deth = (struct deth *)&recv_qp_pkt_ipv4->deth;
+ list_for_each_entry_safe(cm_id, tmp, &cm_id_list, list) {
+ if (cm_id->local_cm_id ==
+ be32_to_cpu(rep->remote_comm_id))
+ break;
+ }
+ /* Something is wrong if the QP number is 0; don't send a Reply.
+ * TODO: Send a Reject instead of silently dropping the Reply.
+ */
+ if (cm_id->qp_info.qp_num == 0)
+ goto done;
+ cm_id->local_cm_id = rep->remote_comm_id;
+ cm_id->remote_cm_id = rep->local_cm_id;
+ qp_attr = &xrnic_dev->qp_attr[(cm_id->qp_info.qp_num - 2)];
+ qp_attr->local_cm_id = rep->remote_comm_id;
+ qp_attr->remote_cm_id = rep->local_cm_id;
+ qp_attr->remote_qp = (be32_to_cpu(rep->local_qpn) >> 8);
+ qp_attr->source_qp_num = (deth->src_qp);
+ qp_attr->starting_psn = (cm_id->qp_info.starting_psn - 1);
+ qp_attr->rem_starting_psn = (rep->start_psn[2] |
+ rep->start_psn[1] << 8 |
+ rep->start_psn[0] << 16);
+ ether_addr_copy(qp_attr->mac_addr, cm_id->route.dmac);
+ din4 = &cm_id->route.d_addr;
+ cm_id->port_num = be16_to_cpu(din4->sin_port);
+ xrnic_dev->port_status[cm_id->port_num - 1] =
+ XRNIC_PORT_QP_IN_USE;
+ qp_attr->ipv4_addr = din4->sin_addr.s_addr;
+ qp_attr->ip_addr_type = AF_INET;
+ qp_attr->cm_id = cm_id;
+ xrnic_qp_app_configuration(cm_id->qp_info.qp_num,
+ XRNIC_HW_QP_ENABLE);
+ xrnic_cm_connect_rep_handler(qp_attr, NULL);
+ xrnic_cm_send_rtu(cm_id, rep);
+ qp_attr->curr_state = XRNIC_ESTABLISHD;
+done:
+ break;
+
+ case CONNECT_REJECT:
+ DEBUG_LOG("Connect Reject received\n");
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection.\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_MRA_SENT ||
+ qp_attr->curr_state == XRNIC_REP_SENT ||
+ qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_connect_reject_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to serve connect reject\n");
+ return XRNIC_FAILED;
+ }
+
+ break;
+
+ case DISCONNECT_REQUEST:
+ DEBUG_LOG("Disconnect request received\n");
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QPis free for connection.\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_ESTABLISHD ||
+ qp_attr->curr_state == XRNIC_DREQ_SENT ||
+ qp_attr->curr_state == XRNIC_TIMEWAIT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_disconnect_request_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to for Disconnect request\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case DISCONNECT_REPLY:
+ DEBUG_LOG("Disconnect reply received\n");
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++) {
+ if (!xrnic_identify_remote_host(rq_buf, i))
+ break;
+ }
+ if (i == XRNIC_MAX_QP_SUPPORT) {
+ pr_err("no QP is free for connection.\n");
+ return XRNIC_FAILED;
+ }
+ qp_attr = &xrnic_dev->qp_attr[i];
+
+ if (qp_attr->curr_state == XRNIC_DREQ_SENT) {
+ xrnic_prepare_initial_headers(qp_attr, rq_buf);
+ xrnic_cm_disconnect_reply_handler(qp_attr, rq_buf);
+ } else {
+ pr_err("Invalid QP state to for Disconnect reply\n");
+ return XRNIC_FAILED;
+ }
+ break;
+
+ case SERVICE_ID_RESOLUTION_REQ:
+ DEBUG_LOG("Received service ID resolution request\n");
+ pr_err("Not handling service ID resolution request\n");
+ return XRNIC_FAILED;
+
+ case SERVICE_ID_RESOLUTION_REQ_REPLY:
+ DEBUG_LOG("Received service ID resolution reply\n");
+ pr_err("Not handling service ID resolution reply\n");
+ return XRNIC_FAILED;
+
+ case LOAD_ALTERNATE_PATH:
+ DEBUG_LOG("Received Load Alternate Path request\n");
+ pr_err("Not handling Load Alternate Path request\n");
+ return XRNIC_FAILED;
+
+ case ALTERNATE_PATH_RESPONSE:
+ DEBUG_LOG("Received LAP response\n");
+ pr_err("Not handling LAP response\n");
+ return XRNIC_FAILED;
+
+ default:
+ pr_err("default mad attribute 0x%x\n", mad->attribute_id);
+ break;
+ }
+
+ DEBUG_LOG("Exiting %s\n", __func__);
+ return XRNIC_SUCCESS;
+}
diff --git a/drivers/staging/xlnx_ernic/xcm.h b/drivers/staging/xlnx_ernic/xcm.h
new file mode 100644
index 000000000000..6640b83e5166
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcm.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+#ifndef _CM_H
+#define _CM_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+/***************************** Include Files ********************************/
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_cm.h>
+
+/************************** Constant Definitions *****************************/
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+/* As per RoCEv2 Annex 17, the UDP source port can be fixed to avoid
+ * ordering issues. So, to keep things simple, ERNIC also uses a constant
+ * UDP source port.
+ */
+#define ERNIC_UDP_SRC_PORT 0xA000
+
+#define SET_VAL(start, size, val) ((((val) & ((1U << (size)) - 1)) << (start)))
+#define GET_VAL(start, size, val) (((val) >> (start)) & ((1U << (size)) - 1))
+#define BTH_SET(FIELD, v) SET_VAL(BTH_##FIELD##_OFF, \
+ BTH_##FIELD##_SZ, v)
+#define DETH_SET(FIELD, v) SET_VAL(DETH_##FIELD##_OFF, \
+ DETH_##FIELD##_SZ, v)
+
+#define SET_HDR_OFFSET(ptr, off) ((ptr) += off)
+#define SET_CM_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct ib_mad_hdr))
+#define SET_ETH_HDR(ptr) SET_HDR_OFFSET(ptr, 0)
+#define SET_IP_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct ethhdr))
+#define SET_NET_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct iphdr))
+#define SET_BTH_HDR(ptr) SET_HDR_OFFSET(ptr, sizeof(struct udphdr))
+#define SET_DETH_HDR(ptr) SET_HDR_OFFSET(ptr, IB_BTH_BYTES)
+#define SET_MAD_HDR(ptr) SET_HDR_OFFSET(ptr, IB_DETH_BYTES)
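+
+/* Chaining the macros above walks a single buffer pointer through the
+ * UD MAD packet layout used for the CM exchanges:
+ *
+ * ETH | IPv4 | UDP | BTH | DETH | MAD header | MAD data
+ */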
+
+#define CMA_VERSION 0
+#define IB_ENFORCED_QEY 0x80010000
+#define IB_CM_CLASS_VER 2
+/*****************************************************************************/
+struct ib_bth {
+ __be32 offset0;
+#define BTH_PKEY_OFF 0
+#define BTH_PKEY_SZ 16
+#define BTH_TVER_OFF 16
+#define BTH_TVER_SZ 4
+#define BTH_PAD_OFF 20
+#define BTH_PAD_SZ 2
+#define BTH_MIG_OFF 22
+#define BTH_MIG_SZ 1
+#define BTH_SE_OFF 23
+#define BTH_SE_SZ 1
+#define BTH_OPCODE_OFF 24
+#define BTH_OPCODE_SZ 8
+ __be32 offset4;
+#define BTH_DEST_QP_OFF 0
+#define BTH_DEST_QP_SZ 24
+ __be32 offset8;
+#define BTH_PSN_OFF 0
+#define BTH_PSN_SZ 24
+#define BTH_ACK_OFF 31
+#define BTH_ACK_SZ 1
+};
+
+struct ib_deth {
+ __be32 offset0;
+#define DETH_QKEY_OFF 0
+#define DETH_QKEY_SZ 32
+ __be32 offset4;
+#define DETH_SQP_OFF 0
+#define DETH_SQP_SZ 24
+};
+
+struct cma_rtu {
+ u32 local_comm_id;
+ u32 remote_comm_id;
+ u8 private_data[224];
+};
+
+union cma_ip_addr {
+ struct in6_addr ip6;
+ struct {
+ __be32 pad[3];
+ __be32 addr;
+ } ip4;
+};
+
+/* CA11-1: IP Addressing CM REQ Message Private Data Format */
+struct cma_hdr {
+ u8 cma_version;
+ u8 ip_version; /* IP version: 7:4 */
+ __be16 port;
+ union cma_ip_addr src_addr;
+ union cma_ip_addr dst_addr;
+};
+
+enum transport_svc_type {
+ XRNIC_SVC_TYPE_RC = 0,
+ XRNIC_SVC_TYPE_UC,
+ XRNIC_SVC_TYPE_RD,
+ XRNIC_SVC_TYPE_RSVD,
+};
+
+extern struct list_head cm_id_list;
+
+void xrnic_qp1_send_mad_pkt(void *send_sgl_temp,
+ struct xrnic_qp_attr *qp1_attr, u32 send_pkt_size);
+void xrnic_reset_io_qp(struct xrnic_qp_attr *qp_attr);
+void fill_ipv4_cm_req(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size);
+char *fill_ipv4_headers(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size);
+int xrnic_cm_establishment_handler(void *rq_buf);
+char *fill_mad_common_header(struct xrnic_rdma_cm_id *cm_id,
+ char *send_sgl_qp1, int cm_req_size,
+ int cm_attr);
+void xrnic_prepare_initial_headers(struct xrnic_qp_attr *qp_attr,
+ void *rq_buf);
+void xrnic_cm_msg_rsp_ack_handler(struct xrnic_qp_attr *qp_attr, void *rq_buf);
+void xrnic_cm_disconnect_send_handler(struct xrnic_qp_attr *qp_attr);
+void xrnic_cm_prepare_rej(struct xrnic_qp_attr *qp_attr,
+ enum xrnic_rej_reason reason,
+ enum xrnic_msg_rej msg);
+void xrnic_send_mad(void *send_buf, u32 size);
+int xrnic_identify_remote_host(void *rq_buf, int qp_num);
+void xrnic_mad_pkt_recv_intr_handler(unsigned long data);
+
+struct ernic_cm_req {
+ u32 local_comm_id;
+ u32 rsvd1;
+ __u64 service_id;
+ __u64 local_ca_guid;
+ u32 rsvd2;
+ u32 local_qkey;
+ u32 offset32;
+ u32 offset36;
+ u32 offset40;
+ u32 offset44;
+ u16 pkey;
+ u8 offset50;
+ u8 offset51;
+ u16 local_lid;
+ u16 remote_lid;
+ union ib_gid local_gid;
+ union ib_gid remote_gid;
+ u32 offset88;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 offset94;
+ u8 offset95;
+ u8 rsvd3[45];
+ u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+} __packed;
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _CM_H*/
diff --git a/drivers/staging/xlnx_ernic/xcommon.h b/drivers/staging/xlnx_ernic/xcommon.h
new file mode 100644
index 000000000000..c7d9ff6c84b6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xcommon.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef COMMOM_INCL_H
+#define COMMOM_INCL_H
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include "xif.h"
+#include "xrocev2.h"
+#include "xhw_def.h"
+#include "xqp.h"
+#include "xcm.h"
+#include "xmr.h"
+#include "xmain.h"
+
+#define XRNIC_FAILED -1
+#define XRNIC_SUCCESS 0
+#define DEBUG_LOG(x, ...) do { \
+ if (debug)\
+ pr_info(x, ##__VA_ARGS__); \
+ } while (0)
+
+extern int debug;
+
+struct xrnic_dev_info {
+ struct xrnic_memory_map xrnic_mmap;
+ struct xrnic_qp_attr qp1_attr;
+ /* TODO: Need to allocate qp_attr on heap.
+ * when max Queue Pairs increases in the design, static memory
+ * requirement will be huge.
+ */
+ struct xrnic_qp_attr qp_attr[XRNIC_MAX_QP_SUPPORT];
+ /* DESTINATION ADDR_FAMILY - IPv4/V6 */
+ u16 ip_addr_type;
+ /* DESTINATION addr in NBO */
+ u8 ipv6_addr[16];
+ u32 pmtu;
+ /* IPV4 address */
+ u8 ipv4_addr[4];
+ u32 qp_falat_local_ptr;
+ struct xrnic_rdma_cm_id_info *curr_cm_id_info;
+ /* TODO: Need to allocate cm_id_info and port_status on heap. */
+ struct xrnic_rdma_cm_id_info *cm_id_info[XRNIC_MAX_PORT_SUPPORT];
+ enum xrnic_port_qp_status port_status[XRNIC_MAX_PORT_SUPPORT];
+ /* Interrupt for RNIC */
+ u32 xrnic_irq;
+ struct tasklet_struct mad_pkt_recv_task;
+ struct tasklet_struct qp_pkt_recv_task;
+ struct tasklet_struct qp_fatal_task;
+ struct tasklet_struct wqe_completed_task;
+ u32 io_qp_count;
+ /*Character Driver Interface*/
+ struct device_node *dev_node;
+ struct resource resource;
+ struct cdev cdev;
+ char pkt_buffer[512];
+ struct device *dev;
+};
+
+extern struct xrnic_dev_info *xrnic_dev;
+#ifdef __cplusplus
+ }
+#endif
+#endif
diff --git a/drivers/staging/xlnx_ernic/xernic_bw_test.c b/drivers/staging/xlnx_ernic/xernic_bw_test.c
new file mode 100644
index 000000000000..0f0977660621
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xernic_bw_test.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC perftest driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <net/addrconf.h>
+#include "xcommon.h"
+#include "xperftest.h"
+
+/* Default Port Number for Perftest and Depths for XRNIC */
+#define PERFTEST_PORT 18515
+#define PERFTEST_SQ_DEPTH 0x80
+#define PERFTEST_RQ_DEPTH 0x40
+/* Admin and IO QPs */
+#define PERFTEST_ADMIN_QPS 1
+#define PERFTEST_IO_QPS 1
+#define PERFTEST_MAX_QPS (PERFTEST_ADMIN_QPS + PERFTEST_IO_QPS)
+#define PERFTEST_DEFAULT_MEM_SIZE (4 * 1024 * 1024)
+
+#define _1MB_BUF_SIZ (1024 * 1024)
+#define PERF_TEST_RQ_BUF_SIZ ((_1MB_BUF_SIZ + XRNIC_RECV_PKT_SIZE) *\
+ PERFTEST_RQ_DEPTH)
+
+struct xrnic_rdma_cm_id *cm_id;
+static char server_ip[32] = "0.0.0.0";
+struct ernic_pd *pd;
+int prev_qpn;
+
+/* TODO: currently, we have a single instance.
+ * Need to convert this into a per-instance context.
+ */
+struct perftest_ctx {
+ struct xrnic_rdma_cm_id *cm_id;
+ struct ernic_pd *pd;
+ struct mr *reg_mr; /*registered MR */
+};
+
+phys_addr_t phys_mem[PERFTEST_MAX_QPS];
+int io_mr_idx;
+struct mr *perftest_io_mr[PERFTEST_IO_QPS];
+
+struct perftest_ctx perf_context[PERFTEST_MAX_QPS];
+
+struct perftest_wr {
+ union ctx ctx;
+ __u8 reserved1[2];
+ __u32 local_offset[2];
+ __u32 length;
+ __u8 opcode;
+ __u8 reserved2[3];
+ __u32 remote_offset[2];
+ __u32 remote_tag;
+ __u32 completion_info[4];
+ __u8 reserved4[16];
+} __packed;
+
+struct xrnic_qp_init_attr qp_attr;
+
+struct perftest_trinfo {
+ phys_addr_t rq_buf_ba_phys;
+ phys_addr_t send_sgl_phys;
+ phys_addr_t sq_ba_phys;
+ phys_addr_t cq_ba_phys;
+ phys_addr_t rq_wptr_db_phys;
+ phys_addr_t sq_cmpl_db_phys;
+ void __iomem *rq_buf_ba;
+ void __iomem *send_sgl;
+ void __iomem *sq_ba;
+ void __iomem *cq_ba;
+};
+
+struct perftest_trinfo trinfo;
+struct xrnic_rdma_conn_param conn_param;
+int rq_ci_db, sq_cmpl_db;
+
+int port = -1;
+module_param_string(server_ip, server_ip, sizeof(server_ip), 0444);
+module_param(port, int, 0444);
+MODULE_PARM_DESC(server_ip, "Target server ip address");
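+
+/* Example usage (values are illustrative): load the module with the IP
+ * address the ERNIC should listen on, e.g.
+ * insmod xernic_bw_test.ko server_ip=192.168.100.10 port=18515
+ * port falls back to PERFTEST_PORT when it is not passed.
+ */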
+
+/**
+ * perftest_parse_addr() - Parses the input IP address.
+ * @s_addr: IP address structure.
+ * @buf: Input address string (IPv4 or IPv6).
+ * return: 0 if the address is a valid IPv4 or IPv6 address,
+ * -EINVAL otherwise.
+ */
+int perftest_parse_addr(struct sockaddr_storage *s_addr, char *buf)
+{
+ size_t buflen = strlen(buf);
+ int ret;
+ const char *delim;
+
+ if (buflen <= INET_ADDRSTRLEN) {
+ struct sockaddr_in *sin_addr = (struct sockaddr_in *)s_addr;
+
+ ret = in4_pton(buf, buflen, (u8 *)&sin_addr->sin_addr.s_addr,
+ '\0', NULL);
+ if (!ret)
+ goto fail;
+
+ sin_addr->sin_family = AF_INET;
+ return 0;
+ }
+ if (buflen <= INET6_ADDRSTRLEN) {
+ struct sockaddr_in6 *sin6_addr = (struct sockaddr_in6 *)s_addr;
+
+ ret = in6_pton(buf, buflen,
+ (u8 *)&sin6_addr->sin6_addr.s6_addr,
+ -1, &delim);
+ if (!ret)
+ goto fail;
+
+ sin6_addr->sin6_family = AF_INET6;
+ return 0;
+ }
+fail:
+ return -EINVAL;
+}
+
+/**
+ * rq_handler() - receive packet callback routine.
+ * @rq_count: Rx packet count.
+ * @rq_context: context info.
+ */
+void rq_handler(u32 rq_count, void *rq_context)
+{
+ int i, qp_num, offset;
+ struct ernic_bwtest_struct *rq_buf;
+ struct xrnic_rdma_cm_id *cm_id;
+ struct perftest_wr *sq_wr;
+ struct mr *mem;
+ struct perftest_ctx *ctx;
+
+ ctx = (struct perftest_ctx *)rq_context;
+ cm_id = ctx->cm_id;
+ qp_num = cm_id->child_qp_num;
+ offset = sq_cmpl_db * XRNIC_SEND_SGL_SIZE;
+ for (i = 0; i < rq_count; i++) {
+ if (qp_num == 1) {
+ rq_buf = (struct ernic_bwtest_struct *)
+ ((char *)cm_id->qp_info.rq_buf_ba_ca +
+ ((qp_num - 1) * rq_ci_db *
+ XRNIC_RECV_PKT_SIZE));
+ if (io_mr_idx > PERFTEST_IO_QPS)
+ goto done;
+ mem = perftest_io_mr[io_mr_idx];
+
+ rq_buf->rkey = htonl((unsigned int)mem->rkey);
+ rq_buf->vaddr = cpu_to_be64(mem->vaddr);
+
+ memcpy((u8 *)(trinfo.send_sgl + offset),
+ (u8 *)rq_buf,
+ sizeof(struct ernic_bwtest_struct));
+
+ sq_wr = (struct perftest_wr *)trinfo.sq_ba +
+ sq_cmpl_db;
+ sq_wr->ctx.wr_id = sq_cmpl_db;
+ sq_wr->length = sizeof(struct ernic_bwtest_struct);
+ sq_wr->remote_tag = ntohl(0xDEAD);
+ sq_wr->local_offset[0] = trinfo.send_sgl_phys + offset;
+ sq_wr->local_offset[1] = 0;
+
+ sq_wr->remote_offset[0] = 0x12345678;
+ sq_wr->remote_offset[1] = 0xABCDABCD;
+ sq_wr->completion_info[0] = htonl(0x11111111);
+ sq_wr->completion_info[1] = htonl(0x22222222);
+ sq_wr->completion_info[2] = htonl(0x33333333);
+ sq_wr->completion_info[3] = htonl(0x44444444);
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ }
+ xrnic_post_recv(&cm_id->qp_info, 1);
+ if (qp_num == 1) {
+ xrnic_post_send(&cm_id->qp_info, 1);
+ if (prev_qpn != rq_buf->qp_number) {
+ if (prev_qpn != 0)
+ io_mr_idx++;
+ prev_qpn = rq_buf->qp_number;
+ }
+ }
+
+done:
+ rq_ci_db++;
+
+ if (rq_ci_db >= (PERFTEST_RQ_DEPTH - 20))
+ rq_ci_db = 0;
+ if (qp_num == 1) {
+ sq_cmpl_db++;
+ if (sq_cmpl_db >= PERFTEST_SQ_DEPTH)
+ sq_cmpl_db = 0;
+ }
+ }
+}
+
+/**
+ * sq_handler() - completion call back.
+ * @sq_count: Tx packet count.
+ * @sq_context: context info.
+ */
+void sq_handler(u32 sq_count, void *sq_context)
+{
+/* TODO: This function is just a place holder for now.
+ * This function should handle completions for outgoing
+ * RDMA_SEND, RDMA_READ and RDMA_WRITE.
+ */
+ pr_info("XLNX[%d:%s]\n", __LINE__, __func__);
+}
+
+/**
+ * perftest_fill_wr() - Fills the work requests at the send queue base address.
+ * @sq_ba: send queue base address of the QP.
+ */
+void perftest_fill_wr(void __iomem *sq_ba)
+{
+ struct perftest_wr *sq_wr;
+ int i;
+
+ for (i = 0; i < XRNIC_SQ_DEPTH; i++) {
+ sq_wr = (struct perftest_wr *)sq_ba + i;
+ sq_wr->ctx.wr_id = i;
+ sq_wr->length = 16;
+ sq_wr->completion_info[0] = 0xAAAAAAAA;
+ sq_wr->completion_info[1] = 0xBBBBBBBB;
+ sq_wr->completion_info[2] = 0xCCCCCCCC;
+ sq_wr->completion_info[3] = 0xDDDDDDDD;
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ }
+}
+
+/**
+ * perftest_cm_handler() - CM handler call back routine.
+ * @cm_id: CM ID on which event received.
+ * @conn_event: Event information on the CM.
+ * @return: 0 on success or error code on failure.
+ */
+static int perftest_cm_handler(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event)
+{
+ int qp_num, per_qp_size;
+ struct perftest_ctx *ctx;
+
+ qp_num = cm_id->child_qp_num;
+ memset(&qp_attr, 0, sizeof(struct xrnic_qp_init_attr));
+ ctx = &perf_context[qp_num - 1];
+ switch (conn_event->cm_event) {
+ case XRNIC_REQ_RCVD:
+ qp_attr.xrnic_rq_event_handler = rq_handler;
+ qp_attr.xrnic_sq_event_handler = sq_handler;
+ qp_attr.qp_type = XRNIC_QPT_RC;
+ if (qp_num > 1) {
+ qp_attr.recv_pkt_size = _1MB_BUF_SIZ;
+ per_qp_size = (qp_num - 2) * _1MB_BUF_SIZ *
+ PERFTEST_RQ_DEPTH + XRNIC_RECV_PKT_SIZE *
+ PERFTEST_RQ_DEPTH;
+ } else {
+ qp_attr.recv_pkt_size = XRNIC_RECV_PKT_SIZE;
+ per_qp_size = 0;
+ }
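+ /* The RQ carve-out is shared: QP1 gets XRNIC_RECV_PKT_SIZE bytes
+ * per RQ entry at the start, and each data QP gets _1MB_BUF_SIZ
+ * bytes per entry after it, so per_qp_size is this QP's byte
+ * offset within rq_buf_ba.
+ */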
+ qp_attr.rq_buf_ba_ca_phys = trinfo.rq_buf_ba_phys +
+ per_qp_size;
+ qp_attr.rq_buf_ba_ca = (char *)trinfo.rq_buf_ba +
+ per_qp_size;
+ per_qp_size = (qp_num - 1) * sizeof(struct perftest_wr) *
+ PERFTEST_SQ_DEPTH;
+ qp_attr.sq_ba_phys = trinfo.sq_ba_phys + per_qp_size;
+ qp_attr.sq_ba = (char *)trinfo.sq_ba + per_qp_size;
+ per_qp_size = (qp_num - 1) * (PERFTEST_SQ_DEPTH * 4);
+ qp_attr.cq_ba_phys = trinfo.cq_ba_phys + per_qp_size;
+ qp_attr.cq_ba = (char *)trinfo.cq_ba + per_qp_size;
+ qp_attr.rq_context = ctx;
+ qp_attr.sq_context = ctx;
+ ctx->cm_id = cm_id;
+ qp_attr.sq_depth = PERFTEST_SQ_DEPTH;
+ qp_attr.rq_depth = PERFTEST_RQ_DEPTH;
+ ctx->reg_mr = reg_phys_mr(pd, phys_mem[qp_num - 1],
+ PERFTEST_DEFAULT_MEM_SIZE,
+ MR_ACCESS_RDWR, NULL);
+ if (qp_num > 1)
+ perftest_io_mr[qp_num - 2] = ctx->reg_mr;
+
+ xrnic_rdma_create_qp(cm_id, ctx->reg_mr->pd,
+ &qp_attr);
+
+ memset(&conn_param, 0, sizeof(conn_param));
+ conn_param.initiator_depth = 16;
+ conn_param.responder_resources = 16;
+ xrnic_rdma_accept(cm_id, &conn_param);
+ break;
+ case XRNIC_ESTABLISHD:
+ if (cm_id->child_qp_num > 1) {
+ perftest_fill_wr((char *)trinfo.sq_ba +
+ ((qp_num - 1) *
+ sizeof(struct perftest_wr) *
+ PERFTEST_SQ_DEPTH));
+ xrnic_hw_hs_reset_sq_cq(&cm_id->qp_info, NULL);
+ }
+ break;
+ case XRNIC_DREQ_RCVD:
+ xrnic_destroy_qp(&cm_id->qp_info);
+ xrnic_rdma_disconnect(cm_id);
+ xrnic_rdma_destroy_id(cm_id, 0);
+ dereg_mr(ctx->reg_mr);
+ io_mr_idx = 0;
+ prev_qpn = 0;
+ rq_ci_db = 0;
+ sq_cmpl_db = 0;
+ break;
+ default:
+ pr_info("Unhandled CM Event: %d\n",
+ conn_event->cm_event);
+ }
+ return 0;
+}
+
+/**
+ * perftest_init() - Perf test init function.
+ * @return: 0 on success or error code on failure.
+ */
+static int __init perftest_init(void)
+{
+ int ret, i;
+ struct sockaddr_storage s_addr;
+ struct sockaddr_in *sin_addr;
+ struct sockaddr_in6 *sin6_addr;
+
+ if (strcmp(server_ip, "0.0.0.0") == 0) {
+ pr_err("server ip module parameter not provided\n");
+ return -EINVAL;
+ }
+
+ /* If port number is not set, then it should point to the default */
+ if (port == -1) {
+ port = PERFTEST_PORT;
+ pr_info("Using app default port number: %d\n", port);
+ } else if (port < 0) {
+ /* Any other negative value.
+ * Some ports are reserved and a few others may be in use;
+ * we could add a check here to validate that the given port
+ * number is free to use.
+ */
+ pr_err("port number should not be a negative value\n");
+ return -EINVAL;
+ }
+ pr_info("Using port number %d\n", port);
+
+ cm_id = xrnic_rdma_create_id(perftest_cm_handler, NULL, XRNIC_PS_TCP,
+ XRNIC_QPT_UC, PERFTEST_MAX_QPS);
+ if (!cm_id)
+ goto err;
+
+ if (perftest_parse_addr(&s_addr, server_ip))
+ goto err;
+
+ if (s_addr.ss_family == AF_INET) {
+ sin_addr = (struct sockaddr_in *)&s_addr;
+ ret = xrnic_rdma_bind_addr(cm_id,
+ (u8 *)&sin_addr->sin_addr.s_addr,
+ port, AF_INET);
+ if (ret < 0) {
+ pr_err("RDMA BIND Failed for IPv4\n");
+ goto err;
+ }
+ }
+ if (s_addr.ss_family == AF_INET6) {
+ sin6_addr = (struct sockaddr_in6 *)&s_addr;
+ ret = xrnic_rdma_bind_addr(cm_id,
+ (u8 *)&sin6_addr->sin6_addr.s6_addr,
+ port, AF_INET6);
+ if (ret < 0) {
+ pr_err("RDMA BIND Failed for IPv6\n");
+ goto err;
+ }
+ }
+
+ if (xrnic_rdma_listen(cm_id, 1) != XRNIC_SUCCESS)
+ goto err;
+
+ trinfo.rq_buf_ba_phys = alloc_mem(NULL, PERF_TEST_RQ_BUF_SIZ);
+ if (trinfo.rq_buf_ba_phys == -ENOMEM)
+ goto err;
+ trinfo.rq_buf_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr
+ (trinfo.rq_buf_ba_phys);
+
+ trinfo.send_sgl_phys = alloc_mem(NULL, 0x400000);
+ if (trinfo.send_sgl_phys == -ENOMEM)
+ goto err;
+ trinfo.send_sgl =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.send_sgl_phys);
+
+ trinfo.sq_ba_phys = alloc_mem(NULL, 0x100000);
+ if (trinfo.sq_ba_phys == -ENOMEM)
+ goto err;
+ trinfo.sq_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.sq_ba_phys);
+
+ trinfo.cq_ba_phys = alloc_mem(NULL, 0x40000);
+ if (trinfo.cq_ba_phys == -ENOMEM)
+ goto err;
+ trinfo.cq_ba =
+ (void __iomem *)(uintptr_t)get_virt_addr(trinfo.cq_ba_phys);
+ trinfo.rq_wptr_db_phys = alloc_mem(NULL, 8);
+ trinfo.sq_cmpl_db_phys = alloc_mem(NULL, 8);
+ pd = alloc_pd();
+ for (i = 0; i < PERFTEST_MAX_QPS; i++) {
+ phys_mem[i] = alloc_mem(pd, PERFTEST_DEFAULT_MEM_SIZE);
+ if (IS_ERR_VALUE(phys_mem[i])) {
+ pr_err("PERFTEST[%d:%s] Mem registration failed: %lld\n",
+ __LINE__, __func__, phys_mem[i]);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+/* free_mem() works only on valid physical addresses returned from
+ * alloc_mem(), and ignores NULL or invalid addresses.
+ * So, even if any of the above allocations fails in the middle,
+ * we can safely call free_mem() on all addresses.
+ *
+ * We are using carve-out memory for the requirements of ERNIC,
+ * so we cannot use devm_kzalloc() as the kernel cannot see this
+ * memory until it is ioremapped.
+ */
+ free_mem(trinfo.rq_buf_ba_phys);
+ free_mem(trinfo.send_sgl_phys);
+ free_mem(trinfo.sq_ba_phys);
+ free_mem(trinfo.cq_ba_phys);
+ free_mem(trinfo.rq_wptr_db_phys);
+ free_mem(trinfo.sq_cmpl_db_phys);
+ for (i = 0; i < PERFTEST_MAX_QPS; i++)
+ free_mem(phys_mem[i]);
+
+ dealloc_pd(pd);
+
+ return -EINVAL;
+}
+
+/**
+ * perftest_exit() - perftest module exit function.
+ */
+static void __exit perftest_exit(void)
+{
+ int i;
+
+ free_mem(trinfo.rq_buf_ba_phys);
+ free_mem(trinfo.send_sgl_phys);
+ free_mem(trinfo.sq_ba_phys);
+ free_mem(trinfo.cq_ba_phys);
+ free_mem(trinfo.rq_wptr_db_phys);
+ free_mem(trinfo.sq_cmpl_db_phys);
+ for (i = 0; i < PERFTEST_MAX_QPS; i++)
+ free_mem(phys_mem[i]);
+
+ dealloc_pd(pd);
+}
+
+/* This driver is an example driver, which uses the APIs exported by the
+ * ernic driver to demonstrate RDMA communication between peers on an
+ * RDMA-capable network. The remote peer can be any RDMA enabled NIC.
+ * There is no real device for this driver, so a compatibility string and
+ * a probe function are not needed.
+ */
+ */
+module_init(perftest_init);
+module_exit(perftest_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Perftest Example driver");
+MODULE_AUTHOR("SDHANVAD");
diff --git a/drivers/staging/xlnx_ernic/xhw_config.h b/drivers/staging/xlnx_ernic/xhw_config.h
new file mode 100644
index 000000000000..7846abd18bec
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xhw_config.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_HW_CONFIG_H
+#define _XRNIC_HW_CONFIG_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+
+#define XRNIC_HW_MAX_QP_ENABLE 30
+#define XRNIC_HW_MAX_QP_SUPPORT 28
+#define XRNIC_HW_FLOW_CONTROL_VALUE 0
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_HW_CONFIG_H*/
diff --git a/drivers/staging/xlnx_ernic/xhw_def.h b/drivers/staging/xlnx_ernic/xhw_def.h
new file mode 100644
index 000000000000..c59f266c03f6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xhw_def.h
@@ -0,0 +1,641 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_HW_DEF_H
+#define _XRNIC_HW_DEF_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include "xhw_config.h"
+
+#define XRNIC_MAX_QP_ENABLE XRNIC_HW_MAX_QP_ENABLE
+#define XRNIC_MAX_QP_SUPPORT XRNIC_HW_MAX_QP_SUPPORT
+#define XRNIC_MAX_PORT_SUPPORT 0xFFFE
+#define XRNIC_REG_WIDTH 32
+#define XRNIC_QPS_ENABLED XRNIC_MAX_QP_ENABLE
+#define XRNIC_QP1_SEND_PKT_SIZE 512
+#define XRNIC_FLOW_CONTROL_VALUE XRNIC_HW_FLOW_CONTROL_VALUE
+#define XRNIC_CONFIG_XRNIC_EN 0x1
+#define XRNIC_UDP_SRC_PORT 0x12B7
+#define XRNIC_CONFIG_IP_VERSION (0x1 << 1)
+#define XRNIC_CONFIG_DEPKT_BYPASS_EN (0x1 << 2)
+#define XRNIC_CONFIG_ERR_BUF_EN (0x1 << 5)
+#define XRNIC_CONFIG_FLOW_CONTROL_EN (XRNIC_FLOW_CONTROL_VALUE << 6)
+#define XRNIC_CONFIG_NUM_QPS_ENABLED (XRNIC_QPS_ENABLED << 8)
+#define XRNIC_CONFIG_UDP_SRC_PORT (XRNIC_UDP_SRC_PORT << 16)
+
+#define XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED 1
+
+/* Clear the interrupt by writing that bit to the interrupt status register. */
+#define RDMA_READ 4
+#define RDMA_SEND 2
+#define RDMA_WRITE 0
+
+#define XRNIC_QP_TIMEOUT_RETRY_CNT 0x3 /*0x3*/
+#define XRNIC_QP_TIMEOUT_RNR_NAK_TVAL 0x1F /*MAX*/
+#define XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT 0x1F /*MAX 0x1f*/
+#define XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT \
+ (XRNIC_QP_TIMEOUT_RETRY_CNT << 8)
+#define XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT \
+ (XRNIC_QP_TIMEOUT_RETRY_CNT << 11)
+#define XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL \
+ (XRNIC_QP_TIMEOUT_RNR_NAK_TVAL << 16)
+
+#define XRNIC_QP_PMTU 0x4
+#define XRNIC_QP_MAX_RD_OS 0xFF
+#define XRNIC_QP_RQ_BUFF_SZ 0x2
+#define XRNIC_QP1_RQ_BUFF_SZ 0x02
+#define XRNIC_QP_CONFIG_QP_ENABLE 0x1
+#define XRNIC_QP_CONFIG_ACK_COALSE_EN BIT(1)
+#define XRNIC_QP_CONFIG_RQ_INTR_EN BIT(2)
+#define XRNIC_QP_CONFIG_CQE_INTR_EN BIT(3)
+#define XRNIC_QP_CONFIG_HW_HNDSHK_DIS BIT(4)
+#define XRNIC_QP_CONFIG_CQE_WRITE_EN BIT(5)
+#define XRNIC_QP_CONFIG_UNDER_RECOVERY BIT(6)
+#define XRNIC_QP_CONFIG_IPV6_EN BIT(7)
+#define XRNIC_QP_CONFIG_PMTU (0x4 << 8)
+#define XRNIC_QP_CONFIG_PMTU_256 (0x0 << 8)
+#define XRNIC_QP_CONFIG_PMTU_512 (0x1 << 8)
+#define XRNIC_QP_CONFIG_PMTU_1024 (0x2 << 8)
+#define XRNIC_QP_CONFIG_PMTU_2048 (0x3 << 8)
+#define XRNIC_QP_CONFIG_PMTU_4096 (0x4 << 8)
+#define XRNIC_QP_RQ_BUF_SIZ_DIV (256)
+#define XRNIC_QP_RQ_BUF_CFG_REG_BIT_OFS (16)
+#define XRNIC_QP_CONFIG_RQ_BUFF_SZ(x) (((x) / XRNIC_QP_RQ_BUF_SIZ_DIV)\
+ << XRNIC_QP_RQ_BUF_CFG_REG_BIT_OFS)
+#define XRNIC_QP1_CONFIG_RQ_BUFF_SZ (XRNIC_QP1_RQ_BUFF_SZ << 16)
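+/* For example, XRNIC_QP_CONFIG_RQ_BUFF_SZ(4096) encodes 16 (4096 / 256)
+ * into bits 31:16 of the QP config register.
+ */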
+
+#define XRNIC_QP_PARTITION_KEY 0xFFFF
+#define XRNIC_QP_TIME_TO_LIVE 0x40
+
+#define XRNIC_QP_ADV_CONFIG_TRAFFIC_CLASS 0x3F
+#define XRNIC_QP_ADV_CONFIG_TIME_TO_LIVE (XRNIC_QP_TIME_TO_LIVE << 8)
+#define XRNIC_QP_ADV_CONFIG_PARTITION_KEY (XRNIC_QP_PARTITION_KEY << 16)
+
+#define XRNIC_REJ_RESEND_COUNT 3
+#define XRNIC_REP_RESEND_COUNT 3
+#define XRNIC_DREQ_RESEND_COUNT 3
+
+#define XNVEMEOF_RNIC_IF_RHOST_BASE_ADDRESS 0x8c000000
+#define XRNIC_CONFIG_ENABLE 1
+#define XRNIC_RESERVED_SPACE 0x4000
+#define XRNIC_NUM_OF_TX_HDR 128
+#define XRNIC_SIZE_OF_TX_HDR 128
+#define XRNIC_NUM_OF_TX_SGL 256
+#define XRNIC_SIZE_OF_TX_SGL 64
+#define XRNIC_NUM_OF_BYPASS_BUF 32
+#define XRNIC_SIZE_OF_BYPASS_BUF 512
+#define XRNIC_NUM_OF_ERROR_BUF 64
+#define XRNIC_SIZE_OF_ERROR_BUF 256
+#define XRNIC_OUT_ERRST_Q_NUM_ENTRIES 0x40
+#define XRNIC_OUT_ERRST_Q_WRPTR 0x0
+#define XRNIC_IN_ERRST_Q_NUM_ENTRIES 0x40
+#define XRNIC_IN_ERRST_Q_WRPTR 0x0
+#define XRNIC_NUM_OF_DATA_BUF 4096
+#define XRNIC_SIZE_OF_DATA_BUF 4096
+#define XRNIC_NUM_OF_RESP_ERR_BUF 64
+#define XRNIC_SIZE_OF_RESP_ERR_BUF 256
+#define XRNIC_MAD_HEADER 24
+#define XRNIC_MAD_DATA 232
+#define XRNIC_RECV_PKT_SIZE 512
+#define XRNIC_SEND_PKT_SIZE 64
+#define XRNIC_SEND_SGL_SIZE 4096
+#define XRNIC_MAX_SEND_SGL_SIZE 4096
+#define XRNIC_MAX_SEND_PKT_SIZE 4096
+#define XRNIC_MAX_RECV_PKT_SIZE 4096
+#define XRNIC_MAX_SQ_DEPTH 256
+#define XRNIC_MAX_RQ_DEPTH 256
+#define XRNIC_SQ_DEPTH 128
+#define XRNIC_RQ_DEPTH 64
+#define XRNIC_RQ_WRPTR_DBL 0xBC004000
+#define XRNIC_BYPASS_BUF_WRPTR 0xBC00C000
+#define XRNIC_ERROR_BUF_WRPTR 0xBC010000
+
+#define PKT_VALID_ERR_INTR_EN 0x1
+#define MAD_PKT_RCVD_INTR_EN (0x1 << 1)
+#define BYPASS_PKT_RCVD_INTR_EN (0x1 << 2)
+#define RNR_NACK_GEN_INTR_EN (0x1 << 3)
+#define WQE_COMPLETED_INTR_EN (0x1 << 4)
+#define ILL_OPC_SENDQ_INTR_EN (0x1 << 5)
+#define QP_PKT_RCVD_INTR_EN (0x1 << 6)
+#define FATAL_ERR_INTR_EN (0x1 << 7)
+#define ERNIC_MEM_REGISTER
+
+#define XRNIC_INTR_ENABLE_DEFAULT 0x000000FF
+#define XRNIC_VALID_INTR_ENABLE 0
+
+/* XRNIC Controller global configuration registers */
+
+struct xrnic_conf {
+ __u32 xrnic_en:1;
+ __u32 ip_version:1; //IPv6 or IPv4
+ __u32 depkt_bypass_en:1;
+ __u32 reserved:5;
+ __u32 num_qps_enabled:8;
+ __u32 udp_src_port:16;
+} __packed;
+
+struct tx_hdr_buf_sz {
+ __u32 num_hdrs:16;
+ __u32 buffer_sz:16; //in bytes
+} __packed;
+
+struct tx_sgl_buf_sz {
+ __u32 num_sgls:16;
+ __u32 buffer_sz:16; //in bytes
+} __packed;
+
+struct bypass_buf_sz {
+ __u32 num_bufs:16;
+ __u32 buffer_sz:16;
+} __packed;
+
+struct err_pkt_buf_sz {
+ __u32 num_bufs:16;
+ __u32 buffer_sz:16;
+} __packed;
+
+struct timeout_conf {
+ __u32 timeout:5;
+ __u32 reserved:3;
+ __u32 retry_cnt:3;
+ __u32 retry_cnt_rnr:3;
+ __u32 reserved1:2;
+ __u32 rnr_nak_tval:5;
+ __u32 reserved2:11;
+
+} __packed;
+
+struct out_errsts_q_sz {
+ __u32 num_entries:16;
+ __u32 reserved:16;
+} __packed;
+
+struct in_errsts_q_sz {
+ __u32 num_entries:16;
+ __u32 reserved:16;
+} __packed;
+
+struct inc_sr_pkt_cnt {
+ __u32 inc_send_cnt:16;
+ __u32 inc_rresp_cnt:16;
+} __packed;
+
+struct inc_am_pkt_cnt {
+ __u32 inc_acknack_cnt:16;
+ __u32 inc_mad_cnt:16;
+} __packed;
+
+struct out_io_pkt_cnt {
+ __u32 inc_send_cnt:16;
+ __u32 inc_rw_cnt:16;
+} __packed;
+
+struct out_am_pkt_cnt {
+ __u32 inc_acknack_cnt:16;
+ __u32 inc_mad_cnt:16;
+} __packed;
+
+struct last_in_pkt {
+ __u32 opcode:8;
+ __u32 qpid:8;
+ __u32 psn_lsb:16;
+} __packed;
+
+struct last_out_pkt {
+ __u32 opcode:8;
+ __u32 qpid:8;
+ __u32 psn_lsb:16;
+} __packed;
+
+/*Interrupt register definition.*/
+struct intr_en {
+ __u32 pkt_valdn_err_intr_en:1;
+ __u32 mad_pkt_rcvd_intr_en:1;
+ __u32 bypass_pkt_rcvd_intr_en:1;
+ __u32 rnr_nack_gen_intr_en:1;
+ __u32 wqe_completed_i:1;
+ __u32 ill_opc_in_sq_intr_en:1;
+ __u32 qp_pkt_rcvd_intr_en:1;
+ __u32 fatal_err_intr_en:1;
+ __u32 reserved:24;
+} __packed;
+
+struct data_buf_sz {
+ __u16 num_bufs;
+ __u16 buffer_sz;
+};
+
+struct resp_err_buf_sz {
+ __u16 num_bufs;
+ __u16 buffer_sz;
+};
+
+/*Global register configuration*/
+struct xrnic_ctrl_config {
+ struct xrnic_conf xrnic_conf;
+ __u32 xrnic_adv_conf;
+ __u32 reserved1[2];
+ __u32 mac_xrnic_src_addr_lsb;
+ __u32 mac_xrnic_src_addr_msb;
+ __u32 reserved2[2];
+ __u32 ip_xrnic_addr1; //0x0020
+ __u32 ip_xrnic_addr2; //0x0024
+ __u32 ip_xrnic_addr3; //0x0028
+ __u32 ip_xrnic_addr4; //0x002C
+ __u32 tx_hdr_buf_ba; //0x0030
+ __u32 reserved_0x34; //0x0034
+ struct tx_hdr_buf_sz tx_hdr_buf_sz; //0x0038
+ __u32 reserved_0x3c;
+
+ __u32 tx_sgl_buf_ba; //0x0040
+ __u32 reserved_0x44; //0x0044
+ struct tx_sgl_buf_sz tx_sgl_buf_sz; //0x0048
+ __u32 reserved_0x4c;
+
+ __u32 bypass_buf_ba; //0x0050
+ __u32 reserved_0x54; //0x0054
+ struct bypass_buf_sz bypass_buf_sz; //0x0058
+ __u32 bypass_buf_wrptr; //0x005C
+ __u32 err_pkt_buf_ba; //0x0060
+ __u32 reserved_0x64; //0x0064
+ struct err_pkt_buf_sz err_pkt_buf_sz; //0x0068
+ __u32 err_buf_wrptr; //0x006C
+ __u32 ipv4_address; //0x0070
+ __u32 reserved_0x74;
+
+ __u32 out_errsts_q_ba; //0x0078
+ __u32 reserved_0x7c;
+ struct out_errsts_q_sz out_errsts_q_sz; //0x0080
+ __u32 out_errsts_q_wrptr; //0x0084
+
+ __u32 in_errsts_q_ba; //0x0088
+ __u32 reserved_0x8c;
+ struct in_errsts_q_sz in_errsts_q_sz; //0x0090
+ __u32 in_errsts_q_wrptr; //0x0094
+
+ __u32 reserved_0x98; //0x0098
+ __u32 reserved_0x9c; //0x009C
+
+ __u32 data_buf_ba; //0x00A0
+ __u32 reserved_0xa4; //0x00A4
+ struct data_buf_sz data_buf_sz; //0x00A8
+
+ __u32 cnct_io_conf; //0x00AC
+
+ __u32 resp_err_pkt_buf_ba; //0x00B0
+ __u32 reserved_0xb4; //0x00B4
+ struct resp_err_buf_sz resp_err_buf_sz; //0x00B8
+
+ __u32 reserved3[17]; //0x00BC
+
+ struct inc_sr_pkt_cnt inc_sr_pkt_cnt;//0x0100
+ struct inc_am_pkt_cnt inc_am_pkt_cnt;//0x0104
+ struct out_io_pkt_cnt out_io_pkt_cnt;//0x108
+ struct out_am_pkt_cnt out_am_pkt_cnt;//0x010c
+ struct last_in_pkt last_in_pkt; //0x0110
+ struct last_out_pkt last_out_pkt; //0x0114
+
+ __u32 inv_dup_pkt_cnt; //0x0118 incoming invalid duplicate
+
+ __u32 rnr_in_pkt_sts; //0x011C
+ __u32 rnr_out_pkt_sts; //0x0120
+
+ __u32 wqe_proc_sts; //0x0124
+
+ __u32 pkt_hdr_vld_sts; //0x0128
+ __u32 qp_mgr_sts; //0x012C
+
+ __u32 incoming_all_drop_count; //0x130
+ __u32 incoming_nack_pkt_count; //0x134
+ __u32 outgoing_nack_pkt_count; //0x138
+ __u32 resp_handler_status; //0x13C
+
+ __u32 reserved4[16];
+
+ struct intr_en intr_en; //0x0180
+ __u32 intr_sts; //0x0184
+ __u32 reserved5[2];
+ __u32 rq_intr_sts_1; //0x0190
+ __u32 rq_intr_sts_2; //0x0194
+ __u32 rq_intr_sts_3; //0x0198
+ __u32 rq_intr_sts_4; //0x019C
+ __u32 rq_intr_sts_5; //0x01A0
+ __u32 rq_intr_sts_6; //0x01A4
+ __u32 rq_intr_sts_7; //0x01A8
+ __u32 rq_intr_sts_8; //0x01AC
+
+ __u32 cq_intr_sts_1; //0x01B0
+ __u32 cq_intr_sts_2; //0x01B4
+ __u32 cq_intr_sts_3; //0x01B8
+ __u32 cq_intr_sts_4; //0x01BC
+ __u32 cq_intr_sts_5; //0x01C0
+ __u32 cq_intr_sts_6; //0x01C4
+ __u32 cq_intr_sts_7; //0x01C8
+ __u32 cq_intr_sts_8; //0x01CC
+
+ __u32 reserved6[12];
+};
+
+struct qp_conf {
+ __u32 qp_enable:1;
+ __u32 ack_coalsc_en:1;
+ __u32 rq_intr_en:1;
+ __u32 cq_intr_en:1;
+ __u32 hw_hndshk_dis:1;
+ __u32 cqe_write_en:1;
+ __u32 qp_under_recovery:1;
+ __u32 ip_version:1;
+ __u32 pmtu :3;
+ __u32 reserved2:5;
+ __u32 rq_buf_sz:16; //RQ buffer size (in multiples of 256B)
+} __packed;
+
+struct qp_adv_conf {
+ __u32 traffic_class:6;
+ __u32 reserved1 :2;
+ __u32 time_to_live:8;
+ __u32 partition_key:16;
+} __packed;
+
+struct time_out {
+ __u32 timeout:5;
+ __u32 reserved1:3;
+ __u32 retry_cnt:3;
+ __u32 reserved2:5;
+ __u32 rnr_nak_tval:5;
+ __u32 reserved3:3;
+ __u32 curr_retry_cnt:3;
+ __u32 reserved4:2;
+ __u32 curr_rnr_nack_cnt:3;
+ __u32 reserved:1;
+} __packed;
+
+struct qp_status {
+ __u32 qp_fatal:1;
+ __u32 rq_ovfl:1;
+ __u32 sq_full:1;
+ __u32 osq_full:1;
+ __u32 cq_full:1;
+ __u32 reserved1:4;
+ __u32 sq_empty:1;
+ __u32 osq_empty:1;
+ __u32 qp_retried:1;
+ __u32 reserved2:4;
+ __u32 nak_syndr_rcvd:7;
+ __u32 reserved3:1;
+ __u32 curr_retry_cnt:3;
+ __u32 reserved4:1;
+ __u32 curr_rnr_nack_cnt:3;
+ __u32 reserved5:1;
+} __packed;
+
+//This structure is applicable to RDMA queue pairs other than QP1.
+struct rq_buf_ba_ca {
+ __u32 reserved:8; //0x308
+ __u32 rq_buf_ba:24;
+} __packed;
+
+struct sq_ba {
+ __u32 reserved1:5; //0x310
+ __u32 sq_ba:27;
+} __packed;
+
+struct cq_ba {
+ __u32 reserved2:5; //0x318
+ __u32 cq_ba:27;
+} __packed;
+
+struct cq_head {
+ __u32 cq_head:16; //0x330
+ __u32 reserved5:16;
+} __packed;
+
+struct rq_ci_db {
+ __u32 rq_ci_db:16; //0x334
+ __u32 reserved6:16;
+} __packed;
+
+struct sq_pi_db {
+ __u32 sq_pi_db:16; //0x338
+ __u32 reserved7:16;
+} __packed;
+
+struct q_depth {
+ __u32 sq_depth:16; //0x33c
+ __u32 cq_depth:16;
+} __packed;
+
+struct sq_psn {
+ __u32 sq_psn:24; //0x340
+ __u32 reserved8:8;
+} __packed;
+
+struct last_rq_req {
+ __u32 rq_psn:24; //0x344
+ __u32 rq_opcode:8;
+} __packed;
+
+struct dest_qp_conf {
+ __u32 dest_qpid:24; //0x348
+ __u32 reserved9:8;
+} __packed;
+
+struct stat_ssn {
+ __u32 exp_ssn:24; //0x380
+ __u32 reserved10:8;
+} __packed;
+
+struct stat_msn {
+ __u32 curr_msn:24; //0x384
+ __u32 reserved11:8;
+
+} __packed;
+
+struct stat_curr_sqptr_pro {
+ __u32 curr_sqptr_proc:16;
+ __u32 reserved12:16;
+} __packed;
+
+struct stat_resp_psn {
+ __u32 exp_resp_psn:24;
+ __u32 reserved:8;
+} __packed;
+
+struct stat_rq_buf_ca {
+ __u32 reserved:8;
+ __u32 rq_buf_ca:24;
+} __packed;
+
+/* QP1 is a special QP used for all the management packets as per the RoCEv2 spec */
+struct rdma_qp1_attr {
+ struct qp_conf qp_conf; //0x200
+ struct qp_adv_conf qp_adv_conf; //0x204
+ struct rq_buf_ba_ca rq_buf_ba_ca; //0x208
+ __u32 reserved_0x20c; //0x20c
+ struct sq_ba sq_ba; //0x210
+ __u32 reserved_0x214; //0x214
+ struct cq_ba cq_ba; //0x218
+ __u32 reserved_0x21c; //0x21c
+ __u32 rq_wrptr_db_add; //0x220
+ __u32 reserved_0x224; //0x224
+ __u32 sq_cmpl_db_add; //0x228
+ __u32 reserved_0x22c; //0x22c
+ struct cq_head cq_head; //0x230
+ struct rq_ci_db rq_ci_db; //0x234
+ struct sq_pi_db sq_pi_db; //0x238
+ struct q_depth q_depth; //0x23c
+ __u32 reserved1[2]; //0x240
+ struct dest_qp_conf dest_qp_conf; //0x248
+ struct timeout_conf timeout_conf; //0x24C
+ __u32 mac_dest_addr_lsb; //0x250
+ __u32 mac_dest_addr_msb; //0x254
+ __u32 reserved2[2];
+ __u32 ip_dest_addr1; //0x260
+ __u32 ip_dest_addr2; //0x264
+ __u32 ip_dest_addr3; //0x268
+ __u32 ip_dest_addr4; //0x26C
+ __u32 reserved3[6]; //0x270-287(inclusive)
+ struct qp_status qp_status; //0x288
+ __u32 reserved4[2]; //0x28c-0x293(inclusive)
+ struct stat_rq_buf_ca stat_rq_buf_ca;//0x294
+ __u32 reserved5[26]; //0x298-0x2ff(inclusive)
+};
+
+/* General RDMA QP attribute*/
+struct rdma_qp_attr {
+ struct qp_conf qp_conf; //0x300
+ struct qp_adv_conf qp_adv_conf; //0x304
+ struct rq_buf_ba_ca rq_buf_ba_ca;//0x308
+ __u32 reserved_0x30c; //0x30c
+ struct sq_ba sq_ba; //0x310
+ __u32 reserved_0x314; //0x314
+ struct cq_ba cq_ba; //0x318
+ __u32 reserved_0x31c; //0x31c
+ __u32 rq_wrptr_db_add; //0x320
+ __u32 reserved_0x324; //0x324
+ __u32 sq_cmpl_db_add; //0x328
+ __u32 reserved_0x32c; //0x32c
+ struct cq_head cq_head; //0x330
+ struct rq_ci_db rq_ci_db;//0x334
+ struct sq_pi_db sq_pi_db; //0x338
+ struct q_depth q_depth;//0x33c
+ struct sq_psn sq_psn; //0x340
+ struct last_rq_req last_rq_req;//0x344
+ struct dest_qp_conf dest_qp_conf; //0x348
+ struct timeout_conf timeout_conf; //0x34C
+ __u32 mac_dest_addr_lsb; //0x350
+ __u32 mac_dest_addr_msb; //0x354
+ __u32 reserved1[2]; //0x358
+ __u32 ip_dest_addr1; //0x360
+ __u32 ip_dest_addr2; //0x364
+ __u32 ip_dest_addr3; //0x368
+ __u32 ip_dest_addr4; //0x36C
+ __u32 reserved2[4];
+ struct stat_ssn stat_ssn;//0x380
+ struct stat_msn stat_msn;//0x384
+ struct qp_status qp_status; //0x388
+ struct stat_curr_sqptr_pro stat_curr_sqptr_pro;//0x38C
+ struct stat_resp_psn stat_resp_psn; //0x0390
+ struct stat_rq_buf_ca stat_rq_buf_ca;//0x0394
+ __u32 stat_wqe; //0x398
+ __u32 stat_rq_pi_db; //0x39C
+#ifdef ERNIC_MEM_REGISTER
+ __u32 reserved3[4];
+ __u32 pd;
+ __u32 reserved[19];
+#else
+ __u32 reserved3[24];
+#endif
+};
+
+union ctx { // 2 Byte
+ __u16 context;
+ __u16 wr_id;
+} __packed;
+
+//Work request 64Byte size
+struct wr {
+ union ctx ctx; // 2 Byte
+ __u8 reserved1[2];
+ __u32 local_offset[2];
+ __u32 length;
+ __u8 opcode;
+ __u8 reserved2[3];
+ __u32 remote_offset[2];
+ __u32 remote_tag;
+ __u32 completion_info[4];
+ __u8 reserved4[16];
+} __packed;
+
+union ctxe {
+ __u16 context :16;
+ __u16 wr_id:16;
+} __packed;
+
+//Completion Queue Entry 16 Byte
+struct cqe {
+ union ctxe ctxe; // 2 Byte
+ __u8 opcode;
+ __u8 err_flag;
+} __packed;
+
+struct xrnic_reg_map {
+ struct xrnic_ctrl_config xrnic_ctrl_config;
+ struct rdma_qp1_attr rdma_qp1_attr;
+ struct rdma_qp_attr rdma_qp_attr[255];
+
+};
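+
+/* The register map mirrors the hardware layout described by the offset
+ * comments above: the global control/config block first, the QP1
+ * attribute block at 0x200, then one rdma_qp_attr block per data QP
+ * starting at 0x300.
+ */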
+
+struct xrnic_memory_map {
+ struct xrnic_reg_map *xrnic_regs;
+ u64 xrnic_regs_phys;
+ void *send_sgl;
+ u64 send_sgl_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ struct wr *sq_ba;
+ u64 sq_ba_phys;
+ void *tx_hdr_buf_ba;
+ u64 tx_hdr_buf_ba_phys;
+ void *tx_sgl_buf_ba;
+ u64 tx_sgl_buf_ba_phys;
+ void *bypass_buf_ba;
+ u64 bypass_buf_ba_phys;
+ void *err_pkt_buf_ba;
+ u64 err_pkt_buf_ba_phys;
+ void *out_errsts_q_ba;
+ u64 out_errsts_q_ba_phys;
+ void *in_errsts_q_ba;
+ u64 in_errsts_q_ba_phys;
+ void *rq_wrptr_db_add;
+ u64 rq_wrptr_db_add_phys;
+ void *sq_cmpl_db_add;
+ u64 sq_cmpl_db_add_phys;
+ void *stat_rq_buf_ca;
+ u64 stat_rq_buf_ca_phys;
+ void *data_buf_ba;
+ u64 data_buf_ba_phys;
+ u64 resp_err_pkt_buf_ba_phys;
+ void *resp_err_pkt_buf_ba;
+ u32 intr_en;
+ u32 cq_intr[8];
+ u32 rq_intr[8];
+ u64 xrnicif_phys;
+};
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_HW_DEF_H*/
diff --git a/drivers/staging/xlnx_ernic/xif.h b/drivers/staging/xlnx_ernic/xif.h
new file mode 100644
index 000000000000..fb5f02d8c08c
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xif.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_IF_H
+#define _XRNIC_IF_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/udp.h>
+
+#define XRNIC_MAX_CHILD_CM_ID 255
+#define XRNIC_CM_PRVATE_DATA_LENGTH 32
+
+enum xrnic_wc_event {
+ XRNIC_WC_RDMA_WRITE = 0x0,
+ XRNIC_WC_SEND = 0x2,
+ XRNIC_WC_RDMA_READ = 0x4,
+};
+
+union xrnic_ctxe { // 2 Byte
+ __u16 context :16;
+ __u16 wr_id:16;
+} __packed;
+
+struct xrnic_cqe {
+ union xrnic_ctxe ctxe; // 2 Byte
+ __u8 opcode; // 1 Byte
+ __u8 err_flag; // 1 Byte
+} __packed;
+
+enum xrnic_port_space {
+ XRNIC_PS_SDP = 0x0001,
+ XRNIC_PS_IPOIB = 0x0002,
+ XRNIC_PS_IB = 0x013F,
+ XRNIC_PS_TCP = 0x0106,
+ XRNIC_PS_UDP = 0x0111,
+};
+
+enum xrnic_cm_error {
+ XRNIC_INVALID_CM_ID = 2,
+ XRNIC_INVALID_CM_OUTSTANDING = 3,
+ XRNIC_INVALID_QP_ID = 4,
+ XRNIC_INVALID_QP_INIT_ATTR = 5,
+ XRNIC_INVALID_NUM_CHILD = 6,
+ XRNIC_INVALID_CHILD_ID = 7,
+ XRNIC_INVALID_CHILD_NUM = 8,
+ XRNIC_INVALID_QP_TYPE = 9,
+ XRNIC_INVALID_PORT = 10,
+ XRNIC_INVALID_ADDR = 11,
+ XRNIC_INVALID_PKT_CNT = 12,
+ XRNIC_INVALID_ADDR_TYPE = 13,
+ XRNIC_INVALID_QP_CONN_PARAM = 14,
+ XRNIC_INVALID_QP_STATUS = 15,
+};
+
+enum xrnic_qp_type {
+ XRNIC_QPT_RC,
+ XRNIC_QPT_UC,
+ XRNIC_QPT_UD,
+};
+
+enum xrnic_rdma_cm_event_type {
+ XRNIC_LISTEN = 1,
+ XRNIC_REQ_RCVD,
+ XRNIC_MRA_SENT,
+ XRNIC_REJ_SENT,
+ XRNIC_REJ_RECV,
+ XRNIC_REP_SENT,
+ XRNIC_MRA_RCVD,
+ XRNIC_ESTABLISHD,
+ XRNIC_DREQ_RCVD,
+ XRNIC_DREQ_SENT,
+ XRNIC_RTU_TIMEOUT,
+ XRNIC_TIMEWAIT,
+ XRNIC_DREP_TIMEOUT,
+ XRNIC_REP_RCVD,
+ XRNIC_CM_EVENT_ADDR_ERROR,
+ XRNIC_CM_EVENT_ADDR_RESOLVED,
+ XRNIC_CM_EVENT_ROUTE_RESOLVED,
+};
+
+struct xrnic_hw_handshake_info {
+ u32 rq_wrptr_db_add;
+ u32 sq_cmpl_db_add;
+ u32 cnct_io_conf_l_16b;
+};
+
+struct xrnic_qp_info {
+ void (*xrnic_rq_event_handler)(u32 rq_count, void *rp_context);
+ void *rq_context;
+ void (*xrnic_sq_event_handler)(u32 cq_head, void *sp_context);
+ void *sq_context;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+ u32 qp_num;
+ u32 starting_psn;
+ struct ernic_pd *pd;
+};
+
+struct xrnic_qp_init_attr {
+ void (*xrnic_rq_event_handler)(u32 rq_count, void *rp_context);
+ void *rq_context;
+ void (*xrnic_sq_event_handler)(u32 cq_head, void *sp_context);
+ void *sq_context;
+ enum xrnic_qp_type qp_type;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+};
+
+struct xrnic_rdma_route {
+ u8 src_addr[16];
+ u8 dst_addr[16];
+ u16 ip_addr_type;
+ u8 smac[ETH_ALEN];
+ u8 dmac[ETH_ALEN];
+ struct sockaddr_storage s_addr;
+ struct sockaddr_storage d_addr;
+};
+
+enum xrnic_port_qp_status {
+ XRNIC_PORT_QP_FREE,
+ XRNIC_PORT_QP_IN_USE,
+};
+
+struct xrnic_rdma_cm_event_info {
+ enum xrnic_rdma_cm_event_type cm_event;
+ int status;
+ void *private_data;
+ u32 private_data_len;
+};
+
+struct xrnic_rdma_conn_param {
+ u8 private_data[XRNIC_CM_PRVATE_DATA_LENGTH];
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u32 qp_num;
+ u32 srq;
+};
+
+enum xrnic_cm_state {
+ XRNIC_CM_REQ_SENT = 0,
+ XRNIC_CM_REP_RCVD,
+ XRNIC_CM_ESTABLISHED,
+};
+
+struct xrnic_rdma_cm_id {
+ int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *event);
+ void *cm_context;
+ u32 local_cm_id;
+ u32 remote_cm_id;
+ struct xrnic_qp_info qp_info;
+ struct xrnic_rdma_route route;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+ enum xrnic_port_space ps;
+ enum xrnic_qp_type qp_type;
+ u16 port_num;
+ u16 child_qp_num;
+ struct xrnic_rdma_conn_param conn_param;
+ enum xrnic_port_qp_status qp_status;
+ int cm_state;
+ struct list_head list;
+};
+
+struct xrnic_rdma_cm_id_info {
+ struct xrnic_rdma_cm_id parent_cm_id;
+ struct xrnic_rdma_cm_id *child_cm_id;
+ u32 num_child;
+ struct xrnic_rdma_cm_event_info conn_event_info;
+};
+
+void xrnic_rq_event_handler(u32 rq_count, void *user_arg);
+void xrnic_sq_event_handler(u32 cq_head, void *user_arg);
+int xrnic_cm_handler(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info);
+
+struct xrnic_rdma_cm_id *xrnic_rdma_create_id
+ (int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info), void *cm_context,
+ enum xrnic_port_space ps, enum xrnic_qp_type qp_type,
+ int num_child_qp);
+
+int xrnic_rdma_bind_addr(struct xrnic_rdma_cm_id *cm_id,
+ u8 *addr, u16 port_num, u16 ip_addr_type);
+
+int xrnic_rdma_listen(struct xrnic_rdma_cm_id *cm_id, int outstanding);
+int xrnic_rdma_create_qp(struct xrnic_rdma_cm_id *cm_id, struct ernic_pd *pd,
+ struct xrnic_qp_init_attr *init_attr);
+int xrnic_rdma_accept(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param);
+int xrnic_post_recv(struct xrnic_qp_info *qp_info, u32 rq_count);
+int xrnic_post_send(struct xrnic_qp_info *qp_info, u32 sq_count);
+int xrnic_destroy_qp(struct xrnic_qp_info *qp_info);
+int xrnic_rdma_disconnect(struct xrnic_rdma_cm_id *cm_id);
+int xrnic_rdma_destroy_id(struct xrnic_rdma_cm_id *cm_id, int flag);
+int xrnic_hw_hs_reset_sq_cq(struct xrnic_qp_info *qp_info,
+ struct xrnic_hw_handshake_info *hw_hs_info);
+int xrnic_hw_hs_reset_rq(struct xrnic_qp_info *qp_info);
+
+int xrnic_rdma_resolve_addr(struct xrnic_rdma_cm_id *cm_id,
+ struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int timeout);
+int xrnic_rdma_connect(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_conn_param *conn_param);
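+
+/* A typical passive-side sequence with this interface (a sketch, as in
+ * the xernic_bw_test example driver): xrnic_rdma_create_id() with a CM
+ * handler, xrnic_rdma_bind_addr() and xrnic_rdma_listen(); then, from
+ * the handler, xrnic_rdma_create_qp() and xrnic_rdma_accept() on
+ * XRNIC_REQ_RCVD, and xrnic_destroy_qp(), xrnic_rdma_disconnect() and
+ * xrnic_rdma_destroy_id() on XRNIC_DREQ_RCVD.
+ */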
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_IF_H*/
diff --git a/drivers/staging/xlnx_ernic/xioctl.h b/drivers/staging/xlnx_ernic/xioctl.h
new file mode 100644
index 000000000000..8c9738e69383
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xioctl.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+#ifndef _XRNIC_IOCTL_H_
+#define _XRNIC_IOCTL_H_
+
+#include <asm/ioctl.h>
+#include "xlog.h"
+
+#define XRNIC_MAGIC 'L'
+
+#define XRNIC_DISPLAY_MMAP_ALL _IOW(XRNIC_MAGIC, 1, uint)
+#define XRNIC_DISPLAY_MMAP_CONFIG _IOW(XRNIC_MAGIC, 2, uint)
+#define XRNIC_DISPLAY_MMAP_QP1 _IOW(XRNIC_MAGIC, 3, uint)
+#define XRNIC_DISPLAY_MMAP_QPX _IOW(XRNIC_MAGIC, 4, uint)
+#define XRNIC_DISPLAY_PKT _IOW(XRNIC_MAGIC, 5, uint)
+
+#define XRNIC_MAX_CMDS 5
+
+#endif /* _XRNIC_IOCTL_H_ */
diff --git a/drivers/staging/xlnx_ernic/xmain.c b/drivers/staging/xlnx_ernic/xmain.c
new file mode 100644
index 000000000000..67d525b51716
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmain.c
@@ -0,0 +1,1592 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ * Author : Sandeep Dhanvada <sandeep.dhanvada@xilinx.com>
+ * : Anjaneyulu Reddy Mule <anjaneyulu.reddy.mule@xilinx.com>
+ * : Srija Malyala <srija.malyala@xilinx.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/inet.h>
+#include <linux/time.h>
+#include <linux/cdev.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <net/addrconf.h>
+#include <linux/types.h>
+#include "xcommon.h"
+
+/* TODO: Remove this macro once all the experimental code is verified;
+ * the non-experimental code should then be deleted.
+ */
+#define EXPERIMENTAL_CODE
+int debug;
+struct class *xrnic_class;
+/* Need to enable this using sysfs.*/
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none, 1=all)");
+
+#define XRNIC_REG_MAP_NODE 0
+#define cpu_to_be24(x) ((x) << 16)
+
+struct xrnic_conn_param {
+ const void *private_data;
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u8 srq;
+ u8 qp_num;
+};
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+
+struct xrnic_dev_info *xrnic_dev;
+static dev_t xrnic_dev_number;
+
+/*
+ * To store the IP address of the controller, which is passed as a
+ * module param
+ */
+static char server_ip[16];
+/* To store the port number. This is passed as a module param */
+static unsigned short port_num;
+/* To store the mac_address. This is passed as a module param */
+static ushort mac_address[6] = {0x1, 0x0, 0x0, 0x35, 0x0a, 0x00};
+/* To store the ethernet interface name, which is passed as a module param */
+static char *ifname = "eth0";
+
+module_param(port_num, ushort, 0444);
+MODULE_PARM_DESC(port_num, "network port number");
+
+module_param_array(mac_address, ushort, NULL, 0444);
+MODULE_PARM_DESC(mac_address, "mac address");
+
+module_param_string(server_ip, server_ip, sizeof(server_ip), 0444);
+MODULE_PARM_DESC(server_ip, "Target server ip address");
+
+module_param(ifname, charp, 0444);
+MODULE_PARM_DESC(ifname, "Target server interface name, e.g. eth0");
+
+/**
+ * xrnic_rdma_create_id() - Creates an RDMA ID
+ * @xrnic_cm_handler: communication event handler
+ * @cm_context: CM context
+ * @ps: Port space
+ * @qp_type: Queue transport type
+ * @num_child: Max QP count
+ *
+ * @return: pointer to the new CM ID on success, NULL or error pointer
+ * in case of failure
+ */
+struct xrnic_rdma_cm_id *xrnic_rdma_create_id
+ (int (*xrnic_cm_handler)(struct xrnic_rdma_cm_id *cm_id,
+ struct xrnic_rdma_cm_event_info *conn_event_info), void *cm_context,
+ enum xrnic_port_space ps, enum xrnic_qp_type qp_type, int num_child)
+{
+ struct xrnic_qp_attr *qp1_attr = NULL;
+ struct xrnic_rdma_cm_id *cm_id = NULL;
+ struct xrnic_qp_info *qp_info = NULL;
+ struct xrnic_rdma_cm_id_info *cm_id_info = NULL;
+
+ if (!xrnic_dev) {
+ pr_err("Received NULL pointer\n");
+ return NULL;
+ }
+
+ qp1_attr = &xrnic_dev->qp1_attr;
+ if (xrnic_dev->io_qp_count < num_child ||
+ num_child < 0 || qp_type != qp1_attr->qp_type) {
+ pr_err("Invalid info received\n");
+ return NULL;
+ }
+
+ cm_id_info = kzalloc(sizeof(*cm_id_info), GFP_KERNEL);
+ if (!cm_id_info)
+ return ERR_PTR(-ENOMEM);
+
+ xrnic_dev->curr_cm_id_info = cm_id_info;
+ cm_id = (struct xrnic_rdma_cm_id *)&cm_id_info->parent_cm_id;
+ cm_id->xrnic_cm_handler = xrnic_cm_handler;
+ cm_id->cm_context = cm_context;
+ cm_id->ps = ps;
+ cm_id->qp_type = qp_type;
+ cm_id->cm_id_info = cm_id_info;
+ cm_id->child_qp_num = 0;
+ cm_id->qp_status = XRNIC_PORT_QP_FREE;
+
+ qp_info = &cm_id->qp_info;
+ memset(qp_info, 0, sizeof(*qp_info));
+
+ qp_info->qp_num = qp1_attr->qp_num;
+ list_add_tail(&cm_id->list, &cm_id_list);
+
+ return cm_id;
+}
+EXPORT_SYMBOL(xrnic_rdma_create_id);
+
+/**
+ * ipv6_addr_compare() - Compares IPV6 addresses
+ * @addr1: Address 1 to compare
+ * @addr2: Address 2 to compare
+ * @size: size of the address
+ *
+ * @return: 0 on success, -1 in case of a mismatch
+ */
+static int ipv6_addr_compare(u8 *addr1, u8 *addr2, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++) {
+ if (addr1[(size - 1) - i] != addr2[i])
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * xrnic_rdma_bind_addr() - Binds IP-V4/V6 addresses
+ * @cm_id: CM ID to bind the address to
+ * @addr: Address to bind to
+ * @port_num: Transport port number
+ * @ip_addr_type: IP-V4/V6
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+int xrnic_rdma_bind_addr(struct xrnic_rdma_cm_id *cm_id,
+ u8 *addr, u16 port_num, u16 ip_addr_type)
+{
+ if (!cm_id || !xrnic_dev) {
+ pr_err("Invalid CM ID or XRNIC device info\n");
+ return -EINVAL;
+ }
+
+ if (xrnic_dev->curr_cm_id_info != cm_id->cm_id_info)
+ return -XRNIC_INVALID_CM_ID;
+
+ if (port_num < 1UL || port_num > XRNIC_MAX_PORT_SUPPORT)
+ return -XRNIC_INVALID_PORT;
+
+ if (cm_id->child_qp_num)
+ return -XRNIC_INVALID_CHILD_NUM;
+
+ if (xrnic_dev->cm_id_info[port_num - 1])
+ return -XRNIC_INVALID_PORT;
+
+ if (xrnic_dev->port_status[port_num - 1] == XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_CM_ID;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_CM_ID;
+
+ if (ip_addr_type == AF_INET6) {
+ if (ipv6_addr_compare((u8 *)&xrnic_dev->ipv6_addr, addr,
+ sizeof(struct in6_addr)))
+ return -XRNIC_INVALID_ADDR;
+ memcpy((void *)&cm_id->route.src_addr, (void *)addr,
+ sizeof(struct in6_addr));
+ } else if (ip_addr_type == AF_INET) {
+ if (memcmp(&xrnic_dev->ipv4_addr, addr,
+ sizeof(struct in_addr)))
+ return -XRNIC_INVALID_ADDR;
+ memcpy((void *)&cm_id->route.src_addr, (void *)addr,
+ sizeof(struct in_addr));
+ } else {
+ return -XRNIC_INVALID_ADDR_TYPE;
+ }
+ xrnic_dev->cm_id_info[port_num - 1] = cm_id->cm_id_info;
+ cm_id->port_num = port_num;
+ cm_id->route.ip_addr_type = ip_addr_type;
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_bind_addr);
+
+/**
+ * xrnic_rdma_listen() - Initiates listen on the socket
+ * @cm_id: CM ID
+ * @backlog: back log
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+int xrnic_rdma_listen(struct xrnic_rdma_cm_id *cm_id, int backlog)
+{
+ if (!cm_id || !xrnic_dev) {
+ pr_err("Rx invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (xrnic_dev->curr_cm_id_info != cm_id->cm_id_info)
+ return XRNIC_INVALID_CM_ID;
+
+ if (xrnic_dev->port_status[cm_id->port_num - 1] ==
+ XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_PORT;
+
+ if (cm_id->qp_status == XRNIC_PORT_QP_IN_USE)
+ return XRNIC_INVALID_QP_ID;
+
+ xrnic_dev->port_status[cm_id->port_num - 1] = XRNIC_PORT_QP_IN_USE;
+ xrnic_dev->curr_cm_id_info = NULL;
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_listen);
+
+/**
+ * xrnic_hw_hs_reset_sq_cq() - Enables HW Handshake for a given QP
+ * @qp_info: QP which should be enabled for HW Handshake
+ * @hw_hs_info: HW Handshake info with which QP config needs to be updated
+ *
+ * @return: XRNIC_SUCCESS on success, error indicative value in case of failure
+ */
+int xrnic_hw_hs_reset_sq_cq(struct xrnic_qp_info *qp_info,
+ struct xrnic_hw_handshake_info *hw_hs_info)
+{
+ struct xrnic_qp_attr *qp_attr;
+
+ if (!qp_info) {
+ pr_err("Rx invalid qp info\n");
+ return -EINVAL;
+ }
+
+ if (!xrnic_dev) {
+ pr_err("Invalid ERNIC info\n");
+ return -EINVAL;
+ }
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ xrnic_reset_io_qp_sq_cq_ptr(qp_attr, hw_hs_info);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_hw_hs_reset_sq_cq);
+
+/**
+ * xrnic_hw_hs_reset_rq() - Updates HW handshake for RQ
+ * @qp_info: QP which should be enabled for HW Handshake
+ *
+ * @return: XRNIC_SUCCESS on success, error indicative value in case of failure
+ */
+int xrnic_hw_hs_reset_rq(struct xrnic_qp_info *qp_info)
+{
+ struct xrnic_qp_attr *qp_attr;
+
+ if (!qp_info) {
+ pr_err("Rx invalid qp info\n");
+ return -EINVAL;
+ }
+
+ if (!xrnic_dev) {
+ pr_err("Invalid ERNIC info\n");
+ return -EINVAL;
+ }
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ xrnic_reset_io_qp_rq_ptr(qp_attr);
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_hw_hs_reset_rq);
+
+/**
+ * set_ipv4_ipaddress() - Configures XRNIC IP address
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int set_ipv4_ipaddress(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ u32 ipv4_addr = 0;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+ struct in_device *inet_dev;
+
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ inet_dev = (struct in_device *)dev->ip_ptr;
+
+ if (inet_dev->ifa_list) {
+ ipv4_addr = inet_dev->ifa_list->ifa_address;
+ if (!ipv4_addr) {
+ pr_err("cmac ip addr: ifa_address not available\n");
+ return XRNIC_FAILED;
+ }
+ snprintf(server_ip, 16, "%pI4", &ipv4_addr);
+ in4_pton(server_ip, strlen(server_ip), xrnic_dev->ipv4_addr,
+ '\0', NULL);
+ DEBUG_LOG("xcmac ip_address:%s\n", server_ip);
+ } else {
+ pr_info("xcmac ip address: not available at present\n");
+ return 0;
+ }
+
+ switch (dev->mtu) {
+ case 340:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_256;
+ break;
+ case 592:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_512;
+ break;
+ case 1500:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_1024;
+ break;
+ case 2200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_2048;
+ break;
+ case 4200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ break;
+ default:
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ }
+ config_value = (xrnic_dev->ipv4_addr[3] << 0) |
+ (xrnic_dev->ipv4_addr[2] << 8) |
+ (xrnic_dev->ipv4_addr[1] << 16) |
+ (xrnic_dev->ipv4_addr[0] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->ipv4_address)));
+ DEBUG_LOG("XRNIC IPV4 address [%x]\n", config_value);
+ return 0;
+}
+
+/**
+ * set_ipv6_ipaddress() - Configures XRNIC IPV6 address
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int set_ipv6_ipaddress(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_conf;
+ u32 config_value = 0;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ u8 i, ip6_set = 0;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+
+ xrnic_ctrl_conf = &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ idev = __in6_dev_get(dev);
+ if (!idev) {
+ pr_err("ipv6 inet device not found\n");
+ return 0;
+ }
+
+ list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
+ DEBUG_LOG("IP=%pI6, MAC=%pM\n", &ifp->addr, dev->dev_addr);
+ for (i = 0; i < 16; i++) {
+ DEBUG_LOG("IP=%x\n", ifp->addr.s6_addr[i]);
+ xrnic_dev->ipv6_addr[15 - i] = ifp->addr.s6_addr[i];
+ }
+ ip6_set = 1;
+ }
+ if (ip6_set == 0) {
+ pr_info("xcmac ipv6 address: not available at present\n");
+ return 0;
+ }
+
+ switch (dev->mtu) {
+ case 340:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_256;
+ break;
+ case 592:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_512;
+ break;
+ case 1500:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_1024;
+ break;
+ case 2200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_2048;
+ break;
+ case 4200:
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ break;
+ default:
+ xrnic_dev->pmtu = XRNIC_QP_CONFIG_PMTU_4096;
+ DEBUG_LOG("MTU set to %d\n", dev->mtu);
+ }
+ config_value = (xrnic_dev->ipv6_addr[0] << 0) |
+ (xrnic_dev->ipv6_addr[1] << 8) |
+ (xrnic_dev->ipv6_addr[2] << 16) |
+ (xrnic_dev->ipv6_addr[3] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr1)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[4] << 0) |
+ (xrnic_dev->ipv6_addr[5] << 8) |
+ (xrnic_dev->ipv6_addr[6] << 16) |
+ (xrnic_dev->ipv6_addr[7] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr2)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[8] << 0) |
+ (xrnic_dev->ipv6_addr[9] << 8) |
+ (xrnic_dev->ipv6_addr[10] << 16) |
+ (xrnic_dev->ipv6_addr[11] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr3)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+
+ config_value = (xrnic_dev->ipv6_addr[12] << 0) |
+ (xrnic_dev->ipv6_addr[13] << 8) |
+ (xrnic_dev->ipv6_addr[14] << 16) |
+ (xrnic_dev->ipv6_addr[15] << 24);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->ip_xrnic_addr4)));
+ DEBUG_LOG("XRNIC IPV6 address [%x]\n", config_value);
+ return 0;
+}
+
+/**
+ * cmac_inet6addr_event() - Handles IPV6 events
+ * @notifier: notifier info
+ * @event: Rx event
+ * @data: Event specific data
+ *
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int cmac_inet6addr_event(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ switch (event) {
+ case NETDEV_DOWN:
+ pr_info("Driver link down\r\n");
+ break;
+ case NETDEV_UP:
+ pr_info("Driver link up ipv6\r\n");
+ if (set_ipv6_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ case NETDEV_CHANGEADDR:
+ pr_info("Driver link change address ipv6\r\n");
+ if (set_ipv6_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * cmac_inetaddr_event() - Handles IPV4 events
+ * @notifier: notifier info
+ * @event: Rx event
+ * @data: Event specific data
+ * @return: 0 on success, error indicative value in case of failure
+ */
+static int cmac_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *data)
+{
+ struct in_ifaddr *ifa = data;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+ struct net_device *dev = __dev_get_by_name(&init_net, ifname);
+
+ if (!dev) {
+ pr_err("CMAC interface not configured\n");
+ return XRNIC_FAILED;
+ }
+
+ if (event_netdev != dev)
+ return 0;
+ pr_info("Xrnic: event = %ld\n", event);
+ switch (event) {
+ case NETDEV_DOWN:
+ pr_info("Xrnic: link down\n");
+ break;
+ case NETDEV_UP:
+ pr_info("Xrnic: link up\n");
+ if (set_ipv4_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ case NETDEV_CHANGEADDR:
+ pr_info("Xrnic: ip address change detected\n");
+ if (set_ipv4_ipaddress() == XRNIC_FAILED)
+ return XRNIC_FAILED;
+ break;
+ }
+ return 0;
+}
+
+struct notifier_block cmac_inetaddr_notifier = {
+ .notifier_call = cmac_inetaddr_event
+};
+
+struct notifier_block cmac_inet6addr_notifier = {
+ .notifier_call = cmac_inet6addr_event
+};
+
+static const struct file_operations xrnic_fops = {
+ /*TODO: Implement read/write/ioctl operations. */
+ .owner = THIS_MODULE, /* Owner */
+};
+
+/**
+ * xrnic_irq_handler() - XRNIC interrupt handler
+ * @irq: Irq number
+ * @data: Pointer to XRNIC device info structure
+ *
+ * @return: IRQ_HANDLED in case of success, other value in case of failure
+ */
+static irqreturn_t xrnic_irq_handler(int irq, void *data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ unsigned long flag;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ config_value = ioread32((void *)&xrnic_ctrl_config->intr_sts);
+
+ /* We are checking masked interrupt.*/
+ config_value = config_value & xrnic_dev->xrnic_mmap.intr_en;
+ if (!config_value)
+ pr_err("Rx disabled or masked interrupt\n");
+
+ if (config_value & PKT_VALID_ERR_INTR_EN) {
+ pr_info("Packet validation fail interrupt rx\n");
+ iowrite32(PKT_VALID_ERR_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & MAD_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("MAD Packet rx interrupt\n");
+ /* Clear the interrupt */
+ iowrite32(MAD_PKT_RCVD_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ /* process the MAD pkt */
+ tasklet_schedule(&xrnic_dev->mad_pkt_recv_task);
+ }
+
+ if (config_value & BYPASS_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("Bypass packet Rx interrupt\n");
+ iowrite32(BYPASS_PKT_RCVD_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & RNR_NACK_GEN_INTR_EN) {
+ DEBUG_LOG("Rx RNR Nack interrupt\n");
+ iowrite32(RNR_NACK_GEN_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & WQE_COMPLETED_INTR_EN) {
+ DEBUG_LOG("Rx WQE completion interrupt\n");
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en &
+ (~WQE_COMPLETED_INTR_EN);
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ tasklet_schedule(&xrnic_dev->wqe_completed_task);
+ }
+
+ if (config_value & ILL_OPC_SENDQ_INTR_EN) {
+ DEBUG_LOG("Rx illegal opcode interrupt\n");
+ iowrite32(ILL_OPC_SENDQ_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ }
+
+ if (config_value & QP_PKT_RCVD_INTR_EN) {
+ DEBUG_LOG("Rx data packet interrupt\n");
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en &
+ (~QP_PKT_RCVD_INTR_EN);
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ tasklet_schedule(&xrnic_dev->qp_pkt_recv_task);
+ }
+
+ if (config_value & FATAL_ERR_INTR_EN) {
+ pr_info("Rx Fatal error interrupt\n");
+
+ iowrite32(FATAL_ERR_INTR_EN,
+ (void __iomem *)&xrnic_ctrl_config->intr_sts);
+ /* The argument value is arbitrary; 0 is passed here. */
+ xrnic_qp_fatal_handler(0);
+ }
+
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xrnic_ctrl_hw_configuration() - Xrnic control configuration initialization
+ * @return: 0 on success, other value in case of failure
+ */
+static int xrnic_ctrl_hw_configuration(void)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_ctrl_conf;
+ u32 config_value = 0;
+ struct net_device *dev = NULL;
+
+ xrnic_ctrl_conf = &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+
+ if (!xrnic_dev || !xrnic_dev->xrnic_mmap.xrnic_regs ||
+ !xrnic_ctrl_conf) {
+ pr_err("Invalid device pointers\n");
+ return -EINVAL;
+ }
+
+ xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ dev = __dev_get_by_name(&init_net, ifname);
+ if (!dev) {
+ pr_err("Ethernet mac address not configured\n");
+ return XRNIC_FAILED;
+ }
+ /* Set the MAC address */
+ config_value = dev->dev_addr[5] | (dev->dev_addr[4] << 8) |
+ (dev->dev_addr[3] << 16) | (dev->dev_addr[2] << 24);
+ DEBUG_LOG("Source MAC address LSB [%x]\n", config_value);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->mac_xrnic_src_addr_lsb)));
+
+ DEBUG_LOG("Source MAC address LSB [%x]\n", config_value);
+ config_value = dev->dev_addr[1] | (dev->dev_addr[0] << 8);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->mac_xrnic_src_addr_msb)));
+ DEBUG_LOG("Source MAC address MSB [%x]\n", config_value);
+
+ if (set_ipv4_ipaddress() == XRNIC_FAILED) {
+ pr_err("ETH0 AF_INET address: ifa_list not available.\n");
+ return XRNIC_FAILED;
+ }
+
+ if (set_ipv6_ipaddress() == XRNIC_FAILED) {
+ pr_err("ETH0 AF_INET6 address: ifa_list not available.\n");
+ return XRNIC_FAILED;
+ }
+
+ /* At present 128 TX headers and each size 128 bytes */
+ config_value = xrnic_mmap->tx_hdr_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_hdr_buf_ba)));
+ DEBUG_LOG("Tx header buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_TX_HDR | (XRNIC_SIZE_OF_TX_HDR << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_hdr_buf_sz)));
+ DEBUG_LOG("Tx header buf size [0x%x]\n", config_value);
+
+ /* At present 256 TX SGL and each size 16 bytes */
+ config_value = xrnic_mmap->tx_sgl_buf_ba_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_sgl_buf_ba)));
+ DEBUG_LOG("Tx SGL buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_TX_SGL | (XRNIC_SIZE_OF_TX_SGL << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->tx_sgl_buf_sz)));
+ DEBUG_LOG("Tx SGL buf size [0x%x]\n", config_value);
+
+ /* At present 32 Bypass buffers and each size 512 bytes */
+ config_value = xrnic_mmap->bypass_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->bypass_buf_ba)));
+ DEBUG_LOG("Bypass buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_BYPASS_BUF |
+ (XRNIC_SIZE_OF_BYPASS_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->bypass_buf_sz)));
+ DEBUG_LOG("Bypass buf size [0x%x]\n", config_value);
+
+ config_value = XRNIC_BYPASS_BUF_WRPTR;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->bypass_buf_wrptr)));
+ DEBUG_LOG("Bypass buffer write pointer [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->err_pkt_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_pkt_buf_ba)));
+ DEBUG_LOG("Error packet buf base address [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_ERROR_BUF |
+ (XRNIC_SIZE_OF_ERROR_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_pkt_buf_sz)));
+ DEBUG_LOG("Error packet buf size [0x%x]\n", config_value);
+
+ config_value = XRNIC_ERROR_BUF_WRPTR;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->err_buf_wrptr)));
+ DEBUG_LOG("Error pakcet buf write pointer [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->out_errsts_q_ba_phys;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->out_errsts_q_ba)));
+ DEBUG_LOG("Outgoing error status queue base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_OUT_ERRST_Q_NUM_ENTRIES;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->out_errsts_q_sz)));
+ DEBUG_LOG("Outgoing error status queue size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->in_errsts_q_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->in_errsts_q_ba)));
+ DEBUG_LOG("Incoming error status queue base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_IN_ERRST_Q_NUM_ENTRIES;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->in_errsts_q_sz)));
+ DEBUG_LOG("Incoming error status queue size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->data_buf_ba_phys;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->data_buf_ba)));
+ DEBUG_LOG("RDMA Outgoing data buf base addr [0x%x]\n", config_value);
+
+ config_value = XRNIC_NUM_OF_DATA_BUF | (XRNIC_SIZE_OF_DATA_BUF << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->data_buf_sz)));
+ DEBUG_LOG("RDMA Outgoing data buf size [0x%x]\n", config_value);
+
+ config_value = xrnic_mmap->resp_err_pkt_buf_ba_phys;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->resp_err_pkt_buf_ba)));
+ DEBUG_LOG("Response error packet buf base address [0x%x]\n",
+ config_value);
+
+ config_value = XRNIC_NUM_OF_RESP_ERR_BUF |
+ (XRNIC_SIZE_OF_RESP_ERR_BUF << 16);
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_conf->resp_err_buf_sz)));
+ DEBUG_LOG("Response error packet buf size [0x%x]\n", config_value);
+
+ /* Enable the RNIC configuration*/
+ config_value = (XRNIC_CONFIG_XRNIC_EN |
+ XRNIC_CONFIG_ERR_BUF_EN |
+ XRNIC_CONFIG_NUM_QPS_ENABLED |
+ XRNIC_CONFIG_FLOW_CONTROL_EN |
+ XRNIC_CONFIG_UDP_SRC_PORT);
+
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_conf->xrnic_conf)));
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_ctrl_hw_init() - Xrnic control configuration initialization
+ * @return: 0 on success, other value in case of failure
+ */
+static int xrnic_ctrl_hw_init(void)
+{
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ u32 config_value = 0;
+ int ret = 0, i;
+
+ /* Invoking rnic global initialization configuration */
+ ret = xrnic_ctrl_hw_configuration();
+ if (ret) {
+ pr_err("xrnic hw config failed with ret code [%d]\n", ret);
+ return ret;
+ }
+
+ /* Invoking RDMA QP1 configuration */
+ ret = xrnic_qp1_hw_configuration();
+ if (ret) {
+ pr_err("xrnic qp1 config failed with ret code [%d]\n", ret);
+ return ret;
+ }
+
+ /* Invoke the remaining data path QP configurations. No return value
+ * is checked because no data path interrupt handler is registered.
+ */
+ for (i = 0; i < XRNIC_MAX_QP_SUPPORT; i++)
+ xrnic_qp_hw_configuration(i);
+
+ /* Enabling xrnic interrupts. */
+ config_value = MAD_PKT_RCVD_INTR_EN |
+ RNR_NACK_GEN_INTR_EN |
+ WQE_COMPLETED_INTR_EN | ILL_OPC_SENDQ_INTR_EN |
+ QP_PKT_RCVD_INTR_EN | FATAL_ERR_INTR_EN;
+
+ if (config_value & ~XRNIC_INTR_ENABLE_DEFAULT) {
+ DEBUG_LOG("Setting the default interrupt enable config\n");
+ config_value = XRNIC_INTR_ENABLE_DEFAULT;
+ }
+
+ /*Writing to interrupt enable register.*/
+ xrnic_dev->xrnic_mmap.intr_en = config_value;
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->intr_en)));
+
+ DEBUG_LOG("Interrupt enable reg value [%#x]\n",
+ ioread32((void __iomem *)&xrnic_ctrl_config->intr_en));
+ return ret;
+}
+
+/**
+ * xrnic_fill_wr() - This function fills the Send queue work request info
+ * @qp_attr: qp config info to fill the WR
+ * @qp_depth: Depth of the Queue
+ */
+void xrnic_fill_wr(struct xrnic_qp_attr *qp_attr, u32 qp_depth)
+{
+ int i;
+ struct wr *sq_wr; /*sq_ba*/
+
+ for (i = 0; i < qp_depth; i++) {
+ sq_wr = (struct wr *)qp_attr->sq_ba + i;
+ sq_wr->ctx.wr_id = i;
+ sq_wr->local_offset[0] = (qp_attr->send_sgl_phys & 0xffffffff)
+ + (i * XRNIC_SEND_SGL_SIZE);
+ sq_wr->local_offset[1] = 0;
+ sq_wr->length = XRNIC_SEND_SGL_SIZE;
+ sq_wr->opcode = XRNIC_SEND_ONLY;
+ sq_wr->remote_offset[0] = 0;
+ sq_wr->remote_offset[1] = 0;
+ sq_wr->remote_tag = 0;
+ }
+}
+
+static int xernic_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device_node *np = NULL;
+ struct resource resource;
+ void __iomem *virt_addr;
+ u64 start_addr;
+ int status;
+ int len;
+/* TODO: Not using pdev. Rather using a global data structure xrnic_dev,
+ * which is shared among all the objects in the ernic driver.
+ * Need to set xrnic_dev as the platform private data, and all the objects
+ * of the ernic driver should retrieve it from the platform_device pointer.
+ */
+#ifdef EXPERIMENTAL_CODE
+ int val = 0;
+#endif
+ phys_addr_t phy_addr;
+
+ pr_info("XRNIC driver Version = %s\n", XRNIC_VERSION);
+
+ register_inetaddr_notifier(&cmac_inetaddr_notifier);
+ register_inet6addr_notifier(&cmac_inet6addr_notifier);
+ init_mr(MEMORY_REGION_BASE, MEMORY_REGION_LEN);
+
+ np = of_find_node_by_name(NULL, "ernic");
+ if (!np) {
+ pr_err("xrnic can't find compatible node in device tree.\n");
+ return -ENODEV;
+ }
+
+ xrnic_dev = kzalloc(sizeof(*xrnic_dev), GFP_KERNEL);
+ if (!xrnic_dev)
+ return -ENOMEM;
+ ret = alloc_chrdev_region(&xrnic_dev_number, 0,
+ NUM_XRNIC_DEVS, DEVICE_NAME);
+ if (ret) {
+ DEBUG_LOG("XRNIC:: Failed to register char device\n");
+ goto alloc_failed;
+ } else {
+ DEBUG_LOG(KERN_INFO "XRNIC Registered with :\n");
+ DEBUG_LOG(KERN_INFO "Major : %u || ", MAJOR(xrnic_dev_number));
+ DEBUG_LOG(KERN_INFO "Minor : %u\n", MINOR(xrnic_dev_number));
+ }
+/* TODO: xrnic_class is created but not used. Need to enable debug and
+ * statistic counters through this interface.
+ */
+ xrnic_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(xrnic_class)) {
+ ret = PTR_ERR(xrnic_class);
+ goto class_failed;
+ }
+
+ /* Connect the file operations with the cdev */
+ /* TODO: cdev created but not used. Need to implement when
+ * userspace applications are implemented. Currently all the
+ * callbacks in xrnic_fops are dummy.
+ */
+ cdev_init(&xrnic_dev->cdev, &xrnic_fops);
+ xrnic_dev->cdev.owner = THIS_MODULE;
+
+ /* Connect the major/minor number to the cdev */
+ ret = cdev_add(&xrnic_dev->cdev, xrnic_dev_number, 1);
+ if (ret < 0) {
+ DEBUG_LOG("ERROR: XRNIC cdev allocation failed\n");
+ goto cdev_failed;
+ }
+
+ device_create(xrnic_class, NULL, xrnic_dev_number, NULL,
+ "%s", "xrnic0");
+
+ /* The node offset argument 0 xrnic 0x0 0x84000000 len 128K*/
+ ret = of_address_to_resource(np, XRNIC_REG_MAP_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 0.\n");
+ goto dev_failed;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_REG_MAP_NODE);
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.xrnic_regs_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.xrnic_regs = (struct xrnic_reg_map *)virt_addr;
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Tx HDR BUF Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+#else
+ /*Mapping for Xrnic TX HEADERS 0x20100000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_TX_HDR_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 5.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_TX_HDR_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory TX header 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.tx_hdr_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Tx SGL Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+#else
+ /*Mapping for Xrnic TX DMA SGL 0xB4000000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_TX_SGL_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 6.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_TX_SGL_BUF_NODE);
+ DEBUG_LOG("xrnic memory TX SGL 0x%llx of size=%x\n",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.tx_sgl_buf_ba = (void *)(uintptr_t)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (IS_ERR_VALUE(phy_addr)) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.bypass_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.bypass_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.bypass_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory Bypass Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+#else
+ /*Mapping for Xrnic BYPASS PL 0x20120000 to 16 kb.*/
+ /*Mapping for Xrnic BYPASS PS 0x20120000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_BYPASS_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 7.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_BYPASS_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory BYPASS:0x%llx of siz:%xb mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.bypass_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.bypass_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_ERROR_BUF * XRNIC_SIZE_OF_ERROR_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.err_pkt_buf_ba, 0, len);
+ DEBUG_LOG("xrnic memory ERR PKT Buf Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+#else
+ /*Mapping for Xrnic ERROR-DROPP PL 0x20110000 to 16 kb.*/
+ /*Mapping for Xrnic ERROR-DROPP PS 0x20110000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_ERRPKT_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 8.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_ERRPKT_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory ERROR PKT 0x%llx of size=%x\n",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.err_pkt_buf_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_OUT_ERRST_Q_NUM_ENTRIES;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.out_errsts_q_ba, 0, len);
+ DEBUG_LOG("xrnic memory OUT ERR STS Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+#else
+ /*Mapping for Xrnic OUT ERR_STS 0x29000000 to 4 kb.*/
+ ret = of_address_to_resource(np, XRNIC_OUTERR_STS_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 9.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_OUTERR_STS_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.out_errsts_q_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_IN_ERRST_Q_NUM_ENTRIES;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.in_errsts_q_ba, 0, len);
+ DEBUG_LOG("xrnic memory IN ERR STS Base Address = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+#else
+ /*Mapping for Xrnic IN ERR_STS PL 0x29001000 to 16 kb.*/
+ /*Mapping for Xrnic IN ERR_STS PS 0x29001000 to 16 kb.*/
+ ret = of_address_to_resource(np, XRNIC_INERR_STS_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 10.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_INERR_STS_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory 0x%llx of size=%x bytes mapped at 0x%p\n",
+ start_addr, (u32)resource.end - (u32)resource.start,
+ virt_addr);
+
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.in_errsts_q_ba = (void *)virt_addr;
+#endif
+
+ /*Mapping for Xrnic RQ WR DBRL PL 0x29002000 to 4 kb.*/
+ /*Mapping for Xrnic RQ WR DBRL PS 0x29002000 to 4 kb.*/
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_DATA_BUF * XRNIC_SIZE_OF_DATA_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.data_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.data_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.data_buf_ba, 0, len);
+#else
+ /*Mapping for Xrnic RQ STATUS PER QP 0x29040000 to 4 kb.*/
+ ret = of_address_to_resource(np, XRNIC_DATA_BUF_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 14.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_DATA_BUF_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory DATA BUFF BA 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.data_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.data_buf_ba = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_NUM_OF_RESP_ERR_BUF * XRNIC_SIZE_OF_RESP_ERR_BUF;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba, 0, len);
+#else
+ /*Mapping for Xrnic RQ STATUS PER QP 0x20130000 to 16kb.*/
+ ret = of_address_to_resource(np, XRNIC_RESP_ERR_PKT_BUF_BA, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 14.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RESP_ERR_PKT_BUF_BA);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic response error packet buffer base address [0x%llx]",
+ start_addr);
+ DEBUG_LOG(" of size=%x bytes mapped at 0x%p\n",
+ (u32)resource.end - (u32)resource.start, virt_addr);
+
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SEND_SGL_SIZE * XRNIC_SQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.send_sgl_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.send_sgl =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+
+ memset(xrnic_dev->xrnic_mmap.send_sgl, 0, len);
+ DEBUG_LOG("xrnic memory Send SGL Base Addr = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.send_sgl_phys);
+
+#else /* EXPERIMENTAL_CODE */
+ ret = of_address_to_resource(np, XRNIC_SEND_SGL_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 1.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SEND_SGL_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+
+ DEBUG_LOG("xrnic memory send sgl 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.send_sgl_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.send_sgl = (void *)virt_addr;
+#endif /* EXPERIMENTAL_CODE */
+
+ DEBUG_LOG("send SGL physical address :%llx\n",
+ xrnic_dev->xrnic_mmap.send_sgl_phys);
+ DEBUG_LOG("xrnic mmap:%p\n", &xrnic_dev->xrnic_mmap);
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SQ_DEPTH * sizeof(struct xrnic_cqe);
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.cq_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.cq_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.cq_ba, 0, len);
+ DEBUG_LOG("xrnic memory CQ BA Base Addr = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.cq_ba_phys);
+
+#else
+ ret = of_address_to_resource(np, XRNIC_CQ_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 2.\n");
+ goto mem_config_err;
+ }
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_CQ_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory send CQ 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.cq_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.cq_ba = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_RECV_PKT_SIZE * XRNIC_RQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+
+ memset(xrnic_dev->xrnic_mmap.rq_buf_ba_ca, 0, len);
+ DEBUG_LOG("xrnic memory Receive Q Buffer = %#x, %llx\n",
+ val, xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+
+#else /* EXPERIMENTAL_CODE */
+ ret = of_address_to_resource(np, XRNIC_RQ_BUF_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 3.\n");
+ goto mem_config_err;
+ }
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RQ_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory receive Q Buf 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.rq_buf_ba_ca = (void *)virt_addr;
+#endif /* EXPERIMENTAL_CODE */
+
+#ifdef EXPERIMENTAL_CODE
+ len = XRNIC_SEND_PKT_SIZE * XRNIC_SQ_DEPTH;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.sq_ba_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.sq_ba =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.sq_ba, 0, len);
+ DEBUG_LOG("xrnic memory Send Q Base Addr = %#x, %llx.\n",
+ val, xrnic_dev->xrnic_mmap.sq_ba_phys);
+#else
+ ret = of_address_to_resource(np, XRNIC_SQ_BA_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 4.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SQ_BA_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory SEND Q 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.sq_ba_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.sq_ba = (struct wr *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.rq_wrptr_db_add, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_RQWR_PTR_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 11.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_RQWR_PTR_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory RQ WPTR 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.rq_wrptr_db_add = (void *)virt_addr;
+#endif
+
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.sq_cmpl_db_add, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_SQ_CMPL_NODE, &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 12.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_SQ_CMPL_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory SQ CMPL 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG("bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.sq_cmpl_db_add = (void *)virt_addr;
+#endif
+#ifdef EXPERIMENTAL_CODE
+ len = 0x100;
+ phy_addr = alloc_mem(NULL, len);
+ if (phy_addr == -ENOMEM) {
+ ret = -ENOMEM;
+ goto mem_config_err;
+ }
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys = phy_addr;
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca =
+ (void *)(uintptr_t)get_virt_addr(phy_addr);
+ memset(xrnic_dev->xrnic_mmap.stat_rq_buf_ca, 0, len);
+#else
+ ret = of_address_to_resource(np, XRNIC_STAT_XRNIC_RQ_BUF_NODE,
+ &resource);
+ if (ret < 0) {
+ pr_err("xrnic can't find resource 13.\n");
+ goto mem_config_err;
+ }
+
+ start_addr = (unsigned int)resource.start;
+ virt_addr = of_iomap(np, XRNIC_STAT_XRNIC_RQ_BUF_NODE);
+ memset((void *)virt_addr, 0, ((u32)resource.end -
+ (u32)resource.start + 1));
+ DEBUG_LOG("xrnic memory STAT RQ BUF 0x%llx of size=%x",
+ start_addr, (u32)resource.end - (u32)resource.start);
+ DEBUG_LOG(" bytes mapped at 0x%p\n", virt_addr);
+
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys = (u64)start_addr;
+ xrnic_dev->xrnic_mmap.stat_rq_buf_ca = (void *)virt_addr;
+#endif
+ xrnic_dev->io_qp_count = XRNIC_MAX_QP_SUPPORT;
+ /* XRNIC controller H/W configuration which includes XRNIC
+ * global configuration, QP1 initialization and interrupt enable.
+ */
+ ret = xrnic_ctrl_hw_init();
+ if (ret < 0) {
+ pr_err("xrnic hw init failed.\n");
+ goto mem_config_err;
+ }
+ /* TODO: Currently, ERNIC IP is exporting 8 interrupt lines in DTS.
+ * But, IP will assert only first interrupt line for all 8 lines.
+ * Internally, all 8 lines are logically ORed and given as
+ * Single interrupt with interrupt status register showing which
+ * line is asserted. So, we are parsing just the 0th index of irq_map
+ * from DTS and in interrupt handler routine, we are reading the
+ * interrupt status register to identify which interrupt is asserted.
+ *
+ * Need to fix the design to export only 1 interrupt line in DTS.
+ */
+ xrnic_dev->xrnic_irq = irq_of_parse_and_map(np, 0);
+ if (!xrnic_dev->xrnic_irq) {
+ pr_err("xrnic can't determine irq.\n");
+ ret = XRNIC_FAILED;
+ goto err_irq;
+ }
+ status = request_irq(xrnic_dev->xrnic_irq, xrnic_irq_handler, 0,
+ "xrnic_irq", xrnic_dev);
+ if (status) {
+ pr_err("XRNIC irq request handler failed\n");
+ goto err_irq;
+ }
+
+ tasklet_init(&xrnic_dev->mad_pkt_recv_task,
+ xrnic_mad_pkt_recv_intr_handler,
+ (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->qp_pkt_recv_task,
+ xrnic_qp_pkt_recv_intr_handler, (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->qp_fatal_task,
+ xrnic_qp_fatal_handler, (unsigned long)xrnic_dev);
+ tasklet_init(&xrnic_dev->wqe_completed_task,
+ xrnic_wqe_completed_intr_handler,
+ (unsigned long)xrnic_dev);
+ INIT_LIST_HEAD(&cm_id_list);
+
+ return XRNIC_SUCCESS;
+err_irq:
+mem_config_err:
+/* free_mem() works only on valid physical addresses returned from alloc_mem()
+ * and ignores NULL or invalid addresses.
+ * So, even if any of the above allocations fails in the middle,
+ * we can safely call free_mem() on all addresses.
+ *
+ * We are using carve-out memory for the requirements of ERNIC,
+ * so we cannot use devm_kzalloc() as the kernel cannot see these
+ * memories until they are ioremapped.
+ */
+ iounmap(xrnic_dev->xrnic_mmap.xrnic_regs);
+ free_mem(xrnic_dev->xrnic_mmap.send_sgl_phys);
+ free_mem(xrnic_dev->xrnic_mmap.cq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.data_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys);
+
+dev_failed:
+ /* Remove the cdev */
+ cdev_del(&xrnic_dev->cdev);
+
+ /* Remove the device node entry */
+ device_destroy(xrnic_class, xrnic_dev_number);
+
+cdev_failed:
+ /* Destroy xrnic_class */
+ class_destroy(xrnic_class);
+
+class_failed:
+ /* Release the major number */
+ unregister_chrdev_region(MAJOR(xrnic_dev_number), 1);
+
+alloc_failed:
+ kfree(xrnic_dev);
+ return ret;
+}
+
+static int xernic_remove(struct platform_device *pdev)
+{
+/* TODO: Not using pdev. Rather using a global data structure xrnic_dev,
+ * which is shared among all the objects in ernic driver.
+ * Need to get xrnic_dev from platform_device pointer.
+ */
+ iounmap(xrnic_dev->xrnic_mmap.xrnic_regs);
+ free_mem(xrnic_dev->xrnic_mmap.send_sgl_phys);
+ free_mem(xrnic_dev->xrnic_mmap.cq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_buf_ba_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_hdr_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.tx_sgl_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.bypass_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.err_pkt_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.out_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.in_errsts_q_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.rq_wrptr_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.sq_cmpl_db_add_phys);
+ free_mem(xrnic_dev->xrnic_mmap.stat_rq_buf_ca_phys);
+ free_mem(xrnic_dev->xrnic_mmap.data_buf_ba_phys);
+ free_mem(xrnic_dev->xrnic_mmap.resp_err_pkt_buf_ba_phys);
+
+ cdev_del(&xrnic_dev->cdev);
+ device_destroy(xrnic_class, xrnic_dev_number);
+ cdev_del(&xrnic_dev->cdev);
+ unregister_chrdev_region(MAJOR(xrnic_dev_number), 1);
+ free_irq(xrnic_dev->xrnic_irq, xrnic_dev);
+ kfree(xrnic_dev);
+ class_destroy(xrnic_class);
+ unregister_inetaddr_notifier(&cmac_inetaddr_notifier);
+ unregister_inet6addr_notifier(&cmac_inet6addr_notifier);
+
+ return 0;
+}
+
+static const struct of_device_id xernic_of_match[] = {
+ { .compatible = "xlnx,ernic-1.0", },
+ { /* end of table*/ }
+};
+MODULE_DEVICE_TABLE(of, xernic_of_match);
+
+static struct platform_driver xernic_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = xernic_of_match,
+ },
+ .probe = xernic_probe,
+ .remove = xernic_remove,
+};
+
+module_platform_driver(xernic_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xilinx RNIC driver");
+MODULE_AUTHOR("Sandeep Dhanvada");
diff --git a/drivers/staging/xlnx_ernic/xmain.h b/drivers/staging/xlnx_ernic/xmain.h
new file mode 100644
index 000000000000..2f45f94d2f85
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmain.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XLNX_MAIN_H_
+#define _XLNX_MAIN_H_
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#define XRNIC_VERSION "1.2"
+#define NUM_XRNIC_DEVS 1
+#define DEVICE_NAME "xrnic"
+#define DRIVER_NAME "xrnic"
+
+int xrnic_open(struct inode *inode, struct file *file);
+int xrnic_release(struct inode *inode, struct file *file);
+long xrnic_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ssize_t xrnic_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos);
+ssize_t xrnic_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos);
+void xrnic_fill_wr(struct xrnic_qp_attr *qp_attr, u32 qp_depth);
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
diff --git a/drivers/staging/xlnx_ernic/xmr.c b/drivers/staging/xlnx_ernic/xmr.c
new file mode 100644
index 000000000000..4959595d48d0
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmr.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory registrations helpers for RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+#include "xhw_config.h"
+
+struct list_head mr_free;
+struct list_head mr_alloc;
+
+atomic_t pd_index = ATOMIC_INIT(0);
+int free_mem_ceil;
+int free_mem_remain;
+void __iomem *mtt_va;
+
+DECLARE_BITMAP(ernic_memtable, XRNIC_HW_MAX_QP_SUPPORT);
+/**
+ * alloc_pool_remove() - remove an entry from alloc pool
+ * @chunk: memory region to be removed from alloc pool.
+ * @return: 0 on success.
+ *
+ * TODO: Need to change the return type to void and remove the return statement.
+ */
+int alloc_pool_remove(struct mr *chunk)
+{
+ struct mr *next, *tmp;
+
+ list_for_each_entry_safe(next, tmp, &mr_alloc, list) {
+ if (next->paddr == chunk->paddr) {
+ __list_del_entry(&next->list);
+ free_mem_remain += chunk->len;
+ }
+ }
+ return 0;
+}
+
+/**
+ * free_pool_insert() - inserts specified memory region in the free pool
+ * @chunk: memory region to be inserted in free pool.
+ * @return: 0 on success. else, returns -ENOMEM.
+ *
+ * Adds the specified memory to the free pool and if possible,
+ * merges it with adjacent regions in free pool.
+ */
+int free_pool_insert(struct mr *chunk)
+{
+ struct mr *next, *dup, *tmp;
+ struct mr *prev = NULL;
+
+ dup = kzalloc(sizeof(*dup), GFP_ATOMIC);
+ if (!dup)
+ return -ENOMEM;
+ memcpy(dup, chunk, sizeof(*dup));
+
+ /* If list is empty, then, add the new region to the free pool */
+ if (list_empty(&mr_free)) {
+ list_add_tail(&dup->list, &mr_free);
+ goto done;
+ }
+
+ /* If the new region size exceeds the free memory limit,
+ * return error.
+ */
+ if (free_mem_ceil < (free_mem_remain + dup->len))
+ return -ENOMEM;
+
+ /* For a non-empty list, add the region at a suitable place
+ * in the free pool.
+ */
+ list_for_each_entry_safe(next, tmp, &mr_free, list) {
+ if (dup->paddr < next->paddr) {
+ prev = list_prev_entry(next, list);
+ list_add(&dup->list, &prev->list);
+ goto merge_free_pool;
+ }
+ }
+ /*
+ * If no suitable position to insert within free pool, then,
+ * append at the tail.
+ */
+ list_add_tail(&dup->list, &mr_free);
+
+ /* If possible, merge the region with previous and next regions. */
+merge_free_pool:
+ if (next && (dup->paddr + dup->len == next->paddr)) {
+ dup->len += next->len;
+ __list_del_entry(&next->list);
+ }
+
+ if (prev && (prev->paddr + prev->len == dup->paddr)) {
+ prev->len += dup->len;
+ __list_del_entry(&dup->list);
+ }
+ /* Except Phys and Virt address, clear all the contents of the region,
+ * If this region is in alloc pool, remove it from alloc pool.
+ */
+done:
+ dup->lkey = 0;
+ dup->rkey = 0;
+ dup->vaddr = 0;
+ dup->access = MR_ACCESS_RESVD;
+ alloc_pool_remove(chunk);
+ return 0;
+}
+EXPORT_SYMBOL(free_pool_insert);
+
+/**
+ * alloc_pd() - Allocates a Protection Domain
+ * @return: returns pointer to ernic_pd struct.
+ *
+ */
+struct ernic_pd *alloc_pd(void)
+{
+ struct ernic_pd *new_pd;
+ /* TODO: Need to check for return value and return ENOMEM */
+ new_pd = kzalloc(sizeof(*new_pd), GFP_ATOMIC);
+ atomic_inc(&pd_index);
+ atomic_set(&new_pd->id, atomic_read(&pd_index));
+ return new_pd;
+}
+EXPORT_SYMBOL(alloc_pd);
+
+/**
+ * dealloc_pd() - Deallocates a Protection Domain
+ * @pd: protection domain to be deallocated.
+ *
+ */
+void dealloc_pd(struct ernic_pd *pd)
+{
+ atomic_dec(&pd_index);
+ kfree(pd);
+}
+EXPORT_SYMBOL(dealloc_pd);
+
+/**
+ * dereg_mr() - deregisters the memory region from the Channel adapter.
+ * @mr: memory region to be de-registered.
+ *
+ * dereg_mr() de-registers a memory region with CA and clears the memory region
+ * registered with CA.
+ */
+void dereg_mr(struct mr *mr)
+{
+ int mtt_idx = (mr->rkey & 0xFF);
+
+ //memset(mtt_va + mtt_offset, 0, sizeof(struct ernic_mtt));
+ clear_bit(mtt_idx, ernic_memtable);
+}
+EXPORT_SYMBOL(dereg_mr);
+
+/**
+ * alloc_mem() - Allocates a Memory Region
+ * @pd: Protection domain mapped to the memory region
+ * @len: Length of the memory region required
+ * @return: on success, returns the physical address.
+ * else, returns -ENOMEM.
+ */
+phys_addr_t alloc_mem(struct ernic_pd *pd, int len)
+{
+ struct mr *next, *new_alloc, *new_free, *tmp;
+ int _len;
+
+ _len = round_up(len, 256);
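+ /* Allocations are rounded up to 256-byte granules and satisfied
+ * first-fit from the sorted free list: an exact-size chunk is taken
+ * whole, a larger chunk is split and the remainder is returned to the
+ * free pool.
+ */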
+ new_alloc = kzalloc(sizeof(*new_alloc), GFP_KERNEL);
+ new_free = kzalloc(sizeof(*new_free), GFP_KERNEL);
+
+ /* requested more memory than the free pool capacity? */
+ if (free_mem_remain < _len)
+ goto err;
+
+ list_for_each_entry_safe(next, tmp, &mr_free, list) {
+ if (next->len == _len) {
+ new_alloc->paddr = next->paddr;
+ __list_del_entry(&next->list);
+ goto reg_mr;
+ }
+ if (next->len > _len) {
+ __list_del_entry(&next->list);
+ new_alloc->paddr = next->paddr;
+ new_free->paddr = next->paddr + _len;
+ new_free->len = next->len - _len;
+ free_pool_insert(new_free);
+ goto reg_mr;
+ }
+ }
+
+err:
+ /* No free memory of requested size */
+ kfree(new_alloc);
+ kfree(new_free);
+
+ return -ENOMEM;
+reg_mr:
+ free_mem_remain = free_mem_remain - _len;
+ new_alloc->pd = pd;
+ new_alloc->len = _len;
+ new_alloc->vaddr = (u64)(uintptr_t)ioremap(new_alloc->paddr, _len);
+ list_add_tail(&new_alloc->list, &mr_alloc);
+ return new_alloc->paddr;
+}
+EXPORT_SYMBOL(alloc_mem);
+
+u64 get_virt_addr(phys_addr_t phys_addr)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == phys_addr)
+ return next->vaddr;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(get_virt_addr);
+
+/**
+ * free_mem() - inserts a memory region in free pool and
+ * removes from alloc pool
+ * @paddr: physical address to be freed.
+ *
+ */
+void free_mem(phys_addr_t paddr)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == paddr)
+ goto found;
+ }
+ return;
+found:
+ iounmap((void __iomem *)(unsigned long)next->vaddr);
+ free_pool_insert(next);
+}
+EXPORT_SYMBOL(free_mem);
+
+/**
+ * register_mem_to_ca() - Registers a memory region with the Channel Adapter
+ * @mr: memory region to register.
+ * @return: a pointer to struct mr
+ *
+ * register_mem_to_ca() allocates an MTT slot for the memory region, programs
+ * the MTT registers for the CA and updates the rkey in the registered region.
+ *
+ */
+static struct mr *register_mem_to_ca(struct mr *mr)
+{
+ int bit, mtt_idx, offset;
+ struct ernic_mtt mtt;
+
+ bit = find_first_zero_bit(ernic_memtable, XRNIC_HW_MAX_QP_SUPPORT);
+ set_bit(bit, ernic_memtable);
+ mtt_idx = bit;
+ mtt.pa = mr->paddr;
+ mtt.iova = mr->vaddr;
+ mtt.pd = atomic_read(&mr->pd->id);
+ mr->rkey = (mtt_idx << 8) | bit;
+ mtt.rkey = mr->rkey;
+ mtt.access = mr->access;
+ mtt.len = mr->len;
+ offset = (int)(mtt_va + (mtt_idx * 0x100));
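+ /* Each MTT entry occupies a 0x100-byte window in the remapped MTT
+ * region; the individual fields are written at the ERNIC_*_OFFSET
+ * locations defined in xmr.h.
+ */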
+
+ iowrite32(mtt.pd, (void __iomem *)(offset + ERNIC_PD_OFFSET));
+ iowrite32((mtt.iova & 0xFFFFFFFF),
+ (void __iomem *)(offset + ERNIC_IOVA_OFFSET));
+ iowrite32(((mtt.iova >> 32) & 0xFFFFFFFF),
+ (void __iomem *)(offset + ERNIC_IOVA_OFFSET + 4));
+ iowrite32((mtt.pa & 0xFFFFFFFF),
+ (void __iomem *)(offset + ERNIC_PA_OFFSET));
+ iowrite32(((mtt.pa >> 32) & 0xFFFFFFFF),
+ (void __iomem *)(offset + ERNIC_PA_OFFSET + 4));
+ iowrite32((mtt.rkey & 0xFFFF),
+ (void __iomem *)(offset + ERNIC_RKEY_OFFSET));
+ iowrite32(mtt.len, (void __iomem *)(offset + ERNIC_LEN_OFFSET));
+ iowrite32(mtt.access, (void __iomem *)(offset + ERNIC_ACCESS_OFFSET));
+ return mr;
+}
+
+/**
+ * reg_phys_mr() - Registers a physical address with the Channel Adapter
+ * @pd: Protection domain associated with the physical address.
+ * @phys_addr: The physical address to be registered.
+ * @len: length of the buffer to be registered.
+ * @access: access permissions for the registered buffer.
+ * @va_reg_base: Virtual address. ERNIC currently supports neither Base
+ * Memory Extensions nor Zero Based VA, so this argument is
+ * ignored for now and exists only to match the Verbs signature.
+ * @return: on success, returns a pointer to struct mr.
+ * else, returns a pointer to error.
+ *
+ * reg_phys_mr() verifies that @phys_addr belongs to an allocated region and
+ * registers it with the CA via register_mem_to_ca(), which fills in the rkey.
+ */
+struct mr *reg_phys_mr(struct ernic_pd *pd, phys_addr_t phys_addr,
+ int len, int access, void *va_reg_base)
+{
+ struct mr *phys_mr;
+ struct mr *next;
+
+ list_for_each_entry(next, &mr_alloc, list) {
+ if (next->paddr == phys_addr)
+ goto found;
+ }
+ /* Physical Address of the requested region is invalid */
+ return ERR_PTR(-EINVAL);
+found:
+ phys_mr = kzalloc(sizeof(*phys_mr), GFP_KERNEL);
+ phys_mr->paddr = phys_addr;
+ phys_mr->vaddr = next->vaddr;
+ phys_mr->len = len;
+ phys_mr->access = access;
+ phys_mr->pd = pd;
+
+ return register_mem_to_ca(phys_mr);
+}
+EXPORT_SYMBOL(reg_phys_mr);
+
+struct mr *query_mr(struct ernic_pd *pd)
+{
+ struct mr *next, *tmp;
+
+ list_for_each_entry_safe(next, tmp, &mr_alloc, list) {
+ if (atomic_read(&next->pd->id) == atomic_read(&pd->id)) {
+ pr_info("Found MR\n");
+ goto ret;
+ }
+ }
+ return ERR_PTR(-EINVAL);
+ret:
+ return next;
+}
+EXPORT_SYMBOL(query_mr);
+
+/**
+ * dump_list() - prints all the regions for the specified list.
+ * @head: HEAD pointer for the list to be printed.
+ *
+ * dump_list() iterates over the specified list HEAD and
+ * prints all the physical address and length at each node in the list.
+ */
+static void dump_list(struct list_head *head)
+{
+ struct mr *next;
+
+ list_for_each_entry(next, head, list) {
+ pr_info("MR [%d:%s] Phys_addr = %#x, vaddr = %llx, len = %d\n",
+ __LINE__, __func__,
+ next->paddr, next->vaddr, next->len);
+ }
+}
+
+/**
+ * dump_free_list() - prints all the regions in the free pool.
+ *
+ * dump_free_list() is a wrapper function for dump_list()
+ * to print free pool data
+ *
+ */
+void dump_free_list(void)
+{
+ dump_list(&mr_free);
+}
+EXPORT_SYMBOL(dump_free_list);
+
+/**
+ * dump_alloc_list() - prints all the regions in the alloc pool.
+ *
+ * dump_alloc_list() is a wrapper function for dump_list()
+ * to print alloc pool data
+ */
+void dump_alloc_list(void)
+{
+ dump_list(&mr_alloc);
+}
+EXPORT_SYMBOL(dump_alloc_list);
+
+/**
+ * init_mr() - Initialization function for memory region.
+ * @addr: Physical Address of the starting memory region.
+ * @length: Length of the region to initialize.
+ * @return: 0 on success.
+ * else, -EINVAL.
+ *
+ * init_mr() initializes a region of free memory
+ *
+ * Note: This should be called only once by the RNIC driver.
+ */
+int init_mr(phys_addr_t addr, int length)
+{
+ struct mr *reg = kzalloc(sizeof(*reg), GFP_KERNEL);
+
+ /* Multiple init_mr() calls? */
+ if (free_mem_ceil > 0)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&mr_free);
+ INIT_LIST_HEAD(&mr_alloc);
+ reg->paddr = addr;
+ reg->len = length;
+ free_pool_insert(reg);
+ free_mem_remain = reg->len;
+ free_mem_ceil = free_mem_remain;
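+ /* free_mem_ceil doubles as a guard against repeated init_mr() calls
+ * and as the capacity limit checked by free_pool_insert().
+ */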
+/* TODO: 0x2000 is the current Protection Domain length for 255
+ * Protection Domains.
+ * Need to retrieve the number of Protection Domains and the length of each
+ * from the DTS and calculate the overall remap size for all Protection
+ * Domains, instead of using a hard-coded value. Currently, the length of
+ * each Protection Domain is not exported in the DTS.
+ */
+ mtt_va = ioremap(MTT_BASE, 0x2000);
+ return 0;
+}
diff --git a/drivers/staging/xlnx_ernic/xmr.h b/drivers/staging/xlnx_ernic/xmr.h
new file mode 100644
index 000000000000..7c822b22eff9
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xmr.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+struct mr {
+ phys_addr_t paddr;
+ u64 vaddr;
+ int len;
+ unsigned int access;
+ struct ernic_pd *pd;
+ int lkey;
+ int rkey;
+ struct list_head list;
+};
+
+struct ernic_pd {
+ atomic_t id;
+};
+
+struct ernic_mtt {
+ unsigned long pd;
+#define ERNIC_PD_OFFSET 0
+ u64 iova;
+#define ERNIC_IOVA_OFFSET 4
+ u64 pa;
+#define ERNIC_PA_OFFSET 12
+ int rkey;
+#define ERNIC_RKEY_OFFSET 20
+ int len;
+#define ERNIC_LEN_OFFSET 24
+ unsigned int access;
+#define ERNIC_ACCESS_OFFSET 28
+};
+
+phys_addr_t alloc_mem(struct ernic_pd *pd, int len);
+void free_mem(phys_addr_t paddr);
+struct mr *query_mr(struct ernic_pd *pd);
+struct ernic_pd *alloc_pd(void);
+void dealloc_pd(struct ernic_pd *pd);
+void dump_free_list(void);
+void dump_alloc_list(void);
+int init_mr(phys_addr_t addr, int len);
+int free_pool_insert(struct mr *chunk);
+void dereg_mr(struct mr *mr);
+u64 get_virt_addr(phys_addr_t phys_addr);
+struct mr *reg_phys_mr(struct ernic_pd *pd, phys_addr_t phys_addr,
+ int len, int access, void *va_reg_base);
+int alloc_pool_remove(struct mr *chunk);
+
+extern void __iomem *mtt_va;
+/* TODO: Get the Base address and Length from DTS, instead of Macro.
+ * Currently, the design is only for Microblaze with a fixed memory
+ * in the design.
+ *
+ * MEMORY_REGION_BASE is a carve-out memory which will be ioremapped
+ * when required for ERNIC Configuration and Queue Pairs.
+ */
+#define MEMORY_REGION_BASE 0xC4000000
+#define MEMORY_REGION_LEN 0x3BFFFFFF
+/* TODO: Get MTT_BASE from DTS instead of Macro. */
+#define MTT_BASE 0x84000000
+#define MR_ACCESS_READ 0
+#define MR_ACCESS_WRITE 1
+#define MR_ACCESS_RDWR 2
+#define MR_ACCESS_RESVD 3
diff --git a/drivers/staging/xlnx_ernic/xperftest.h b/drivers/staging/xlnx_ernic/xperftest.h
new file mode 100644
index 000000000000..609469450a9f
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xperftest.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ */
+
+#ifndef _PERF_TEST_H
+#define _PERF_TEST_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+struct ernic_bwtest_struct {
+ u64 reserved1;
+ int qp_number;
+ int reserved2;
+ unsigned long long rkey;
+ unsigned long long vaddr;
+ char reserved3[24];
+};
+
+int perftest_parse_addr(struct sockaddr_storage *s_addr, char *buf);
+void rq_handler(u32 rq_count, void *rq_context);
+void sq_handler(u32 rq_count, void *sq_context);
+void perftest_fill_wr(void __iomem *sq_ba);
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _PERF_TEST_H*/
diff --git a/drivers/staging/xlnx_ernic/xqp.c b/drivers/staging/xlnx_ernic/xqp.c
new file mode 100644
index 000000000000..dae21fda5da6
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xqp.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#include "xcommon.h"
+
+#define DISPLAY_REGS_ON_DISCONNECT
+#define EXPERIMENTAL_CODE
+
+struct xrnic_conn_param {
+ const void *private_data;
+ u8 private_data_len;
+ u8 responder_resources;
+ u8 initiator_depth;
+ u8 flow_control;
+ u8 retry_count;
+ u8 rnr_retry_count;
+ u8 srq;
+ u8 qp_num;
+};
+
+/* EXTRA Bytes for Invariant CRC */
+#define ERNIC_INV_CRC 4
+/* ERNIC Doesn't have Variant CRC for P2P */
+#define ERNIC_VAR_CRC 0
+#define EXTRA_PKT_LEN (ERNIC_INV_CRC + ERNIC_VAR_CRC)
+
+#define cpu_to_be24(x) ((x) << 16)
+
+#define CMA_VERSION 0
+#define QP_STAT_SQ_EMPTY_BIT_POS (9)
+#define QP_STAT_OUTSTANDG_EMPTY_Q_BIT_POS (10)
+
+int in_err_wr_ptr;
+struct list_head cm_id_list;
+
+/**
+ * xrnic_set_qp_state() - Sets the qp state to the desired state
+ * @qp_num: XRNIC QP number
+ * @state: State to set
+ *
+ * @return: XRNIC_SUCCESS in case of success or a error representative value
+ */
+int xrnic_set_qp_state(int qp_num, int state)
+{
+ if (qp_num < 0)
+ return -XRNIC_INVALID_QP_ID;
+
+ if (state != XRNIC_QP_IN_USE && state != XRNIC_QP_FREE)
+ return -XRNIC_INVALID_QP_STATUS;
+
+ xrnic_dev->qp_attr[qp_num].qp_status = state;
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_find_free_qp() - Finds a free QP to use
+ * @return: free QP number, or an error value in case no QP is free
+ */
+int xrnic_find_free_qp(void)
+{
+ int i;
+
+ for (i = 0 ; i < XRNIC_MAX_QP_SUPPORT ; i++) {
+ /* A QP is free when its status is marked XRNIC_QP_FREE */
+ if (xrnic_dev->qp_attr[i].qp_status == XRNIC_QP_FREE)
+ return i;
+ }
+ return XRNIC_FAILED;
+}
+
+/**
+ * xrnic_rdma_create_qp() - Allocates a free QP and configures it for the CM ID
+ * @cm_id: CM ID to associate with the QP
+ * @pd: Protection domain to associate the QP with
+ * @init_attr: QP attributes or config values
+ * @return: XRNIC_SUCCESS if successful otherwise error representing code
+ */
+int xrnic_rdma_create_qp(struct xrnic_rdma_cm_id *cm_id, struct ernic_pd *pd,
+ struct xrnic_qp_init_attr *init_attr)
+{
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_qp_info *qp_info;
+ int ret;
+
+ if (init_attr->sq_depth > XRNIC_MAX_SQ_DEPTH ||
+ init_attr->rq_depth > XRNIC_MAX_RQ_DEPTH ||
+ init_attr->send_sge_size > XRNIC_MAX_SEND_SGL_SIZE ||
+ init_attr->send_pkt_size > XRNIC_MAX_SEND_PKT_SIZE) {
+ return -XRNIC_INVALID_QP_INIT_ATTR;
+ }
+
+ qp_info = &cm_id->qp_info;
+
+ qp_info->qp_num = xrnic_find_free_qp();
+ qp_info->qp_num += 2;
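+ /* QP numbers 0 and 1 are reserved (QP1 carries the CM MADs), so data
+ * QPs are numbered from 2 and qp_attr[] is indexed with qp_num - 2.
+ */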
+
+ ret = xrnic_set_qp_state((qp_info->qp_num - 2), XRNIC_QP_IN_USE);
+ if (ret < 0)
+ return ret;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+
+ if (qp_info->qp_num < 2 || qp_attr->qp_type != init_attr->qp_type)
+ return -XRNIC_INVALID_QP_ID;
+
+ cm_id->qp_type = init_attr->qp_type;
+ cm_id->local_cm_id = (qp_info->qp_num);
+
+ qp_info->xrnic_rq_event_handler = init_attr->xrnic_rq_event_handler;
+ qp_info->rq_context = init_attr->rq_context;
+ qp_info->xrnic_sq_event_handler = init_attr->xrnic_sq_event_handler;
+ qp_info->sq_context = init_attr->sq_context;
+
+ qp_info->rq_buf_ba_ca = init_attr->rq_buf_ba_ca;
+ qp_info->rq_buf_ba_ca_phys = init_attr->rq_buf_ba_ca_phys;
+ qp_info->sq_ba = init_attr->sq_ba;
+ qp_info->sq_ba_phys = init_attr->sq_ba_phys;
+ qp_info->cq_ba = init_attr->cq_ba;
+ qp_info->cq_ba_phys = init_attr->cq_ba_phys;
+
+ qp_info->sq_depth = init_attr->sq_depth;
+ qp_info->rq_depth = init_attr->rq_depth;
+ qp_info->send_sge_size = init_attr->send_sge_size;
+ qp_info->send_pkt_size = init_attr->send_pkt_size;
+ qp_info->recv_pkt_size = init_attr->recv_pkt_size;
+
+ qp_attr->rq_buf_ba_ca = qp_info->rq_buf_ba_ca;
+ qp_attr->rq_buf_ba_ca_phys = qp_info->rq_buf_ba_ca_phys;
+ qp_attr->sq_ba = qp_info->sq_ba;
+ qp_attr->sq_ba_phys = qp_info->sq_ba_phys;
+ qp_attr->cq_ba = qp_info->cq_ba;
+ qp_attr->cq_ba_phys = qp_info->cq_ba_phys;
+
+ qp_attr->sq_depth = qp_info->sq_depth;
+ qp_attr->rq_depth = qp_info->rq_depth;
+ qp_attr->send_sge_size = qp_info->send_sge_size;
+ qp_attr->send_pkt_size = qp_info->send_pkt_size;
+ qp_attr->recv_pkt_size = qp_info->recv_pkt_size;
+#ifdef ERNIC_MEM_REGISTER
+ if (pd)
+ qp_attr->pd = atomic_read(&pd->id);
+#endif
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_rdma_create_qp);
+
+/**
+ * xrnic_post_recv() - This function receives an incoming packet
+ * @qp_info: QP info on which packet should be received
+ * @rq_count: Number of packets to receive
+ * @return: SUCCESS if received required number of packets else error
+ * representative value
+ */
+int xrnic_post_recv(struct xrnic_qp_info *qp_info, u32 rq_count)
+{
+ struct xrnic_qp_attr *qp_attr;
+ int ret = -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ ret = xrnic_qp_recv_pkt(qp_attr, rq_count);
+
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_post_recv);
+
+/**
+ * xrnic_post_send() - This function posts a SEND WR
+ * @qp_info: QP info to post the request
+ * @sq_count: SEND packet count
+ * @return: SUCCESS if successfully posts a SEND,
+ * otherwise error representative value
+ */
+int xrnic_post_send(struct xrnic_qp_info *qp_info, u32 sq_count)
+{
+ struct xrnic_qp_attr *qp_attr;
+ int ret = -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ qp_attr = &xrnic_dev->qp_attr[qp_info->qp_num - 2];
+ if (qp_attr->remote_cm_id)
+ ret = xrnic_qp_send_pkt(qp_attr, sq_count);
+
+ return ret;
+}
+EXPORT_SYMBOL(xrnic_post_send);
+
+/**
+ * xrnic_destroy_qp() - Function destroys the QP and resets the QP info
+ * @qp_info: QP info or config
+ * @return: XRNIC_SUCCESS if successfully destroys the QP,
+ * otherwise error representative value
+ */
+int xrnic_destroy_qp(struct xrnic_qp_info *qp_info)
+{
+ u32 qp_num;
+ struct xrnic_qp_attr *qp_attr;
+
+ if (qp_info->qp_num < 2 ||
+ (qp_info->qp_num > XRNIC_MAX_QP_SUPPORT + 2))
+ return -XRNIC_INVALID_QP_ID;
+
+ if (qp_info->qp_num >= 2) {
+ qp_num = qp_info->qp_num;
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ xrnic_set_qp_state((qp_num - 2), XRNIC_QP_FREE);
+
+ memset((void *)qp_info, 0, sizeof(struct xrnic_qp_info));
+
+ qp_attr->rq_buf_ba_ca = qp_info->rq_buf_ba_ca;
+ qp_attr->rq_buf_ba_ca_phys = qp_info->rq_buf_ba_ca_phys;
+ qp_attr->sq_ba = qp_info->sq_ba;
+ qp_attr->sq_ba_phys = qp_info->sq_ba_phys;
+ qp_attr->cq_ba = qp_info->cq_ba;
+ qp_attr->cq_ba_phys = qp_info->cq_ba_phys;
+
+ qp_attr->sq_depth = qp_info->sq_depth;
+ qp_attr->rq_depth = qp_info->rq_depth;
+ qp_attr->send_sge_size = qp_info->send_sge_size;
+ qp_attr->send_pkt_size = qp_info->send_pkt_size;
+ qp_attr->recv_pkt_size = qp_info->recv_pkt_size;
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("Received invalid QP ID\n");
+ return -XRNIC_INVALID_QP_ID;
+ }
+
+ return XRNIC_SUCCESS;
+}
+EXPORT_SYMBOL(xrnic_destroy_qp);
+
+/**
+ * xrnic_reset_io_qp() - This function resets the QP configuration
+ * @qp_attr: QP memory map or config
+ */
+void xrnic_reset_io_qp(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct xrnic_reg_map *reg_map;
+ unsigned long timeout;
+ u32 sq_pi_db_val, cq_head_val;
+ u32 rq_ci_db_val, stat_rq_pi_db_val;
+ u32 config_value;
+ int qp_num = qp_attr->qp_num - 2;
+ struct rdma_qp_attr *rdma_qp_attr;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ /* 1. WAIT FOR SQ/OSQ EMPTY TO BE SET */
+ while (!((ioread32(&rdma_qp_attr->qp_status) >> 9) & 0x3))
+ ;
+
+ /* 2 WAIT FOR register values SQ_PI_DB == CQ_HEAD */
+ sq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ cq_head_val = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+
+ timeout = jiffies;
+ while (!(sq_pi_db_val == cq_head_val)) {
+ sq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+ cq_head_val = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+ if (time_after(jiffies, (timeout + 1 * HZ)))
+ break;
+ }
+
+ /* 3. WAIT FOR register values STAT_RQ_PI_DB == RQ_CI_DB */
+ rq_ci_db_val = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ stat_rq_pi_db_val = ioread32(((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ timeout = jiffies;
+ while (!(rq_ci_db_val == stat_rq_pi_db_val)) {
+ rq_ci_db_val = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+ stat_rq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->stat_rq_pi_db)));
+ if (time_after(jiffies, (timeout + 1 * HZ)))
+ break;
+ }
+ /* 4. SET QP_CONF register HW handshake disable to 1 */
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value | XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_RQ_INTR_EN | XRNIC_QP_CONFIG_CQE_INTR_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ DEBUG_LOG("QP config value is 0x%x\n", config_value);
+
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ config_value = (xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ /* 5. SET QP_CONF register QP ENABLE TO 0 and QP_ADV_CONF register
+ * SW OVERRIDE TO 1
+ */
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value & ~XRNIC_QP_CONFIG_QP_ENABLE;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ /* Enable SW override enable */
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ /* 6. Initialize the QP under reset: */
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_head)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_psn)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->last_rq_req)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_msn)));
+
+ /* 7. Initialize Ethernet side registers */
+ /* Not needed here; this is done during connection initialization */
+
+ /* 8. Set QP_CONF register QP ENABLE TO 1 */
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->qp_conf)));
+ config_value = config_value | XRNIC_QP_CONFIG_QP_ENABLE;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = ioread32((void *)&rdma_qp_attr->qp_conf);
+ config_value = config_value & ~XRNIC_QP_CONFIG_UNDER_RECOVERY;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* 9.Set QP_ADV_CONF register SW_OVERRIDE SET TO 0 */
+ /* Disable SW override enable */
+ config_value = 0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->sqhd = 0;
+}
+
+/**
+ * xrnic_reset_io_qp_sq_cq_ptr() - This function resets SQ, CQ pointers of QP
+ * @qp_attr: QP config
+ * @hw_hs_info: QP HW handshake config
+ */
+void xrnic_reset_io_qp_sq_cq_ptr(struct xrnic_qp_attr *qp_attr,
+ struct xrnic_hw_handshake_info *hw_hs_info)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_reg_map *reg_map;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ /* Enable SW override enable */
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ if (!hw_hs_info)
+ goto enable_hw_hs;
+
+ config_value = 0;
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_head)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ config_value = hw_hs_info->rq_wrptr_db_add;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = hw_hs_info->sq_cmpl_db_add;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ config_value = ioread32((void *)(&rdma_qp_attr->stat_rq_pi_db));
+
+ config_value = hw_hs_info->cnct_io_conf_l_16b |
+ ((config_value & 0xFFFF) << 16);
+ iowrite32(config_value, ((void *)(&xrnic_ctrl_config->cnct_io_conf)));
+enable_hw_hs:
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(qp_attr->recv_pkt_size);
+
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* Disable SW override enable */
+
+ config_value = 0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->cq_head)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ config_value = ioread32(((void *)
+ (&rdma_qp_attr->stat_curr_sqptr_pro)));
+
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->sqhd = 0;
+}
+
+/**
+ * xrnic_reset_io_qp_rq_ptr() - This function resets RQ pointers of QP
+ * @qp_attr: QP config
+ */
+void xrnic_reset_io_qp_rq_ptr(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_reg_map *reg_map;
+ struct xrnic_ctrl_config *xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ reg_map = xrnic_dev->xrnic_mmap.xrnic_regs;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ xrnic_ctrl_config = &reg_map->xrnic_ctrl_config;
+
+ config_value = 0x1;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = 0x0;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ XRNIC_QP_CONFIG_CQE_INTR_EN | xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(qp_attr->recv_pkt_size) |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_CQE_WRITE_EN;
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+ /* Disable SW override enable */
+ config_value = 0x0;
+ iowrite32(config_value,
+ ((void *)(&xrnic_ctrl_config->xrnic_adv_conf)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->stat_rq_buf_ca)));
+
+ config_value = ioread32(((void *)(&rdma_qp_attr->stat_rq_pi_db)));
+}
+
+/**
+ * xrnic_qp_send_pkt() - This function sends packets
+ * @qp_attr: QP config
+ * @sq_pkt_count: Number of packets to send
+ * @return: XRNIC_SUCCESS if successful
+ * otherwise error representative value
+ */
+int xrnic_qp_send_pkt(struct xrnic_qp_attr *qp_attr, u32 sq_pkt_count)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0, sq_pkt_count_tmp;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+
+ config_value = ioread32((char *)xrnic_mmap->sq_cmpl_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (config_value == 0)
+ sq_pkt_count_tmp = qp_attr->sq_depth;
+ else if (qp_attr->sq_cmpl_db_local >= config_value)
+ sq_pkt_count_tmp = (config_value + qp_attr->sq_depth) -
+ qp_attr->sq_cmpl_db_local;
+ else
+ sq_pkt_count_tmp = config_value - qp_attr->sq_cmpl_db_local;
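+ /* sq_pkt_count_tmp now holds the number of free SQ slots between the
+ * hardware completion pointer and the local producer index; the ring
+ * wraps at sq_depth.
+ */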
+ if (sq_pkt_count_tmp < sq_pkt_count)
+ return -XRNIC_INVALID_PKT_CNT;
+
+ /* sq_cmpl_db_local must track the hardware's queue-specific
+ * sq_cmpl_db_local register. It must also be maintained in case
+ * some packets need to be resent.
+ */
+
+ qp_attr->sq_cmpl_db_local = qp_attr->sq_cmpl_db_local + sq_pkt_count;
+ if (qp_attr->sq_cmpl_db_local > qp_attr->sq_depth)
+ qp_attr->sq_cmpl_db_local = qp_attr->sq_cmpl_db_local
+ - qp_attr->sq_depth;
+ config_value = qp_attr->sq_cmpl_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_pi_db)));
+
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_qp_recv_pkt() - This function receives packets
+ * @qp_attr: QP config
+ * @rq_pkt_count: receive packet count
+ * @return: XRNIC_SUCCESS if successful
+ * otherwise error representative value
+ */
+int xrnic_qp_recv_pkt(struct xrnic_qp_attr *qp_attr, u32 rq_pkt_count)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0, rq_pkt_count_tmp;
+ int qp_num = qp_attr->qp_num - 2;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (config_value == 0)
+ rq_pkt_count_tmp = qp_attr->rq_depth;
+ else if (qp_attr->rq_wrptr_db_local >= config_value)
+ rq_pkt_count_tmp = (config_value + qp_attr->rq_depth) -
+ qp_attr->rq_wrptr_db_local;
+ else
+ rq_pkt_count_tmp = config_value - qp_attr->rq_wrptr_db_local;
+
+ if (rq_pkt_count_tmp < rq_pkt_count)
+ return -XRNIC_INVALID_PKT_CNT;
+ /* rq_wrptr_db_local must track the hardware's queue-specific
+ * write-pointer doorbell register. It must also be maintained in
+ * case some packets need to be resent.
+ */
+
+ qp_attr->rq_wrptr_db_local = qp_attr->rq_wrptr_db_local + rq_pkt_count;
+ if (qp_attr->rq_wrptr_db_local > qp_attr->rq_depth)
+ qp_attr->rq_wrptr_db_local = qp_attr->rq_wrptr_db_local
+ - qp_attr->rq_depth;
+
+ config_value = qp_attr->rq_wrptr_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_ci_db)));
+
+ return XRNIC_SUCCESS;
+}
+
+/**
+ * xrnic_qp1_send_mad_pkt() - This function initiates sending a management
+ * datagram packet.
+ * @send_sgl_temp: Scatter gather list
+ * @qp1_attr: QP1 info
+ * @send_pkt_size: Send packet size
+ */
+void xrnic_qp1_send_mad_pkt(void *send_sgl_temp,
+ struct xrnic_qp_attr *qp1_attr, u32 send_pkt_size)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp1_attr *rdma_qp1_attr;
+ u32 config_value = 0;
+ struct wr *sq_wr; /*sq_ba*/
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp1_attr->xrnic_mmap;
+ rdma_qp1_attr = &xrnic_mmap->xrnic_regs->rdma_qp1_attr;
+
+ /* sq_cmpl_db_local must track the hardware's queue-specific
+ * sq_cmpl_db_local register. It must also be maintained in case
+ * some packets need to be resent.
+ */
+ sq_wr = (struct wr *)qp1_attr->sq_ba + qp1_attr->sq_cmpl_db_local;
+ /* The length is always 4096; this is mandatory. */
+ sq_wr->length = send_pkt_size;
+ memcpy((void *)((char *)qp1_attr->send_sgl +
+ (qp1_attr->sq_cmpl_db_local * XRNIC_SEND_SGL_SIZE)),
+ (const void *)send_sgl_temp,
+ XRNIC_SEND_SGL_SIZE);
+ qp1_attr->sq_cmpl_db_local = qp1_attr->sq_cmpl_db_local + 1;
+
+ config_value = qp1_attr->sq_cmpl_db_local;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->sq_pi_db)));
+
+ if (qp1_attr->sq_cmpl_db_local == XRNIC_SQ_DEPTH)
+ qp1_attr->sq_cmpl_db_local = 0;
+}
+
+/**
+ * xrnic_qp_pkt_recv() - This function process received data packets
+ * @qp_attr: QP info on which data packet has been received
+ */
+static void xrnic_qp_pkt_recv(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ qp_attr->xrnic_mmap;
+ u32 config_value = 0;
+ unsigned long flag;
+ int rq_pkt_count = 0;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+
+ spin_lock_irqsave(&qp_attr->qp_lock, flag);
+ config_value = ioread32((char *)xrnic_mmap->rq_wrptr_db_add +
+ (4 * (qp_attr->qp_num - 1)));
+ if (qp_attr->rq_wrptr_db_local == config_value) {
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+ return;
+ }
+ if (qp_attr->rq_wrptr_db_local > config_value) {
+ rq_pkt_count = (config_value + qp_attr->rq_depth) -
+ qp_attr->rq_wrptr_db_local;
+ } else {
+ rq_pkt_count = config_value - qp_attr->rq_wrptr_db_local;
+ }
+
+ cm_id->qp_info.xrnic_rq_event_handler(rq_pkt_count,
+ cm_id->qp_info.rq_context);
+
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_wqe_completed() - This function process completion interrupts
+ * @qp_attr: QP info for which completion is received
+ */
+static void xrnic_wqe_completed(struct xrnic_qp_attr *qp_attr)
+{
+ struct xrnic_memory_map *xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr;
+ u32 config_value = 0;
+ unsigned long flag;
+ struct xrnic_rdma_cm_id *cm_id = qp_attr->cm_id;
+ int qp_num = qp_attr->qp_num;
+
+ xrnic_mmap = (struct xrnic_memory_map *)qp_attr->xrnic_mmap;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+ /* sq_cmpl_db_local must track the hardware's queue-specific
+ * sq_cmpl_db_local register. It must also be maintained in case
+ * some packets need to be resent.
+ */
+ spin_lock_irqsave(&qp_attr->qp_lock, flag);
+ config_value = ioread32((char *)&rdma_qp_attr->cq_head);
+ cm_id->qp_info.xrnic_sq_event_handler(config_value,
+ cm_id->qp_info.sq_context);
+ spin_unlock_irqrestore(&qp_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_wqe_completed_intr_handler() - Interrupt handler for completion
+ * interrupt type
+ * @data: XRNIC device info
+ */
+void xrnic_wqe_completed_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ unsigned long cq_intr = 0, qp_num, i, j;
+ unsigned long flag;
+
+ for (i = 0 ; i < XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED ; i++) {
+ cq_intr = ioread32((void __iomem *)
+ ((&xrnic_ctrl_config->cq_intr_sts_1) +
+ (i * 4)));
+
+ if (!cq_intr)
+ continue;
+
+ for (j = find_first_bit(&cq_intr, XRNIC_REG_WIDTH);
+ j < XRNIC_REG_WIDTH;
+ j = find_next_bit(&cq_intr, XRNIC_REG_WIDTH, j + 1)) {
+ qp_num = (i << 5) + j;
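+ /* Each 32-bit CQ interrupt status register covers 32 QPs:
+ * register i, bit j corresponds to QP number (i * 32 + j).
+ */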
+ iowrite32((1 << j), (void __iomem *)
+ ((&xrnic_ctrl_config->cq_intr_sts_1) +
+ (i * 4)));
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ if (qp_attr->cm_id)
+ xrnic_wqe_completed(qp_attr);
+ else
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en |
+ WQE_COMPLETED_INTR_EN;
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_pkt_recv_intr_handler() - Interrupt handler for data
+ * packet interrupt
+ * @data: XRNIC device info
+ */
+void xrnic_qp_pkt_recv_intr_handler(unsigned long data)
+{
+ struct xrnic_dev_info *xrnic_dev = (struct xrnic_dev_info *)data;
+ struct xrnic_memory_map *xrnic_mmap =
+ (struct xrnic_memory_map *)&xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr = &xrnic_dev->qp1_attr;
+ struct xrnic_qp_attr *qp_attr;
+ struct rdma_qp_attr *rdma_qp_attr;
+ struct xrnic_reg_map *regs;
+ struct xrnic_ctrl_config *xrnic_ctrl_config =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ unsigned long rq_intr = 0, qp_num, i, j, config_value;
+ unsigned long flag;
+
+ for (i = 0 ; i < XRNIC_RQ_CQ_INTR_STS_REG_SUPPORTED ; i++) {
+ rq_intr = ioread32((void __iomem *)
+ (&xrnic_ctrl_config->rq_intr_sts_1 + (i * 4)));
+
+ if (!rq_intr)
+ continue;
+
+ for (j = find_first_bit(&rq_intr, XRNIC_REG_WIDTH);
+ j < XRNIC_REG_WIDTH; j = find_next_bit
+ (&rq_intr, XRNIC_REG_WIDTH, j + 1)) {
+ qp_num = (i << 5) + j;
+ /* We need to change this with Work Request as
+ * for other Admin QP required wait events.
+ */
+ iowrite32((1 << j), ((void __iomem *)
+ (&xrnic_ctrl_config->rq_intr_sts_1) +
+ (i * 4)));
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ regs = xrnic_mmap->xrnic_regs;
+ rdma_qp_attr = &regs->rdma_qp_attr[qp_num - 2];
+ config_value = ioread32((void *)
+ (&rdma_qp_attr->qp_conf));
+ if (qp_attr->cm_id &&
+ (config_value & XRNIC_QP_CONFIG_HW_HNDSHK_DIS)) {
+ xrnic_qp_pkt_recv(qp_attr);
+ } else {
+ if (qp_attr->cm_id)
+ pr_err("HW handshake is enabled\n");
+ else
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ }
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ xrnic_dev->xrnic_mmap.intr_en = xrnic_dev->xrnic_mmap.intr_en |
+ QP_PKT_RCVD_INTR_EN;
+ iowrite32(xrnic_dev->xrnic_mmap.intr_en,
+ ((void *)(&xrnic_ctrl_config->intr_en)));
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_fatal_handler() - Interrupt handler for QP fatal interrupt type
+ * @data: XRNIC device info
+ */
+void xrnic_qp_fatal_handler(unsigned long data)
+{
+ struct xrnic_memory_map *xrnic_mmap =
+ (struct xrnic_memory_map *)&xrnic_dev->xrnic_mmap;
+ struct xrnic_ctrl_config *xrnic_conf =
+ &xrnic_dev->xrnic_mmap.xrnic_regs->xrnic_ctrl_config;
+ struct rdma_qp_attr *rdma_qp_attr;
+ int i, err_entries;
+ unsigned long timeout;
+ unsigned long config_value, qp_num, qp, sq_pi_db_val, cq_head_val;
+ struct xrnic_qp_attr *qp_attr;
+ struct xrnic_rdma_cm_id_info *cm_id_info;
+
+ err_entries = ioread32((void *)&xrnic_conf->in_errsts_q_wrptr);
+ pr_info("No of QPs in Fatal: %d\r\n", err_entries - in_err_wr_ptr);
+ for (i = 0; i < (err_entries - in_err_wr_ptr); i++) {
+ qp_num = ioread32((char *)xrnic_mmap->in_errsts_q_ba +
+ ((8 * in_err_wr_ptr) + (8 * i)));
+ qp_num = (qp_num & 0xFFFF0000) >> 16;
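+ /* Each incoming-error status queue entry carries the QP number in
+ * its upper 16 bits.
+ */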
+ qp = qp_num - 2;
+ rdma_qp_attr = &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp];
+ if (rdma_qp_attr) {
+ while (!((ioread32(&rdma_qp_attr->qp_status) >> 9) &
+ 0x3))
+ DEBUG_LOG("Fatal wait for SQ/OSQ empty\n");
+
+ /* 2 WAIT FOR register values SQ_PI_DB == CQ_HEAD */
+ sq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->sq_pi_db)));
+
+ cq_head_val = ioread32((void *)&rdma_qp_attr->cq_head);
+
+ timeout = jiffies;
+ while (!(sq_pi_db_val == cq_head_val)) {
+ sq_pi_db_val = ioread32(((void *)
+ (&rdma_qp_attr->sq_pi_db)));
+ cq_head_val = ioread32(((void *)
+ (&rdma_qp_attr->cq_head)));
+ if (time_after(jiffies, (timeout + 1 * HZ))) {
+ pr_info("SQ PI != CQ Head\n");
+ break;
+ }
+ }
+
+ /* Poll and wait for register value
+ * RESP_HNDL_STS.sq_pici_db_check_en == '1'
+ */
+ while (!((ioread32(&xrnic_conf->resp_handler_status)
+ >> 16) & 0x1))
+ DEBUG_LOG("waiting for RESP_HNDL_STS\n");
+
+ config_value = ioread32((void *)
+ &rdma_qp_attr->qp_conf);
+ config_value = config_value &
+ (~XRNIC_QP_CONFIG_QP_ENABLE);
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = ioread32((void *)
+ &rdma_qp_attr->qp_conf);
+ config_value = config_value |
+ XRNIC_QP_CONFIG_UNDER_RECOVERY;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp_attr->qp_conf)));
+
+ /* Calling CM Handler to disconnect QP.*/
+ qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ if (qp_attr->cm_id) {
+ cm_id_info = qp_attr->cm_id->cm_id_info;
+ cm_id_info->conn_event_info.cm_event =
+ XRNIC_DREQ_RCVD;
+ cm_id_info->conn_event_info.status = 1;
+ cm_id_info->conn_event_info.private_data_len =
+ 0;
+ cm_id_info->conn_event_info.private_data =
+ NULL;
+ qp_attr->cm_id->xrnic_cm_handler
+ (qp_attr->cm_id,
+ &cm_id_info->conn_event_info);
+ qp_attr->cm_id = NULL;
+ } else {
+ pr_err("Received CM ID is NULL\n");
+ }
+ }
+ in_err_wr_ptr++;
+ }
+}
+
+/**
+ * xrnic_qp1_hw_configuration() - This function configures the QP1 registers
+ * @return: 0 if successfully configures QP1
+ */
+int xrnic_qp1_hw_configuration(void)
+{
+ struct xrnic_memory_map *xrnic_mmap = (struct xrnic_memory_map *)
+ &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr = (struct xrnic_qp_attr *)
+ &xrnic_dev->qp1_attr;
+ struct rdma_qp1_attr *rdma_qp1_attr;
+ u32 config_value = 0;
+
+ qp1_attr->qp_num = 1;
+ rdma_qp1_attr = &xrnic_dev->xrnic_mmap.xrnic_regs->rdma_qp1_attr;
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE | xrnic_dev->pmtu |
+ XRNIC_QP1_CONFIG_RQ_BUFF_SZ |
+ XRNIC_QP_CONFIG_RQ_INTR_EN |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS;
+
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->qp_conf)));
+
+ config_value = (xrnic_mmap->rq_buf_ba_ca_phys +
+ ((qp1_attr->qp_num - 1) * XRNIC_RECV_PKT_SIZE *
+ XRNIC_RQ_DEPTH)) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->rq_buf_ba_ca)));
+
+ qp1_attr->rq_buf_ba_ca = xrnic_mmap->rq_buf_ba_ca +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_RECV_PKT_SIZE *
+ XRNIC_RQ_DEPTH);
+
+ qp1_attr->rq_buf_ba_ca_phys = config_value;
+
+ config_value = xrnic_mmap->sq_ba_phys + ((qp1_attr->qp_num - 1) *
+ XRNIC_SEND_PKT_SIZE * XRNIC_SQ_DEPTH);
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->sq_ba)));
+
+ qp1_attr->sq_ba = (struct wr *)((void *)xrnic_mmap->sq_ba +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_SEND_PKT_SIZE *
+ XRNIC_SQ_DEPTH));
+ qp1_attr->sq_ba_phys = config_value;
+
+ qp1_attr->send_sgl_phys = xrnic_mmap->send_sgl_phys +
+ (XRNIC_SEND_SGL_SIZE *
+ XRNIC_SQ_DEPTH *
+ (qp1_attr->qp_num - 1));
+ qp1_attr->send_sgl = xrnic_mmap->send_sgl +
+ (XRNIC_SEND_SGL_SIZE *
+ XRNIC_SQ_DEPTH *
+ (qp1_attr->qp_num - 1));
+
+ xrnic_fill_wr(qp1_attr, XRNIC_SQ_DEPTH);
+
+ config_value = xrnic_mmap->cq_ba_phys + ((qp1_attr->qp_num - 1) *
+ XRNIC_SQ_DEPTH * sizeof(struct cqe));
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->cq_ba)));
+
+ qp1_attr->cq_ba = (struct cqe *)(xrnic_mmap->cq_ba +
+ ((qp1_attr->qp_num - 1) *
+ XRNIC_SQ_DEPTH *
+ sizeof(struct cqe)));
+ config_value = (xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->sq_cmpl_db_add)));
+
+ config_value = XRNIC_SQ_DEPTH | (XRNIC_RQ_DEPTH << 16);
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->q_depth)));
+
+ config_value = (xrnic_mmap->stat_rq_buf_ca_phys +
+ (4 * (qp1_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value,
+ ((void *)(&rdma_qp1_attr->stat_rq_buf_ca)));
+
+ config_value = XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT |
+ XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL;
+ iowrite32(config_value, ((void *)(&rdma_qp1_attr->timeout_conf)));
+ qp1_attr->qp1_attr = (struct xrnic_qp_attr *)&xrnic_dev->qp1_attr;
+ qp1_attr->rq_wrptr_db_local = 0;
+ qp1_attr->sq_cmpl_db_local = 0;
+ qp1_attr->rq_ci_db_local = 0;
+ qp1_attr->sq_pi_db_local = 0;
+
+ qp1_attr->resend_count = 0;
+ qp1_attr->local_cm_id = htonl(qp1_attr->qp_num);
+ qp1_attr->remote_cm_id = 0;
+
+ qp1_attr->curr_state = XRNIC_LISTEN;
+
+ qp1_attr->sqhd = 0;
+ qp1_attr->qp_type = XRNIC_QPT_UC;
+ qp1_attr->ip_addr_type = 0;
+
+ qp1_attr->xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ spin_lock_init(&qp1_attr->qp_lock);
+ return 0;
+}
+
+/**
+ * xrnic_display_qp_reg() - This function displays qp register info
+ * @qp_num: QP num for which register dump is required
+ */
+void xrnic_display_qp_reg(int qp_num)
+{
+ int i;
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+
+ for (i = 0; i < 45; i++)
+ pr_info("0x%X: 0x%08X\n",
+ (0x84020000 + (0x100 * (qp_num + 1)) + (i * 4)),
+ ioread32((void __iomem *)rdma_qp_attr + (i * 4)));
+}
+
+/**
+ * xrnic_qp_timer() - Timer handler for QP connection-management timeouts
+ * @data: QP attribute info
+ */
+void xrnic_qp_timer(struct timer_list *data)
+{
+ struct xrnic_qp_attr *qp_attr = (struct xrnic_qp_attr *)data;
+ struct xrnic_qp_attr *qp1_attr = qp_attr->qp1_attr;
+ enum xrnic_rej_reason reason;
+ enum xrnic_msg_rej msg;
+ unsigned long flag;
+ int qp1_send_pkt_size;
+
+ spin_lock_irqsave(&qp1_attr->qp_lock, flag);
+ if (qp_attr->ip_addr_type == AF_INET)
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv4);
+ else
+ qp1_send_pkt_size = sizeof(struct qp_cm_pkt_hdr_ipv6);
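+ /* CM retransmission state machine: REJ, REP and DREQ are resent up to
+ * their retry limits; once retries are exhausted (or the TIMEWAIT
+ * period expires) the QP is reset and returned to XRNIC_LISTEN.
+ */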
+ if (qp_attr->curr_state == XRNIC_REJ_SENT) {
+ DEBUG_LOG("REJ SENT\n");
+ if (qp_attr->resend_count < XRNIC_REJ_RESEND_COUNT) {
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_REJ_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0,
+ XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ }
+ } else if (qp_attr->curr_state == XRNIC_REP_SENT) {
+ DEBUG_LOG("REP SENT\n");
+ if (qp_attr->resend_count < XRNIC_REJ_RESEND_COUNT) {
+ qp_attr->curr_state = XRNIC_RTU_TIMEOUT;
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_REP_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ reason = XRNIC_REJ_TIMEOUT;
+ msg = XRNIC_REJ_REP;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ } else if (qp_attr->curr_state == XRNIC_MRA_RCVD) {
+ DEBUG_LOG("MRA Received\n");
+ qp_attr->curr_state = XRNIC_RTU_TIMEOUT;
+
+ reason = XRNIC_REJ_TIMEOUT;
+ msg = XRNIC_REJ_TIMEOUT;
+ xrnic_cm_prepare_rej(qp_attr, msg, reason);
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else if (qp_attr->curr_state == XRNIC_DREQ_SENT) {
+ DEBUG_LOG("Disconnect Req Sent\n");
+ if (qp_attr->resend_count < XRNIC_DREQ_RESEND_COUNT) {
+ qp_attr->curr_state = XRNIC_DREP_TIMEOUT;
+ xrnic_qp1_send_mad_pkt(&qp_attr->send_sgl_temp,
+ qp_attr->qp1_attr,
+ qp1_send_pkt_size);
+ qp_attr->resend_count = qp_attr->resend_count + 1;
+ qp_attr->curr_state = XRNIC_DREQ_SENT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->curr_state = XRNIC_TIMEWAIT;
+ qp_attr->qp_timer.expires = jiffies +
+ usecs_to_jiffies(XRNIC_CM_TIMEOUT *
+ (1 << XRNIC_CM_TIMER_TIMEOUT));
+ add_timer(&qp_attr->qp_timer);
+ }
+ } else if (qp_attr->curr_state == XRNIC_TIMEWAIT) {
+ DEBUG_LOG("In time wait state\n");
+ qp_attr->resend_count = 0;
+ qp_attr->remote_cm_id = 0;
+#ifdef DISPLAY_REGS_ON_DISCONNECT
+ xrnic_display_qp_reg(qp_attr->qp_num);
+#endif
+ xrnic_reset_io_qp(qp_attr);
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ xrnic_qp_app_configuration(qp_attr->qp_num,
+ XRNIC_HW_QP_DISABLE);
+ qp_attr->curr_state = XRNIC_LISTEN;
+ } else {
+ qp_attr->resend_count = 0;
+ qp_attr->qp_timer.expires = 0;
+ }
+ spin_unlock_irqrestore(&qp1_attr->qp_lock, flag);
+}
+
+/**
+ * xrnic_qp_app_configuration() - This function programs the QP registers
+ * @qp_num: QP num to configure
+ * @hw_qp_status: whether to enable or disable the HW QP
+ */
+void xrnic_qp_app_configuration(int qp_num,
+ enum xrnic_hw_qp_status hw_qp_status)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp_attr = &xrnic_dev->qp_attr[qp_num - 2];
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num - 2];
+ u32 config_value = 0;
+ int recv_pkt_size = qp_attr->recv_pkt_size;
+
+ /* Host number will directly map to local cm id.*/
+ if (hw_qp_status == XRNIC_HW_QP_ENABLE) {
+ config_value = XRNIC_QP_CONFIG_QP_ENABLE |
+ XRNIC_QP_CONFIG_RQ_INTR_EN |
+ XRNIC_QP_CONFIG_CQE_INTR_EN | xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(recv_pkt_size) |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_CQE_WRITE_EN;
+ } else if (hw_qp_status == XRNIC_HW_QP_DISABLE) {
+ config_value = XRNIC_QP_CONFIG_RQ_INTR_EN |
+ XRNIC_QP_CONFIG_CQE_INTR_EN | xrnic_dev->pmtu |
+ XRNIC_QP_CONFIG_RQ_BUFF_SZ(recv_pkt_size) |
+ XRNIC_QP_CONFIG_HW_HNDSHK_DIS |
+ XRNIC_QP_CONFIG_CQE_WRITE_EN;
+ config_value = 0;
+ } else {
+ DEBUG_LOG("Invalid HW QP status\n");
+ }
+ if (qp_attr->ip_addr_type == AF_INET6)
+ config_value = config_value | XRNIC_QP_CONFIG_IPV6_EN;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_conf)));
+
+ config_value = qp_attr->rq_buf_ba_ca_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_buf_ba_ca)));
+
+ config_value = qp_attr->sq_ba_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_ba)));
+
+ config_value = qp_attr->cq_ba_phys;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->cq_ba)));
+
+ config_value = qp_attr->sq_depth | (qp_attr->rq_depth << 16);
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->q_depth)));
+
+ config_value = (qp_attr->starting_psn |
+ (IB_OPCODE_RC_SEND_ONLY << 24));
+ iowrite32(config_value, (void *)&rdma_qp_attr->last_rq_req);
+
+ config_value = be32_to_cpu(qp_attr->ipv4_addr);
+ iowrite32(config_value, (void *)&rdma_qp_attr->ip_dest_addr1);
+ config_value = ((qp_attr->mac_addr[2] << 24) |
+ (qp_attr->mac_addr[3] << 16) |
+ (qp_attr->mac_addr[4] << 8) |
+ qp_attr->mac_addr[5]);
+ iowrite32(config_value, (void *)&rdma_qp_attr->mac_dest_addr_lsb);
+
+ config_value = ((qp_attr->mac_addr[0] << 8) | qp_attr->mac_addr[1]);
+ iowrite32(config_value, (void *)&rdma_qp_attr->mac_dest_addr_msb);
+
+ config_value = qp_attr->remote_qp;
+ iowrite32(config_value, (void *)&rdma_qp_attr->dest_qp_conf);
+
+ iowrite32(qp_attr->rem_starting_psn, (void *)&rdma_qp_attr->sq_psn);
+#ifdef ERNIC_MEM_REGISTER
+ if (qp_attr->pd)
+ iowrite32(qp_attr->pd, ((void *)(&rdma_qp_attr->pd)));
+#endif
+}
+
+/**
+ * xrnic_qp_hw_configuration() - This function configures QP registers
+ * @qp_num: QP num
+ */
+void xrnic_qp_hw_configuration(int qp_num)
+{
+ struct xrnic_memory_map *xrnic_mmap = &xrnic_dev->xrnic_mmap;
+ struct xrnic_qp_attr *qp_attr = &xrnic_dev->qp_attr[qp_num];
+ struct rdma_qp_attr *rdma_qp_attr =
+ &xrnic_mmap->xrnic_regs->rdma_qp_attr[qp_num];
+ u32 config_value = 0;
+
+ /* qp_num starts from 0, while data QPs are numbered from 2 */
+ qp_attr->qp_num = qp_num + 2;
+
+ config_value = XRNIC_QP_ADV_CONFIG_TRAFFIC_CLASS |
+ XRNIC_QP_ADV_CONFIG_TIME_TO_LIVE |
+ XRNIC_QP_ADV_CONFIG_PARTITION_KEY;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->qp_adv_conf)));
+
+ /*DDR address for RQ and SQ doorbell.*/
+
+ config_value = xrnic_mmap->rq_wrptr_db_add_phys +
+ (4 * (qp_attr->qp_num - 1));
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->rq_wrptr_db_add)));
+
+ config_value = (xrnic_mmap->sq_cmpl_db_add_phys +
+ (4 * (qp_attr->qp_num - 1)))
+ & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->sq_cmpl_db_add)));
+
+ config_value = (xrnic_mmap->stat_rq_buf_ca_phys +
+ (4 * (qp_attr->qp_num - 1))) & 0xffffffff;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->stat_rq_buf_ca)));
+
+ config_value = XRNIC_QP_TIMEOUT_CONFIG_TIMEOUT |
+ XRNIC_QP_TIMEOUT_CONFIG_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_RETRY_CNT |
+ XRNIC_QP_TIMEOUT_CONFIG_RNR_NAK_TVAL;
+ iowrite32(config_value, ((void *)(&rdma_qp_attr->timeout_conf)));
+ qp_attr->qp1_attr = (struct xrnic_qp_attr *)&xrnic_dev->qp1_attr;
+ qp_attr->rq_wrptr_db_local = 0;
+ qp_attr->sq_cmpl_db_local = 0;
+ qp_attr->rq_ci_db_local = 0;
+ qp_attr->sq_pi_db_local = 0;
+ qp_attr->cm_id = NULL;
+ qp_attr->resend_count = 0;
+ qp_attr->local_cm_id = qp_attr->qp_num;
+ qp_attr->remote_cm_id = 0;
+ memset((void *)&qp_attr->mac_addr, 0, XRNIC_ETH_ALEN);
+ qp_attr->ip_addr_type = 0;
+ qp_attr->sqhd = 0;
+ qp_attr->qp_type = XRNIC_QPT_RC;
+ qp_attr->ip_addr_type = 0;
+
+ qp_attr->curr_state = XRNIC_LISTEN;
+
+ qp_attr->xrnic_mmap = &xrnic_dev->xrnic_mmap;
+
+ /* Initialize state with XRNIC_LISTEN */
+ timer_setup(&qp_attr->qp_timer, xrnic_qp_timer,
+ (unsigned long)qp_attr);
+
+ spin_lock_init(&qp_attr->qp_lock);
+}
+
+#ifdef EXPERIMENTAL_CODE
+#define XRNIC_REG_MAP_NODE 0
+#define XRNIC_SEND_SGL_NODE 1
+#define XRNIC_CQ_BA_NODE 1
+#define XRNIC_RQ_BUF_NODE 1
+#define XRNIC_SQ_BA_NODE 1
+#define XRNIC_TX_HDR_BUF_NODE 1
+#define XRNIC_TX_SGL_BUF_NODE 1
+#define XRNIC_BYPASS_BUF_NODE 1
+#define XRNIC_ERRPKT_BUF_NODE 1
+#define XRNIC_OUTERR_STS_NODE 1
+
+#define XRNIC_RQWR_PTR_NODE 1
+#define XRNIC_SQ_CMPL_NODE 2
+#define XRNIC_STAT_XRNIC_RQ_BUF_NODE 3
+#else /* ! EXPERIMENTAL_CODE */
+#define XRNIC_REG_MAP_NODE 0
+#define XRNIC_SEND_SGL_NODE 1
+#define XRNIC_CQ_BA_NODE 2
+#define XRNIC_RQ_BUF_NODE 3
+#define XRNIC_SQ_BA_NODE 4
+#define XRNIC_TX_HDR_BUF_NODE 5
+#define XRNIC_TX_SGL_BUF_NODE 6
+#define XRNIC_BYPASS_BUF_NODE 7
+#define XRNIC_ERRPKT_BUF_NODE 8
+#define XRNIC_OUTERR_STS_NODE 9
+#define XRNIC_INERR_STS_NODE 10
+#define XRNIC_RQWR_PTR_NODE 11
+#define XRNIC_SQ_CMPL_NODE 12
+#define XRNIC_STAT_XRNIC_RQ_BUF_NODE 13
+#define XRNIC_DATA_BUF_BA_NODE 14
+#define XRNIC_RESP_ERR_PKT_BUF_BA 15
+#endif /* EXPERIMENTAL_CODE */
diff --git a/drivers/staging/xlnx_ernic/xqp.h b/drivers/staging/xlnx_ernic/xqp.h
new file mode 100644
index 000000000000..442932f66daf
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xqp.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef QP_H
+#define QP_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/interrupt.h>
+enum qp_type {
+ XRNIC_NOT_ALLOCATED = 1,
+ XRNIC_DISC_CTRL_QP = 2,
+ XRNIC_NVMEOF_CTRL_QP = 3,
+ XRNIC_NVMEOF_IO_QP = 4,
+};
+
+enum ernic_qp_status {
+ XRNIC_QP_FREE,
+ XRNIC_QP_IN_USE,
+};
+
+struct xrnic_qp_attr {
+ struct xrnic_memory_map *xrnic_mmap;
+ struct xrnic_qp_attr *qp1_attr;
+ struct xrnic_rdma_cm_id *cm_id;
+ void *send_sgl;
+ u64 send_sgl_phys;
+ void *rq_buf_ba_ca;
+ u64 rq_buf_ba_ca_phys;
+ void *sq_ba;
+ u64 sq_ba_phys;
+ void *cq_ba;
+ u64 cq_ba_phys;
+ u32 sq_depth;
+ u32 rq_depth;
+ u32 send_sge_size;
+ u32 send_pkt_size;
+ u32 recv_pkt_size;
+ u32 qp_num;
+ u32 local_cm_id;
+ u32 remote_cm_id;
+ u32 remote_qpn;
+ u32 qp_status;
+ u32 starting_psn;
+ u32 rem_starting_psn;
+ u8 send_sgl_temp[XRNIC_QP1_SEND_PKT_SIZE];
+ u32 resend_count;
+ u32 rq_wrptr_db_local;
+ u32 sq_cmpl_db_local;
+ u32 rq_ci_db_local;
+ u32 sq_pi_db_local;
+ u16 ip_addr_type; /* DESTINATION ADDR_FAMILY */
+ u32 ipv4_addr; /* DESTINATION IP addr */
+ u8 ipv6_addr[16];
+ u8 mac_addr[6];
+ u32 source_qp_num;
+ /* remote QPN used in Active CM. source_qp_num is the source
+ * queue pair number in the DETH.
+ */
+ u32 remote_qp;
+ enum xrnic_rdma_cm_event_type curr_state;
+ /* DISC or NVMECTRL. Direct mapping from the host ID to a
+ * particular host_no.
+ */
+ enum xrnic_qp_type qp_type;
+ u16 sqhd;
+ /* Direct mapping from the host ID to access a particular host_no. */
+ u16 nvmeof_cntlid;
+ u32 nvmeof_qp_id;
+ struct timer_list qp_timer;
+ struct tasklet_struct qp_task;
+ /* kernel locking primitive */
+ spinlock_t qp_lock;
+ char irq_name[32];
+ u32 irq_vect;
+ u32 pd;
+};
+
+enum xrnic_hw_qp_status {
+ XRNIC_HW_QP_ENABLE,
+ XRNIC_HW_QP_DISABLE,
+};
+
+void xrnic_display_qp_reg(int qp_num);
+void xrnic_qp_fatal_handler(unsigned long data);
+void xrnic_qp_timer(struct timer_list *data);
+void xrnic_qp_pkt_recv_intr_handler(unsigned long data);
+void xrnic_qp_task_handler(unsigned long data);
+void xrnic_wqe_completed_intr_handler(unsigned long data);
+
+/* QP Specific function templates */
+int xrnic_qp_recv_pkt(struct xrnic_qp_attr *qp_attr, u32 rq_pkt_count);
+int xrnic_qp_send_pkt(struct xrnic_qp_attr *qp_attr, u32 sq_pkt_count);
+void xrnic_reset_io_qp_rq_ptr(struct xrnic_qp_attr *qp_attr);
+void xrnic_reset_io_qp_sq_cq_ptr(struct xrnic_qp_attr *qp_attr,
+ struct xrnic_hw_handshake_info *hw_hs_info);
+void xrnic_qp_hw_configuration(int qp_num);
+int xrnic_qp1_hw_configuration(void);
+void xrnic_qp_app_configuration(int qp_num,
+ enum xrnic_hw_qp_status hw_qp_status);
+int xrnic_find_free_qp(void);
+int xrnic_set_qp_state(int qp_num, int state);
+
+#ifdef __cplusplus
+ }
+#endif
+#endif
diff --git a/drivers/staging/xlnx_ernic/xrocev2.h b/drivers/staging/xlnx_ernic/xrocev2.h
new file mode 100644
index 000000000000..fec90081d094
--- /dev/null
+++ b/drivers/staging/xlnx_ernic/xrocev2.h
@@ -0,0 +1,409 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Xilinx FPGA Xilinx RDMA NIC driver
+ *
+ * Copyright (c) 2018-2019 Xilinx Pvt., Ltd
+ *
+ */
+
+#ifndef _XRNIC_ROCEV2_H
+#define _XRNIC_ROCEV2_H
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/udp.h>
+#include <rdma/ib_pack.h>
+
+#define XRNIC_REQ_QPN 0x1
+#define XRNIC_RESPONDER_RESOURCES 0x10
+#define XRNIC_INITIATOR_DEPTH 0x10
+#define XRNIC_REQ_LOCAL_CM_RESP_TOUT 0x11
+#define XRNIC_REQ_REMOTE_CM_RESP_TOUT 0x14
+#define XRNIC_REQ_PATH_PKT_PAYLOAD_MTU 92
+#define XRNIC_REQ_RETRY_COUNT 0x7
+#define XRNIC_REQ_RDC_EXISTS 1
+#define XRNIC_REQ_SRQ 0
+
+#define XRNIC_REJ_INFO_LEN 0
+
+#define XRNIC_MRA_SERVICE_TIMEOUT 0x11
+
+#define XRNIC_REP_END_END_FLOW_CONTROL 0x0
+#define XRNIC_REP_FAIL_OVER_ACCEPTED 0x3
+#define XRNIC_REP_TARGET_ACK_DELAY 0x1F
+#define XRNIC_REP_RNR_RETRY_COUNT 0x7
+
+#define XRNIC_CM_TIMEOUT 0x4
+#define XRNIC_CM_TIMER_TIMEOUT 0x11
+
+enum xrnic_wc_opcod {
+ XRNIC_RDMA_WRITE = 0x0,
+ XRNIC_SEND_ONLY = 0x2,
+ XRNIC_RDMA_READ = 0x4
+};
+
+enum xrnic_msg_rej {
+ XRNIC_REJ_REQ = 0x0,
+ XRNIC_REJ_REP = 0x1,
+ XRNIC_REJ_OTHERS = 0x2,
+};
+
+enum xrnic_msg_mra {
+ XRNIC_MRA_REQ = 0x0,
+ XRNIC_MRA_REP = 0x1,
+ XRNIC_MRA_LAP = 0x2,
+};
+
+enum xrnic_rej_reason {
+ XRNIC_REJ_NO_QP_AVAILABLE = 1,
+ XRNIC_REJ_NO_EE_AVAILABLE = 2,
+ XRNIC_REJ_NO_RESOURCE_AVAILABLE = 3,
+ XRNIC_REJ_TIMEOUT = 4,
+ XRNIC_REJ_UNSUPPORTED_REQ = 5,
+ XRNIC_REJ_INVALID_CM_ID = 6,
+ XRNIC_REJ_INVALID_QPN = 7,
+ XRNIC_REJ_RDC_NOT_EXIST = 11,
+ XRNIC_REJ_PRIM_LID_PORT_NOT_EXIST = 13,
+ XRNIC_REJ_INVALID_MTU = 26,
+ XRNIC_REJ_INSUFFICIENT_RESP_RESOURCE = 27,
+ XRNIC_REJ_CONSUMER_REJECT = 28,
+ XRNIC_REJ_DUPLICATE_LOCAL_CM_ID = 30,
+ XRNIC_REJ_UNSUPPORTED_CLASS_VERSION = 31,
+};
+
+//MAD common status field
+struct mad_comm_status {
+ __u8 busy:1;
+ __u8 redir_reqd:1;
+ __u8 invalid_field_code:3;
+ __u8 reserved:3;
+ __u8 class_specific;
+} __packed;
+
+#define XRNIC_MAD_BASE_VER 1
+#define XRNIC_MAD_MGMT_CLASS 0x07
+#define XRNIC_MAD_RESP_BIT 0x0
+#define XRNIC_MAD_COMM_SEND 0x3
+#define XRNIC_MAD_RESERVED 0x0
+
+/* Management Datagram (MAD) */
+struct mad //Size: 256 bytes
+{
+ __u8 base_ver;
+ __u8 mgmt_class;
+ __u8 class_version;
+ __u8 resp_bit_method;
+ struct mad_comm_status status;// 2 bytes
+ __be16 class_specific;
+ __be64 transaction_id;
+ __be16 attribute_id;
+ __be16 reserved;
+ __be32 attrb_modifier;
+ __be32 data[58];
+} __packed;
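
[Editorial note, not part of the patch] For reference, the sketch below shows
one way the common MAD header could be populated from the XRNIC_MAD_* defaults
defined above. The helper name and the choice of which fields to fill are
assumptions for illustration; the actual population is done elsewhere in the
driver and may differ:

/*
 * Illustrative sketch only: fill the common MAD header fields using
 * the defines above. The transaction ID is caller-chosen.
 */
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#include "xrocev2.h"

static void example_fill_mad_hdr(struct mad *m, u64 tid)
{
	memset(m, 0, sizeof(*m));		/* clear status and payload */
	m->base_ver = XRNIC_MAD_BASE_VER;	/* MAD base version 1 */
	m->mgmt_class = XRNIC_MAD_MGMT_CLASS;	/* Communication Management */
	m->resp_bit_method = XRNIC_MAD_COMM_SEND;
	m->transaction_id = cpu_to_be64(tid);	/* caller-chosen TID */
}
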
+
+struct req {
+ __u32 local_cm_id;
+ __u32 reserved1;
+ __u8 service_id[8];
+ __u8 local_ca_guid[8];
+ __u32 reserved2;
+ __u32 local_q_key;
+ __u32 local_qpn:24;
+ __u8 responder_resources:8;
+ __u32 local_eecn:24;
+ __u32 initiator_depth:8;
+ __u32 remote_eecn:24;
+
+ __u32 remote_cm_resp_tout:5;
+ __u32 transport_svc_type:2;
+ __u32 e2e_flow_control:1;
+ __u8 start_psn[3];
+ __u8 local_cm_resp_tout:5;
+ __u8 retry_count: 3;
+ __u16 p_key;
+ __u8 path_packet_payload_mtu:4;
+ __u8 rdc_exists:1;
+ __u8 rnr_retry_count:3;
+ __u8 max_cm_retries:4;
+ __u8 srq:1;
+ __u8 reserved3:3;
+ __u16 primary_local_port_lid;
+ __u16 primary_remote_port_lid;
+ __u64 primary_local_port_gid[2];
+ __u64 primary_remote_port_gid[2];
+ __u32 primary_flow_label:20;
+ __u32 reserved4:6;
+ __u32 primary_packet_rate:6;
+ __u32 primary_traffic_class:8;
+ __u32 primary_hop_limit:8;
+ __u32 primary_sl:4;
+ __u32 primary_subnet_local:1;
+ __u32 reserved5:3;
+ __u32 primary_local_ack_tout:5;
+ __u32 reserved6:3;
+ __u32 alternate_local_port_lid:16;
+ __u32 alternate_remote_port_lid:16;
+ __u64 alternate_local_port_gid[2];
+ __u64 alternate_remote_port_gid[2];
+ __u32 alternate_flow_labe:20;
+ __u32 reserved7:6;
+ __u32 alternate_packet_rate:6;
+ __u32 alternate_traffic_class:8;
+ __u32 alternate_hop_limit:8;
+ __u32 alternate_sl:4;
+ __u32 alternate_subnet_local:1;
+ __u32 reserved8:3;
+ __u32 alternate_local_ack_timeout: 5;
+ __u32 reserved9:3;
+ __u8 private_data[92];
+} __packed;
+
+/* MRA Message contents */
+/* Message Receipt Acknowledgement */
+struct mra {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 message_mraed:2;
+ __u8 reserved1:6;
+ __u8 service_timeout:5;
+ __u8 reserved2:3;
+ __u8 private_data[222];
+} __packed;
+
+/* REJ Message contents */
+struct rej {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 message_rejected:2;
+ __u8 reserved1:6;
+ __u8 reject_info_length:7;
+ __u8 reserved2:1;
+ __u16 reason;
+ __u8 additional_reject_info[72];
+ __u8 private_data[148];
+} __packed;
+
+/* REP Message contents */
+struct rep {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 local_q_key;
+ __u32 local_qpn:24;
+ __u8 reserved1:8;
+ __u32 local_ee_context:24;
+ __u32 reserved2:8;
+ __u8 start_psn[3];
+ __u8 reserved3;
+ __u8 responder_resources;
+ __u8 initiator_depth;
+ union {
+ __u8 target_fail_end;
+ __u8 target_ack_delay:5;
+ __u8 fail_over_accepted:2;
+ };
+ __u8 end_end_flow_control:1;
+ __u8 rnr_retry_count:3;
+ __u8 sqr:1;
+ __u8 reserved4:4;
+ __u8 local_ca_guid[8];
+ __u8 private_data[196];
+} __packed;
+
+/*
+ * RTU indicates that the connection is established, and that the
+ * recipient may begin transmitting.
+ */
+struct rtu {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 private_data[224];
+} __packed;
+
+#define XRNIC_SEND_UD 0x64
+#define XRNIC_SET_SOLICT_EVENT 0x0
+#define XRNIC_RESET_SOLICT_EVENT 0x0
+#define XRNIC_MIGRATION_REQ 0x0
+#define XRNIC_PAD_COUNT 0x0
+#define XRNIC_TRANSPORT_HDR_VER 0x0
+#define XRNIC_DESTINATION_QP 0x1
+#define XRNIC_RESERVED1 0x0
+#define XRNIC_ACK_REQ 0x0
+#define XRNIC_RESERVED2 0x0
+
+struct bth {
+ __u8 opcode;
+ __u8 solicited_event:1;
+ __u8 migration_req:1;
+ __u8 pad_count:2;
+ __u8 transport_hdr_ver:4;
+ __be16 partition_key;
+ __u8 reserved1;
+ __u8 destination_qp[3];
+ __u32 ack_request:1;
+ __u32 reserved2:7;
+ __u32 pkt_seq_num:24;
+} __packed;
+
+#define XRNIC_DETH_RESERVED 0
+struct deth {
+ __be32 q_key;
+ __u8 reserved;
+ __be32 src_qp:24;
+} __packed;
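
[Editorial note, not part of the patch] The defines above supply default
values for the BTH fields of an outgoing UD send. A short sketch of applying
them follows; the MSB-first packing of the 24-bit destination QP and the
0xFFFF default partition key are assumptions made for the example, not values
taken from the driver:

/*
 * Illustrative sketch only: program a BTH for a UD send using the
 * XRNIC_* defaults defined above.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

#include "xrocev2.h"

static void example_fill_bth(struct bth *b, u32 dest_qp, u32 psn)
{
	b->opcode = XRNIC_SEND_UD;		/* UD Send-only */
	b->solicited_event = XRNIC_SET_SOLICT_EVENT;
	b->migration_req = XRNIC_MIGRATION_REQ;
	b->pad_count = XRNIC_PAD_COUNT;
	b->transport_hdr_ver = XRNIC_TRANSPORT_HDR_VER;
	b->partition_key = cpu_to_be16(0xFFFF);	/* assumed default P_Key */
	b->reserved1 = XRNIC_RESERVED1;
	b->destination_qp[0] = (dest_qp >> 16) & 0xff;	/* MSB first (assumed) */
	b->destination_qp[1] = (dest_qp >> 8) & 0xff;
	b->destination_qp[2] = dest_qp & 0xff;
	b->ack_request = XRNIC_ACK_REQ;
	b->pkt_seq_num = psn & 0xffffff;	/* 24-bit PSN */
}
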
+
+/* DREQ - request for communication release */
+struct dreq {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 remote_qpn_eecn:24;
+ __u32 reserved:8;
+ __u8 private_data[220];
+} __packed;
+
+/* DREP - reply to request for communication release */
+struct drep {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 private_data[228];
+} __packed;
+
+/* LAP - load alternate path */
+struct lap {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u32 reserved1;
+ __u32 remote_QPN_EECN:24;
+ __u32 remote_cm_response_timeout:5;
+ __u32 reserved2:3;
+ __u32 reserved3;
+ __u32 alt_local_port_id:16;
+ __u32 alt_remote_port_id:16;
+ __u64 alt_local_port_gid[2];
+ __u64 alt_remote_port_gid[2];
+ __u32 alt_flow_label:20;
+ __u32 reserved4:4;
+ __u32 alt_traffic_class:8;
+ __u32 alt_hope_limit:8;
+ __u32 reserved5:2;
+ __u32 alt_pkt_rate:6;
+ __u32 alt_sl:4;
+ __u32 alt_subnet_local:1;
+ __u32 reserved6:3;
+ __u32 alt_local_ack_timeout:5;
+ __u32 reserved7:3;
+ __u8 private_data[168];
+} __packed;
+
+/* APR - alternate path response */
+struct apr {
+ __u32 local_cm_id;
+ __u32 remote_comm_id;
+ __u8 additional_info_length;
+ __u8 ap_status;
+ __u8 reserved1[2];
+ __u8 additional_info[72];
+ __u8 private_data[148];
+} __packed;
+
+enum cm_establishment_states {
+ CLASS_PORT_INFO = 0x1,
+ CONNECT_REQUEST = 0x10, /* Request for connection */
+ MSG_RSP_ACK = 0x11, /* Message Response Ack */
+ CONNECT_REJECT = 0x12, /* Connect Reject */
+ CONNECT_REPLY = 0x13, /* Reply for request communication */
+ READY_TO_USE = 0x14, /* Ready to use */
+ DISCONNECT_REQUEST = 0x15, /* Receive Disconnect req */
+ DISCONNECT_REPLY = 0x16, /* Send Disconnect reply */
+ SERVICE_ID_RESOLUTION_REQ = 0x17,
+ SERVICE_ID_RESOLUTION_REQ_REPLY = 0x18,
+ LOAD_ALTERNATE_PATH = 0x19,
+ ALTERNATE_PATH_RESPONSE = 0x1a,
+};
+
+#define XRNIC_ETH_ALEN 6
+#define XRNIC_ETH_P_IP 0x0800
+#define XRNIC_ETH_P_ARP 0x0806
+#define XRNIC_ETH_HLEN 14
+#define XRNIC_ICRC_SIZE 4
+
+//Ethernet header
+struct ethhdr_t {
+ unsigned char h_dest[XRNIC_ETH_ALEN];
+ unsigned char h_source[XRNIC_ETH_ALEN];
+ __be16 eth_type; /*< packet type ID field */
+} __packed;
+
+struct ipv4hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ihl:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	__u8 version:4, /*< Version */
+	     ihl:4; /*< Internet Header Length */
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __u8 tos; /*< Type of service */
+ __be16 total_length; /*< Total length */
+ __be16 id; /*< Identification */
+	__be16 frag_off; /*< Fragment offset */
+ __u8 time_to_live; /*< Time to live */
+ __u8 protocol; /*< Protocol */
+ __be16 hdr_chksum; /*< Header checksum */
+ __be32 src_addr; /*< Source address */
+ __be32 dest_addr; /*< Destination address */
+} __packed;
+
+struct qp_cm_pkt {
+ struct ethhdr_t eth; //14 Byte
+ union {
+ struct ipv4hdr ipv4; //20 bytes
+ struct ipv4hdr ipv6; //20 bytes
+ } ip;
+ struct udphdr udp; //8 Byte
+ struct bth bth; //12 Bytes
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+/*
+ * RoCEv2 packet for receiver. Duplicated for ease of code readability.
+ */
+struct qp_cm_pkt_hdr_ipv4 {
+ struct ethhdr_t eth; //14 Byte
+ struct ipv4hdr ipv4;
+ struct udphdr udp; //8 Byte
+ struct bth bth;
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
+
+struct qp_cm_pkt_hdr_ipv6 {
+ struct ethhdr_t eth; //14 Byte
+ struct ipv6hdr ipv6;
+ struct udphdr udp; //8 Byte
+ struct bth bth;
+ struct deth deth; //8 Byte
+ struct mad mad; //[XRNIC_MAD_HEADER + XRNIC_MAD_DATA]
+} __packed;
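
[Editorial note, not part of the patch] The two receive-side layouts above
allow a received RoCEv2 CM buffer to be overlaid directly and dispatched on
the MAD attribute. A condensed sketch of such a dispatch follows; the handler
actions are placeholders and the function name is an assumption:

/*
 * Illustrative sketch only: overlay a received IPv4 RoCEv2 CM packet
 * with the layout above and branch on the CM message type from
 * enum cm_establishment_states.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

#include "xrocev2.h"

static void example_dispatch_cm(void *rq_buf)
{
	struct qp_cm_pkt_hdr_ipv4 *pkt = rq_buf;

	switch (be16_to_cpu(pkt->mad.attribute_id)) {
	case CONNECT_REQUEST:
		/* e.g. build and send a REP or a REJ */
		break;
	case READY_TO_USE:
		/* connection established; the QP may start I/O */
		break;
	case DISCONNECT_REQUEST:
		/* tear the connection down and reply with a DREP */
		break;
	default:
		/* other CM attributes not handled in this sketch */
		break;
	}
}
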
+
+/* MAD Packet validation defines */
+#define MAD_BASIC_VER 1
+#define OPCODE_SEND_UD 0x64
+
+#define MAD_SUBNET_CLASS 0x1
+#define MAD_DIRECT_SUBNET_CLASS 0x81
+
+#define MAD_SEND_CM_MSG 0x03
+#define MAD_VERF_FAILED -1
+#define MAD_VERF_SUCCESS 0
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _XRNIC_ROCEV2_H*/