Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/bphy')
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/Makefile                        12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h                   39
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h           45
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h               482
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h   296
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c                 1427
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h                  142
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c          149
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c               79
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h                     74
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c            165
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h             31
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h                 381
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c               887
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c                    755
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h                    150
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c            102
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c                   1697
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h                    227
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c            152
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c                268
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h     190
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h                  132
23 files changed, 7882 insertions, 0 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/Makefile b/drivers/net/ethernet/marvell/octeontx2/bphy/Makefile
new file mode 100644
index 000000000000..a4dfa1b5c9d4
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Marvell's OcteonTX2 BPHY RFOE netdev driver
+#
+
+obj-$(CONFIG_OCTEONTX2_BPHY_RFOE_NETDEV) += octeontx2_bphy_netdev.o
+
+#EXTRA_CFLAGS += -DDEBUG
+
+octeontx2_bphy_netdev-y := otx2_bphy_main.o otx2_rfoe.o otx2_rfoe_ethtool.o otx2_rfoe_ptp.o \
+ otx2_cpri.o otx2_cpri_ethtool.o otx2_bphy_debugfs.o \
+ cnf10k_rfoe.o cnf10k_rfoe_ethtool.o cnf10k_rfoe_ptp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h
new file mode 100644
index 000000000000..41018b33b07a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_common.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _BPHY_COMMON_H_
+#define _BPHY_COMMON_H_
+
+/* BPHY definitions */
+#define OTX2_BPHY_PCI_VENDOR_ID 0x177D
+#define OTX2_BPHY_PCI_DEVICE_ID 0xA089
+
+/* eCPRI ethertype */
+#define ETH_P_ECPRI 0xAEFE
+
+/* max ptp tx requests */
+extern int max_ptp_req;
+
+/* reg base address */
+extern void __iomem *bphy_reg_base;
+extern void __iomem *psm_reg_base;
+extern void __iomem *rfoe_reg_base;
+extern void __iomem *bcn_reg_base;
+extern void __iomem *ptp_reg_base;
+extern void __iomem *cpri_reg_base;
+
+enum port_link_state {
+ LINK_STATE_DOWN,
+ LINK_STATE_UP,
+};
+
+/* iova to kernel virtual addr */
+static inline void *otx2_iova_to_virt(struct iommu_domain *domain, u64 iova)
+{
+ return phys_to_virt(iommu_iova_to_phys(domain, iova));
+}
+
+#endif
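
The otx2_iova_to_virt() helper above assumes an IOMMU domain is attached to
the BPHY device; without SMMU translation the IOVA is already a physical
address. A minimal sketch of a caller handling both cases (resolve_jd() and
jd_iova are illustrative names, not part of the driver):

	#include <linux/iommu.h>

	static void *resolve_jd(struct device *dev, u64 jd_iova)
	{
		struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

		/* no translation attached: the IOVA is the physical address */
		if (!domain)
			return phys_to_virt(jd_iova);
		return otx2_iova_to_virt(domain, jd_iova);
	}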
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h
new file mode 100644
index 000000000000..9fdeba5be2a3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/bphy_netdev_comm_if.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _BPHY_NETDEV_COMM_IF_H_
+#define _BPHY_NETDEV_COMM_IF_H_
+
+/* Max LMACs per RFOE MHAB */
+#define MAX_LMAC_PER_RFOE 4
+
+/* Max Lanes per CPRI MHAB */
+#define MAX_LANE_PER_CPRI 4
+
+#define MAX_PTP_MSG_PER_LMAC 4 /* 16 Per RFoE */
+#define MAX_OTH_MSG_PER_LMAC 16 /* 64 Per RFoE */
+/* 64 per RFoE; RFoE2 shall have 32 entries */
+#define MAX_OTH_MSG_PER_RFOE (MAX_OTH_MSG_PER_LMAC * MAX_LMAC_PER_RFOE)
+
+/**
+ * @enum bphy_netdev_if_type
+ * @brief BPHY Interface Types
+ *
+ */
+enum bphy_netdev_if_type {
+ IF_TYPE_ETHERNET = 0,
+ IF_TYPE_CPRI = 1,
+ IF_TYPE_NONE = 2,
+ IF_TYPE_MAX,
+};
+
+/**
+ * @enum bphy_netdev_packet_type
+ * @brief Packet types
+ *
+ */
+enum bphy_netdev_packet_type {
+ PACKET_TYPE_PTP = 0,
+ PACKET_TYPE_ECPRI = 1,
+ PACKET_TYPE_OTHER = 2,
+ PACKET_TYPE_MAX,
+};
+
+#endif
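
The per-LMAC limits above size the shared TX job pools: each LMAC gets
MAX_PTP_MSG_PER_LMAC = 4 dedicated PTP job slots, while the non-PTP slots are
pooled per MHAB, giving MAX_OTH_MSG_PER_RFOE = 16 * 4 = 64 shared entries
(halved to 32 on RFoE2, per the comment above).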
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h
new file mode 100644
index 000000000000..9b2a7a02b564
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_hw.h
@@ -0,0 +1,482 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CNF10K BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _CNF10K_BPHY_HW_H_
+#define _CNF10K_BPHY_HW_H_
+
+#include <linux/types.h>
+
+/* PSM register offsets */
+#define PSM_QUEUE_CMD_LO(a) (0x0 + (a) * 0x10)
+#define PSM_QUEUE_CMD_HI(a) (0x8 + (a) * 0x10)
+#define PSM_QUEUE_CFG(a) (0x1000 + (a) * 0x10)
+#define PSM_QUEUE_PTR(a) (0x2000 + (a) * 0x10)
+#define PSM_QUEUE_SPACE(a) (0x3000 + (a) * 0x10)
+#define PSM_QUEUE_TIMEOUT_CFG(a) (0x4000 + (a) * 0x10)
+#define PSM_QUEUE_INFO(a) (0x5000 + (a) * 0x10)
+#define PSM_QUEUE_ENA_W1S(a) (0x10000 + (a) * 0x8)
+#define PSM_QUEUE_ENA_W1C(a) (0x10100 + (a) * 0x8)
+#define PSM_QUEUE_FULL_STS(a) (0x10200 + (a) * 0x8)
+#define PSM_QUEUE_BUSY_STS(a) (0x10300 + (a) * 0x8)
+
+/* BPHY PSM GPINT register offsets */
+#define PSM_INT_GP_SUM_W1C(a) (0x10E0000 + (a) * 0x100)
+#define PSM_INT_GP_SUM_W1S(a) (0x10E0040 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1C(a) (0x10E0080 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1S(a) (0x10E00C0 + (a) * 0x100)
+
+/* RFOE MHAB register offsets */
+#define RFOEX_RX_CTL(a) (0x0818ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_VLANX_CFG(a, b) (0x0870ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((b) << 3))
+#define RFOEX_RX_INDIRECT_INDEX_OFFSET(a) (0x13F8ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_FTX_CFG(a, b) (0x1400ULL | \
+ (((unsigned long)(a) << 24)) + \
+ ((b) << 3))
+#define RFOEX_RX_IND_MBT_CFG(a) (0x1420ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_MBT_CFG2(a) (0x1428ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_MBT_ADDR(a) (0x1430ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_MBT_SEG_STATE(a) (0x1438ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_VLANX_FWD(a, b) (0x14D0ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((b) << 3))
+#define RFOEX_RX_IND_JDT_CFG0(a) (0x1440ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_CFG1(a) (0x1448ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_CFG2(a) (0x1490ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_PTR(a) (0x1450ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_JDT_STATE(a) (0x1478ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_RX_IND_ECPRI_FT_CFG(a) (0x14C0ULL | \
+ ((unsigned long)(a) << 24))
+#define RFOEX_TX_PTP_TSTMP_W0(a, b) (0x7A0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_TX_PTP_TSTMP_W1(a, b) (0x7C0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_TX_PKT_STAT(a, b) (0x720ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_TX_OCTS_STAT(a, b) (0x740ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_RX_VLAN_DROP_STAT(a, b) (0x8A0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_RX_RPM_PKT_STAT(a, b) (0x15C0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+#define RFOEX_RX_RPM_OCTS_STAT(a, b) (0x15E0ULL | \
+ (((unsigned long)(a) << 24)) | \
+ ((b) << 3))
+
+/* BCN register offsets and definitions */
+#define BCN_CAPTURE_CFG 0x400
+#define BCN_CAPTURE_N1_N2 0x410
+#define BCN_CAPTURE_PTP 0x430
+
+/* BCN_CAPTURE_CFG register definitions */
+#define CAPT_EN BIT(0)
+#define CAPT_TRIG_SW (3UL << 8)
+
+/* CPRI register offsets */
+#define CPRIX_RXD_GMII_UL_CBUF_CFG1(a) (0x1000ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_CBUF_CFG2(a) (0x1008ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_RD_DOORBELL(a) (0x1010ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_SW_RD_PTR(a) (0x1018ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_NXT_WR_PTR(a) (0x1020ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_RXD_GMII_UL_PKT_COUNT(a) (0x1028ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG1(a) (0x1100ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG2(a) (0x1108ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_WR_DOORBELL(a) (0x1110ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_SW_WR_PTR(a) (0x1118ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_TXD_GMII_DL_NXT_RD_PTR(a) (0x1120ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT(a) (0x280ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT_ENA_W1S(a) (0x288ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT_ENA_W1C(a) (0x290ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_UL_INT_W1S(a) (0x298ULL | \
+ ((unsigned long)(a) << 24))
+#define CPRIX_ETH_BAD_CRC_CNT(a, b) (0x400ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_ERR_CNT(a, b) (0x408ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_OSIZE_CNT(a, b) (0x410ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_USIZE_CNT(a, b) (0x418ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_FIFO_ORUN_CNT(a, b) (0x420ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GPKTS_CNT(a, b) (0x428ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_BOCT_CNT(a, b) (0x430ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GOCT_CNT(a, b) (0x438ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_MALFORMED_CNT(a, b) (0x440ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GOCTETS_CNT(a, b) (0x450ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GPKTS_CNT(a, b) (0x458ULL | \
+ ((unsigned long)(a) << 24) | \
+ ((unsigned long)(b) << 11))
+
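The RFOEX_RX_IND_* registers are indirect: software first writes the target
index to RFOEX_RX_INDIRECT_INDEX_OFFSET and then accesses the IND register,
so the index/data pair must be serialized (the driver uses cdev_priv->mbt_lock
for this, see cnf10k_rfoe.c below). A minimal sketch of the pattern, with a
hypothetical helper name:

	static u64 rfoe_read_ind_mbt_cfg(void __iomem *rfoe_reg_base,
					 int rfoe_num, u16 mbt_idx,
					 spinlock_t *lock)
	{
		u64 val;

		spin_lock(lock);
		writeq(mbt_idx, rfoe_reg_base +
		       RFOEX_RX_INDIRECT_INDEX_OFFSET(rfoe_num));
		val = readq(rfoe_reg_base + RFOEX_RX_IND_MBT_CFG(rfoe_num));
		spin_unlock(lock);

		return val;
	}
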
+/* MHAB Structures */
+struct mhbw_jd_dma_cfg_word_0_s {
+ u64 dma_mode : 3;
+ u64 reserved1 : 1;
+ u64 dswap : 3;
+ u64 cmd_type : 2;
+ u64 reserved2 : 7;
+ u64 chunk_size : 16;
+ u64 block_size : 16;
+ u64 thread_id : 6;
+ u64 reserved3 : 2;
+ u64 group_id : 4;
+ u64 reserved4 : 4;
+};
+
+struct mhbw_jd_dma_cfg_word_1_s {
+ u64 start_addr : 53;
+ u64 reserved1 : 11;
+};
+
+struct rfoex_abx_slotx_configuration {
+ u64 pkt_mode : 2;
+ u64 da_sel : 3;
+ u64 sa_sel : 3;
+ u64 etype_sel : 3;
+ u64 flowid : 8;
+ u64 subtype : 8;
+ u64 reserved1 : 2;
+ u64 sample_mode : 1;
+ u64 sample_width : 5;
+ u64 sample_width_option : 1;
+ u64 sample_width_sat_bypass : 1;
+ u64 orderinfotype : 1;
+ u64 orderinfooffset : 5;
+ u64 antenna : 8;
+ u64 symbol : 8;
+ u64 sos : 1;
+ u64 eos : 1;
+ u64 orderinfo_insert : 1;
+ u64 custom_timestamp_insert : 1;
+ u64 rfoe_mode : 1;
+};
+
+struct rfoex_abx_slotx_configuration1 {
+ u64 rbmap_bytes : 8;
+ u64 reserved1 : 16;
+ u64 hdr_len : 8;
+ u64 presentation_time_offset : 29;
+ u64 reserved2 : 1;
+ u64 sof_mode : 2;
+};
+
+struct rfoex_abx_slotx_configuration2 {
+ u64 vlan_sel : 3;
+ u64 vlan_num : 2;
+ u64 ptp_mode : 1;
+ u64 ecpri_id_insert : 1;
+ u64 ecpri_seq_id_insert : 1;
+ u64 ecpri_rev : 8;
+ u64 ecpri_msgtype : 8;
+ u64 ecpri_id : 16;
+ u64 ecpri_seq_id : 16;
+ u64 cc_mac_sec_en : 1;
+ u64 ptp_ring_id : 2;
+ u64 reserved1 : 5;
+};
+
+struct rfoex_abx_slotx_configuration3 {
+ u64 pkt_len : 16;
+ u64 lmacid : 2;
+ u64 tx_err : 1;
+ u64 reserved : 45;
+};
+
+struct mhab_job_desc_cfg {
+ struct rfoex_abx_slotx_configuration cfg;
+ struct rfoex_abx_slotx_configuration1 cfg1;
+ struct rfoex_abx_slotx_configuration2 cfg2;
+ struct rfoex_abx_slotx_configuration3 cfg3;
+} __packed;
+
+/* PSM Enumerations */
+enum psm_opcode_e {
+ PSM_OP_NOP = 0x0,
+ PSM_OP_ADDJOB = 0x1,
+ PSM_OP_CONTJOB = 0x2,
+ PSM_OP_DJCNT = 0x10,
+ PSM_OP_GPINT = 0x11,
+ PSM_OP_WAIT = 0x12,
+ PSM_OP_ADDWORK = 0x13,
+ PSM_OP_FREE = 0x14,
+ PSM_OP_WRSTS = 0x15,
+ PSM_OP_WRMSG = 0x16,
+ PSM_OP_ADDNOTIF = 0x17,
+ PSM_OP_QRST = 0x20,
+ PSM_OP_QBLK = 0x21,
+ PSM_OP_QRUN = 0x22,
+ PSM_OP_BCAST = 0x3E,
+ PSM_OP_RSP = 0x3F,
+};
+
+/* PSM Structures */
+struct psm_cmd_addjob_s {
+ /* W0 */
+ u64 opcode : 6;
+ u64 rsrc_set : 2;
+ u64 qid : 8;
+ u64 waitcond : 8;
+ u64 jobtag : 16;
+ u64 reserved1 : 8;
+ u64 mabq : 1;
+ u64 reserved2 : 3;
+ u64 tmem : 1;
+ u64 reserved3 : 3;
+ u64 jobtype : 8;
+ /* W1 */
+ u64 jobptr : 53;
+ u64 reserved4 : 8;
+ u64 gm_id : 3;
+};
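
The ADDJOB command spans two 64-bit words (W0/W1) written to
PSM_QUEUE_CMD_LO/HI; the driver keeps them as adjacent u64s and casts the low
word to this bitfield view (see psm_cmd_lo in cnf10k_rfoe.c). A sketch of
packing W0 only, with illustrative values and a hypothetical helper name:

	static void psm_fill_addjob_w0(u64 *job_cmd_lo, u8 qid, u16 jobtag)
	{
		struct psm_cmd_addjob_s *cmd =
			(struct psm_cmd_addjob_s *)job_cmd_lo;

		cmd->opcode = PSM_OP_ADDJOB;
		cmd->qid = qid;		/* target PSM queue */
		cmd->jobtag = jobtag;	/* echoed in rfoe_tx_ptp_tstmp_s.jobid */
	}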
+
+/* RFOE Enumerations */
+enum rfoe_ecpri_hdr_err_type_e {
+ NONE = 0x0,
+ CONCATENATION = 0x1,
+ ILLEGAL_VERSION = 0x2,
+ ILLEGAL_RSVD = 0x3,
+ PC_ID = 0x4,
+};
+
+enum rfoe_ecpri_pcid_flowid_mode_e {
+ HASH = 0x0,
+ BASE = 0x1,
+ LMAC_TRUNCATE = 0x2,
+ SHIFT = 0x3,
+};
+
+enum rfoe_order_info_type_e {
+ SEQNUM = 0x0,
+ TIMESTAMP = 0x1,
+};
+
+enum rfoe_rx_dir_ctl_pkt_type_e {
+ ROE = 0x0,
+ CHI = 0x1,
+ ALT = 0x2,
+ ECPRI = 0x4,
+ GENERIC = 0x8,
+};
+
+enum rfoe_rx_pswt_e {
+ RSVD5 = 0x0,
+ ROE_BCN_TYPE = 0x1,
+ RSVD6 = 0x2,
+ ECPRI_BCN_TYPE = 0x3,
+};
+
+enum rfoe_rx_pkt_err_e {
+ RE_NONE = 0x0,
+ RE_PARTIAL = 0x1,
+ RE_JABBER = 0x2,
+ RE_FCS = 0x7,
+ RE_FCS_RCV = 0x8,
+ RE_TERMINATE = 0x9,
+ RE_RX_CTL = 0xB,
+ RE_SKIP = 0xC,
+};
+
+enum rfoe_rx_pkt_logger_idx_e {
+ RX_PKT = 0x0,
+ TX_PKT = 0x1,
+};
+
+/* RFOE Structures */
+struct ecpri_hdr_s {
+ u64 seq_id : 16;
+ u64 pc_id : 16;
+ u64 pyld_size : 16;
+ u64 msg_type : 8;
+ u64 concatenation : 1;
+ u64 reserved : 3;
+ u64 version : 4;
+};
+
+struct rfoe_ab_cfg_w3_s {
+ u64 pkt_len : 16;
+ u64 lmac_id : 2;
+ u64 tx_err : 1;
+ u64 reserved : 45;
+};
+
+struct rfoe_psw_s {
+ /* W0 */
+ u64 jd_ptr : 53;
+ u64 jd_ptr_tmem : 1;
+ u64 jd_ptr_type : 1;
+ u64 reserved1 : 1;
+ u64 gm_id : 3;
+ u64 reserved2 : 3;
+ u64 pswt : 2;
+ /* W1 */
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved3 : 3;
+ u64 pkt_len : 16;
+ u64 mcs_err_sts : 8;
+ u64 mac_err_sts : 6;
+ u64 reserved4 : 2;
+ u64 pkt_type : 4;
+ u64 reserved5 : 4;
+ /* W2 */
+ u64 proto_sts_word;
+ /* W3 */
+ u64 rfoe_tstamp;
+ /* W4 */
+ u64 ptp_timestamp;
+ /* W5 */
+ u64 reserved6;
+ /* W6 */
+ u64 reserved7 : 24;
+ u64 dec_error : 8;
+ u64 dec_num_sections : 8;
+ u64 dec_num_syminc : 8;
+ u64 reserved8 : 16;
+ /* W7 */
+ u64 reserved9;
+};
+
+struct rfoe_psw_w0_s {
+ u64 jd_ptr : 53;
+ u64 jd_ptr_tmem : 1;
+ u64 jd_ptr_type : 1;
+ u64 reserved1 : 1;
+ u64 gm_id : 3;
+ u64 reserved2 : 3;
+ u64 pswt : 2;
+};
+
+struct rfoe_psw_w1_s {
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved3 : 3;
+ u64 pkt_len : 16;
+ u64 mcs_err_sts : 8;
+ u64 mac_err_sts : 6;
+ u64 reserved4 : 2;
+ u64 pkt_type : 4;
+ u64 reserved5 : 4;
+};
+
+struct rfoe_psw_w2_ecpri_s {
+ u64 msg_type : 8;
+ u64 pc_id : 16;
+ u64 seq_id : 16;
+ u64 flow_id : 10;
+ u64 lmac_id : 2;
+ u64 rfoe_id : 4;
+ u64 sa_table_index : 7;
+ u64 reserved : 1;
+};
+
+struct rfoe_psw_w2_roe_s {
+ u64 subtype : 8;
+ u64 fd_symbol : 8;
+ u64 fd_antid : 8;
+ u64 reserved1 : 16;
+ u64 flowid : 8;
+ u64 reserved2 : 2;
+ u64 lmac_id : 2;
+ u64 rfoe_id : 4;
+ u64 sa_table_index : 7;
+ u64 reserved3 : 1;
+};
+
+struct rfoe_psw_w3_bcn_s {
+ u64 n2 : 24;
+ u64 n1 : 40;
+};
+
+struct rfoe_psw_w4_s {
+ u64 ptp_timestamp;
+};
+
+struct rfoe_rx_pkt_log_s {
+ u64 timestamp;
+ u64 psw_w2;
+ u64 psw_w1;
+ u64 psw_w0;
+};
+
+struct rfoe_timestamp_s {
+ u32 time_tick : 16;
+ u32 sf : 4;
+ u32 bfn : 12;
+};
+
+struct rfoe_tx_pkt_log_s {
+ u64 timestamp;
+ u64 lmac_id : 2;
+ u64 rfoe_id : 4;
+ u64 jobid : 16;
+ u64 drop : 1;
+ u64 tx_err : 1;
+ u64 reserved : 40;
+};
+
+struct rfoe_tx_ptp_tstmp_s {
+ u64 ptp_timestamp;
+ u64 reserved1 : 2;
+ u64 rfoe_id : 4;
+ u64 jobid : 16;
+ u64 drop : 1;
+ u64 tx_err : 1;
+ u64 reserved2 : 39;
+ u64 valid : 1;
+};
+
+struct rfoe_rx_ind_vlanx_fwd {
+ u64 fwd : 64;
+};
+
+#endif /* _CNF10K_BPHY_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h
new file mode 100644
index 000000000000..f9307b1e489e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_bphy_netdev_comm_if.h
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CNF10K BPHY Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _CNF10K_BPHY_NETDEV_COMM_IF_H_
+#define _CNF10K_BPHY_NETDEV_COMM_IF_H_
+
+#include <linux/etherdevice.h>
+#include "bphy_netdev_comm_if.h"
+
+#define BPHY_MAX_RFOE_MHAB 8 /* Max RFOE MHAB instances */
+#define BPHY_MAX_CPRI_MHAB 4 /* Max CPRI MHAB instances */
+
+#define MAX_PTP_RING 4 /* Max ptp rings per lmac */
+
+#define CNF10KB_VERSION 2 /* chip version */
+#define CNF10KA_VERSION 3 /* chip version */
+
+#define CHIP_CNF10KB(v) (((v) == CNF10KB_VERSION) ? 1 : 0)
+#define CHIP_CNF10KA(v) (((v) == CNF10KA_VERSION) ? 1 : 0)
+
+#define CHIP_CNF10K(v) ({ \
+ typeof(v) _v = (v); \
+ (CHIP_CNF10KB(_v) | CHIP_CNF10KA(_v)); \
+})
+
+/**
+ * @enum BPHY_NETDEV_CPRI_RX_GP_INT_e_
+ * @brief GP_INT numbers for CPRI Ethernet packet Rx notification
+ * by BPHY to netdev.
+ *
+ */
+enum bphy_netdev_cpri_rx_gp_int {
+ CNF10K_RX_GP_INT_CPRI0_ETH = 93, //PSM_GPINT93,
+ CNF10K_RX_GP_INT_CPRI1_ETH = 94, //PSM_GPINT94,
+ CNF10K_RX_GP_INT_CPRI2_ETH = 95, //PSM_GPINT95
+};
+
+/**
+ * @enum BPHY_NETDEV_TX_GP_INT_e_
+ * @brief GP_INT numbers for packet notification by netdev to BPHY.
+ *
+ */
+#ifdef CNF10KB
+enum bphy_netdev_tx_gp_int {
+ CNF10K_TX_GP_INT_RFOE0_LMAC0 = 32, //PSM_GPINT32,
+ CNF10K_TX_GP_INT_RFOE0_LMAC1 = 33, //PSM_GPINT33,
+
+ CNF10K_TX_GP_INT_RFOE1_LMAC2 = 34, //PSM_GPINT34,
+ CNF10K_TX_GP_INT_RFOE1_LMAC3 = 35, //PSM_GPINT35,
+
+ CNF10K_TX_GP_INT_RFOE2_LMAC0 = 36, //PSM_GPINT36,
+ CNF10K_TX_GP_INT_RFOE2_LMAC1 = 37, //PSM_GPINT37,
+
+ CNF10K_TX_GP_INT_RFOE3_LMAC2 = 38, //PSM_GPINT38,
+ CNF10K_TX_GP_INT_RFOE3_LMAC3 = 39, //PSM_GPINT39,
+
+ CNF10K_TX_GP_INT_RFOE4_LMAC0 = 40, //PSM_GPINT40,
+ CNF10K_TX_GP_INT_RFOE4_LMAC1 = 41, //PSM_GPINT41
+
+ CNF10K_TX_GP_INT_RFOE5_LMAC0 = 42, //PSM_GPINT42,
+ CNF10K_TX_GP_INT_RFOE5_LMAC1 = 43, //PSM_GPINT43,
+
+ CNF10K_TX_GP_INT_RFOE6_LMAC2 = 44, //PSM_GPINT44,
+ CNF10K_TX_GP_INT_RFOE6_LMAC3 = 45, //PSM_GPINT45,
+};
+#else
+enum bphy_netdev_tx_gp_int {
+ CNF10K_TX_GP_INT_RFOE0_LMAC0 = 32, //PSM_GPINT32,
+ CNF10K_TX_GP_INT_RFOE0_LMAC1 = 33, //PSM_GPINT33,
+ CNF10K_TX_GP_INT_RFOE0_LMAC2 = 34, //PSM_GPINT34,
+ CNF10K_TX_GP_INT_RFOE0_LMAC3 = 35, //PSM_GPINT35,
+
+ CNF10K_TX_GP_INT_RFOE1_LMAC0 = 36, //PSM_GPINT36,
+ CNF10K_TX_GP_INT_RFOE1_LMAC1 = 37, //PSM_GPINT37,
+ CNF10K_TX_GP_INT_RFOE1_LMAC2 = 38, //PSM_GPINT38,
+ CNF10K_TX_GP_INT_RFOE1_LMAC3 = 39, //PSM_GPINT39,
+};
+#endif
+
+/**
+ * @enum BPHY_NETDEV_CNF10K_RX_GP_INT_e_
+ * @brief GP_INT numbers for packet notification by BPHY to netdev.
+ *
+ */
+enum bphy_netdev_rx_gp_int {
+ CNF10K_RX_GP_INT_RFOE0_PTP = 63, //PSM_GPINT63,
+ CNF10K_RX_GP_INT_RFOE0_ECPRI = 62, //PSM_GPINT62,
+ CNF10K_RX_GP_INT_RFOE0_GENERIC = 61, //PSM_GPINT61,
+
+ CNF10K_RX_GP_INT_RFOE1_PTP = 60, //PSM_GPINT60,
+ CNF10K_RX_GP_INT_RFOE1_ECPRI = 59, //PSM_GPINT59,
+ CNF10K_RX_GP_INT_RFOE1_GENERIC = 58, //PSM_GPINT58,
+#ifdef CNF10KB
+ CNF10K_RX_GP_INT_RFOE2_PTP = 57, //PSM_GPINT57,
+ CNF10K_RX_GP_INT_RFOE2_ECPRI = 56, //PSM_GPINT56,
+ CNF10K_RX_GP_INT_RFOE2_GENERIC = 55, //PSM_GPINT55,
+
+ CNF10K_RX_GP_INT_RFOE3_PTP = 54, //PSM_GPINT54,
+ CNF10K_RX_GP_INT_RFOE3_ECPRI = 53, //PSM_GPINT53,
+ CNF10K_RX_GP_INT_RFOE3_GENERIC = 52, //PSM_GPINT52,
+
+ CNF10K_RX_GP_INT_RFOE4_PTP = 51, //PSM_GPINT51,
+ CNF10K_RX_GP_INT_RFOE4_ECPRI = 50, //PSM_GPINT50,
+ CNF10K_RX_GP_INT_RFOE4_GENERIC = 49, //PSM_GPINT49,
+
+ CNF10K_RX_GP_INT_RFOE5_PTP = 48, //PSM_GPINT48,
+ CNF10K_RX_GP_INT_RFOE5_ECPRI = 47, //PSM_GPINT47,
+ CNF10K_RX_GP_INT_RFOE5_GENERIC = 46, //PSM_GPINT46,
+
+ CNF10K_RX_GP_INT_RFOE6_PTP = 66, //PSM_GPINT66,
+ CNF10K_RX_GP_INT_RFOE6_ECPRI = 65, //PSM_GPINT65,
+ CNF10K_RX_GP_INT_RFOE6_GENERIC = 64, //PSM_GPINT64,
+#endif
+};
+
+/**
+ * @struct BPHY_NETDEV_RBUF_INFO_s
+ * @brief Information about the packet ring buffer which shall be used to
+ * send the packets from BPHY to netdev.
+ *
+ */
+struct cnf10k_bphy_ndev_rbuf_info {
+ enum bphy_netdev_packet_type pkt_type;
+ enum bphy_netdev_rx_gp_int gp_int_num;
+ u16 flow_id;
+ u16 mbt_index;
+ /**Maximum number of buffers in the Ring/Pool*/
+ u16 num_bufs;
+ /**MAX Buffer Size configured */
+ u16 buf_size; // TBC: 1536?
+	/**MBT buffer target memory*/
+ u8 mbt_target_mem;
+ /**Buffers starting address*/
+ u64 mbt_iova_addr;
+ u16 jdt_index;
+ /**Maximum number of JD buffers in the Ring/Pool*/
+ u16 num_jd;
+ /**MAX JD size configured */
+ u8 jd_size;
+	/**JDT buffer target memory*/
+	u8 jdt_target_mem;
+	/**JDT starting address*/
+ u64 jdt_iova_addr;
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_TX_PSM_CMD_INFO_s
+ * @brief TX PSM command information definition to be shared with
+ * netdev for TX communication.
+ *
+ */
+struct cnf10k_bphy_ndev_tx_psm_cmd_info {
+ enum bphy_netdev_tx_gp_int gp_int_num; // Valid only for PTP messages
+ u64 jd_iova_addr;
+ u64 rd_dma_iova_addr;
+ u64 low_cmd;
+ u64 high_cmd;
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_TX_PTP_RING_INFO_s
+ * @brief TX PTP timestamp ring buffer configuration to be shared
+ * with netdev for reading ptp timestamp.
+ *
+ */
+struct cnf10k_bphy_ndev_tx_ptp_ring_info {
+ u8 is_enable;
+ u8 ring_idx;
+ /**Number of TX PTP timestamp entries in ring */
+ u8 ring_size;
+ /**PTP Ring buffer target memory*/
+ u8 ring_target_mem;
+ /**PTP Ring buffer byte swap mode when TMEM is LLC/DRAM*/
+ u8 dswap;
+ /**Stream ID*/
+ u8 gmid;
+ /**Buffers starting address*/
+ u64 ring_iova_addr;
+ u64 reserved[4];
+};
+
+/**
+ * @struct cnf10k_bphy_netdev_intf_info
+ * @brief LMAC lane number, mac address and status information
+ *
+ */
+struct cnf10k_bphy_ndev_intf_info {
+ u8 rfoe_num;
+ u8 lane_num;
+ /* Source mac address */
+ u8 eth_addr[ETH_ALEN];
+ /* LMAC interface status */
+ u8 status; //0-DOWN, 1-UP
+ /* Configuration valid status; This interface shall be
+ * invalid if this field is set to 0
+ */
+ u8 is_valid;
+ u64 reserved;
+};
+
+/**
+ * @struct BPHY_NETDEV_COMM_IF_s
+ * @brief The communication interface definitions which would be used
+ * by the netdev and bphy application.
+ *
+ */
+struct cnf10k_bphy_ndev_comm_if {
+ struct cnf10k_bphy_ndev_intf_info lmac_info;
+ struct cnf10k_bphy_ndev_rbuf_info rbuf_info[PACKET_TYPE_MAX];
+	/** Single array to handle both PTP and OTHER cmds info. */
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info
+ ptp_pkt_info[MAX_PTP_MSG_PER_LMAC];
+ struct cnf10k_bphy_ndev_tx_ptp_ring_info
+ ptp_ts_ring_info[MAX_PTP_RING];
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_CPRI_IF_s
+ * @brief Communication interface structure definition to be used by BPHY
+ * and NETDEV applications for CPRI Interface.
+ *
+ */
+struct cnf10k_bphy_ndev_cpri_intf_cfg {
+ u8 id; /**< CPRI_ID 0..2 */
+ u8 active_lane_mask; /**< Lane Id mask */
+ u8 ul_gp_int_num; /**< UL GP INT NUM */
+ u8 ul_int_threshold; /**< UL INT THRESHOLD */
+ u8 num_ul_buf; /**< Num UL Buffers */
+ u8 num_dl_buf; /**< Num DL Buffers */
+ u64 ul_circ_buf_iova_addr; /**< UL circular buffer base address */
+ u64 dl_circ_buf_iova_addr; /**< DL circular buffer base address */
+ u8 eth_addr[MAX_LANE_PER_CPRI][ETH_ALEN];
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_NETDEV_RFOE_10x_IF_s
+ * @brief New Communication interface structure definition to be used
+ * by BPHY and NETDEV applications for RFOE Interface.
+ *
+ */
+struct cnf10k_bphy_ndev_rfoe_if {
+ /**< Interface configuration */
+ struct cnf10k_bphy_ndev_comm_if if_cfg[MAX_LMAC_PER_RFOE];
+ /**TX JD cmds to send packets other than PTP;
+	 * These are defined per RFoE and all LMACs can share
+ */
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info
+ oth_pkt_info[MAX_OTH_MSG_PER_RFOE];
+ /**Packet types for which the RX flows are configured.*/
+ u8 pkt_type_mask;
+ u64 reserved[4];
+};
+
+/* hardware specific information */
+struct bphy_hw_params {
+ u32 chip_ver; /* (version << 4) | revision */
+ u32 reserved[15]; /* reserved for future extension */
+};
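
Given the (version << 4) | revision encoding of chip_ver, a small sketch of
splitting the field and applying the CHIP_* checks (the helper name is
illustrative):

	static bool bphy_hw_is_cnf10k(const struct bphy_hw_params *hw)
	{
		u32 version = hw->chip_ver >> 4;  /* 2 = CNF10KB, 3 = CNF10KA */
		u32 revision = hw->chip_ver & 0xf;

		pr_debug("bphy chip version %u revision %u\n",
			 version, revision);
		return CHIP_CNF10K(version);
	}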
+
+/**
+ * @struct BPHY_NETDEV_COMM_INTF_CFG_s
+ * @brief ODP-NETDEV communication interface definition structure to
+ * share the RX/TX interfaces information.
+ *
+ */
+struct cnf10k_rfoe_ndev_comm_intf_cfg {
+ /**< BPHY Hardware parameters */
+ struct bphy_hw_params hw_params;
+ /**< RFOE Interface Configuration */
+ struct cnf10k_bphy_ndev_rfoe_if rfoe_if_cfg[BPHY_MAX_RFOE_MHAB];
+ u64 reserved[4];
+};
+
+/**
+ * @struct BPHY_CPRI_NETDEV_COMM_INTF_CFG_s
+ * @brief Main Communication interface structure definition to be used
+ * by BPHY and NETDEV applications for CPRI Interface.
+ *
+ */
+struct cnf10k_bphy_cpri_netdev_comm_intf_cfg {
+ /**< BPHY Hardware parameters */
+ struct bphy_hw_params hw_params;
+	/**< CPRI Interface Configuration */
+ struct cnf10k_bphy_ndev_cpri_intf_cfg cpri_if_cfg[BPHY_MAX_CPRI_MHAB];
+ u64 reserved[4];
+};
+
+#endif //_CNF10K_BPHY_NETDEV_COMM_IF_H_
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c
new file mode 100644
index 000000000000..283ee7f51431
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.c
@@ -0,0 +1,1427 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "cnf10k_rfoe.h"
+#include "cnf10k_bphy_hw.h"
+
+/* global driver ctx */
+struct cnf10k_rfoe_drv_ctx cnf10k_rfoe_drv_ctx[CNF10K_RFOE_MAX_INTF];
+
+void cnf10k_bphy_intr_handler(struct otx2_bphy_cdev_priv *cdev_priv, u32 status)
+{
+ struct cnf10k_rfoe_drv_ctx *cnf10k_drv_ctx;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int rfoe_num, i;
+ u32 intr_mask;
+
+ /* rx intr processing */
+ for (rfoe_num = 0; rfoe_num < cdev_priv->num_rfoe_mhab; rfoe_num++) {
+ intr_mask = CNF10K_RFOE_RX_INTR_MASK(rfoe_num);
+ if (status & intr_mask)
+ cnf10k_rfoe_rx_napi_schedule(rfoe_num, status);
+ }
+
+ /* tx intr processing */
+ for (i = 0; i < CNF10K_RFOE_MAX_INTF; i++) {
+ cnf10k_drv_ctx = &cnf10k_rfoe_drv_ctx[i];
+ if (cnf10k_drv_ctx->valid) {
+ netdev = cnf10k_drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ intr_mask = CNF10K_RFOE_TX_PTP_INTR_MASK(priv->rfoe_num,
+ priv->lmac_id,
+ cdev_priv->num_rfoe_lmac);
+ if ((status & intr_mask) && priv->ptp_tx_skb)
+ schedule_work(&priv->ptp_tx_work);
+ }
+ }
+}
+
+void cnf10k_rfoe_disable_intf(int rfoe_num)
+{
+ struct cnf10k_rfoe_drv_ctx *drv_ctx;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int idx;
+
+ for (idx = 0; idx < CNF10K_RFOE_MAX_INTF; idx++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[idx];
+ if (drv_ctx->rfoe_num == rfoe_num && drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->if_type = IF_TYPE_NONE;
+ }
+ }
+}
+
+void cnf10k_bphy_rfoe_cleanup(void)
+{
+ struct cnf10k_rfoe_drv_ctx *drv_ctx = NULL;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ struct net_device *netdev;
+ int i, idx;
+
+ for (i = 0; i < CNF10K_RFOE_MAX_INTF; i++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ cnf10k_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+}
+
+/* submit pending ptp tx requests */
+static void cnf10k_rfoe_ptp_submit_work(struct work_struct *work)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = container_of(work,
+ struct cnf10k_rfoe_ndev_priv,
+ ptp_queue_work);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct rfoe_tx_ptp_tstmp_s *tx_tstmp;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ u16 psm_queue_id, queue_space;
+ struct sk_buff *skb = NULL;
+ struct list_head *head;
+ u64 jd_cfg_ptr_iova;
+ unsigned long flags;
+ u64 regval;
+
+ job_cfg = &priv->tx_ptp_job_cfg;
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ /* check pending ptp requests */
+ if (list_empty(&priv->ptp_skb_list.list)) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "no pending ptp tx requests\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ /* check psm queue space available */
+ psm_queue_id = job_cfg->psm_queue_id;
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "ptp tx psm queue %d full\n",
+ psm_queue_id);
+ /* reschedule to check later */
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ schedule_work(&priv->ptp_queue_work);
+ return;
+ }
+
+ if (test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ netif_dbg(priv, tx_queued, priv->netdev, "ptp tx ongoing\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ head = &priv->ptp_skb_list.list;
+ ts_skb = list_entry(head->next, struct ptp_tstamp_skb, list);
+ skb = ts_skb->skb;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ priv->ptp_skb_list.count--;
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "submitting ptp tx skb %pS\n", skb);
+
+ priv->last_tx_ptp_jiffies = jiffies;
+
+ /* ptp timestamp entry is 128-bit in size */
+ tx_tstmp = (struct rfoe_tx_ptp_tstmp_s *)
+ ((u8 *)priv->ptp_ring_cfg.ptp_ring_base +
+ (16 * priv->ptp_ring_cfg.ptp_ring_idx));
+ memset(tx_tstmp, 0, sizeof(struct rfoe_tx_ptp_tstmp_s));
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)&job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg3.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+}
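
Note on the block_size arithmetic above: the value is in 32-bit words, rounded
up to a 16-byte boundary, since ((len + 15) >> 4) counts 16-byte chunks and
the * 4 converts chunks to words. A 60-byte PTP frame, for example, gives
((60 + 15) >> 4) * 4 = 16 words, i.e. 64 bytes of read DMA; the receive path
inverts this as len = block_size << 2.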
+
+#define OTX2_RFOE_PTP_TSTMP_POLL_CNT 100
+
+/* ptp interrupt processing bottom half */
+static void cnf10k_rfoe_ptp_tx_work(struct work_struct *work)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = container_of(work,
+ struct cnf10k_rfoe_ndev_priv,
+ ptp_tx_work);
+ struct rfoe_tx_ptp_tstmp_s *tx_tstmp;
+ struct skb_shared_hwtstamps ts;
+ u64 timestamp;
+ u16 jobid;
+
+ if (!priv->ptp_tx_skb) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp tx skb not found, something wrong!\n");
+ goto submit_next_req;
+ }
+
+ /* make sure that all memory writes by rfoe are completed */
+ dma_rmb();
+
+ /* ptp timestamp entry is 128-bit in size */
+ tx_tstmp = (struct rfoe_tx_ptp_tstmp_s *)
+ ((u8 *)priv->ptp_ring_cfg.ptp_ring_base +
+ (16 * priv->ptp_ring_cfg.ptp_ring_idx));
+
+ /* match job id */
+ jobid = tx_tstmp->jobid;
+ if (jobid != priv->ptp_job_tag) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp job id doesn't match, job_id=0x%x skb->job_tag=0x%x\n",
+ jobid, priv->ptp_job_tag);
+ priv->stats.tx_hwtstamp_failures++;
+ goto submit_next_req;
+ }
+
+ if (tx_tstmp->drop || tx_tstmp->tx_err) {
+ netif_err(priv, tx_done, priv->netdev,
+			  "ptp tx timestamp error\n");
+ priv->stats.tx_hwtstamp_failures++;
+ goto submit_next_req;
+ }
+
+ /* update timestamp value in skb */
+ timestamp = tx_tstmp->ptp_timestamp;
+
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(timestamp);
+ skb_tstamp_tx(priv->ptp_tx_skb, &ts);
+
+submit_next_req:
+ priv->ptp_ring_cfg.ptp_ring_idx++;
+ if (priv->ptp_ring_cfg.ptp_ring_idx >= priv->ptp_ring_cfg.ptp_ring_size)
+ priv->ptp_ring_cfg.ptp_ring_idx = 0;
+ if (priv->ptp_tx_skb)
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ schedule_work(&priv->ptp_queue_work);
+}
+
+/* psm queue timer callback to check queue space */
+static void cnf10k_rfoe_tx_timer_cb(struct timer_list *t)
+{
+ struct cnf10k_rfoe_ndev_priv *priv =
+ container_of(t, struct cnf10k_rfoe_ndev_priv, tx_timer);
+ u16 psm_queue_id, queue_space;
+ int reschedule = 0;
+ u64 regval;
+
+ /* check psm queue space for both ptp and oth packets */
+ if (netif_queue_stopped(priv->netdev)) {
+ psm_queue_id = priv->tx_ptp_job_cfg.psm_queue_id;
+ // check queue space
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+
+ psm_queue_id = priv->rfoe_common->tx_oth_job_cfg.psm_queue_id;
+ // check queue space
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+ }
+
+ if (reschedule)
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+}
+
+static void cnf10k_rfoe_process_rx_pkt(struct cnf10k_rfoe_ndev_priv *priv,
+ struct cnf10k_rx_ft_cfg *ft_cfg,
+ int mbt_buf_idx)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ u64 tstamp = 0, mbt_state, jdt_iova_addr;
+ struct rfoe_psw_w2_ecpri_s *ecpri_psw_w2;
+ struct rfoe_psw_w2_roe_s *rfoe_psw_w2;
+ struct cnf10k_rfoe_ndev_priv *priv2;
+ struct cnf10k_rfoe_drv_ctx *drv_ctx;
+ int found = 0, idx, len, pkt_type;
+ unsigned int ptp_message_len = 0;
+ struct rfoe_psw_s *psw = NULL;
+ struct net_device *netdev;
+ u8 *buf_ptr, *jdt_ptr;
+ struct sk_buff *skb;
+ u8 lmac_id;
+
+ /* read mbt state */
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(mbt_buf_idx, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ mbt_state = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_SEG_STATE(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ if ((mbt_state >> 16 & 0xf) != 0) {
+ pr_err("rx pkt error: mbt_buf_idx=%d, err=%d\n",
+ mbt_buf_idx, (u8)(mbt_state >> 16 & 0xf));
+ return;
+ }
+ if (mbt_state >> 20 & 0x1) {
+ pr_err("rx dma error: mbt_buf_idx=%d\n", mbt_buf_idx);
+ return;
+ }
+
+ buf_ptr = (u8 *)ft_cfg->mbt_virt_addr +
+ (ft_cfg->buf_size * mbt_buf_idx);
+
+ pkt_type = ft_cfg->pkt_type;
+
+ psw = (struct rfoe_psw_s *)buf_ptr;
+ if (psw->mac_err_sts || psw->mcs_err_sts) {
+ net_warn_ratelimited("%s: psw mac_err_sts = 0x%x, mcs_err_sts=0x%x\n",
+ priv->netdev->name,
+ psw->mac_err_sts,
+ psw->mcs_err_sts);
+ return;
+ }
+
+ if (pkt_type != PACKET_TYPE_ECPRI) {
+ /* check that the psw type is correct: */
+ if (unlikely(psw->pkt_type == ECPRI)) {
+ net_warn_ratelimited("%s: pswt is eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ jdt_iova_addr = (u64)psw->jd_ptr;
+ rfoe_psw_w2 = (struct rfoe_psw_w2_roe_s *)&psw->proto_sts_word;
+ lmac_id = rfoe_psw_w2->lmac_id;
+ tstamp = psw->ptp_timestamp;
+ } else {
+ /* check that the psw type is correct: */
+ if (unlikely(psw->pkt_type != ECPRI)) {
+ net_warn_ratelimited("%s: pswt is not eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ jdt_iova_addr = (u64)psw->jd_ptr;
+ ecpri_psw_w2 = (struct rfoe_psw_w2_ecpri_s *)
+ &psw->proto_sts_word;
+ lmac_id = ecpri_psw_w2->lmac_id;
+ tstamp = psw->ptp_timestamp;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "Rx: rfoe=%d lmac=%d mbt_buf_idx=%d\n",
+ priv->rfoe_num, lmac_id, mbt_buf_idx);
+
+ /* read jd ptr from psw */
+ jdt_ptr = otx2_iova_to_virt(priv->iommu_domain, jdt_iova_addr);
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ ((u8 *)jdt_ptr + ft_cfg->jd_rd_offset);
+ len = (jd_dma_cfg_word_0->block_size) << 2;
+ netif_dbg(priv, rx_status, priv->netdev, "jd rd_dma len = %d\n", len);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "RX MBUF DATA:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ buf_ptr, len, true);
+ }
+
+ buf_ptr += (ft_cfg->pkt_offset * 16);
+ len -= (ft_cfg->pkt_offset * 16);
+
+ for (idx = 0; idx < CNF10K_RFOE_MAX_INTF; idx++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[idx];
+ if (drv_ctx->valid && drv_ctx->rfoe_num == priv->rfoe_num &&
+ drv_ctx->lmac_id == lmac_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ netdev = cnf10k_rfoe_drv_ctx[idx].netdev;
+ priv2 = netdev_priv(netdev);
+ } else {
+ pr_err("netdev not found, something went wrong!\n");
+ return;
+ }
+
+ /* drop the packet if interface is down */
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv2, rx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv2->rfoe_num,
+ priv2->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_dropped++;
+ priv2->last_rx_ptp_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ }
+ return;
+ }
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+ netif_err(priv2, rx_err, netdev, "Rx: alloc skb failed\n");
+ return;
+ }
+
+ memcpy(skb->data, buf_ptr, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* remove trailing padding for ptp packets */
+ if (skb->protocol == htons(ETH_P_1588)) {
+ ptp_message_len = skb->data[2] << 8 | skb->data[3];
+ skb_trim(skb, ptp_message_len);
+ }
+
+ if (priv2->rx_hw_tstamp_en)
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tstamp);
+
+ netif_receive_skb(skb);
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_packets++;
+ priv2->last_rx_ptp_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ }
+ priv2->stats.rx_bytes += skb->len;
+}
+
+static int cnf10k_rfoe_process_rx_flow(struct cnf10k_rfoe_ndev_priv *priv,
+ int pkt_type, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ int count = 0, processed_pkts = 0;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ u64 mbt_cfg;
+ u16 nxt_buf;
+ int *mbt_last_idx = &priv->rfoe_common->rx_mbt_last_idx[pkt_type];
+ u16 *prv_nxt_buf = &priv->rfoe_common->nxt_buf[pkt_type];
+
+ ft_cfg = &priv->rx_ft_cfg[pkt_type];
+
+ spin_lock(&cdev_priv->mbt_lock);
+ /* read mbt nxt_buf */
+ writeq(ft_cfg->mbt_idx,
+ priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num));
+ mbt_cfg = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_CFG(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ nxt_buf = (mbt_cfg >> 32) & 0xffff;
+
+ /* no mbt entries to process */
+ if (nxt_buf == *prv_nxt_buf) {
+ netif_dbg(priv, rx_status, priv->netdev,
+ "no rx packets to process, rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx);
+ return 0;
+ }
+
+ *prv_nxt_buf = nxt_buf;
+
+ /* get count of pkts to process, check ring wrap condition */
+ if (*mbt_last_idx > nxt_buf) {
+ count = ft_cfg->num_bufs - *mbt_last_idx;
+ count += nxt_buf;
+ } else {
+ count = nxt_buf - *mbt_last_idx;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d count=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx, count);
+
+ while (likely((processed_pkts < budget) && (processed_pkts < count))) {
+ cnf10k_rfoe_process_rx_pkt(priv, ft_cfg, *mbt_last_idx);
+
+ (*mbt_last_idx)++;
+ if (*mbt_last_idx == ft_cfg->num_bufs)
+ *mbt_last_idx = 0;
+
+ processed_pkts++;
+ }
+
+ return processed_pkts;
+}
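
A worked example of the wrap handling above: with num_bufs = 8,
*mbt_last_idx = 6 and nxt_buf = 2 the hardware write pointer has wrapped, so
count = (8 - 6) + 2 = 4 buffers (indices 6, 7, 0, 1); without a wrap
(*mbt_last_idx = 2, nxt_buf = 6) count is simply 6 - 2 = 4.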
+
+/* napi poll routine */
+static int cnf10k_rfoe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ int workdone = 0, pkt_type;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ u64 intr_en, regval;
+
+ ft_cfg = container_of(napi, struct cnf10k_rx_ft_cfg, napi);
+ priv = ft_cfg->priv;
+ cdev_priv = priv->cdev_priv;
+ pkt_type = ft_cfg->pkt_type;
+
+ /* pkt processing loop */
+ workdone += cnf10k_rfoe_process_rx_flow(priv, pkt_type, budget);
+
+ if (workdone < budget) {
+ napi_complete_done(napi, workdone);
+
+ /* Re enable the Rx interrupts */
+ intr_en = PKT_TYPE_TO_INTR(pkt_type) <<
+ CNF10K_RFOE_RX_INTR_SHIFT(priv->rfoe_num);
+ spin_lock(&cdev_priv->lock);
+ if (priv->rfoe_num < 6) {
+ regval = readq(bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ regval |= intr_en;
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ } else {
+ regval = readq(bphy_reg_base + PSM_INT_GP_ENA_W1S(2));
+ regval |= intr_en;
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1S(2));
+ }
+ spin_unlock(&cdev_priv->lock);
+ }
+
+ return workdone;
+}
+
+/* Rx GPINT napi schedule api */
+void cnf10k_rfoe_rx_napi_schedule(int rfoe_num, u32 status)
+{
+ enum bphy_netdev_packet_type pkt_type;
+ struct cnf10k_rfoe_drv_ctx *drv_ctx;
+ struct cnf10k_rfoe_ndev_priv *priv;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ int intf, bit_idx;
+ u32 intr_sts;
+ u64 regval;
+
+ for (intf = 0; intf < CNF10K_RFOE_MAX_INTF; intf++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[intf];
+ /* ignore lmac, one interrupt/pkt_type/rfoe */
+ if (!(drv_ctx->valid && drv_ctx->rfoe_num == rfoe_num))
+ continue;
+ /* check if i/f down, napi disabled */
+ priv = netdev_priv(drv_ctx->netdev);
+ if (test_bit(RFOE_INTF_DOWN, &priv->state))
+ continue;
+ /* check rx pkt type */
+ intr_sts = ((status >> CNF10K_RFOE_RX_INTR_SHIFT(rfoe_num)) &
+ RFOE_RX_INTR_EN);
+ for (bit_idx = 0; bit_idx < PACKET_TYPE_MAX; bit_idx++) {
+ if (!(intr_sts & BIT(bit_idx)))
+ continue;
+ pkt_type = INTR_TO_PKT_TYPE(bit_idx);
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type))))
+ continue;
+ /* clear intr enable bit, re-enable in napi handler */
+ regval = PKT_TYPE_TO_INTR(pkt_type) <<
+ CNF10K_RFOE_RX_INTR_SHIFT(rfoe_num);
+ if (rfoe_num < 6)
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ else
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1C(2));
+ /* schedule napi */
+ ft_cfg = &drv_ctx->ft_cfg[pkt_type];
+ napi_schedule(&ft_cfg->napi);
+ }
+ /* napi scheduled per pkt_type, return */
+ return;
+ }
+}
+
+static void cnf10k_rfoe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_rfoe_stats *dev_stats = &priv->stats;
+
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_packets +
+ dev_stats->ptp_rx_packets +
+ dev_stats->ecpri_rx_packets;
+ stats->rx_dropped = dev_stats->rx_dropped +
+ dev_stats->ptp_rx_dropped +
+ dev_stats->ecpri_rx_dropped;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_packets +
+ dev_stats->ptp_tx_packets +
+ dev_stats->ecpri_tx_packets;
+ stats->tx_dropped = dev_stats->tx_dropped +
+ dev_stats->ptp_tx_dropped +
+ dev_stats->ecpri_tx_dropped;
+}
+
+static int cnf10k_rfoe_config_hwtstamp(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* ptp hw timestamp is always enabled, mark the sw flags
+ * so that tx ptp requests are submitted to ptp psm queue
+ * and rx timestamp is copied to skb
+ */
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->tx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->tx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* netdev ioctl */
+static int cnf10k_rfoe_ioctl(struct net_device *netdev, struct ifreq *req,
+ int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cnf10k_rfoe_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* netdev xmit */
+static netdev_tx_t cnf10k_rfoe_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct rfoe_tx_ptp_tstmp_s *tx_tstmp;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ int psm_queue_id, queue_space;
+ u64 jd_cfg_ptr_iova, regval;
+ unsigned long flags;
+ struct ethhdr *eth;
+ int pkt_type = 0;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (!priv->tx_hw_tstamp_en) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "skb HW timestamp requested but not enabled, this packet will not be timestamped\n");
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ pkt_type = PACKET_TYPE_OTHER;
+ } else {
+ job_cfg = &priv->tx_ptp_job_cfg;
+ pkt_type = PACKET_TYPE_PTP;
+ }
+ } else {
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ eth = (struct ethhdr *)skb->data;
+		if (eth->h_proto == htons(ETH_P_ECPRI))
+ pkt_type = PACKET_TYPE_ECPRI;
+ else
+ pkt_type = PACKET_TYPE_OTHER;
+ }
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ if (unlikely(priv->if_type != IF_TYPE_ETHERNET)) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} invalid intf mode, drop pkt\n",
+ netdev->name, priv->rfoe_num, priv->lmac_id);
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv, tx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type)))) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} pkt not supported, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ /* get psm queue number */
+ psm_queue_id = job_cfg->psm_queue_id;
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "psm: queue(%d): cfg=0x%llx ptr=0x%llx space=0x%llx\n",
+ psm_queue_id,
+ readq(priv->psm_reg_base + PSM_QUEUE_CFG(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_PTR(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id)));
+
+ /* check psm queue space available */
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1 && pkt_type != PACKET_TYPE_PTP) {
+ netif_err(priv, tx_err, netdev,
+ "no space in psm queue %d, dropping pkt\n",
+ psm_queue_id);
+ netif_stop_queue(netdev);
+ dev_kfree_skb_any(skb);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ priv->stats.ecpri_tx_dropped++;
+ else
+ priv->stats.tx_dropped++;
+
+ priv->last_tx_dropped_jiffies = jiffies;
+
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return NETDEV_TX_OK;
+ }
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ /* hw timestamp */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->tx_hw_tstamp_en) {
+ if (list_empty(&priv->ptp_skb_list.list) &&
+ !test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)
+ &job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+
+ /* ptp timestamp entry is 128-bit in size */
+ tx_tstmp = (struct rfoe_tx_ptp_tstmp_s *)
+ ((u8 *)priv->ptp_ring_cfg.ptp_ring_base +
+ (16 * priv->ptp_ring_cfg.ptp_ring_idx));
+ memset(tx_tstmp, 0, sizeof(struct rfoe_tx_ptp_tstmp_s));
+ } else {
+ /* check ptp queue count */
+ if (priv->ptp_skb_list.count >= max_ptp_req) {
+ netif_err(priv, tx_err, netdev,
+ "ptp list full, dropping pkt\n");
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ /* allocate and add ptp req to queue */
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ ts_skb->skb = skb;
+ list_add_tail(&ts_skb->list, &priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count++;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->stats.ptp_tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+ goto exit; /* submit the packet later */
+ }
+ }
+
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "Tx: skb %pS len=%d\n",
+ skb, skb->len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ skb->data, skb->len, true);
+ }
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg3.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* update rfoe_mode and lmac id for non-ptp (shared) psm job entry */
+ if (pkt_type != PACKET_TYPE_PTP) {
+ jd_cfg_ptr->cfg3.lmacid = priv->lmac_id & 0x3;
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ jd_cfg_ptr->cfg.rfoe_mode = 1;
+ else
+ jd_cfg_ptr->cfg.rfoe_mode = 0;
+ }
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_packets++;
+ priv->last_tx_ptp_jiffies = jiffies;
+ } else {
+ priv->stats.tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ }
+ priv->stats.tx_bytes += skb->len;
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+exit:
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ dev_kfree_skb_any(skb);
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* netdev open */
+static int cnf10k_rfoe_eth_open(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_enable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ priv->ptp_tx_skb = NULL;
+
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+
+ clear_bit(RFOE_INTF_DOWN, &priv->state);
+ priv->link_state = 1;
+
+ return 0;
+}
+
+/* netdev close */
+static int cnf10k_rfoe_eth_stop(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct ptp_tstamp_skb *ts_skb, *ts_skb2;
+ int idx;
+
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ priv->link_state = 0;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_disable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ del_timer_sync(&priv->tx_timer);
+
+ /* cancel any pending ptp work item in progress */
+ cancel_work_sync(&priv->ptp_tx_work);
+ if (priv->ptp_tx_skb) {
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ }
+
+ /* clear ptp skb list */
+ cancel_work_sync(&priv->ptp_queue_work);
+ list_for_each_entry_safe(ts_skb, ts_skb2,
+ &priv->ptp_skb_list.list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+ priv->ptp_skb_list.count = 0;
+
+ return 0;
+}
+
+static int cnf10k_rfoe_init(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ /* Enable VLAN TPID match */
+ writeq(0x18100, (priv->rfoe_reg_base +
+ RFOEX_RX_VLANX_CFG(priv->rfoe_num, 0)));
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
+static int cnf10k_rfoe_vlan_rx_configure(struct net_device *netdev, u16 vid,
+ bool forward)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct rfoe_rx_ind_vlanx_fwd fwd;
+ unsigned long flags;
+ u64 mask, index;
+
+ if (vid >= VLAN_N_VID) {
+ netdev_err(netdev, "Invalid VLAN ID %d\n", vid);
+ return -EINVAL;
+ }
+
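+	/* each 64-bit RX_IND_VLANX_FWD entry covers 64 VIDs:
+	 * bit = vid % 64, table index = vid / 64
+	 */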
+	mask = (1ULL << (vid & 0x3F));
+ index = (vid >> 6) & 0x3F;
+
+ spin_lock_irqsave(&cdev_priv->mbt_lock, flags);
+
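+	/* refcount adds/removes so the hardware filter is only reprogrammed
+	 * on the first add and the last remove of a given vid
+	 */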
+ if (forward && priv->rfoe_common->rx_vlan_fwd_refcnt[vid]++)
+ goto out;
+
+ if (!forward && --priv->rfoe_common->rx_vlan_fwd_refcnt[vid])
+ goto out;
+
+ /* read current fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ fwd.fwd = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0));
+
+ if (forward)
+ fwd.fwd |= mask;
+ else
+ fwd.fwd &= ~mask;
+
+ /* write the new fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ writeq(fwd.fwd, (priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0)));
+
+out:
+ spin_unlock_irqrestore(&cdev_priv->mbt_lock, flags);
+
+ return 0;
+}
+
+static int cnf10k_rfoe_vlan_rx_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return cnf10k_rfoe_vlan_rx_configure(netdev, vid, true);
+}
+
+static int cnf10k_rfoe_vlan_rx_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return cnf10k_rfoe_vlan_rx_configure(netdev, vid, false);
+}
+
+static const struct net_device_ops cnf10k_rfoe_netdev_ops = {
+ .ndo_init = cnf10k_rfoe_init,
+ .ndo_open = cnf10k_rfoe_eth_open,
+ .ndo_stop = cnf10k_rfoe_eth_stop,
+ .ndo_start_xmit = cnf10k_rfoe_eth_start_xmit,
+ .ndo_do_ioctl = cnf10k_rfoe_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = cnf10k_rfoe_get_stats64,
+ .ndo_vlan_rx_add_vid = cnf10k_rfoe_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = cnf10k_rfoe_vlan_rx_kill,
+};
+
+static void cnf10k_rfoe_dump_rx_ft_cfg(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ pr_debug("rfoe=%d lmac=%d pkttype=%d flowid=%d mbt: idx=%d size=%d nbufs=%d iova=0x%llx jdt: idx=%d size=%d num_jd=%d iova=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, ft_cfg->pkt_type,
+ ft_cfg->flow_id, ft_cfg->mbt_idx, ft_cfg->buf_size,
+ ft_cfg->num_bufs, ft_cfg->mbt_iova_addr,
+ ft_cfg->jdt_idx, ft_cfg->jd_size, ft_cfg->num_jd,
+ ft_cfg->jdt_iova_addr);
+ }
+}
+
+static void cnf10k_rfoe_fill_rx_ft_cfg(struct cnf10k_rfoe_ndev_priv *priv,
+ struct cnf10k_bphy_ndev_comm_if *if_cfg)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct cnf10k_bphy_ndev_rbuf_info *rbuf_info;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ u64 jdt_cfg0, iova;
+ int idx;
+
+ /* RX flow table configuration */
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ rbuf_info = &if_cfg->rbuf_info[idx];
+ ft_cfg->pkt_type = rbuf_info->pkt_type;
+ ft_cfg->gp_int_num = rbuf_info->gp_int_num;
+ ft_cfg->flow_id = rbuf_info->flow_id;
+ ft_cfg->mbt_idx = rbuf_info->mbt_index;
+ ft_cfg->buf_size = rbuf_info->buf_size * 16;
+ ft_cfg->num_bufs = rbuf_info->num_bufs;
+ ft_cfg->mbt_iova_addr = rbuf_info->mbt_iova_addr;
+ iova = ft_cfg->mbt_iova_addr;
+ ft_cfg->mbt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ ft_cfg->jdt_idx = rbuf_info->jdt_index;
+ ft_cfg->jd_size = rbuf_info->jd_size * 8;
+ ft_cfg->num_jd = rbuf_info->num_jd;
+ ft_cfg->jdt_iova_addr = rbuf_info->jdt_iova_addr;
+ iova = ft_cfg->jdt_iova_addr;
+ ft_cfg->jdt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(ft_cfg->jdt_idx,
+ (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ jdt_cfg0 = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_JDT_CFG0(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
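+		/* decode the JD read offset (converted to bytes) and the
+		 * packet data offset from the JDT_CFG0 value read above
+		 */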
+ ft_cfg->jd_rd_offset = ((jdt_cfg0 >> 27) & 0x3f) * 8;
+ ft_cfg->pkt_offset = (u8)((jdt_cfg0 >> 52) & 0x1f);
+ ft_cfg->priv = priv;
+ netif_napi_add(priv->netdev, &ft_cfg->napi,
+ cnf10k_rfoe_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+}
+
+static void cnf10k_rfoe_fill_tx_job_entries(struct cnf10k_rfoe_ndev_priv *priv,
+ struct tx_job_queue_cfg *job_cfg,
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info *tx_job,
+ int num_entries)
+{
+ struct tx_job_entry *job_entry;
+ u64 jd_cfg_iova, iova;
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ job_entry = &job_cfg->job_entries[i];
+ job_entry->job_cmd_lo = tx_job->low_cmd;
+ job_entry->job_cmd_hi = tx_job->high_cmd;
+ job_entry->jd_iova_addr = tx_job->jd_iova_addr;
+ iova = job_entry->jd_iova_addr;
+ job_entry->jd_ptr = otx2_iova_to_virt(priv->iommu_domain, iova);
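+		/* word1 of the job descriptor holds the IOVA of the
+		 * job configuration
+		 */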
+ jd_cfg_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ job_entry->jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ jd_cfg_iova);
+ job_entry->rd_dma_iova_addr = tx_job->rd_dma_iova_addr;
+ iova = job_entry->rd_dma_iova_addr;
+ job_entry->rd_dma_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ pr_debug("job_cmd_lo=0x%llx job_cmd_hi=0x%llx jd_iova_addr=0x%llx rd_dma_iova_addr=%llx\n",
+ tx_job->low_cmd, tx_job->high_cmd,
+ tx_job->jd_iova_addr, tx_job->rd_dma_iova_addr);
+ tx_job++;
+ }
+	/* all entries share one PSM queue; the queue id sits in bits [15:8]
+	 * of the ADDJOB command word (see struct psm_cmd_addjob_s)
+	 */
+ job_entry = &job_cfg->job_entries[0];
+ job_cfg->psm_queue_id = (job_entry->job_cmd_lo >> 8) & 0xff;
+ job_cfg->q_idx = 0;
+ job_cfg->num_entries = num_entries;
+ spin_lock_init(&job_cfg->lock);
+}
+
+int cnf10k_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct cnf10k_rfoe_ndev_comm_intf_cfg *cfg)
+{
+ int i, intf_idx = 0, num_entries, lmac, idx, ret;
+ struct cnf10k_bphy_ndev_tx_psm_cmd_info *tx_info;
+ struct cnf10k_bphy_ndev_tx_ptp_ring_info *info;
+ struct cnf10k_rfoe_drv_ctx *drv_ctx = NULL;
+ struct cnf10k_rfoe_ndev_priv *priv, *priv2;
+ struct cnf10k_bphy_ndev_rfoe_if *rfoe_cfg;
+ struct cnf10k_bphy_ndev_comm_if *if_cfg;
+ struct tx_ptp_ring_cfg *ptp_ring_cfg;
+ struct tx_job_queue_cfg *tx_cfg;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ struct net_device *netdev;
+ u8 pkt_type_mask;
+
+ cdev->hw_version = cfg->hw_params.chip_ver;
+ dev_dbg(cdev->dev, "hw_version = 0x%x\n", cfg->hw_params.chip_ver);
+
+ if (CHIP_CNF10KB(cdev->hw_version)) {
+ cdev->num_rfoe_mhab = 7;
+ cdev->num_rfoe_lmac = 2;
+ cdev->tot_rfoe_intf = 14;
+ } else if (CHIP_CNF10KA(cdev->hw_version)) {
+ cdev->num_rfoe_mhab = 2;
+ cdev->num_rfoe_lmac = 4;
+ cdev->tot_rfoe_intf = 8;
+ } else {
+ dev_err(cdev->dev, "unsupported chip version\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < BPHY_MAX_RFOE_MHAB; i++) {
+ priv2 = NULL;
+ rfoe_cfg = &cfg->rfoe_if_cfg[i];
+ pkt_type_mask = rfoe_cfg->pkt_type_mask;
+ for (lmac = 0; lmac < MAX_LMAC_PER_RFOE; lmac++) {
+ if_cfg = &rfoe_cfg->if_cfg[lmac];
+ /* check if lmac is valid */
+ if (!if_cfg->lmac_info.is_valid) {
+ dev_dbg(cdev->dev,
+ "rfoe%d lmac%d invalid, skipping\n",
+ i, lmac);
+ continue;
+ }
+ if (lmac >= cdev->num_rfoe_lmac) {
+ dev_dbg(cdev->dev,
+ "rfoe%d, lmac%d not supported, skipping\n",
+ i, lmac);
+ continue;
+ }
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ dev_err(cdev->dev,
+ "error allocating net device\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ memset(priv, 0, sizeof(*priv));
+ if (!priv2) {
+ priv->rfoe_common =
+ kzalloc(sizeof(struct rfoe_common_cfg),
+ GFP_KERNEL);
+ if (!priv->rfoe_common) {
+ dev_err(cdev->dev, "kzalloc failed\n");
+ free_netdev(netdev);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv->rfoe_common->refcnt = 1;
+ }
+ spin_lock_init(&priv->lock);
+ priv->netdev = netdev;
+ priv->cdev_priv = cdev;
+ priv->msg_enable = netif_msg_init(-1, 0);
+ spin_lock_init(&priv->stats.lock);
+ priv->rfoe_num = if_cfg->lmac_info.rfoe_num;
+ priv->lmac_id = if_cfg->lmac_info.lane_num;
+ priv->if_type = IF_TYPE_ETHERNET;
+ memcpy(priv->mac_addr, if_cfg->lmac_info.eth_addr,
+ ETH_ALEN);
+ if (is_valid_ether_addr(priv->mac_addr))
+ ether_addr_copy(netdev->dev_addr,
+ priv->mac_addr);
+ else
+				eth_random_addr(netdev->dev_addr);
+ priv->pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID,
+ NULL);
+ priv->iommu_domain =
+ iommu_get_domain_for_dev(&priv->pdev->dev);
+ priv->bphy_reg_base = bphy_reg_base;
+ priv->psm_reg_base = psm_reg_base;
+ priv->rfoe_reg_base = rfoe_reg_base;
+ priv->bcn_reg_base = bcn_reg_base;
+ priv->ptp_reg_base = ptp_reg_base;
+
+ /* Initialise PTP TX work queue */
+ INIT_WORK(&priv->ptp_tx_work, cnf10k_rfoe_ptp_tx_work);
+ INIT_WORK(&priv->ptp_queue_work,
+ cnf10k_rfoe_ptp_submit_work);
+
+ /* Initialise PTP skb list */
+ INIT_LIST_HEAD(&priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count = 0;
+ timer_setup(&priv->tx_timer,
+ cnf10k_rfoe_tx_timer_cb, 0);
+
+ priv->pkt_type_mask = pkt_type_mask;
+ cnf10k_rfoe_fill_rx_ft_cfg(priv, if_cfg);
+ cnf10k_rfoe_dump_rx_ft_cfg(priv);
+
+ /* TX PTP job configuration */
+ if (priv->pkt_type_mask & (1U << PACKET_TYPE_PTP)) {
+ tx_cfg = &priv->tx_ptp_job_cfg;
+ tx_info = &if_cfg->ptp_pkt_info[0];
+ num_entries = MAX_PTP_MSG_PER_LMAC;
+ cnf10k_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ /* fill ptp ring info */
+ ptp_ring_cfg = &priv->ptp_ring_cfg;
+ info = &if_cfg->ptp_ts_ring_info[0];
+ ptp_ring_cfg->ptp_ring_base =
+ otx2_iova_to_virt(priv->iommu_domain,
+ info->ring_iova_addr);
+ ptp_ring_cfg->ptp_ring_id = info->ring_idx;
+ ptp_ring_cfg->ptp_ring_size = info->ring_size;
+ ptp_ring_cfg->ptp_ring_idx = 0;
+ }
+
+ /* TX ECPRI/OTH(PTP) job configuration */
+ if (!priv2 &&
+ ((priv->pkt_type_mask &
+ (1U << PACKET_TYPE_OTHER)) ||
+ (priv->pkt_type_mask &
+ (1U << PACKET_TYPE_ECPRI)))) {
+ num_entries = cdev->num_rfoe_lmac *
+ MAX_OTH_MSG_PER_LMAC;
+ tx_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ tx_info = &rfoe_cfg->oth_pkt_info[0];
+ cnf10k_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+			} else if (priv2) {
+				/* share rfoe_common data with the first lmac */
+ priv->rfoe_common = priv2->rfoe_common;
+ ++(priv->rfoe_common->refcnt);
+ }
+
+			/* remember the first valid (rfoe + lmac) priv; it
+			 * owns the shared rfoe_common for this rfoe
+			 */
+ if (!priv2)
+ priv2 = priv;
+
+ intf_idx = (i * cdev->num_rfoe_lmac) + lmac;
+ snprintf(netdev->name, sizeof(netdev->name),
+ "rfoe%d", intf_idx);
+ netdev->netdev_ops = &cnf10k_rfoe_netdev_ops;
+ cnf10k_rfoe_set_ethtool_ops(netdev);
+ cnf10k_rfoe_ptp_init(priv);
+ netdev->watchdog_timeo = (15 * HZ);
+ netdev->mtu = 1500U;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 1500U;
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(cdev->dev,
+ "failed to register net device %s\n",
+ netdev->name);
+ free_netdev(netdev);
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ dev_dbg(cdev->dev, "net device %s registered\n",
+ netdev->name);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+ priv->link_state = 0;
+
+ /* initialize global ctx */
+ drv_ctx = &cnf10k_rfoe_drv_ctx[intf_idx];
+ drv_ctx->rfoe_num = priv->rfoe_num;
+ drv_ctx->lmac_id = priv->lmac_id;
+ drv_ctx->valid = 1;
+ drv_ctx->netdev = netdev;
+ drv_ctx->ft_cfg = &priv->rx_ft_cfg[0];
+ }
+ }
+
+ return 0;
+
+err_exit:
+ for (i = 0; i < CNF10K_RFOE_MAX_INTF; i++) {
+ drv_ctx = &cnf10k_rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ cnf10k_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h
new file mode 100644
index 000000000000..215056a1c7ca
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _CNF10K_RFOE_H_
+#define _CNF10K_RFOE_H_
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/if_vlan.h>
+
+#include "rfoe_common.h"
+#include "otx2_bphy.h"
+
+#define DEBUG
+
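+/* RX interrupt bits are allocated in strides of 3 per RFOE MHAB: MHABs 0-5
+ * are packed downward from bit 31, MHABs 6 and above grow upward from bit 0
+ */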
+#define CNF10K_RFOE_RX_INTR_SHIFT(a) ({ \
+ typeof(a) _a = (a); \
+ ((_a) < 6) ? (32 - ((_a) + 1) * 3) : (((_a) - 6) * 3); \
+})
+#define CNF10K_RFOE_RX_INTR_MASK(a) (RFOE_RX_INTR_EN << \
+ CNF10K_RFOE_RX_INTR_SHIFT(a))
+#define CNF10K_RFOE_TX_PTP_INTR_MASK(a, b, n) (1UL << ((a) * (n) + (b)))
+
+#define CNF10K_RFOE_MAX_INTF 14
+
+/* global driver context */
+struct cnf10k_rfoe_drv_ctx {
+ u8 rfoe_num;
+ u8 lmac_id;
+ int valid;
+ struct net_device *netdev;
+ struct cnf10k_rx_ft_cfg *ft_cfg;
+ int tx_gpint_bit;
+};
+
+extern struct cnf10k_rfoe_drv_ctx cnf10k_rfoe_drv_ctx[CNF10K_RFOE_MAX_INTF];
+
+/* rx flow table configuration */
+struct cnf10k_rx_ft_cfg {
+ enum bphy_netdev_packet_type pkt_type; /* pkt_type for psw */
+ enum bphy_netdev_rx_gpint gp_int_num;
+ u16 flow_id; /* flow id */
+ u16 mbt_idx; /* mbt index */
+ u16 buf_size; /* mbt buf size */
+ u16 num_bufs; /* mbt num bufs */
+ u64 mbt_iova_addr;
+ void __iomem *mbt_virt_addr;
+ u16 jdt_idx; /* jdt index */
+ u8 jd_size; /* jd size */
+ u16 num_jd; /* num jd's */
+ u64 jdt_iova_addr;
+ void __iomem *jdt_virt_addr;
+ u8 jd_rd_offset; /* jd rd offset */
+ u8 pkt_offset;
+ struct napi_struct napi;
+ struct cnf10k_rfoe_ndev_priv *priv;
+};
+
+struct tx_ptp_ring_cfg {
+ u8 ptp_ring_id;
+ void __iomem *ptp_ring_base;
+ u8 ptp_ring_size;
+ u8 ptp_ring_idx;
+};
+
+/* netdev priv */
+struct cnf10k_rfoe_ndev_priv {
+ u8 rfoe_num;
+ u8 lmac_id;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 msg_enable;
+ void __iomem *bphy_reg_base;
+ void __iomem *psm_reg_base;
+ void __iomem *rfoe_reg_base;
+ void __iomem *bcn_reg_base;
+ void __iomem *ptp_reg_base;
+ struct iommu_domain *iommu_domain;
+ struct cnf10k_rx_ft_cfg rx_ft_cfg[PACKET_TYPE_MAX];
+ struct tx_job_queue_cfg tx_ptp_job_cfg;
+ struct tx_ptp_ring_cfg ptp_ring_cfg;
+ struct rfoe_common_cfg *rfoe_common;
+ u8 pkt_type_mask;
+ /* priv lock */
+ spinlock_t lock;
+ int rx_hw_tstamp_en;
+ int tx_hw_tstamp_en;
+ struct sk_buff *ptp_tx_skb;
+ u16 ptp_job_tag;
+ struct timer_list tx_timer;
+ unsigned long state;
+ struct work_struct ptp_tx_work;
+ struct work_struct ptp_queue_work;
+ struct ptp_tx_skb_list ptp_skb_list;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct otx2_rfoe_stats stats;
+ u8 mac_addr[ETH_ALEN];
+ s32 sec_bcn_offset;
+ int if_type;
+ u8 link_state;
+ unsigned long last_tx_jiffies;
+ unsigned long last_tx_ptp_jiffies;
+ unsigned long last_rx_jiffies;
+ unsigned long last_rx_ptp_jiffies;
+ unsigned long last_tx_dropped_jiffies;
+ unsigned long last_tx_ptp_dropped_jiffies;
+ unsigned long last_rx_dropped_jiffies;
+ unsigned long last_rx_ptp_dropped_jiffies;
+};
+
+void cnf10k_rfoe_rx_napi_schedule(int rfoe_num, u32 status);
+
+int cnf10k_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct cnf10k_rfoe_ndev_comm_intf_cfg *cfg);
+
+void cnf10k_bphy_rfoe_cleanup(void);
+
+void cnf10k_rfoe_disable_intf(int rfoe_num);
+
+/* ethtool */
+void cnf10k_rfoe_set_ethtool_ops(struct net_device *netdev);
+
+/* ptp */
+int cnf10k_rfoe_ptp_init(struct cnf10k_rfoe_ndev_priv *priv);
+void cnf10k_rfoe_ptp_destroy(struct cnf10k_rfoe_ndev_priv *priv);
+
+void cnf10k_bphy_intr_handler(struct otx2_bphy_cdev_priv *cdev_priv,
+ u32 status);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c
new file mode 100644
index 000000000000..5d7bbd9fc82f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ethtool.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "cnf10k_rfoe.h"
+#include "cnf10k_bphy_hw.h"
+
+static const char ethtool_stat_strings[][ETH_GSTRING_LEN] = {
+ "oth_rx_packets",
+ "ptp_rx_packets",
+ "ecpri_rx_packets",
+ "rx_bytes",
+ "oth_rx_dropped",
+ "ptp_rx_dropped",
+ "ecpri_rx_dropped",
+ "oth_tx_packets",
+ "ptp_tx_packets",
+ "ecpri_tx_packets",
+ "tx_bytes",
+ "oth_tx_dropped",
+ "ptp_tx_dropped",
+ "ecpri_tx_dropped",
+ "ptp_tx_hwtstamp_failures",
+ "EthIfInFrames",
+ "EthIfInOctets",
+ "EthIfOutFrames",
+ "EthIfOutOctets",
+ "EthIfInUnknownVlan",
+};
+
+static void cnf10k_rfoe_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ethtool_stat_strings,
+ sizeof(ethtool_stat_strings));
+ break;
+ }
+}
+
+static int cnf10k_rfoe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stat_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cnf10k_rfoe_update_lmac_stats(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ struct otx2_rfoe_stats *stats = &priv->stats;
+
+ stats->EthIfInFrames = readq(priv->rfoe_reg_base +
+ RFOEX_RX_RPM_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInOctets = readq(priv->rfoe_reg_base +
+ RFOEX_RX_RPM_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutFrames = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutOctets = readq(priv->rfoe_reg_base +
+ RFOEX_TX_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInUnknownVlan =
+ readq(priv->rfoe_reg_base +
+ RFOEX_RX_VLAN_DROP_STAT(priv->rfoe_num,
+ priv->lmac_id));
+}
+
+static void cnf10k_rfoe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ cnf10k_rfoe_update_lmac_stats(priv);
+ spin_lock(&priv->stats.lock);
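+	/* this relies on the leading fields of otx2_rfoe_stats being u64
+	 * counters laid out in ethtool_stat_strings order
+	 */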
+ memcpy(data, &priv->stats,
+ ARRAY_SIZE(ethtool_stat_strings) * sizeof(u64));
+ spin_unlock(&priv->stats.lock);
+}
+
+static void cnf10k_rfoe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *p)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ snprintf(p->driver, sizeof(p->driver), "cnf10k_rfoe {rfoe%d lmac%d}",
+ priv->rfoe_num, priv->lmac_id);
+ strlcpy(p->bus_info, "platform", sizeof(p->bus_info));
+}
+
+static int cnf10k_rfoe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	info->phc_index = priv->ptp_clock ?
+			  ptp_clock_index(priv->ptp_clock) : -1;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static u32 cnf10k_rfoe_get_msglevel(struct net_device *netdev)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void cnf10k_rfoe_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops cnf10k_rfoe_ethtool_ops = {
+ .get_drvinfo = cnf10k_rfoe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = cnf10k_rfoe_get_ts_info,
+ .get_strings = cnf10k_rfoe_get_strings,
+ .get_sset_count = cnf10k_rfoe_get_sset_count,
+ .get_ethtool_stats = cnf10k_rfoe_get_ethtool_stats,
+ .get_msglevel = cnf10k_rfoe_get_msglevel,
+ .set_msglevel = cnf10k_rfoe_set_msglevel,
+};
+
+void cnf10k_rfoe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &cnf10k_rfoe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c
new file mode 100644
index 000000000000..4ea2fc29ee71
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/cnf10k_rfoe_ptp.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CNF10K BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "cnf10k_rfoe.h"
+
+static int cnf10k_rfoe_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnf10k_rfoe_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnf10k_rfoe_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct cnf10k_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct cnf10k_rfoe_ndev_priv,
+ ptp_clock_info);
+ u64 nsec;
+
+ nsec = readq(priv->ptp_reg_base + MIO_PTP_CLOCK_HI);
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int cnf10k_rfoe_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cnf10k_rfoe_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info cnf10k_rfoe_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = cnf10k_rfoe_ptp_adjfine,
+ .adjtime = cnf10k_rfoe_ptp_adjtime,
+ .gettime64 = cnf10k_rfoe_ptp_gettime,
+ .settime64 = cnf10k_rfoe_ptp_settime,
+ .enable = cnf10k_rfoe_ptp_enable,
+};
+
+int cnf10k_rfoe_ptp_init(struct cnf10k_rfoe_ndev_priv *priv)
+{
+ int err;
+
+ priv->ptp_clock_info = cnf10k_rfoe_ptp_clock_info;
+	snprintf(priv->ptp_clock_info.name,
+		 sizeof(priv->ptp_clock_info.name), "%s",
+		 priv->netdev->name);
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_info,
+ &priv->pdev->dev);
+	if (IS_ERR_OR_NULL(priv->ptp_clock)) {
+		err = priv->ptp_clock ? PTR_ERR(priv->ptp_clock) : -ENODEV;
+		priv->ptp_clock = NULL;
+		return err;
+ }
+
+ return 0;
+}
+
+void cnf10k_rfoe_ptp_destroy(struct cnf10k_rfoe_ndev_priv *priv)
+{
+	if (!priv->ptp_clock)
+		return;
+
+	ptp_clock_unregister(priv->ptp_clock);
+	priv->ptp_clock = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h
new file mode 100644
index 000000000000..5cb8a89eef0b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_BPHY_H_
+#define _OTX2_BPHY_H_
+
+#include <linux/device.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+
+#include "bphy_common.h"
+#include "rfoe_bphy_netdev_comm_if.h"
+#include "cnf10k_bphy_netdev_comm_if.h"
+
+#define DEVICE_NAME "otx2_rfoe"
+#define DRV_NAME "octeontx2-bphy-netdev"
+#define DRV_STRING "Marvell OcteonTX2 BPHY Ethernet Driver"
+
+/* char device ioctl numbers */
+#define OTX2_RFOE_IOCTL_BASE 0xCC /* Temporary */
+#define OTX2_RFOE_IOCTL_ODP_INTF_CFG _IOW(OTX2_RFOE_IOCTL_BASE, 0x01, \
+ struct bphy_netdev_comm_intf_cfg)
+#define OTX2_RFOE_IOCTL_ODP_DEINIT _IO(OTX2_RFOE_IOCTL_BASE, 0x02)
+#define OTX2_RFOE_IOCTL_RX_IND_CFG _IOWR(OTX2_RFOE_IOCTL_BASE, 0x03, \
+ struct otx2_rfoe_rx_ind_cfg)
+#define OTX2_RFOE_IOCTL_PTP_OFFSET _IOW(OTX2_RFOE_IOCTL_BASE, 0x04, \
+ struct ptp_clk_cfg)
+#define OTX2_RFOE_IOCTL_SEC_BCN_OFFSET _IOW(OTX2_RFOE_IOCTL_BASE, 0x05, \
+ struct bcn_sec_offset_cfg)
+#define OTX2_RFOE_IOCTL_MODE_CPRI _IOW(OTX2_RFOE_IOCTL_BASE, 0x06, \
+ int)
+#define OTX2_RFOE_IOCTL_LINK_EVENT _IOW(OTX2_RFOE_IOCTL_BASE, 0x07, \
+ struct otx2_rfoe_link_event)
+#define OTX2_CPRI_IOCTL_LINK_EVENT _IOW(OTX2_RFOE_IOCTL_BASE, 0x08, \
+ struct otx2_cpri_link_event)
+#define OTX2_IOCTL_RFOE_10x_CFG _IOW(OTX2_RFOE_IOCTL_BASE, 0x0A, \
+ uint64_t)
+#define OTX2_IOCTL_CPRI_INTF_CFG _IOW(OTX2_RFOE_IOCTL_BASE, 0x0B, \
+ uint64_t)
+
+//#define ASIM /* ASIM environment */
+
+#define OTX2_BPHY_MHAB_INST 3
+
+/* char driver private data */
+struct otx2_bphy_cdev_priv {
+ struct device *dev;
+ struct cdev cdev;
+ dev_t devt;
+ int is_open;
+ int odp_intf_cfg;
+ int irq;
+ struct mutex mutex_lock; /* mutex */
+ spinlock_t lock; /* irq lock */
+ spinlock_t mbt_lock; /* mbt ind lock */
+ u8 mhab_mode[BPHY_MAX_RFOE_MHAB];
+ /* cnf10k specific information */
+ u32 hw_version;
+ u8 num_rfoe_mhab;
+ u8 num_rfoe_lmac;
+ u8 tot_rfoe_intf;
+ int gpint2_irq;
+};
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c
new file mode 100644
index 000000000000..a2d03352c89d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "otx2_bphy_debugfs.h"
+#include "otx2_bphy.h"
+
+#define OTX2_BPHY_DEBUGFS_MODE 0400
+
+struct otx2_bphy_debugfs_reader_info {
+ atomic_t refcnt;
+ size_t buffer_size;
+ void *priv;
+ otx2_bphy_debugfs_reader reader;
+ struct dentry *entry;
+	char buffer[];
+};
+
+static struct dentry *otx2_bphy_debugfs;
+
+static int otx2_bphy_debugfs_open(struct inode *inode, struct file *file);
+
+static int otx2_bphy_debugfs_release(struct inode *inode, struct file *file);
+
+static ssize_t otx2_bphy_debugfs_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *offset);
+
+static const struct file_operations otx2_bphy_debugfs_foper = {
+ .owner = THIS_MODULE,
+ .open = otx2_bphy_debugfs_open,
+ .release = otx2_bphy_debugfs_release,
+ .read = otx2_bphy_debugfs_read,
+};
+
+void __init otx2_bphy_debugfs_init(void)
+{
+ otx2_bphy_debugfs = debugfs_create_dir(DRV_NAME, NULL);
+	if (IS_ERR(otx2_bphy_debugfs))
+ pr_info("%s: debugfs is not enabled\n", DRV_NAME);
+}
+
+void *otx2_bphy_debugfs_add_file(const char *name,
+ size_t buffer_size,
+ void *priv,
+ otx2_bphy_debugfs_reader reader)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+ size_t total_size = 0;
+
+	if (IS_ERR_OR_NULL(otx2_bphy_debugfs)) {
+ pr_info("%s: debugfs not enabled, ignoring %s\n", DRV_NAME,
+ name);
+ goto out;
+ }
+
+ total_size = buffer_size +
+ offsetof(struct otx2_bphy_debugfs_reader_info,
+ buffer);
+
+ info = kzalloc(total_size, GFP_KERNEL);
+
+ if (!info)
+ goto out;
+
+ info->buffer_size = buffer_size;
+ info->priv = priv;
+ info->reader = reader;
+
+ atomic_set(&info->refcnt, 0);
+
+ info->entry = debugfs_create_file(name, OTX2_BPHY_DEBUGFS_MODE,
+ otx2_bphy_debugfs, info,
+ &otx2_bphy_debugfs_foper);
+
+	if (IS_ERR(info->entry)) {
+ pr_err("%s: debugfs failed to add file %s\n", DRV_NAME, name);
+ kfree(info);
+ info = NULL;
+ goto out;
+ }
+
+ pr_info("%s: debugfs created successfully for %s\n", DRV_NAME, name);
+
+out:
+ return info;
+}
+
+void otx2_bphy_debugfs_remove_file(void *entry)
+{
+ struct otx2_bphy_debugfs_reader_info *info = entry;
+
+ debugfs_remove(info->entry);
+
+ kfree(info);
+}
+
+void __exit otx2_bphy_debugfs_exit(void)
+{
+ debugfs_remove_recursive(otx2_bphy_debugfs);
+}
+
+static int otx2_bphy_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+
+ info = inode->i_private;
+
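+	/* allow a single reader at a time; refcnt flips 0 -> 1 on open
+	 * and back to 0 on release
+	 */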
+ if (!atomic_cmpxchg(&info->refcnt, 0, 1)) {
+ file->private_data = info;
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int otx2_bphy_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+
+ info = inode->i_private;
+
+ atomic_cmpxchg(&info->refcnt, 1, 0);
+
+ return 0;
+}
+
+static ssize_t otx2_bphy_debugfs_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ struct otx2_bphy_debugfs_reader_info *info = NULL;
+ ssize_t retval = 0;
+
+ info = file->private_data;
+
+ if (!(*offset))
+ info->reader(&info->buffer[0], info->buffer_size, info->priv);
+
+ if (*offset >= info->buffer_size)
+ goto out;
+
+ if (*offset + count > info->buffer_size)
+ count = info->buffer_size - *offset;
+
+ if (copy_to_user((void __user *)buffer, info->buffer + *offset,
+ count)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ *offset += count;
+ retval = count;
+
+out:
+ return retval;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h
new file mode 100644
index 000000000000..099290565bfa
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_debugfs.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2021 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_BPHY_DEBUGFS_H_
+#define _OTX2_BPHY_DEBUGFS_H_
+
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+
+typedef void (*otx2_bphy_debugfs_reader)(char *buffer, size_t buffer_size,
+ void *priv);
+
+void otx2_bphy_debugfs_init(void);
+
+void *otx2_bphy_debugfs_add_file(const char *name,
+ size_t buffer_size,
+ void *priv,
+ otx2_bphy_debugfs_reader reader);
+
+void otx2_bphy_debugfs_remove_file(void *entry);
+
+void otx2_bphy_debugfs_exit(void);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h
new file mode 100644
index 000000000000..48bfd2017ea1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_hw.h
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_BPHY_HW_H_
+#define _OTX2_BPHY_HW_H_
+
+#include <linux/types.h>
+
+/* PSM register offsets */
+#define PSM_QUEUE_CMD_LO(a) (0x0 + (a) * 0x10)
+#define PSM_QUEUE_CMD_HI(a) (0x8 + (a) * 0x10)
+#define PSM_QUEUE_CFG(a) (0x1000 + (a) * 0x10)
+#define PSM_QUEUE_PTR(a) (0x2000 + (a) * 0x10)
+#define PSM_QUEUE_SPACE(a) (0x3000 + (a) * 0x10)
+#define PSM_QUEUE_TIMEOUT_CFG(a) (0x4000 + (a) * 0x10)
+#define PSM_QUEUE_INFO(a) (0x5000 + (a) * 0x10)
+#define PSM_QUEUE_ENA_W1S(a) (0x10000 + (a) * 0x8)
+#define PSM_QUEUE_ENA_W1C(a) (0x10100 + (a) * 0x8)
+#define PSM_QUEUE_FULL_STS(a) (0x10200 + (a) * 0x8)
+#define PSM_QUEUE_BUSY_STS(a) (0x10300 + (a) * 0x8)
+
+/* BPHY PSM GPINT register offsets */
+#define PSM_INT_GP_SUM_W1C(a) (0x10E0000 + (a) * 0x100)
+#define PSM_INT_GP_SUM_W1S(a) (0x10E0040 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1C(a) (0x10E0080 + (a) * 0x100)
+#define PSM_INT_GP_ENA_W1S(a) (0x10E00C0 + (a) * 0x100)
+
+/* RFOE MHAB register offsets */
+#define RFOEX_RX_CTL(a) (0x0818ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_VLANX_CFG(a, b) (0x0870ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((b) << 3))
+#define RFOEX_RX_INDIRECT_INDEX_OFFSET(a) (0x13F8ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_FTX_CFG(a, b) (0x1400ULL | \
+ (((unsigned long)(a) << 36)) + \
+ ((b) << 3))
+#define RFOEX_RX_IND_MBT_CFG(a) (0x1420ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_MBT_ADDR(a) (0x1428ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_MBT_SEG_STATE(a) (0x1430ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_VLANX_FWD(a, b) (0x14D0ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((b) << 3))
+#define RFOEX_RX_IND_JDT_CFG0(a) (0x1440ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_JDT_CFG1(a) (0x1448ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_JDT_PTR(a) (0x1450ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_JDT_STATE(a) (0x1478ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_RX_IND_ECPRI_FT_CFG(a) (0x14C0ULL | \
+ ((unsigned long)(a) << 36))
+#define RFOEX_TX_PTP_TSTMP_W0(a, b) (0x7A0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_TX_PTP_TSTMP_W1(a, b) (0x7C0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_TX_PKT_STAT(a, b) (0x720ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_TX_OCTS_STAT(a, b) (0x740ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_RX_VLAN_DROP_STAT(a, b) (0x8A0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_RX_CGX_PKT_STAT(a, b) (0x15C0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+#define RFOEX_RX_CGX_OCTS_STAT(a, b) (0x15E0ULL | \
+ (((unsigned long)(a) << 36)) | \
+ ((b) << 3))
+
+/* BCN register offsets and definitions */
+#define BCN_CAPTURE_CFG 0x10400
+#define BCN_CAPTURE_N1_N2 0x10410
+#define BCN_CAPTURE_PTP 0x10430
+
+/* BCN_CAPTURE_CFG register definitions */
+#define CAPT_EN BIT(0)
+#define CAPT_TRIG_SW (3UL << 8)
+
+/* CPRI register offsets */
+#define CPRIX_RXD_GMII_UL_CBUF_CFG1(a) (0x1000ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_CBUF_CFG2(a) (0x1008ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_RD_DOORBELL(a) (0x1010ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_SW_RD_PTR(a) (0x1018ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_NXT_WR_PTR(a) (0x1020ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_RXD_GMII_UL_PKT_COUNT(a) (0x1028ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG1(a) (0x1100ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_CBUF_CFG2(a) (0x1108ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_WR_DOORBELL(a) (0x1110ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_SW_WR_PTR(a) (0x1118ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_TXD_GMII_DL_NXT_RD_PTR(a) (0x1120ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT(a) (0x280ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT_ENA_W1S(a) (0x288ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT_ENA_W1C(a) (0x290ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_UL_INT_W1S(a) (0x298ULL | \
+ ((unsigned long)(a) << 36))
+#define CPRIX_ETH_BAD_CRC_CNT(a, b) (0x400ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_ERR_CNT(a, b) (0x408ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_OSIZE_CNT(a, b) (0x410ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_USIZE_CNT(a, b) (0x418ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_FIFO_ORUN_CNT(a, b) (0x420ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GPKTS_CNT(a, b) (0x428ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_BOCT_CNT(a, b) (0x430ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_UL_GOCT_CNT(a, b) (0x438ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GOCTETS_CNT(a, b) (0x440ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+#define CPRIX_ETH_DL_GPKTS_CNT(a, b) (0x448ULL | \
+ ((unsigned long)(a) << 36) | \
+ ((unsigned long)(b) << 11))
+
+/* MHAB definitions */
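+/* note: the bitfield structs below model hardware words LSB-first and
+ * assume a little-endian CPU (arm64)
+ */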
+struct mhbw_jd_dma_cfg_word_0_s {
+ u64 dma_mode : 3;
+ u64 target_mem : 1;
+ u64 dswap : 3;
+ u64 cmd_type : 2;
+ u64 reserved1 : 7;
+ u64 chunk_size : 16;
+ u64 block_size : 16;
+ u64 thread_id : 6;
+ u64 reserved2 : 2;
+ u64 group_id : 4;
+ u64 reserved3 : 4;
+};
+
+struct mhbw_jd_dma_cfg_word_1_s {
+ u64 start_addr : 53;
+ u64 reserved1 : 11;
+};
+
+/* RFOE definitions */
+enum rfoe_rx_dir_ctl_pkt_type_e {
+ ROE = 0x0,
+ CHI = 0x1,
+ ALT = 0x2,
+ ECPRI = 0x3,
+ GENERIC = 0x8,
+};
+
+enum rfoe_rx_pswt_e {
+ ROE_TYPE = 0x0,
+ ECPRI_TYPE = 0x2,
+};
+
+enum rfoe_rx_pkt_err_e {
+ RE_NONE = 0x0,
+ RE_PARTIAL = 0x1,
+ RE_JABBER = 0x2,
+ RE_FCS = 0x7,
+ RE_FCS_RCV = 0x8,
+ RE_TERMINATE = 0x9,
+ RE_RX_CTL = 0xB,
+ RE_SKIP = 0xC,
+};
+
+enum rfoe_rx_pkt_logger_idx_e {
+ RX_PKT = 0x0,
+ TX_PKT = 0x1,
+};
+
+struct psm_cmd_addjob_s {
+ /* W0 */
+ u64 opcode : 6;
+ u64 rsrc_set : 2;
+ u64 qid : 8;
+ u64 waitcond : 8;
+ u64 jobtag : 16;
+ u64 reserved1 : 8;
+ u64 mabq : 1;
+ u64 reserved2 : 3;
+ u64 tmem : 1;
+ u64 reserved3 : 3;
+ u64 jobtype : 8;
+ /* W1 */
+ u64 jobptr : 53;
+ u64 reserved4 : 11;
+};
+
+struct rfoe_ecpri_psw0_s {
+ /* W0 */
+ u64 jd_ptr : 53;
+ u64 jd_ptr_tmem : 1;
+ u64 reserved1 : 2;
+ u64 src_id : 4;
+ u64 reserved2 : 2;
+ u64 pswt : 2;
+ /* W1 */
+ u64 msg_type : 8;
+ u64 ecpri_id : 16;
+ u64 flow_id : 8;
+ u64 reserved3 : 6;
+ u64 err_sts : 6;
+ u64 reserved4 : 2;
+ u64 seq_id : 16;
+};
+
+struct rfoe_ecpri_psw1_s {
+ /* W0 */
+ u64 ptp_timestamp;
+ /* W1 */
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved1 : 3;
+ u64 dec_error : 8;
+ u64 dec_num_sections : 8;
+ u64 dec_num_syminc : 8;
+ u64 reserved2 : 8;
+ u64 ptype : 4;
+ u64 reserved3 : 4;
+};
+
+struct rfoe_psw0_s {
+ /* W0 */
+ u64 pkt_err_sts : 4;
+ u64 dma_error : 1;
+ u64 jd_ptr : 53;
+ u64 jd_target_mem : 1;
+ u64 orderinfo_status : 1;
+ u64 lmac_id : 2;
+ u64 pswt : 2;
+ /* W1 */
+ u64 roe_subtype : 8;
+ u64 roe_flowid : 8;
+ u64 fd_symbol : 8;
+ u64 fd_antid : 8;
+ u64 rfoe_timestamp : 32;
+};
+
+struct rfoe_psw1_s {
+ /* W0 */
+ u64 ptp_timestamp;
+ /* W1 */
+ u64 ethertype : 16;
+ u64 eindex : 5;
+ u64 reserved1 : 3;
+ u64 dec_error : 8;
+ u64 dec_num_sections : 8;
+ u64 dec_num_syminc : 8;
+ u64 reserved2 : 8;
+ u64 ptype : 4;
+ u64 reserved3 : 4;
+};
+
+struct rfoex_tx_ptp_tstmp_w1 {
+ u64 lmac_id : 2;
+ u64 rfoe_id : 2;
+ u64 jobid : 16;
+ u64 drop : 1;
+ u64 tx_err : 1;
+ u64 reserved1 : 41;
+ u64 valid : 1;
+};
+
+struct rfoex_abx_slotx_configuration {
+ u64 pkt_mode : 2;
+ u64 da_sel : 3;
+ u64 sa_sel : 3;
+ u64 etype_sel : 3;
+ u64 flowid : 8;
+ u64 subtype : 8;
+ u64 lmacid : 2;
+ u64 sample_mode : 1;
+ u64 sample_widt : 5;
+ u64 sample_width_option : 1;
+ u64 sample_width_sat_bypass : 1;
+ u64 orderinfotype : 1;
+ u64 orderinfooffset : 5;
+ u64 antenna : 8;
+ u64 symbol : 8;
+ u64 sos : 1;
+ u64 eos : 1;
+ u64 orderinfo_insert : 1;
+ u64 custom_timestamp_insert : 1;
+ u64 rfoe_mode : 1;
+};
+
+struct rfoex_abx_slotx_configuration1 {
+ u64 rbmap_bytes : 8;
+ u64 pkt_len : 16;
+ u64 hdr_len : 8;
+ u64 presentation_time_offset : 29;
+ u64 reserved1 : 1;
+ u64 sof_mode : 2;
+};
+
+struct rfoex_abx_slotx_configuration2 {
+ u64 vlan_sel : 3;
+ u64 vlan_num : 2;
+ u64 ptp_mode : 1;
+ u64 ecpri_id_insert : 1;
+ u64 ecpri_seq_id_insert : 1;
+ u64 ecpri_rev : 8;
+ u64 ecpri_msgtype : 8;
+ u64 ecpri_id : 16;
+ u64 ecpri_seq_id : 16;
+ u64 reserved1 : 8;
+};
+
+struct rfoe_rx_ind_vlanx_fwd {
+ u64 fwd : 64;
+};
+
+struct mhab_job_desc_cfg {
+ struct rfoex_abx_slotx_configuration cfg;
+ struct rfoex_abx_slotx_configuration1 cfg1;
+ struct rfoex_abx_slotx_configuration2 cfg2;
+} __packed;
+
+/* CPRI definitions */
+struct cpri_pkt_dl_wqe_hdr {
+ u64 lane_id : 2;
+ u64 reserved1 : 2;
+ u64 mhab_id : 2;
+ u64 reserved2 : 2;
+ u64 pkt_length : 11;
+ u64 reserved3 : 45;
+ u64 w1;
+};
+
+struct cpri_pkt_ul_wqe_hdr {
+ u64 lane_id : 2;
+ u64 reserved1 : 2;
+ u64 mhab_id : 2;
+ u64 reserved2 : 2;
+ u64 pkt_length : 11;
+ u64 reserved3 : 5;
+ u64 fcserr : 1;
+ u64 rsp_ferr : 1;
+ u64 rsp_nferr : 1;
+ u64 reserved4 : 37;
+ u64 w1;
+};
+
+#endif /* _OTX2_BPHY_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c
new file mode 100644
index 000000000000..d0c222aeaa75
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_bphy_main.c
@@ -0,0 +1,887 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+
+#include "otx2_bphy.h"
+#include "otx2_rfoe.h"
+#include "otx2_cpri.h"
+#include "otx2_bphy_debugfs.h"
+#include "cnf10k_rfoe.h"
+
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_DESCRIPTION(DRV_STRING);
+MODULE_LICENSE("GPL v2");
+
+/* max ptp tx requests */
+int max_ptp_req = 16;
+module_param(max_ptp_req, int, 0644);
+MODULE_PARM_DESC(max_ptp_req, "Maximum PTP Tx requests");
+
+/* cdev */
+static struct class *otx2rfoe_class;
+
+/* reg base address */
+void __iomem *bphy_reg_base;
+void __iomem *psm_reg_base;
+void __iomem *rfoe_reg_base;
+void __iomem *bcn_reg_base;
+void __iomem *ptp_reg_base;
+void __iomem *cpri_reg_base;
+
+/* check if cpri block is available */
+#define cpri_available() ((cpri_reg_base) ? 1 : 0)
+
+/* GPINT(2) interrupt handler routine */
+static irqreturn_t cnf10k_gpint2_intr_handler(int irq, void *dev_id)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 status, intr_mask;
+ int rfoe_num;
+
+ cdev_priv = (struct otx2_bphy_cdev_priv *)dev_id;
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(2)) & 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(2));
+
+ pr_debug("gpint2 status = 0x%x\n", status);
+
+ /* rx intr processing */
+ for (rfoe_num = 0; rfoe_num < cdev_priv->num_rfoe_mhab; rfoe_num++) {
+ intr_mask = CNF10K_RFOE_RX_INTR_MASK(rfoe_num);
+ if (status & intr_mask)
+ cnf10k_rfoe_rx_napi_schedule(rfoe_num, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* GPINT(1) interrupt handler routine */
+static irqreturn_t otx2_bphy_intr_handler(int irq, void *dev_id)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int rfoe_num, cpri_num, i;
+ u32 intr_mask, status;
+
+ cdev_priv = (struct otx2_bphy_cdev_priv *)dev_id;
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(1)) & 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(1));
+
+ pr_debug("gpint status = 0x%x\n", status);
+
+ /* CNF10K intr processing */
+ if (CHIP_CNF10K(cdev_priv->hw_version)) {
+ cnf10k_bphy_intr_handler(cdev_priv, status);
+ return IRQ_HANDLED;
+ }
+
+ /* CNF95 intr processing */
+ for (rfoe_num = 0; rfoe_num < MAX_RFOE_INTF; rfoe_num++) {
+ intr_mask = RFOE_RX_INTR_MASK(rfoe_num);
+ if (status & intr_mask)
+ otx2_rfoe_rx_napi_schedule(rfoe_num, status);
+ }
+
+ for (cpri_num = 0; cpri_num < OTX2_BPHY_CPRI_MAX_MHAB; cpri_num++) {
+ intr_mask = CPRI_RX_INTR_MASK(cpri_num);
+ if (status & intr_mask) {
+ /* clear UL ETH interrupt */
+ writeq(0x1, cpri_reg_base + CPRIX_ETH_UL_INT(cpri_num));
+ otx2_cpri_rx_napi_schedule(cpri_num, status);
+ }
+ }
+
+ /* tx intr processing */
+ for (i = 0; i < RFOE_MAX_INTF; i++) {
+ drv_ctx = &rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ intr_mask = RFOE_TX_PTP_INTR_MASK(priv->rfoe_num,
+ priv->lmac_id);
+ if ((status & intr_mask) && priv->ptp_tx_skb)
+ schedule_work(&priv->ptp_tx_work);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
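+/* re-enable MSI-X by setting MSIXEN in the MSI-X capability control word;
+ * BPHY resets done by the ODP driver can clear it
+ */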
+static inline void msix_enable_ctrl(struct pci_dev *dev)
+{
+ u16 control;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+}
+
+static long otx2_bphy_cdev_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct otx2_bphy_cdev_priv *cdev = filp->private_data;
+ int ret;
+
+ if (!cdev) {
+ pr_warn("ioctl: device not opened\n");
+ return -EIO;
+ }
+
+ mutex_lock(&cdev->mutex_lock);
+
+ switch (cmd) {
+ case OTX2_RFOE_IOCTL_ODP_INTF_CFG:
+ {
+ struct bphy_netdev_comm_intf_cfg *intf_cfg;
+ struct pci_dev *bphy_pdev;
+ int idx;
+
+ if (cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg already done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+		intf_cfg = kcalloc(MAX_RFOE_INTF, sizeof(*intf_cfg),
+				   GFP_KERNEL);
+ if (!intf_cfg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(intf_cfg, (void __user *)arg,
+ (MAX_RFOE_INTF *
+ sizeof(struct bphy_netdev_comm_intf_cfg)))) {
+			dev_err(cdev->dev, "copy from user fault\n");
+			kfree(intf_cfg);
+			ret = -EFAULT;
+			goto out;
+ }
+
+ for (idx = 0; idx < OTX2_BPHY_MHAB_INST; idx++)
+ cdev->mhab_mode[idx] = intf_cfg[idx].if_type;
+
+ ret = otx2_rfoe_parse_and_init_intf(cdev, intf_cfg);
+ if (ret < 0) {
+			dev_err(cdev->dev, "odp <-> netdev parse error\n");
+			kfree(intf_cfg);
+			goto out;
+ }
+
+ if (cpri_available()) {
+ ret = otx2_cpri_parse_and_init_intf(cdev, intf_cfg);
+ if (ret < 0) {
+				dev_err(cdev->dev, "odp <-> netdev parse error\n");
+				kfree(intf_cfg);
+				goto out;
+ }
+ }
+
+ /* The MSIXEN bit is getting cleared when ODP BPHY driver
+ * resets BPHY. So enabling it back in IOCTL.
+ */
+ bphy_pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID, NULL);
+ if (!bphy_pdev) {
+ dev_err(cdev->dev, "Couldn't find BPHY PCI device %x\n",
+ OTX2_BPHY_PCI_DEVICE_ID);
+			kfree(intf_cfg);
+			ret = -ENODEV;
+			goto out;
+ }
+ msix_enable_ctrl(bphy_pdev);
+
+ /* Enable CPRI ETH UL INT */
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_MHAB; idx++) {
+ if (intf_cfg[idx].if_type == IF_TYPE_CPRI)
+ writeq(0x1, cpri_reg_base +
+ CPRIX_ETH_UL_INT_ENA_W1S(idx));
+ }
+
+ /* Enable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+
+ cdev->odp_intf_cfg = 1;
+
+ kfree(intf_cfg);
+
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_ODP_DEINIT:
+ {
+ u32 status;
+
+ /* Disable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(1)) &
+ 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(1));
+
+ otx2_bphy_rfoe_cleanup();
+ if (cpri_available())
+ otx2_bphy_cpri_cleanup();
+
+ cdev->odp_intf_cfg = 0;
+
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_RX_IND_CFG:
+ {
+ struct otx2_rfoe_rx_ind_cfg cfg;
+ unsigned long flags;
+
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct otx2_rfoe_rx_ind_cfg))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ spin_lock_irqsave(&cdev->mbt_lock, flags);
+ writeq(cfg.rx_ind_idx, (rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(cfg.rfoe_num)));
+ if (cfg.dir == OTX2_RFOE_RX_IND_READ)
+ cfg.regval = readq(rfoe_reg_base + cfg.regoff);
+ else
+ writeq(cfg.regval, rfoe_reg_base + cfg.regoff);
+ spin_unlock_irqrestore(&cdev->mbt_lock, flags);
+ if (copy_to_user((void __user *)(unsigned long)arg, &cfg,
+ sizeof(struct otx2_rfoe_rx_ind_cfg))) {
+ dev_err(cdev->dev, "copy to user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_PTP_OFFSET:
+ {
+ u64 bcn_n1, bcn_n2, bcn_n1_ns, bcn_n2_ps, ptp0_ns, regval;
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ struct ptp_clk_cfg clk_cfg;
+ struct net_device *netdev;
+ struct ptp_bcn_ref ref;
+ unsigned long expires;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&clk_cfg, (void __user *)arg,
+ sizeof(struct ptp_clk_cfg))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ if (!(clk_cfg.clk_freq_ghz && clk_cfg.clk_freq_div)) {
+ dev_err(cdev->dev, "Invalid ptp clk parameters\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid)
+ break;
+ }
+ if (idx >= RFOE_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ ptp_cfg = priv->ptp_cfg;
+ ptp_cfg->clk_cfg.clk_freq_ghz = clk_cfg.clk_freq_ghz;
+ ptp_cfg->clk_cfg.clk_freq_div = clk_cfg.clk_freq_div;
+ /* capture ptp and bcn timestamp using BCN_CAPTURE_CFG */
+ writeq((CAPT_EN | CAPT_TRIG_SW),
+ priv->bcn_reg_base + BCN_CAPTURE_CFG);
+ /* poll for capt_en to become 0 */
+ while ((readq(priv->bcn_reg_base + BCN_CAPTURE_CFG) & CAPT_EN))
+ cpu_relax();
+ ptp0_ns = readq(priv->bcn_reg_base + BCN_CAPTURE_PTP);
+ regval = readq(priv->bcn_reg_base + BCN_CAPTURE_N1_N2);
+ bcn_n1 = (regval >> 24) & 0xFFFFFFFFFF;
+ bcn_n2 = regval & 0xFFFFFF;
+ /* BCN N1 10 msec counter to nsec */
+ bcn_n1_ns = bcn_n1 * 10 * NSEC_PER_MSEC;
+ bcn_n1_ns += UTC_GPS_EPOCH_DIFF * NSEC_PER_SEC;
+ /* BCN N2 clock period 0.813802083 nsec to pico secs */
+ bcn_n2_ps = (bcn_n2 * 813802083UL) / 1000000;
+ ref.ptp0_ns = ptp0_ns;
+ ref.bcn0_n1_ns = bcn_n1_ns;
+ ref.bcn0_n2_ps = bcn_n2_ps;
+ memcpy(&ptp_cfg->old_ref, &ref, sizeof(struct ptp_bcn_ref));
+ memcpy(&ptp_cfg->new_ref, &ref, sizeof(struct ptp_bcn_ref));
+ ptp_cfg->use_ptp_alg = 1;
+ expires = jiffies + PTP_OFF_RESAMPLE_THRESH * HZ;
+ mod_timer(&ptp_cfg->ptp_timer, expires);
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_SEC_BCN_OFFSET:
+ {
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct bcn_sec_offset_cfg cfg;
+ struct net_device *netdev;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct bcn_sec_offset_cfg))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid &&
+ drv_ctx->rfoe_num == cfg.rfoe_num &&
+ drv_ctx->lmac_id == cfg.lmac_id)
+ break;
+ }
+ if (idx >= RFOE_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->sec_bcn_offset = cfg.sec_bcn_offset;
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_MODE_CPRI:
+ {
+ int id = 0;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (copy_from_user(&id, (void __user *)arg, sizeof(int))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (cdev->mhab_mode[id] == IF_TYPE_ETHERNET) {
+ otx2_rfoe_disable_intf(id);
+ otx2_cpri_enable_intf(id);
+ cdev->mhab_mode[id] = IF_TYPE_CPRI;
+ }
+
+ ret = 0;
+ goto out;
+ }
+ case OTX2_RFOE_IOCTL_LINK_EVENT:
+ {
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_link_event cfg;
+ struct net_device *netdev;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct otx2_rfoe_link_event))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid &&
+ drv_ctx->rfoe_num == cfg.rfoe_num &&
+ drv_ctx->lmac_id == cfg.lmac_id)
+ break;
+ }
+ if (idx >= RFOE_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ otx2_rfoe_set_link_state(netdev, cfg.link_state);
+ ret = 0;
+ goto out;
+ }
+ case OTX2_CPRI_IOCTL_LINK_EVENT:
+ {
+ struct otx2_cpri_drv_ctx *drv_ctx = NULL;
+ struct otx2_cpri_link_event cfg;
+ struct net_device *netdev;
+ int idx;
+
+ if (!cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg is not done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ if (copy_from_user(&cfg, (void __user *)arg,
+ sizeof(struct otx2_cpri_link_event))) {
+ dev_err(cdev->dev, "copy from user fault\n");
+ ret = -EFAULT;
+ goto out;
+ }
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ drv_ctx = &cpri_drv_ctx[idx];
+ if (drv_ctx->valid &&
+ drv_ctx->cpri_num == cfg.cpri_num &&
+ drv_ctx->lmac_id == cfg.lmac_id)
+ break;
+ }
+ if (idx >= OTX2_BPHY_CPRI_MAX_INTF) {
+ dev_err(cdev->dev, "drv ctx not found\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ netdev = drv_ctx->netdev;
+ otx2_cpri_set_link_state(netdev, cfg.link_state);
+ ret = 0;
+ goto out;
+ }
+ case OTX2_IOCTL_RFOE_10x_CFG:
+ {
+ struct cnf10k_rfoe_ndev_comm_intf_cfg *intf_cfg;
+ struct pci_dev *bphy_pdev;
+ int idx;
+
+ if (cdev->odp_intf_cfg) {
+ dev_info(cdev->dev, "odp interface cfg already done\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+		intf_cfg = kcalloc(BPHY_MAX_RFOE_MHAB, sizeof(*intf_cfg),
+				   GFP_KERNEL);
+ if (!intf_cfg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(intf_cfg, (void __user *)arg,
+ (BPHY_MAX_RFOE_MHAB *
+ sizeof(*intf_cfg)))) {
+			dev_err(cdev->dev, "copy from user fault\n");
+			kfree(intf_cfg);
+			ret = -EFAULT;
+			goto out;
+ }
+
+ for (idx = 0; idx < BPHY_MAX_RFOE_MHAB; idx++)
+ cdev->mhab_mode[idx] = IF_TYPE_ETHERNET;
+
+ ret = cnf10k_rfoe_parse_and_init_intf(cdev, intf_cfg);
+ if (ret < 0) {
+			dev_err(cdev->dev, "odp <-> netdev parse error\n");
+			kfree(intf_cfg);
+			goto out;
+ }
+
+ /* The MSIXEN bit is getting cleared when ODP BPHY driver
+ * resets BPHY. So enabling it back in IOCTL.
+ */
+ bphy_pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID, NULL);
+ if (!bphy_pdev) {
+ dev_err(cdev->dev, "Couldn't find BPHY PCI device %x\n",
+ OTX2_BPHY_PCI_DEVICE_ID);
+			kfree(intf_cfg);
+			ret = -ENODEV;
+			goto out;
+ }
+ msix_enable_ctrl(bphy_pdev);
+
+ /* Enable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ if (cdev->gpint2_irq)
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1S(2));
+
+ cdev->odp_intf_cfg = 1;
+
+ kfree(intf_cfg);
+
+ ret = 0;
+ goto out;
+ }
+ default:
+ {
+ dev_info(cdev->dev, "ioctl: no match\n");
+ ret = -EINVAL;
+ }
+ }
+
+out:
+ mutex_unlock(&cdev->mutex_lock);
+ return ret;
+}
+
+static int otx2_bphy_cdev_open(struct inode *inode, struct file *filp)
+{
+ struct otx2_bphy_cdev_priv *cdev;
+ int status = 0;
+
+ cdev = container_of(inode->i_cdev, struct otx2_bphy_cdev_priv, cdev);
+
+ mutex_lock(&cdev->mutex_lock);
+
+ if (cdev->is_open) {
+ dev_err(cdev->dev, "failed to open the device\n");
+ status = -EBUSY;
+ goto error;
+ }
+ cdev->is_open = 1;
+ filp->private_data = cdev;
+
+error:
+ mutex_unlock(&cdev->mutex_lock);
+
+ return status;
+}
+
+static int otx2_bphy_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct otx2_bphy_cdev_priv *cdev = filp->private_data;
+ u32 status;
+
+ mutex_lock(&cdev->mutex_lock);
+
+ if (!cdev->odp_intf_cfg)
+ goto cdev_release_exit;
+
+ /* Disable GPINT Rx and Tx interrupts */
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ if (cdev->gpint2_irq)
+ writeq(0xFFFFFFFF, bphy_reg_base + PSM_INT_GP_ENA_W1C(2));
+
+ /* clear interrupt status */
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(1)) & 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(1));
+ if (cdev->gpint2_irq) {
+ status = readq(bphy_reg_base + PSM_INT_GP_SUM_W1C(2)) &
+ 0xFFFFFFFF;
+ writeq(status, bphy_reg_base + PSM_INT_GP_SUM_W1C(2));
+ }
+
+ otx2_bphy_rfoe_cleanup();
+ if (cpri_available())
+ otx2_bphy_cpri_cleanup();
+
+ cdev->odp_intf_cfg = 0;
+
+cdev_release_exit:
+ cdev->is_open = 0;
+ mutex_unlock(&cdev->mutex_lock);
+
+ return 0;
+}
+
+static const struct file_operations otx2_bphy_cdev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = otx2_bphy_cdev_ioctl,
+ .open = otx2_bphy_cdev_open,
+ .release = otx2_bphy_cdev_release,
+};
+
+static int otx2_bphy_probe(struct platform_device *pdev)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct pci_dev *bphy_pdev;
+ struct resource *res;
+ int err = 0;
+ dev_t devt;
+
+ /* allocate priv structure */
+ cdev_priv = kzalloc(sizeof(*cdev_priv), GFP_KERNEL);
+ if (!cdev_priv) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* BPHY is a PCI device and the kernel resets the MSIXEN bit during
+ * enumeration. So enable it back for interrupts to be generated.
+ */
+ bphy_pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID, NULL);
+ if (!bphy_pdev) {
+ dev_err(&pdev->dev, "Couldn't find BPHY PCI device %x\n",
+ OTX2_BPHY_PCI_DEVICE_ID);
+ err = -ENODEV;
+ goto free_cdev_priv;
+ }
+ msix_enable_ctrl(bphy_pdev);
+
+ /* bphy registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get bphy resource\n");
+ err = -ENXIO;
+ goto free_cdev_priv;
+ }
+ bphy_reg_base = ioremap(res->start, resource_size(res));
+	if (!bphy_reg_base) {
+		dev_err(&pdev->dev, "failed to ioremap bphy registers\n");
+		err = -ENOMEM;
+		goto free_cdev_priv;
+ }
+ /* psm registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get psm resource\n");
+ err = -ENXIO;
+ goto out_unmap_bphy_reg;
+ }
+ psm_reg_base = ioremap(res->start, resource_size(res));
+	if (!psm_reg_base) {
+		dev_err(&pdev->dev, "failed to ioremap psm registers\n");
+		err = -ENOMEM;
+		goto out_unmap_bphy_reg;
+ }
+ /* rfoe registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get rfoe resource\n");
+ err = -ENXIO;
+ goto out_unmap_psm_reg;
+ }
+ rfoe_reg_base = ioremap(res->start, resource_size(res));
+	if (!rfoe_reg_base) {
+		dev_err(&pdev->dev, "failed to ioremap rfoe registers\n");
+		err = -ENOMEM;
+		goto out_unmap_psm_reg;
+ }
+ /* bcn register ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get bcn resource\n");
+ err = -ENXIO;
+ goto out_unmap_rfoe_reg;
+ }
+ bcn_reg_base = ioremap(res->start, resource_size(res));
+	if (!bcn_reg_base) {
+		dev_err(&pdev->dev, "failed to ioremap bcn registers\n");
+		err = -ENOMEM;
+		goto out_unmap_rfoe_reg;
+ }
+ /* ptp register ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 4);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get ptp resource\n");
+ err = -ENXIO;
+ goto out_unmap_bcn_reg;
+ }
+ ptp_reg_base = ioremap(res->start, resource_size(res));
+	if (!ptp_reg_base) {
+		dev_err(&pdev->dev, "failed to ioremap ptp registers\n");
+		err = -ENOMEM;
+		goto out_unmap_bcn_reg;
+ }
+ /* cpri registers ioremap */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 5);
+ if (!res) {
+ cpri_reg_base = NULL;
+ } else {
+ dev_info(&pdev->dev, "cpri mem resource found\n");
+ cpri_reg_base = ioremap(res->start, resource_size(res));
+		if (!cpri_reg_base) {
+			dev_err(&pdev->dev, "failed to ioremap cpri registers\n");
+			err = -ENOMEM;
+			goto out_unmap_ptp_reg;
+ }
+ }
+ /* get irq */
+ cdev_priv->irq = platform_get_irq(pdev, 0);
+ if (cdev_priv->irq <= 0) {
+ dev_err(&pdev->dev, "irq resource not found\n");
+		err = -ENXIO;
+		goto out_unmap_cpri_reg;
+ }
+ cdev_priv->gpint2_irq = platform_get_irq(pdev, 1);
+ if (cdev_priv->gpint2_irq < 0)
+ cdev_priv->gpint2_irq = 0;
+ else
+ dev_info(&pdev->dev, "gpint2 irq resource found\n");
+
+ /* create a character device */
+ err = alloc_chrdev_region(&devt, 0, 1, DEVICE_NAME);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to alloc chrdev device region\n");
+ goto out_unmap_cpri_reg;
+ }
+
+ otx2rfoe_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(otx2rfoe_class)) {
+ dev_err(&pdev->dev, "couldn't create class %s\n", DEVICE_NAME);
+ err = PTR_ERR(otx2rfoe_class);
+ goto out_unregister_chrdev_region;
+ }
+
+ cdev_priv->devt = devt;
+ cdev_priv->is_open = 0;
+ spin_lock_init(&cdev_priv->lock);
+ spin_lock_init(&cdev_priv->mbt_lock);
+ mutex_init(&cdev_priv->mutex_lock);
+
+ cdev_init(&cdev_priv->cdev, &otx2_bphy_cdev_fops);
+ cdev_priv->cdev.owner = THIS_MODULE;
+
+ err = cdev_add(&cdev_priv->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cdev_add() failed\n");
+ goto out_class_destroy;
+ }
+
+ cdev_priv->dev = device_create(otx2rfoe_class, &pdev->dev,
+ cdev_priv->cdev.dev, cdev_priv,
+ DEVICE_NAME);
+ if (IS_ERR(cdev_priv->dev)) {
+ dev_err(&pdev->dev, "device_create failed\n");
+ err = PTR_ERR(cdev_priv->dev);
+ goto out_cdev_del;
+ }
+
+ dev_info(&pdev->dev, "successfully registered char device, major=%d\n",
+ MAJOR(cdev_priv->cdev.dev));
+
+ err = request_irq(cdev_priv->irq, otx2_bphy_intr_handler, 0,
+ "otx2_bphy_int", cdev_priv);
+ if (err) {
+ dev_err(&pdev->dev, "can't assign irq %d\n", cdev_priv->irq);
+ goto out_device_destroy;
+ }
+
+ if (cdev_priv->gpint2_irq) {
+ err = request_irq(cdev_priv->gpint2_irq,
+ cnf10k_gpint2_intr_handler, 0,
+ "cn10k_bphy_int", cdev_priv);
+ if (err) {
+ dev_err(&pdev->dev, "can't assign irq %d\n",
+ cdev_priv->gpint2_irq);
+ goto free_irq;
+ }
+ }
+
+	platform_set_drvdata(pdev, cdev_priv);
+
+	err = 0;
+ goto out;
+
+free_irq:
+ free_irq(cdev_priv->irq, cdev_priv);
+out_device_destroy:
+ device_destroy(otx2rfoe_class, cdev_priv->cdev.dev);
+out_cdev_del:
+ cdev_del(&cdev_priv->cdev);
+out_class_destroy:
+ class_destroy(otx2rfoe_class);
+out_unregister_chrdev_region:
+ unregister_chrdev_region(devt, 1);
+out_unmap_cpri_reg:
+	if (cpri_reg_base)
+		iounmap(cpri_reg_base);
+out_unmap_ptp_reg:
+ iounmap(ptp_reg_base);
+out_unmap_bcn_reg:
+ iounmap(bcn_reg_base);
+out_unmap_rfoe_reg:
+ iounmap(rfoe_reg_base);
+out_unmap_psm_reg:
+ iounmap(psm_reg_base);
+out_unmap_bphy_reg:
+ iounmap(bphy_reg_base);
+free_cdev_priv:
+ kfree(cdev_priv);
+out:
+ return err;
+}
+
+static int otx2_bphy_remove(struct platform_device *pdev)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = dev_get_drvdata(&pdev->dev);
+
+	/* free irqs before unmapping the registers the handlers touch */
+	free_irq(cdev_priv->irq, cdev_priv);
+	if (cdev_priv->gpint2_irq)
+		free_irq(cdev_priv->gpint2_irq, cdev_priv);
+
+	/* unmap register regions */
+	if (cpri_reg_base)
+		iounmap(cpri_reg_base);
+	iounmap(ptp_reg_base);
+	iounmap(bcn_reg_base);
+	iounmap(rfoe_reg_base);
+	iounmap(psm_reg_base);
+	iounmap(bphy_reg_base);
+
+ /* char device cleanup */
+ device_destroy(otx2rfoe_class, cdev_priv->cdev.dev);
+ cdev_del(&cdev_priv->cdev);
+ class_destroy(otx2rfoe_class);
+ unregister_chrdev_region(cdev_priv->cdev.dev, 1);
+ kfree(cdev_priv);
+
+ return 0;
+}
+
+static const struct of_device_id otx2_bphy_of_match[] = {
+ { .compatible = "marvell,bphy-netdev" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, otx2_bphy_of_match);
+
+static struct platform_driver otx2_bphy_driver = {
+ .probe = otx2_bphy_probe,
+ .remove = otx2_bphy_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = otx2_bphy_of_match,
+ },
+};
+
+static int __init otx2_bphy_init(void)
+{
+ int ret;
+
+ pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
+
+ ret = platform_driver_register(&otx2_bphy_driver);
+ if (ret < 0)
+ return ret;
+
+ otx2_bphy_debugfs_init();
+
+ return 0;
+}
+
+static void __exit otx2_bphy_exit(void)
+{
+ otx2_bphy_debugfs_exit();
+
+ platform_driver_unregister(&otx2_bphy_driver);
+}
+
+module_init(otx2_bphy_init);
+module_exit(otx2_bphy_exit);
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c
new file mode 100644
index 000000000000..2fda900e22c9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_cpri.h"
+#include "otx2_bphy_debugfs.h"
+
+/* Theory of Operation
+ *
+ * I. General
+ *
+ * The BPHY CPRI netdev processes Ethernet packets received and
+ * transmitted by the CPRI MHAB. The ODP BPHY application shares the
+ * CPRI ETH UL/DL configuration information via ioctl. Rx
+ * notifications are sent to the netdev using PSM GPINT.
+ *
+ * II. Driver Operation
+ *
+ * This driver registers a character device and provides ioctls for
+ * the ODP application to initialize the netdev(s) that process CPRI
+ * Ethernet packets. Each netdev instance created by the driver
+ * corresponds to a unique CPRI MHAB id and Lane id. The ODP
+ * application shares information such as the CPRI ETH UL/DL circular
+ * buffers and the Rx GPINT number per CPRI MHAB. The CPRI ETH UL/DL
+ * circular buffers are shared per CPRI MHAB id. The Rx/Tx packet
+ * memory (DDR) is also allocated by the ODP application. The GPINT is
+ * set up using the CPRI_ETH_UL_INT_PSM_MSG_W0 and
+ * CPRI_ETH_UL_INT_PSM_MSG_W1 registers.
+ *
+ * III. Transmit
+ *
+ * The driver xmit routine selects the DL circular buffer ring based on
+ * the MHAB id; if a free entry is available, the driver writes the WQE
+ * header and packet data to the DL entry and updates DL_WR_DOORBELL
+ * with the number of packets written for the hardware to process.
+ *
+ * IV. Receive
+ *
+ * The driver receives a GPINT interrupt notification per MHAB and
+ * invokes the NAPI handler. The NAPI handler reads the UL circular
+ * buffer ring parameters UL_SW_RD_PTR and UL_NXT_WR_PTR to get the
+ * count of packets to be processed. For each packet received, the
+ * driver allocates an skb and copies the packet data into it. The
+ * driver then updates the UL_RD_DOORBELL register with the count of
+ * packets it processed.
+ *
+ * V. Miscellaneous
+ *
+ * Ethtool:
+ * The ethtool stats show packet stats for each netdev instance.
+ *
+ */
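+
+/* Editor's sketch (not part of the submitted driver): the UL ring
+ * occupancy logic described above is simply the modular distance
+ * between the HW write pointer and the SW read pointer. Assuming both
+ * indices are already converted to entry units, a minimal helper:
+ *
+ *	static inline int cpri_ring_count(int hw_ptr, int sw_ptr,
+ *					  int num_entries)
+ *	{
+ *		return (hw_ptr - sw_ptr + num_entries) % num_entries;
+ *	}
+ *
+ * otx2_cpri_process_rx_pkts() below implements this with an explicit
+ * wrap check; the Tx path computes free space analogously, treating
+ * equal pointers as a fully writable ring.
+ */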
+
+/* global driver ctx */
+struct otx2_cpri_drv_ctx cpri_drv_ctx[OTX2_BPHY_CPRI_MAX_INTF];
+
+/* debugfs */
+static void otx2_cpri_debugfs_reader(char *buffer, size_t count, void *priv);
+static const char *otx2_cpri_debugfs_get_formatter(void);
+static size_t otx2_cpri_debugfs_get_buffer_size(void);
+static void otx2_cpri_debugfs_create(struct otx2_cpri_drv_ctx *ctx);
+static void otx2_cpri_debugfs_remove(struct otx2_cpri_drv_ctx *ctx);
+
+static struct net_device *otx2_cpri_get_netdev(int mhab_id, int lmac_id)
+{
+ struct net_device *netdev = NULL;
+ int idx;
+
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ if (cpri_drv_ctx[idx].cpri_num == mhab_id &&
+ cpri_drv_ctx[idx].lmac_id == lmac_id &&
+ cpri_drv_ctx[idx].valid) {
+ netdev = cpri_drv_ctx[idx].netdev;
+ break;
+ }
+ }
+
+ return netdev;
+}
+
+void otx2_cpri_enable_intf(int cpri_num)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx;
+ struct otx2_cpri_ndev_priv *priv;
+ struct net_device *netdev;
+ int idx;
+
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ drv_ctx = &cpri_drv_ctx[idx];
+ if (drv_ctx->cpri_num == cpri_num && drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->if_type = IF_TYPE_CPRI;
+ }
+ }
+}
+
+void otx2_bphy_cpri_cleanup(void)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx = NULL;
+ struct otx2_cpri_ndev_priv *priv;
+ struct net_device *netdev;
+ int i;
+
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_INTF; i++) {
+ drv_ctx = &cpri_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_cpri_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ unregister_netdev(netdev);
+ netif_napi_del(&priv->napi);
+ --(priv->cpri_common->refcnt);
+ if (priv->cpri_common->refcnt == 0)
+ kfree(priv->cpri_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+
+ /* Disable CPRI ETH UL INT */
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_MHAB; i++)
+ writeq(0x1, cpri_reg_base +
+ CPRIX_ETH_UL_INT_ENA_W1C(i));
+}
+
+static int otx2_cpri_process_rx_pkts(struct otx2_cpri_ndev_priv *priv,
+ int budget)
+{
+ int count, head, processed_pkts = 0;
+ struct otx2_cpri_ndev_priv *priv2;
+ struct cpri_pkt_ul_wqe_hdr *wqe;
+ struct ul_cbuf_cfg *ul_cfg;
+ struct net_device *netdev;
+ u16 nxt_wr_ptr, len;
+ struct sk_buff *skb;
+ u8 *pkt_buf;
+
+ ul_cfg = &priv->cpri_common->ul_cfg;
+
+ nxt_wr_ptr = readq(priv->cpri_reg_base +
+ CPRIX_RXD_GMII_UL_NXT_WR_PTR(priv->cpri_num)) &
+ 0xFFFF;
+ /* get the HW head */
+ head = CIRC_BUF_ENTRY(nxt_wr_ptr);
+
+ if (ul_cfg->sw_rd_ptr > head) {
+ count = ul_cfg->num_entries - ul_cfg->sw_rd_ptr;
+ count += head;
+ } else {
+ count = head - ul_cfg->sw_rd_ptr;
+ }
+
+ while (likely((processed_pkts < budget) && (processed_pkts < count))) {
+ pkt_buf = (u8 *)ul_cfg->cbuf_virt_addr +
+ (OTX2_BPHY_CPRI_PKT_BUF_SIZE * ul_cfg->sw_rd_ptr);
+ wqe = (struct cpri_pkt_ul_wqe_hdr *)pkt_buf;
+ netdev = otx2_cpri_get_netdev(wqe->mhab_id, wqe->lane_id);
+ if (unlikely(!netdev)) {
+ net_err_ratelimited("CPRI Rx netdev not found, cpri%d lmac%d\n",
+ wqe->mhab_id, wqe->lane_id);
+ priv->stats.rx_dropped++;
+ priv->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+ priv2 = netdev_priv(netdev);
+ if (wqe->fcserr || wqe->rsp_ferr || wqe->rsp_nferr) {
+			net_err_ratelimited("%s: CPRI Rx err, cpri%d lmac%d sw_rd_ptr=%d\n",
+ netdev->name,
+ wqe->mhab_id, wqe->lane_id,
+ ul_cfg->sw_rd_ptr);
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ net_err_ratelimited("%s {cpri%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv2->cpri_num,
+ priv2->lmac_id);
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+
+ len = wqe->pkt_length;
+
+ if (unlikely(netif_msg_pktdata(priv2))) {
+ netdev_printk(KERN_DEBUG, priv2->netdev, "RX DATA:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16,
+ 4, pkt_buf,
+ len + OTX2_BPHY_CPRI_WQE_SIZE, true);
+ }
+
+ pkt_buf += OTX2_BPHY_CPRI_WQE_SIZE;
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+			net_err_ratelimited("%s: CPRI Rx: alloc skb failed\n",
+					    netdev->name);
+			priv2->stats.rx_dropped++;
+			priv2->last_rx_dropped_jiffies = jiffies;
+ goto update_processed_pkts;
+ }
+
+ memcpy(skb->data, pkt_buf, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ netif_receive_skb(skb);
+
+ priv2->last_rx_jiffies = jiffies;
+
+update_processed_pkts:
+ processed_pkts++;
+ ul_cfg->sw_rd_ptr++;
+ if (ul_cfg->sw_rd_ptr == ul_cfg->num_entries)
+ ul_cfg->sw_rd_ptr = 0;
+
+ }
+
+ if (processed_pkts)
+ writeq(processed_pkts, priv->cpri_reg_base +
+ CPRIX_RXD_GMII_UL_RD_DOORBELL(priv->cpri_num));
+
+ return processed_pkts;
+}
+
+/* napi poll routine */
+static int otx2_cpri_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct otx2_cpri_ndev_priv *priv;
+ u64 intr_en, regval;
+ int workdone = 0;
+
+ priv = container_of(napi, struct otx2_cpri_ndev_priv, napi);
+ cdev_priv = priv->cdev_priv;
+
+ /* pkt processing loop */
+ workdone += otx2_cpri_process_rx_pkts(priv, budget);
+
+ if (workdone < budget) {
+ napi_complete_done(napi, workdone);
+
+ /* Re enable the Rx interrupts */
+ intr_en = 1 << CPRI_RX_INTR_SHIFT(priv->cpri_num);
+ spin_lock(&cdev_priv->lock);
+ regval = readq(priv->bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ regval |= intr_en;
+ writeq(regval, priv->bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ spin_unlock(&cdev_priv->lock);
+ }
+
+ return workdone;
+}
+
+void otx2_cpri_rx_napi_schedule(int cpri_num, u32 status)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx;
+ struct otx2_cpri_ndev_priv *priv;
+ u64 regval;
+ int idx;
+
+ for (idx = 0; idx < OTX2_BPHY_CPRI_MAX_INTF; idx++) {
+ drv_ctx = &cpri_drv_ctx[idx];
+ /* ignore lmac, one UL interrupt/cpri */
+ if (!(drv_ctx->valid && drv_ctx->cpri_num == cpri_num))
+ continue;
+ /* check if i/f down, napi disabled */
+ priv = netdev_priv(drv_ctx->netdev);
+ if (test_bit(CPRI_INTF_DOWN, &priv->state))
+ continue;
+ /* clear intr enable bit, re-enable in napi handler */
+ regval = 1 << CPRI_RX_INTR_SHIFT(cpri_num);
+ writeq(regval, priv->bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ /* schedule napi */
+ napi_schedule(&priv->napi);
+ /* napi scheduled per MHAB, return */
+ return;
+ }
+}
+
+void otx2_cpri_update_stats(struct otx2_cpri_ndev_priv *priv)
+{
+ struct otx2_cpri_stats *dev_stats = &priv->stats;
+
+ dev_stats->rx_frames += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_GPKTS_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->rx_octets += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_GOCT_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->rx_err += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_ERR_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->bad_crc += readq(priv->cpri_reg_base +
+ CPRIX_ETH_BAD_CRC_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->oversize += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_OSIZE_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->undersize += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_USIZE_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->fifo_ovr += readq(priv->cpri_reg_base +
+ CPRIX_ETH_UL_FIFO_ORUN_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->tx_frames += readq(priv->cpri_reg_base +
+ CPRIX_ETH_DL_GPKTS_CNT(priv->cpri_num,
+ priv->lmac_id));
+ dev_stats->tx_octets += readq(priv->cpri_reg_base +
+ CPRIX_ETH_DL_GOCTETS_CNT(priv->cpri_num,
+ priv->lmac_id));
+}
+
+static void otx2_cpri_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_cpri_stats *dev_stats = &priv->stats;
+
+ otx2_cpri_update_stats(priv);
+
+ stats->rx_bytes = dev_stats->rx_octets;
+ stats->rx_packets = dev_stats->rx_frames;
+ stats->rx_dropped = dev_stats->rx_dropped;
+ stats->rx_errors = dev_stats->rx_err;
+ stats->rx_crc_errors = dev_stats->bad_crc;
+ stats->rx_fifo_errors = dev_stats->fifo_ovr;
+ stats->rx_length_errors = dev_stats->oversize + dev_stats->undersize;
+
+ stats->tx_bytes = dev_stats->tx_octets;
+ stats->tx_packets = dev_stats->tx_frames;
+}
+
+/* netdev ioctl */
+static int otx2_cpri_ioctl(struct net_device *netdev, struct ifreq *req,
+ int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+/* netdev xmit */
+static netdev_tx_t otx2_cpri_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+ struct cpri_pkt_dl_wqe_hdr *wqe;
+ struct dl_cbuf_cfg *dl_cfg;
+ unsigned long flags;
+ u8 *buf_ptr;
+ int tail, count;
+ u16 nxt_rd_ptr;
+
+ dl_cfg = &priv->cpri_common->dl_cfg;
+
+ spin_lock_irqsave(&dl_cfg->lock, flags);
+
+ if (unlikely(priv->if_type != IF_TYPE_CPRI)) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {cpri%d lmac%d} invalid intf mode, drop pkt\n",
+ netdev->name, priv->cpri_num, priv->lmac_id);
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+	/* Poll CPRI(0..2)_TXD_GMII_DL_WR_DOORBELL until it reads 0 */
+ while ((readq(priv->cpri_reg_base +
+ CPRIX_TXD_GMII_DL_WR_DOORBELL(priv->cpri_num)) & 0xFF))
+ cpu_relax();
+
+ nxt_rd_ptr = readq(priv->cpri_reg_base +
+ CPRIX_TXD_GMII_DL_NXT_RD_PTR(priv->cpri_num)) &
+ 0xFFFF;
+ /* get the HW tail */
+ tail = CIRC_BUF_ENTRY(nxt_rd_ptr);
+ if (dl_cfg->sw_wr_ptr >= tail)
+ count = dl_cfg->num_entries - dl_cfg->sw_wr_ptr + tail;
+ else
+ count = tail - dl_cfg->sw_wr_ptr;
+
+ if (count == 0) {
+ spin_unlock_irqrestore(&dl_cfg->lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "Tx: skb %pS len=%d\n",
+ skb, skb->len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ skb->data, skb->len, true);
+ }
+
+ buf_ptr = (u8 *)dl_cfg->cbuf_virt_addr +
+ (OTX2_BPHY_CPRI_PKT_BUF_SIZE * dl_cfg->sw_wr_ptr);
+ wqe = (struct cpri_pkt_dl_wqe_hdr *)buf_ptr;
+ wqe->mhab_id = priv->cpri_num;
+ wqe->lane_id = priv->lmac_id;
+ buf_ptr += OTX2_BPHY_CPRI_WQE_SIZE;
+	/* zero pad short pkts to 64 bytes since HW does not pad them */
+ if (skb->len < 64)
+ memset(buf_ptr, 0, 64);
+ memcpy(buf_ptr, skb->data, skb->len);
+ wqe->pkt_length = skb->len > 64 ? skb->len : 64;
+
+ /* ensure the memory is updated before ringing doorbell */
+ dma_wmb();
+ writeq(1, priv->cpri_reg_base +
+ CPRIX_TXD_GMII_DL_WR_DOORBELL(priv->cpri_num));
+
+ /* increment queue index */
+ dl_cfg->sw_wr_ptr++;
+ if (dl_cfg->sw_wr_ptr == dl_cfg->num_entries)
+ dl_cfg->sw_wr_ptr = 0;
+
+ priv->last_tx_jiffies = jiffies;
+exit:
+ dev_kfree_skb_any(skb);
+ spin_unlock_irqrestore(&dl_cfg->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* netdev open */
+static int otx2_cpri_eth_open(struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ napi_enable(&priv->napi);
+
+ spin_lock(&priv->lock);
+ clear_bit(CPRI_INTF_DOWN, &priv->state);
+ if (priv->link_state == LINK_STATE_UP) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* netdev close */
+static int otx2_cpri_eth_stop(struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+ set_bit(CPRI_INTF_DOWN, &priv->state);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ spin_unlock(&priv->lock);
+
+ napi_disable(&priv->napi);
+
+ return 0;
+}
+
+static const struct net_device_ops otx2_cpri_netdev_ops = {
+ .ndo_open = otx2_cpri_eth_open,
+ .ndo_stop = otx2_cpri_eth_stop,
+ .ndo_start_xmit = otx2_cpri_eth_start_xmit,
+ .ndo_do_ioctl = otx2_cpri_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = otx2_cpri_get_stats64,
+};
+
+static void otx2_cpri_dump_ul_cbuf(struct otx2_cpri_ndev_priv *priv)
+{
+ struct ul_cbuf_cfg *ul_cfg = &priv->cpri_common->ul_cfg;
+
+ pr_debug("%s: num_entries=%d iova=0x%llx\n",
+ __func__, ul_cfg->num_entries, ul_cfg->cbuf_iova_addr);
+}
+
+static void otx2_cpri_dump_dl_cbuf(struct otx2_cpri_ndev_priv *priv)
+{
+ struct dl_cbuf_cfg *dl_cfg = &priv->cpri_common->dl_cfg;
+
+ pr_debug("%s: num_entries=%d iova=0x%llx\n",
+ __func__, dl_cfg->num_entries, dl_cfg->cbuf_iova_addr);
+}
+
+static void otx2_cpri_fill_dl_ul_cfg(struct otx2_cpri_ndev_priv *priv,
+ struct bphy_netdev_cpri_if *cpri_cfg)
+{
+ struct dl_cbuf_cfg *dl_cfg;
+ struct ul_cbuf_cfg *ul_cfg;
+ u64 iova;
+
+ dl_cfg = &priv->cpri_common->dl_cfg;
+ dl_cfg->num_entries = cpri_cfg->num_dl_buf;
+ iova = cpri_cfg->dl_buf_iova_addr;
+ dl_cfg->cbuf_iova_addr = iova;
+ dl_cfg->cbuf_virt_addr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ dl_cfg->sw_wr_ptr = 0;
+ spin_lock_init(&dl_cfg->lock);
+ otx2_cpri_dump_dl_cbuf(priv);
+
+ ul_cfg = &priv->cpri_common->ul_cfg;
+ ul_cfg->num_entries = cpri_cfg->num_ul_buf;
+ iova = cpri_cfg->ul_buf_iova_addr;
+ ul_cfg->cbuf_iova_addr = iova;
+ ul_cfg->cbuf_virt_addr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ ul_cfg->sw_rd_ptr = 0;
+ spin_lock_init(&ul_cfg->lock);
+ otx2_cpri_dump_ul_cbuf(priv);
+}
+
+int otx2_cpri_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg)
+{
+ struct otx2_cpri_drv_ctx *drv_ctx = NULL;
+ struct otx2_cpri_ndev_priv *priv, *priv2;
+ struct bphy_netdev_cpri_if *cpri_cfg;
+ int i, intf_idx = 0, lmac, ret;
+ struct net_device *netdev;
+
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_MHAB; i++) {
+ priv2 = NULL;
+ cpri_cfg = &cfg[i].cpri_if_cfg;
+ for (lmac = 0; lmac < OTX2_BPHY_CPRI_MAX_LMAC; lmac++) {
+ if (!(cpri_cfg->active_lane_mask & (1 << lmac)))
+ continue;
+ netdev =
+ alloc_etherdev(sizeof(struct otx2_cpri_ndev_priv));
+ if (!netdev) {
+ dev_err(cdev->dev,
+ "error allocating net device\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ memset(priv, 0, sizeof(*priv));
+ if (!priv2) {
+ priv->cpri_common =
+ kzalloc(sizeof(struct cpri_common_cfg),
+ GFP_KERNEL);
+ if (!priv->cpri_common) {
+ dev_err(cdev->dev, "kzalloc failed\n");
+ free_netdev(netdev);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv->cpri_common->refcnt = 1;
+ }
+ spin_lock_init(&priv->lock);
+ priv->netdev = netdev;
+ priv->cdev_priv = cdev;
+ priv->msg_enable = netif_msg_init(-1, 0);
+ spin_lock_init(&priv->stats.lock);
+ priv->cpri_num = cpri_cfg->id;
+ priv->lmac_id = lmac;
+ priv->if_type = cfg[i].if_type;
+ memcpy(priv->mac_addr, &cpri_cfg->eth_addr[lmac],
+ ETH_ALEN);
+ if (is_valid_ether_addr(priv->mac_addr))
+ ether_addr_copy(netdev->dev_addr,
+ priv->mac_addr);
+ else
+ random_ether_addr(netdev->dev_addr);
+ priv->pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID,
+ NULL);
+ priv->iommu_domain =
+ iommu_get_domain_for_dev(&priv->pdev->dev);
+ priv->bphy_reg_base = bphy_reg_base;
+ priv->cpri_reg_base = cpri_reg_base;
+
+ if (!priv2) {
+ otx2_cpri_fill_dl_ul_cfg(priv, cpri_cfg);
+ } else {
+ /* share cpri_common data */
+ priv->cpri_common = priv2->cpri_common;
+ ++(priv->cpri_common->refcnt);
+ }
+
+ netif_napi_add(priv->netdev, &priv->napi,
+ otx2_cpri_napi_poll, NAPI_POLL_WEIGHT);
+
+ /* keep last (cpri + lmac) priv structure */
+ if (!priv2)
+ priv2 = priv;
+
+ intf_idx = (i * 4) + lmac;
+ snprintf(netdev->name, sizeof(netdev->name),
+ "cpri%d", intf_idx);
+ netdev->netdev_ops = &otx2_cpri_netdev_ops;
+ otx2_cpri_set_ethtool_ops(netdev);
+ netdev->mtu = 1500U;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 1500U;
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(cdev->dev,
+ "failed to register net device %s\n",
+ netdev->name);
+ free_netdev(netdev);
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ dev_dbg(cdev->dev, "net device %s registered\n",
+ netdev->name);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ set_bit(CPRI_INTF_DOWN, &priv->state);
+ priv->link_state = LINK_STATE_UP;
+
+ /* initialize global ctx */
+ drv_ctx = &cpri_drv_ctx[intf_idx];
+ drv_ctx->cpri_num = priv->cpri_num;
+ drv_ctx->lmac_id = priv->lmac_id;
+ drv_ctx->valid = 1;
+ drv_ctx->netdev = netdev;
+
+ /* create debugfs entry */
+ otx2_cpri_debugfs_create(drv_ctx);
+ }
+ }
+
+ return 0;
+
+err_exit:
+ for (i = 0; i < OTX2_BPHY_CPRI_MAX_INTF; i++) {
+ drv_ctx = &cpri_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_cpri_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ unregister_netdev(netdev);
+ netif_napi_del(&priv->napi);
+ --(priv->cpri_common->refcnt);
+ if (priv->cpri_common->refcnt == 0)
+ kfree(priv->cpri_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+ return ret;
+}
+
+static void otx2_cpri_debugfs_reader(char *buffer, size_t count, void *priv)
+{
+ struct otx2_cpri_drv_ctx *ctx;
+ struct otx2_cpri_ndev_priv *netdev;
+ u8 queue_stopped, state_up;
+ const char *formatter;
+
+ ctx = priv;
+ netdev = netdev_priv(ctx->netdev);
+ queue_stopped = netif_queue_stopped(ctx->netdev);
+ state_up = netdev->link_state;
+ formatter = otx2_cpri_debugfs_get_formatter();
+
+ snprintf(buffer, count, formatter,
+ queue_stopped,
+ state_up,
+ netdev->last_tx_jiffies,
+ netdev->last_tx_dropped_jiffies,
+ netdev->last_rx_jiffies,
+ netdev->last_rx_dropped_jiffies,
+ jiffies);
+}
+
+static const char *otx2_cpri_debugfs_get_formatter(void)
+{
+ static const char *buffer_format = "queue-stopped: %u\n"
+ "state-up: %u\n"
+ "last-tx-jiffies: %lu\n"
+ "last-tx-dropped-jiffies: %lu\n"
+ "last-rx-jiffies: %lu\n"
+ "last-rx-dropped-jiffies: %lu\n"
+ "current-jiffies: %lu\n";
+
+ return buffer_format;
+}
+
+static size_t otx2_cpri_debugfs_get_buffer_size(void)
+{
+ static size_t buffer_size;
+
+ if (!buffer_size) {
+ const char *formatter = otx2_cpri_debugfs_get_formatter();
+ u8 max_boolean = 1;
+ unsigned long max_jiffies = (unsigned long)-1;
+
+ buffer_size = snprintf(NULL, 0, formatter,
+ max_boolean,
+ max_boolean,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies);
+ ++buffer_size;
+ }
+
+ return buffer_size;
+}
+
+static void otx2_cpri_debugfs_create(struct otx2_cpri_drv_ctx *ctx)
+{
+ size_t buffer_size = otx2_cpri_debugfs_get_buffer_size();
+
+ ctx->debugfs = otx2_bphy_debugfs_add_file(ctx->netdev->name,
+ buffer_size, ctx,
+ otx2_cpri_debugfs_reader);
+}
+
+static void otx2_cpri_debugfs_remove(struct otx2_cpri_drv_ctx *ctx)
+{
+ if (ctx->debugfs)
+ otx2_bphy_debugfs_remove_file(ctx->debugfs);
+}
+
+void otx2_cpri_set_link_state(struct net_device *netdev, u8 state)
+{
+ struct otx2_cpri_ndev_priv *priv;
+
+ priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+ if (priv->link_state != state) {
+ priv->link_state = state;
+ if (state == LINK_STATE_DOWN) {
+ netdev_info(netdev, "Link DOWN\n");
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else {
+ netdev_info(netdev, "Link UP\n");
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ }
+ }
+ spin_unlock(&priv->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h
new file mode 100644
index 000000000000..e8b88384cd3d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_CPRI_H_
+#define _OTX2_CPRI_H_
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+
+#include "otx2_bphy.h"
+#include "otx2_bphy_hw.h"
+#include "rfoe_bphy_netdev_comm_if.h"
+
+#define OTX2_BPHY_CPRI_MAX_MHAB 3
+#define OTX2_BPHY_CPRI_MAX_LMAC 4
+#define OTX2_BPHY_CPRI_MAX_INTF 10
+
+#define OTX2_BPHY_CPRI_PKT_BUF_SIZE 1664 /* wqe 128 bytes + 1536 bytes */
+#define OTX2_BPHY_CPRI_WQE_SIZE 128
+
+#define CPRI_RX_INTR_MASK(a) ((1UL << (a)) << 13)
+#define CPRI_RX_INTR_SHIFT(a) (13 + (a))
+
+/* HW ring pointers count in 16-byte units; each entry spans 0x68 units
+ * (0x68 * 16 = 1664 bytes = OTX2_BPHY_CPRI_PKT_BUF_SIZE)
+ */
+#define CIRC_BUF_ENTRY(a) ((a) / 0x68)
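+/* e.g. CIRC_BUF_ENTRY(0xD0) == 2: a raw pointer of 0xD0 16-byte units
+ * (2 * 0x68) maps to ring entry 2 (editor's illustration)
+ */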
+
+enum cpri_state {
+ CPRI_INTF_DOWN = 1,
+};
+
+/* CPRI support */
+struct otx2_cpri_drv_ctx {
+ u8 cpri_num;
+ u8 lmac_id;
+ int valid;
+ void *debugfs;
+ struct net_device *netdev;
+};
+
+extern struct otx2_cpri_drv_ctx cpri_drv_ctx[OTX2_BPHY_CPRI_MAX_INTF];
+
+struct otx2_cpri_stats {
+ /* Rx */
+ u64 rx_frames;
+ u64 rx_octets;
+ u64 rx_err;
+ u64 bad_crc;
+ u64 oversize;
+ u64 undersize;
+ u64 fifo_ovr;
+ u64 rx_dropped;
+ /* Tx */
+ u64 tx_frames;
+ u64 tx_octets;
+ u64 tx_dropped;
+ /* stats lock */
+ spinlock_t lock;
+};
+
+/* cpri dl cbuf cfg */
+struct dl_cbuf_cfg {
+ int num_entries;
+ u64 cbuf_iova_addr;
+ void __iomem *cbuf_virt_addr;
+ /* sw */
+ u64 sw_wr_ptr;
+ /* dl lock */
+ spinlock_t lock;
+};
+
+/* cpri ul cbuf cfg */
+struct ul_cbuf_cfg {
+ int num_entries;
+ u64 cbuf_iova_addr;
+ void __iomem *cbuf_virt_addr;
+ /* sw */
+ int sw_rd_ptr;
+ /* ul lock */
+ spinlock_t lock;
+};
+
+struct cpri_common_cfg {
+ struct dl_cbuf_cfg dl_cfg;
+ struct ul_cbuf_cfg ul_cfg;
+ u8 refcnt;
+};
+
+struct otx2_cpri_link_event {
+ u8 cpri_num;
+ u8 lmac_id;
+ u8 link_state;
+};
+
+/* cpri netdev priv */
+struct otx2_cpri_ndev_priv {
+ u8 cpri_num;
+ u8 lmac_id;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 msg_enable;
+ void __iomem *bphy_reg_base;
+ void __iomem *cpri_reg_base;
+ struct iommu_domain *iommu_domain;
+ struct cpri_common_cfg *cpri_common;
+ struct napi_struct napi;
+ unsigned long state;
+ struct otx2_cpri_stats stats;
+ u8 mac_addr[ETH_ALEN];
+ /* priv lock */
+ spinlock_t lock;
+ int if_type;
+ u8 link_state;
+ unsigned long last_tx_jiffies;
+ unsigned long last_rx_jiffies;
+ unsigned long last_tx_dropped_jiffies;
+ unsigned long last_rx_dropped_jiffies;
+};
+
+int otx2_cpri_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg);
+
+void otx2_cpri_rx_napi_schedule(int cpri_num, u32 status);
+
+void otx2_cpri_update_stats(struct otx2_cpri_ndev_priv *priv);
+
+void otx2_bphy_cpri_cleanup(void);
+
+void otx2_cpri_enable_intf(int cpri_num);
+
+/* ethtool */
+void otx2_cpri_set_ethtool_ops(struct net_device *netdev);
+
+/* update carrier state */
+void otx2_cpri_set_link_state(struct net_device *netdev, u8 state);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c
new file mode 100644
index 000000000000..ae70cfa36043
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_cpri_ethtool.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+
+#include "otx2_cpri.h"
+
+static const char ethtool_stat_strings[][ETH_GSTRING_LEN] = {
+ "rx_frames",
+ "rx_octets",
+ "rx_err",
+ "bad_crc",
+ "oversize",
+ "undersize",
+ "rx_fifo_overrun",
+ "rx_dropped",
+ "tx_frames",
+ "tx_octets",
+ "tx_dropped",
+};
+
+static void otx2_cpri_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ethtool_stat_strings,
+ sizeof(ethtool_stat_strings));
+ break;
+ }
+}
+
+static int otx2_cpri_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stat_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void otx2_cpri_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ otx2_cpri_update_stats(priv);
+
+ spin_lock(&priv->stats.lock);
+ memcpy(data, &priv->stats,
+ ARRAY_SIZE(ethtool_stat_strings) * sizeof(u64));
+ spin_unlock(&priv->stats.lock);
+}
+
+static void otx2_cpri_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *p)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ snprintf(p->driver, sizeof(p->driver), "otx2_cpri {cpri%d lmac%d}",
+ priv->cpri_num, priv->lmac_id);
+ strlcpy(p->bus_info, "platform", sizeof(p->bus_info));
+}
+
+static u32 otx2_cpri_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void otx2_cpri_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct otx2_cpri_ndev_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops otx2_cpri_ethtool_ops = {
+ .get_drvinfo = otx2_cpri_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = otx2_cpri_get_strings,
+ .get_sset_count = otx2_cpri_get_sset_count,
+ .get_ethtool_stats = otx2_cpri_get_ethtool_stats,
+ .get_msglevel = otx2_cpri_get_msglevel,
+ .set_msglevel = otx2_cpri_set_msglevel,
+};
+
+void otx2_cpri_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_cpri_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c
new file mode 100644
index 000000000000..0bf0d1a50024
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_rfoe.h"
+#include "otx2_bphy_hw.h"
+#include "otx2_bphy_debugfs.h"
+
+/* Theory of Operation
+ *
+ * I. General
+ *
+ * The BPHY RFOE netdev driver handles packets such as eCPRI control,
+ * PTP and other Ethernet packets received from/sent to the BPHY RFOE
+ * MHAB in the Linux kernel. All other packets, such as ROE and
+ * non-control eCPRI, are handled by the ODP application in user space.
+ * The ODP application initializes the JDT/MBT/PSM queues to process
+ * the Rx/Tx packets in the netdev and shares the information through
+ * driver ioctls. Rx/Tx notifications are sent to the netdev using one
+ * of the PSM GPINTs.
+ *
+ * II. Driver Operation
+ *
+ * This driver registers a character device and provides ioctls for
+ * the ODP application to initialize the netdev(s) that process eCPRI
+ * and other Ethernet packets. Each netdev corresponds to a unique RFOE
+ * index and LMAC id. The ODP application initializes the flow tables,
+ * Rx JDT and Rx MBT to process Rx packets. There is a separate Flow
+ * Table, JDT and MBT for processing eCPRI, PTP and other Ethernet
+ * packets. The Rx packet memory (DDR) is also allocated by ODP and
+ * configured in the MBT. All LMACs in a single RFOE MHAB share the Rx
+ * configuration tuple {Flow Id, JDT, MBT}. The Rx event is notified
+ * to the netdev via PSM GPINT1. Each PSM GPINT provides 32 bits that
+ * can be used as interrupt status bits. For each Rx packet type per
+ * RFOE, one PSM GPINT bit is reserved to notify the Rx event for that
+ * packet type. The ODP application configures PSM_CMD_GPINT_S in the
+ * JCE section of the JD for each packet. There are 32 JDT and MBT
+ * entries in total per packet type; these entries are reused when the
+ * JDT/MBT circular entries wrap around.
+ *
+ * On the Tx side, the ODP application creates preconfigured job
+ * commands for the driver's use. Each job command contains information
+ * such as the PSM cmd (ADDJOB) info and the JD iova address. The packet
+ * memory is also allocated by the ODP app. The JD rd dma cfg section
+ * contains the memory address for packet DMA. Two PSM queues per RFOE
+ * are reserved for Tx purposes: one queue handles PTP traffic and the
+ * other handles eCPRI and regular Ethernet traffic. The PTP job
+ * descriptors (JDs) are configured to generate a Tx completion event
+ * through the GPINT mechanism; for each LMAC/RFOE there is one GPINT
+ * bit reserved for this purpose. For eCPRI and other Ethernet traffic
+ * there is no GPINT event to signal Tx completion to the driver. The
+ * driver Tx interrupt handler reads the RFOE(0..2)_TX_PTP_TSTMP_W0 and
+ * RFOE(0..2)_TX_PTP_TSTMP_W1 registers for the PTP timestamp and fills
+ * the timestamp in the PTP skb. There are 64 preconfigured non-PTP job
+ * commands shared by all LMACs in an RFOE, and 4 PTP job commands per
+ * LMAC in the RFOE. The PTP job cmds are not shared because the
+ * timestamp registers are unique per LMAC.
+ *
+ * III. Transmit
+ *
+ * The driver xmit routine selects the PSM queue based on whether the
+ * packet needs to be timestamped in HW, by checking the SKBTX_HW_TSTAMP
+ * flag. For a PTP packet, if another PTP packet is already in progress,
+ * the driver adds the skb to a list and returns success. This list is
+ * processed after the previous PTP packet is sent and its timestamp is
+ * successfully copied to the skb in the Tx interrupt handler.
+ *
+ * Once the PSM queue is selected, the driver checks whether there is
+ * enough space in that PSM queue by reading the
+ * PSM_QUEUE(0..127)_SPACE register. If the PSM queue is not full, the
+ * driver gets the job entry associated with that queue, updates the
+ * length in JD DMA cfg word0 and copies the packet data to the address
+ * in JD DMA cfg word1. For eCPRI/non-PTP packets, the driver also
+ * updates JD CFG RFOE_MODE.
+ *
+ * IV. Receive
+ *
+ * The driver receives an interrupt per pkt_type and invokes the NAPI
+ * handler. The NAPI handler reads the corresponding MBT cfg (nxt_buf)
+ * to see the number of packets to be processed. For each valid MBT
+ * entry, the packet handler gets the corresponding MBT entry buffer
+ * address and, based on the packet type, reads PSW0/ECPRI_PSW0 to get
+ * the JD iova address corresponding to that MBT entry. The DMA block
+ * size is read from the JDT entry to know the number of bytes DMA'd,
+ * including the PSW bytes. The MBT entry buffer address is advanced by
+ * pkt_offset bytes and the length is decremented by pkt_offset to get
+ * the actual packet data and length. For each packet, an skb is
+ * allocated and the packet data is copied to skb->data. For PTP
+ * packets, PSW1 contains the PTP timestamp value, which is copied to
+ * the skb.
+ *
+ * V. Miscellaneous
+ *
+ * Ethtool:
+ * The ethtool stats show packet stats for each packet type.
+ *
+ */
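+
+/* Editor's note (inferred from the driver code below, not from a
+ * hardware manual): the JD DMA cfg word0 "block size" is expressed in
+ * 4-byte words, rounded up to a 16-byte boundary. On Tx:
+ *
+ *	jd_dma_cfg_word_0->block_size = ((skb->len + 15) >> 4) * 4;
+ *
+ * e.g. a 100-byte skb gives 28 words (112 bytes). On Rx the DMA'd
+ * byte count is recovered as len = block_size << 2.
+ */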
+
+/* global driver ctx */
+struct otx2_rfoe_drv_ctx rfoe_drv_ctx[RFOE_MAX_INTF];
+
+/* debugfs */
+static void otx2_rfoe_debugfs_reader(char *buffer, size_t count, void *priv);
+static const char *otx2_rfoe_debugfs_get_formatter(void);
+static size_t otx2_rfoe_debugfs_get_buffer_size(void);
+static void otx2_rfoe_debugfs_create(struct otx2_rfoe_drv_ctx *ctx);
+static void otx2_rfoe_debugfs_remove(struct otx2_rfoe_drv_ctx *ctx);
+
+void otx2_rfoe_disable_intf(int rfoe_num)
+{
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ int idx;
+
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->rfoe_num == rfoe_num && drv_ctx->valid) {
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ priv->if_type = IF_TYPE_NONE;
+ }
+ }
+}
+
+void otx2_bphy_rfoe_cleanup(void)
+{
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct net_device *netdev;
+ struct rx_ft_cfg *ft_cfg;
+ int i, idx;
+
+ for (i = 0; i < RFOE_MAX_INTF; i++) {
+ drv_ctx = &rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_rfoe_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ --(priv->ptp_cfg->refcnt);
+ if (!priv->ptp_cfg->refcnt) {
+ del_timer_sync(&priv->ptp_cfg->ptp_timer);
+ kfree(priv->ptp_cfg);
+ }
+ otx2_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+}
+
+void otx2_rfoe_calc_ptp_ts(struct otx2_rfoe_ndev_priv *priv, u64 *ts)
+{
+ u64 ptp_diff_nsec, ptp_diff_psec;
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ struct ptp_clk_cfg *clk_cfg;
+ struct ptp_bcn_ref *ref;
+ unsigned long flags;
+ u64 timestamp = *ts;
+
+ ptp_cfg = priv->ptp_cfg;
+ if (!ptp_cfg->use_ptp_alg)
+ return;
+ clk_cfg = &ptp_cfg->clk_cfg;
+
+ spin_lock_irqsave(&ptp_cfg->lock, flags);
+
+ if (likely(timestamp > ptp_cfg->new_ref.ptp0_ns))
+ ref = &ptp_cfg->new_ref;
+ else
+ ref = &ptp_cfg->old_ref;
+
+ /* calculate ptp timestamp diff in pico sec */
+ ptp_diff_psec = ((timestamp - ref->ptp0_ns) * PICO_SEC_PER_NSEC *
+ clk_cfg->clk_freq_div) / clk_cfg->clk_freq_ghz;
+ ptp_diff_nsec = (ptp_diff_psec + ref->bcn0_n2_ps + 500) /
+ PICO_SEC_PER_NSEC;
+ timestamp = ref->bcn0_n1_ns - priv->sec_bcn_offset + ptp_diff_nsec;
+
+ spin_unlock_irqrestore(&ptp_cfg->lock, flags);
+
+ *ts = timestamp;
+}
+
+static void otx2_rfoe_ptp_offset_timer(struct timer_list *t)
+{
+ struct ptp_bcn_off_cfg *ptp_cfg = from_timer(ptp_cfg, t, ptp_timer);
+ u64 mio_ptp_ts, ptp_ts_diff, ptp_diff_nsec, ptp_diff_psec;
+ struct ptp_clk_cfg *clk_cfg = &ptp_cfg->clk_cfg;
+ unsigned long expires, flags;
+
+ spin_lock_irqsave(&ptp_cfg->lock, flags);
+
+ memcpy(&ptp_cfg->old_ref, &ptp_cfg->new_ref,
+ sizeof(struct ptp_bcn_ref));
+
+ mio_ptp_ts = readq(ptp_reg_base + MIO_PTP_CLOCK_HI);
+ ptp_ts_diff = mio_ptp_ts - ptp_cfg->new_ref.ptp0_ns;
+ ptp_diff_psec = (ptp_ts_diff * PICO_SEC_PER_NSEC *
+ clk_cfg->clk_freq_div) / clk_cfg->clk_freq_ghz;
+ ptp_diff_nsec = ptp_diff_psec / PICO_SEC_PER_NSEC;
+ ptp_cfg->new_ref.ptp0_ns += ptp_ts_diff;
+ ptp_cfg->new_ref.bcn0_n1_ns += ptp_diff_nsec;
+ ptp_cfg->new_ref.bcn0_n2_ps += ptp_diff_psec -
+ (ptp_diff_nsec * PICO_SEC_PER_NSEC);
+
+ spin_unlock_irqrestore(&ptp_cfg->lock, flags);
+
+ expires = jiffies + PTP_OFF_RESAMPLE_THRESH * HZ;
+ mod_timer(&ptp_cfg->ptp_timer, expires);
+}
+
+/* submit pending ptp tx requests */
+static void otx2_rfoe_ptp_submit_work(struct work_struct *work)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(work,
+ struct otx2_rfoe_ndev_priv,
+ ptp_queue_work);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ u16 psm_queue_id, queue_space;
+ struct sk_buff *skb = NULL;
+ struct list_head *head;
+ u64 jd_cfg_ptr_iova;
+ unsigned long flags;
+ u64 regval;
+
+ job_cfg = &priv->tx_ptp_job_cfg;
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ /* check pending ptp requests */
+ if (list_empty(&priv->ptp_skb_list.list)) {
+ netif_dbg(priv, tx_queued, priv->netdev, "no pending ptp tx requests\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ /* check psm queue space available */
+ psm_queue_id = job_cfg->psm_queue_id;
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1) {
+ netif_dbg(priv, tx_queued, priv->netdev, "ptp tx psm queue %d full\n",
+ psm_queue_id);
+ /* reschedule to check later */
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ schedule_work(&priv->ptp_queue_work);
+ return;
+ }
+
+ if (test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ netif_dbg(priv, tx_queued, priv->netdev, "ptp tx ongoing\n");
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return;
+ }
+
+ head = &priv->ptp_skb_list.list;
+ ts_skb = list_entry(head->next, struct ptp_tstamp_skb, list);
+ skb = ts_skb->skb;
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ priv->ptp_skb_list.count--;
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "submitting ptp tx skb %pS\n", skb);
+
+ priv->last_tx_ptp_jiffies = jiffies;
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)&job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg1.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+}
+
+#define OTX2_RFOE_PTP_TSTMP_POLL_CNT 100
+
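+/* Layout of RFOE(0..2)_TX_PTP_TSTMP_W1 as used below (inferred from
+ * this code, not from a register manual): bit 63 = timestamp valid,
+ * bits 21:20 = error/drop status, bits 19:4 = job id matched against
+ * the submitted PSM job tag.
+ */
+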
+/* ptp interrupt processing bottom half */
+static void otx2_rfoe_ptp_tx_work(struct work_struct *work)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(work,
+ struct otx2_rfoe_ndev_priv,
+ ptp_tx_work);
+ struct skb_shared_hwtstamps ts;
+ u64 timestamp, tstmp_w1;
+ u16 jobid;
+ int cnt;
+
+ if (!priv->ptp_tx_skb) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp tx skb not found, something wrong!\n");
+ goto submit_next_req;
+ }
+
+ /* poll for timestamp valid bit to go high */
+ for (cnt = 0; cnt < OTX2_RFOE_PTP_TSTMP_POLL_CNT; cnt++) {
+ /* read RFOE(0..2)_TX_PTP_TSTMP_W1(0..3) */
+ tstmp_w1 = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PTP_TSTMP_W1(priv->rfoe_num,
+ priv->lmac_id));
+ /* check valid bit */
+ if (tstmp_w1 & (1ULL << 63))
+ break;
+ usleep_range(5, 10);
+ }
+
+ if (cnt >= OTX2_RFOE_PTP_TSTMP_POLL_CNT) {
+ netif_err(priv, tx_err, priv->netdev,
+ "ptp tx timestamp polling timeout, skb=%pS\n",
+ priv->ptp_tx_skb);
+ priv->stats.tx_hwtstamp_failures++;
+ goto submit_next_req;
+ }
+
+ /* check err or drop condition */
+ if ((tstmp_w1 & (1ULL << 21)) || (tstmp_w1 & (1ULL << 20))) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp timestamp error tstmp_w1=0x%llx\n",
+ tstmp_w1);
+ goto submit_next_req;
+ }
+ /* match job id */
+ jobid = (tstmp_w1 >> 4) & 0xffff;
+ if (jobid != priv->ptp_job_tag) {
+ netif_err(priv, tx_done, priv->netdev,
+ "ptp job id doesn't match, tstmp_w1->job_id=0x%x skb->job_tag=0x%x\n",
+ jobid, priv->ptp_job_tag);
+ goto submit_next_req;
+ }
+ /* update timestamp value in skb */
+ timestamp = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PTP_TSTMP_W0(priv->rfoe_num,
+ priv->lmac_id));
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ otx2_rfoe_calc_ptp_ts(priv, &timestamp);
+ else
+ timestamp = timecounter_cyc2time(&priv->time_counter, timestamp);
+
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(timestamp);
+ skb_tstamp_tx(priv->ptp_tx_skb, &ts);
+
+submit_next_req:
+ if (priv->ptp_tx_skb)
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ schedule_work(&priv->ptp_queue_work);
+}
+
+/* psm queue timer callback to check queue space */
+static void otx2_rfoe_tx_timer_cb(struct timer_list *t)
+{
+ struct otx2_rfoe_ndev_priv *priv =
+ container_of(t, struct otx2_rfoe_ndev_priv, tx_timer);
+ u16 psm_queue_id, queue_space;
+ int reschedule = 0;
+ u64 regval;
+
+ /* check psm queue space for both ptp and oth packets */
+ if (netif_queue_stopped(priv->netdev)) {
+ psm_queue_id = priv->tx_ptp_job_cfg.psm_queue_id;
+		/* check queue space */
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+
+ psm_queue_id = priv->rfoe_common->tx_oth_job_cfg.psm_queue_id;
+		/* check queue space */
+ regval = readq(priv->psm_reg_base +
+ PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space > 1) {
+ netif_wake_queue(priv->netdev);
+ reschedule = 0;
+ } else {
+ reschedule = 1;
+ }
+ }
+
+ if (reschedule)
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+}
+
+static void otx2_rfoe_process_rx_pkt(struct otx2_rfoe_ndev_priv *priv,
+ struct rx_ft_cfg *ft_cfg, int mbt_buf_idx)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct rfoe_ecpri_psw0_s *ecpri_psw0 = NULL;
+ struct rfoe_ecpri_psw1_s *ecpri_psw1 = NULL;
+ u64 tstamp = 0, mbt_state, jdt_iova_addr;
+ int found = 0, idx, len, pkt_type;
+ struct otx2_rfoe_ndev_priv *priv2;
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ unsigned int ptp_message_len = 0;
+ struct rfoe_psw0_s *psw0 = NULL;
+ struct rfoe_psw1_s *psw1 = NULL;
+ struct net_device *netdev;
+ u8 *buf_ptr, *jdt_ptr;
+ struct sk_buff *skb;
+ u8 lmac_id;
+
+ /* read mbt state */
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(mbt_buf_idx, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ mbt_state = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_SEG_STATE(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ if ((mbt_state >> 16 & 0xf) != 0) {
+ pr_err("rx pkt error: mbt_buf_idx=%d, err=%d\n",
+ mbt_buf_idx, (u8)(mbt_state >> 16 & 0xf));
+ return;
+ }
+ if (mbt_state >> 20 & 0x1) {
+ pr_err("rx dma error: mbt_buf_idx=%d\n", mbt_buf_idx);
+ return;
+ }
+
+ buf_ptr = (u8 *)ft_cfg->mbt_virt_addr +
+ (ft_cfg->buf_size * mbt_buf_idx);
+
+ pkt_type = ft_cfg->pkt_type;
+#ifdef ASIM
+	/* ASIM issue: all rx packets will hit the eCPRI flow table */
+ pkt_type = PACKET_TYPE_ECPRI;
+#endif
+ if (pkt_type != PACKET_TYPE_ECPRI) {
+ psw0 = (struct rfoe_psw0_s *)buf_ptr;
+ if (psw0->pkt_err_sts || psw0->dma_error) {
+ net_warn_ratelimited("%s: psw0 pkt_err_sts = 0x%x, dma_err=0x%x\n",
+ priv->netdev->name,
+ psw0->pkt_err_sts,
+ psw0->dma_error);
+ return;
+ }
+ /* check that the psw type is correct: */
+ if (unlikely(psw0->pswt == ECPRI_TYPE)) {
+ net_warn_ratelimited("%s: pswt is eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ lmac_id = psw0->lmac_id;
+ jdt_iova_addr = (u64)psw0->jd_ptr;
+ psw1 = (struct rfoe_psw1_s *)(buf_ptr + 16);
+ tstamp = psw1->ptp_timestamp;
+ } else {
+ ecpri_psw0 = (struct rfoe_ecpri_psw0_s *)buf_ptr;
+ if (ecpri_psw0->err_sts & 0x1F) {
+ net_warn_ratelimited("%s: ecpri_psw0 err_sts = 0x%x\n",
+ priv->netdev->name,
+ ecpri_psw0->err_sts);
+ return;
+ }
+ /* check that the psw type is correct: */
+ if (unlikely(ecpri_psw0->pswt != ECPRI_TYPE)) {
+ net_warn_ratelimited("%s: pswt is not eCPRI for pkt_type = %d\n",
+ priv->netdev->name, pkt_type);
+ return;
+ }
+ lmac_id = ecpri_psw0->src_id & 0x3;
+ jdt_iova_addr = (u64)ecpri_psw0->jd_ptr;
+ ecpri_psw1 = (struct rfoe_ecpri_psw1_s *)(buf_ptr + 16);
+ tstamp = ecpri_psw1->ptp_timestamp;
+ }
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "Rx: rfoe=%d lmac=%d mbt_buf_idx=%d psw0(w0)=0x%llx psw0(w1)=0x%llx psw1(w0)=0x%llx psw1(w1)=0x%llx jd:iova=0x%llx\n",
+ priv->rfoe_num, lmac_id, mbt_buf_idx,
+ *(u64 *)buf_ptr, *((u64 *)buf_ptr + 1),
+ *((u64 *)buf_ptr + 2), *((u64 *)buf_ptr + 3),
+ jdt_iova_addr);
+
+ /* read jd ptr from psw */
+ jdt_ptr = otx2_iova_to_virt(priv->iommu_domain, jdt_iova_addr);
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ ((u8 *)jdt_ptr + ft_cfg->jd_rd_offset);
+ len = (jd_dma_cfg_word_0->block_size) << 2;
+ netif_dbg(priv, rx_status, priv->netdev, "jd rd_dma len = %d\n", len);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "RX MBUF DATA:");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ buf_ptr, len, true);
+ }
+
+ buf_ptr += (ft_cfg->pkt_offset * 16);
+ len -= (ft_cfg->pkt_offset * 16);
+
+ for (idx = 0; idx < RFOE_MAX_INTF; idx++) {
+ drv_ctx = &rfoe_drv_ctx[idx];
+ if (drv_ctx->valid && drv_ctx->rfoe_num == priv->rfoe_num &&
+ drv_ctx->lmac_id == lmac_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ netdev = rfoe_drv_ctx[idx].netdev;
+ priv2 = netdev_priv(netdev);
+ } else {
+ pr_err("netdev not found, something went wrong!\n");
+ return;
+ }
+
+ /* drop the packet if interface is down */
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv2, rx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv2->rfoe_num,
+ priv2->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_dropped++;
+ priv2->last_rx_ptp_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_dropped++;
+ priv2->last_rx_dropped_jiffies = jiffies;
+ }
+ return;
+ }
+
+ skb = netdev_alloc_skb_ip_align(netdev, len);
+ if (!skb) {
+ netif_err(priv2, rx_err, netdev, "Rx: alloc skb failed\n");
+ return;
+ }
+
+ memcpy(skb->data, buf_ptr, len);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* remove trailing padding for ptp packets */
+ if (skb->protocol == htons(ETH_P_1588)) {
+ ptp_message_len = skb->data[2] << 8 | skb->data[3];
+ skb_trim(skb, ptp_message_len);
+ }
+
+ if (priv2->rx_hw_tstamp_en) {
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ otx2_rfoe_calc_ptp_ts(priv, &tstamp);
+ else
+ tstamp = timecounter_cyc2time(&priv->time_counter, tstamp);
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tstamp);
+ }
+
+ netif_receive_skb(skb);
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_PTP) {
+ priv2->stats.ptp_rx_packets++;
+ priv2->last_rx_ptp_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv2->stats.ecpri_rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ } else {
+ priv2->stats.rx_packets++;
+ priv2->last_rx_jiffies = jiffies;
+ }
+ priv2->stats.rx_bytes += skb->len;
+}
+
+static int otx2_rfoe_process_rx_flow(struct otx2_rfoe_ndev_priv *priv,
+ int pkt_type, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ int count = 0, processed_pkts = 0;
+ struct rx_ft_cfg *ft_cfg;
+ u64 mbt_cfg;
+ u16 nxt_buf;
+ int *mbt_last_idx = &priv->rfoe_common->rx_mbt_last_idx[pkt_type];
+ u16 *prv_nxt_buf = &priv->rfoe_common->nxt_buf[pkt_type];
+
+ ft_cfg = &priv->rx_ft_cfg[pkt_type];
+
+ spin_lock(&cdev_priv->mbt_lock);
+ /* read mbt nxt_buf */
+ writeq(ft_cfg->mbt_idx,
+ priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num));
+ mbt_cfg = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_MBT_CFG(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+
+ nxt_buf = (mbt_cfg >> 32) & 0xffff;
+
+ /* no mbt entries to process */
+ if (nxt_buf == *prv_nxt_buf) {
+ netif_dbg(priv, rx_status, priv->netdev,
+ "no rx packets to process, rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx);
+ return 0;
+ }
+
+ *prv_nxt_buf = nxt_buf;
+
+ /* get count of pkts to process, check ring wrap condition */
+ if (*mbt_last_idx > nxt_buf) {
+ count = ft_cfg->num_bufs - *mbt_last_idx;
+ count += nxt_buf;
+ } else {
+ count = nxt_buf - *mbt_last_idx;
+ }
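+ /* e.g. with num_bufs = 64, *mbt_last_idx = 60 and nxt_buf = 4 the
+ * ring has wrapped, so count = (64 - 60) + 4 = 8 entries
+ */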
+
+ netif_dbg(priv, rx_status, priv->netdev,
+ "rfoe=%d pkt_type=%d mbt_idx=%d nxt_buf=%d mbt_buf_sw_head=%d count=%d\n",
+ priv->rfoe_num, pkt_type, ft_cfg->mbt_idx, nxt_buf,
+ *mbt_last_idx, count);
+
+ while (likely((processed_pkts < budget) && (processed_pkts < count))) {
+ otx2_rfoe_process_rx_pkt(priv, ft_cfg, *mbt_last_idx);
+
+ (*mbt_last_idx)++;
+ if (*mbt_last_idx == ft_cfg->num_bufs)
+ *mbt_last_idx = 0;
+
+ processed_pkts++;
+ }
+
+ return processed_pkts;
+}
+
+/* napi poll routine */
+static int otx2_rfoe_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ struct otx2_rfoe_ndev_priv *priv;
+ int workdone = 0, pkt_type;
+ struct rx_ft_cfg *ft_cfg;
+ u64 intr_en, regval;
+
+ ft_cfg = container_of(napi, struct rx_ft_cfg, napi);
+ priv = ft_cfg->priv;
+ cdev_priv = priv->cdev_priv;
+ pkt_type = ft_cfg->pkt_type;
+
+ /* pkt processing loop */
+ workdone += otx2_rfoe_process_rx_flow(priv, pkt_type, budget);
+
+ if (workdone < budget) {
+ napi_complete_done(napi, workdone);
+
+ /* Re-enable the Rx interrupts */
+ intr_en = PKT_TYPE_TO_INTR(pkt_type) <<
+ RFOE_RX_INTR_SHIFT(priv->rfoe_num);
+ spin_lock(&cdev_priv->lock);
+ regval = readq(bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ regval |= intr_en;
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1S(1));
+ spin_unlock(&cdev_priv->lock);
+ }
+
+ return workdone;
+}
+
+/* Rx GPINT napi schedule api */
+void otx2_rfoe_rx_napi_schedule(int rfoe_num, u32 status)
+{
+ enum bphy_netdev_packet_type pkt_type;
+ struct otx2_rfoe_drv_ctx *drv_ctx;
+ struct otx2_rfoe_ndev_priv *priv;
+ struct rx_ft_cfg *ft_cfg;
+ int intf, bit_idx;
+ u32 intr_sts;
+ u64 regval;
+
+ for (intf = 0; intf < RFOE_MAX_INTF; intf++) {
+ drv_ctx = &rfoe_drv_ctx[intf];
+ /* ignore lmac, one interrupt/pkt_type/rfoe */
+ if (!(drv_ctx->valid && drv_ctx->rfoe_num == rfoe_num))
+ continue;
+ /* check if i/f down, napi disabled */
+ priv = netdev_priv(drv_ctx->netdev);
+ if (test_bit(RFOE_INTF_DOWN, &priv->state))
+ continue;
+ /* check rx pkt type */
+ intr_sts = ((status >> RFOE_RX_INTR_SHIFT(rfoe_num)) &
+ RFOE_RX_INTR_EN);
+ for (bit_idx = 0; bit_idx < PACKET_TYPE_MAX; bit_idx++) {
+ if (!(intr_sts & BIT(bit_idx)))
+ continue;
+ pkt_type = INTR_TO_PKT_TYPE(bit_idx);
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type))))
+ continue;
+ /* clear intr enable bit, re-enable in napi handler */
+ regval = PKT_TYPE_TO_INTR(pkt_type) <<
+ RFOE_RX_INTR_SHIFT(rfoe_num);
+ writeq(regval, bphy_reg_base + PSM_INT_GP_ENA_W1C(1));
+ /* schedule napi */
+ ft_cfg = &drv_ctx->ft_cfg[pkt_type];
+ napi_schedule(&ft_cfg->napi);
+ }
+ /* napi scheduled per pkt_type, return */
+ return;
+ }
+}
+
+static void otx2_rfoe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_rfoe_stats *dev_stats = &priv->stats;
+
+ stats->rx_bytes = dev_stats->rx_bytes;
+ stats->rx_packets = dev_stats->rx_packets +
+ dev_stats->ptp_rx_packets +
+ dev_stats->ecpri_rx_packets;
+ stats->rx_dropped = dev_stats->rx_dropped +
+ dev_stats->ptp_rx_dropped +
+ dev_stats->ecpri_rx_dropped;
+
+ stats->tx_bytes = dev_stats->tx_bytes;
+ stats->tx_packets = dev_stats->tx_packets +
+ dev_stats->ptp_tx_packets +
+ dev_stats->ecpri_tx_packets;
+ stats->tx_dropped = dev_stats->tx_dropped +
+ dev_stats->ptp_tx_dropped +
+ dev_stats->ecpri_tx_dropped;
+}
+
+static int otx2_rfoe_config_hwtstamp(struct net_device *netdev,
+ struct ifreq *ifr)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ /* ptp hw timestamp is always enabled, mark the sw flags
+ * so that tx ptp requests are submitted to ptp psm queue
+ * and rx timestamp is copied to skb
+ */
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ priv->tx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ priv->tx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_hw_tstamp_en = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_hw_tstamp_en = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* netdev ioctl */
+static int otx2_rfoe_ioctl(struct net_device *netdev, struct ifreq *req,
+ int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_rfoe_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* netdev xmit */
+static netdev_tx_t otx2_rfoe_eth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct mhbw_jd_dma_cfg_word_0_s *jd_dma_cfg_word_0;
+ struct mhbw_jd_dma_cfg_word_1_s *jd_dma_cfg_word_1;
+ struct mhab_job_desc_cfg *jd_cfg_ptr;
+ struct psm_cmd_addjob_s *psm_cmd_lo;
+ struct tx_job_queue_cfg *job_cfg;
+ u64 jd_cfg_ptr_iova, regval;
+ struct tx_job_entry *job_entry;
+ struct ptp_tstamp_skb *ts_skb;
+ int psm_queue_id, queue_space;
+ int pkt_type = 0;
+ unsigned long flags;
+ struct ethhdr *eth;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ if (!priv->tx_hw_tstamp_en) {
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "skb HW timestamp requested but not enabled, this packet will not be timestamped\n");
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ pkt_type = PACKET_TYPE_OTHER;
+ } else {
+ job_cfg = &priv->tx_ptp_job_cfg;
+ pkt_type = PACKET_TYPE_PTP;
+ }
+ } else {
+ job_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ eth = (struct ethhdr *)skb->data;
+ if (ntohs(eth->h_proto) == ETH_P_ECPRI)
+ pkt_type = PACKET_TYPE_ECPRI;
+ else
+ pkt_type = PACKET_TYPE_OTHER;
+ }
+
+ spin_lock_irqsave(&job_cfg->lock, flags);
+
+ if (unlikely(priv->if_type != IF_TYPE_ETHERNET)) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} invalid intf mode, drop pkt\n",
+ netdev->name, priv->rfoe_num, priv->lmac_id);
+ /* update stats */
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ goto exit;
+ }
+
+ if (unlikely(!netif_carrier_ok(netdev))) {
+ netif_err(priv, tx_err, netdev,
+ "%s {rfoe%d lmac%d} link down, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ if (unlikely(!(priv->pkt_type_mask & (1U << pkt_type)))) {
+ netif_err(priv, tx_queued, netdev,
+ "%s {rfoe%d lmac%d} pkt not supported, drop pkt\n",
+ netdev->name, priv->rfoe_num,
+ priv->lmac_id);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ } else {
+ priv->stats.tx_dropped++;
+ priv->last_tx_dropped_jiffies = jiffies;
+ }
+
+ goto exit;
+ }
+
+ /* get psm queue number */
+ psm_queue_id = job_cfg->psm_queue_id;
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "psm: queue(%d): cfg=0x%llx ptr=0x%llx space=0x%llx\n",
+ psm_queue_id,
+ readq(priv->psm_reg_base + PSM_QUEUE_CFG(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_PTR(psm_queue_id)),
+ readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id)));
+
+ /* check psm queue space available */
+ regval = readq(priv->psm_reg_base + PSM_QUEUE_SPACE(psm_queue_id));
+ queue_space = regval & 0x7FFF;
+ if (queue_space < 1 && pkt_type != PACKET_TYPE_PTP) {
+ netif_err(priv, tx_err, netdev,
+ "no space in psm queue %d, dropping pkt\n",
+ psm_queue_id);
+ netif_stop_queue(netdev);
+ dev_kfree_skb_any(skb);
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ priv->stats.ecpri_tx_dropped++;
+ else
+ priv->stats.tx_dropped++;
+
+ priv->last_tx_dropped_jiffies = jiffies;
+
+ mod_timer(&priv->tx_timer, jiffies + msecs_to_jiffies(100));
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+ return NETDEV_TX_OK;
+ }
+
+ /* get the tx job entry */
+ job_entry = (struct tx_job_entry *)
+ &job_cfg->job_entries[job_cfg->q_idx];
+
+ netif_dbg(priv, tx_queued, priv->netdev,
+ "rfoe=%d lmac=%d psm_queue=%d tx_job_entry %d job_cmd_lo=0x%llx job_cmd_high=0x%llx jd_iova_addr=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, psm_queue_id, job_cfg->q_idx,
+ job_entry->job_cmd_lo, job_entry->job_cmd_hi,
+ job_entry->jd_iova_addr);
+
+ /* hw timestamp */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->tx_hw_tstamp_en) {
+ if (list_empty(&priv->ptp_skb_list.list) &&
+ !test_and_set_bit_lock(PTP_TX_IN_PROGRESS, &priv->state)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ptp_tx_skb = skb;
+ psm_cmd_lo = (struct psm_cmd_addjob_s *)
+ &job_entry->job_cmd_lo;
+ priv->ptp_job_tag = psm_cmd_lo->jobtag;
+ } else {
+ /* check ptp queue count */
+ if (priv->ptp_skb_list.count >= max_ptp_req) {
+ netif_err(priv, tx_err, netdev,
+ "ptp list full, dropping pkt\n");
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ /* allocate and add ptp req to queue */
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ priv->stats.ptp_tx_dropped++;
+ priv->last_tx_ptp_dropped_jiffies = jiffies;
+ goto exit;
+ }
+ ts_skb->skb = skb;
+ list_add_tail(&ts_skb->list, &priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count++;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->stats.ptp_tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+ goto exit; /* submit the packet later */
+ }
+ }
+
+ /* sw timestamp */
+ skb_tx_timestamp(skb);
+
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_printk(KERN_DEBUG, priv->netdev, "Tx: skb %pS len=%d\n",
+ skb, skb->len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 4,
+ skb->data, skb->len, true);
+ }
+
+ /* update length and block size in jd dma cfg word */
+ jd_cfg_ptr_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain, jd_cfg_ptr_iova);
+ jd_cfg_ptr->cfg1.pkt_len = skb->len;
+ jd_dma_cfg_word_0 = (struct mhbw_jd_dma_cfg_word_0_s *)
+ job_entry->rd_dma_ptr;
+ jd_dma_cfg_word_0->block_size = (((skb->len + 15) >> 4) * 4);
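+ /* block_size is in 4-byte words: round skb->len up to a 16-byte
+ * multiple, then express it in words (e.g. len 64 -> 16 words),
+ * matching the len = block_size << 2 conversion on the rx path
+ */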
+
+ /* update rfoe_mode and lmac id for non-ptp (shared) psm job entry */
+ if (pkt_type != PACKET_TYPE_PTP) {
+ jd_cfg_ptr->cfg.lmacid = priv->lmac_id & 0x3;
+ if (pkt_type == PACKET_TYPE_ECPRI)
+ jd_cfg_ptr->cfg.rfoe_mode = 1;
+ else
+ jd_cfg_ptr->cfg.rfoe_mode = 0;
+ }
+
+ /* copy packet data to rd_dma_ptr start addr */
+ jd_dma_cfg_word_1 = (struct mhbw_jd_dma_cfg_word_1_s *)
+ ((u8 *)job_entry->rd_dma_ptr + 8);
+ memcpy(otx2_iova_to_virt(priv->iommu_domain,
+ jd_dma_cfg_word_1->start_addr),
+ skb->data, skb->len);
+
+ /* make sure that all memory writes are completed */
+ dma_wmb();
+
+ /* submit PSM job */
+ writeq(job_entry->job_cmd_lo,
+ priv->psm_reg_base + PSM_QUEUE_CMD_LO(psm_queue_id));
+ writeq(job_entry->job_cmd_hi,
+ priv->psm_reg_base + PSM_QUEUE_CMD_HI(psm_queue_id));
+
+ /* update stats */
+ if (pkt_type == PACKET_TYPE_ECPRI) {
+ priv->stats.ecpri_tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ } else if (pkt_type == PACKET_TYPE_PTP) {
+ priv->stats.ptp_tx_packets++;
+ priv->last_tx_ptp_jiffies = jiffies;
+ } else {
+ priv->stats.tx_packets++;
+ priv->last_tx_jiffies = jiffies;
+ }
+ priv->stats.tx_bytes += skb->len;
+
+ /* increment queue index */
+ job_cfg->q_idx++;
+ if (job_cfg->q_idx == job_cfg->num_entries)
+ job_cfg->q_idx = 0;
+exit:
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ dev_kfree_skb_any(skb);
+
+ spin_unlock_irqrestore(&job_cfg->lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* netdev open */
+static int otx2_rfoe_eth_open(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_enable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ priv->ptp_tx_skb = NULL;
+
+ spin_lock(&priv->lock);
+ clear_bit(RFOE_INTF_DOWN, &priv->state);
+
+ if (priv->link_state == LINK_STATE_UP) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ spin_unlock(&priv->lock);
+
+ return 0;
+}
+
+/* netdev close */
+static int otx2_rfoe_eth_stop(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct ptp_tstamp_skb *ts_skb, *ts_skb2;
+ int idx;
+
+ spin_lock(&priv->lock);
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+
+ spin_unlock(&priv->lock);
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ napi_disable(&priv->rx_ft_cfg[idx].napi);
+ }
+
+ del_timer_sync(&priv->tx_timer);
+
+ /* cancel any pending ptp work item in progress */
+ cancel_work_sync(&priv->ptp_tx_work);
+ if (priv->ptp_tx_skb) {
+ dev_kfree_skb_any(priv->ptp_tx_skb);
+ priv->ptp_tx_skb = NULL;
+ clear_bit_unlock(PTP_TX_IN_PROGRESS, &priv->state);
+ }
+
+ /* clear ptp skb list */
+ cancel_work_sync(&priv->ptp_queue_work);
+ list_for_each_entry_safe(ts_skb, ts_skb2,
+ &priv->ptp_skb_list.list, list) {
+ list_del(&ts_skb->list);
+ kfree(ts_skb);
+ }
+ priv->ptp_skb_list.count = 0;
+
+ return 0;
+}
+
+static int otx2_rfoe_init(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ /* Enable VLAN TPID match */
+ writeq(0x18100, (priv->rfoe_reg_base +
+ RFOEX_RX_VLANX_CFG(priv->rfoe_num, 0)));
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+}
+
+static int otx2_rfoe_vlan_rx_configure(struct net_device *netdev, u16 vid,
+ bool forward)
+{
+ struct rfoe_rx_ind_vlanx_fwd fwd;
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ u64 index = (vid >> 6) & 0x3F;
+ u64 mask = (0x1ll << (vid & 0x3F));
+ unsigned long flags;
+
+ if (vid >= VLAN_N_VID) {
+ netdev_err(netdev, "Invalid VLAN ID %d\n", vid);
+ return -EINVAL;
+ }
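+ /* the 4096 VLAN ids map onto 64 registers of 64 bits each, e.g.
+ * vid 100: index = 100 >> 6 = 1, mask = BIT(100 & 0x3f) = BIT(36)
+ */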
+
+ spin_lock_irqsave(&cdev_priv->mbt_lock, flags);
+
+ if (forward && priv->rfoe_common->rx_vlan_fwd_refcnt[vid]++)
+ goto out;
+
+ if (!forward && --priv->rfoe_common->rx_vlan_fwd_refcnt[vid])
+ goto out;
+
+ /* read current fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ fwd.fwd = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0));
+
+ if (forward)
+ fwd.fwd |= mask;
+ else
+ fwd.fwd &= ~mask;
+
+ /* write the new fwd mask */
+ writeq(index, (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ writeq(fwd.fwd, (priv->rfoe_reg_base +
+ RFOEX_RX_IND_VLANX_FWD(priv->rfoe_num, 0)));
+
+out:
+ spin_unlock_irqrestore(&cdev_priv->mbt_lock, flags);
+
+ return 0;
+}
+
+static int otx2_rfoe_vlan_rx_add(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return otx2_rfoe_vlan_rx_configure(netdev, vid, true);
+}
+
+static int otx2_rfoe_vlan_rx_kill(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ return otx2_rfoe_vlan_rx_configure(netdev, vid, false);
+}
+
+static const struct net_device_ops otx2_rfoe_netdev_ops = {
+ .ndo_init = otx2_rfoe_init,
+ .ndo_open = otx2_rfoe_eth_open,
+ .ndo_stop = otx2_rfoe_eth_stop,
+ .ndo_start_xmit = otx2_rfoe_eth_start_xmit,
+ .ndo_do_ioctl = otx2_rfoe_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = otx2_rfoe_get_stats64,
+ .ndo_vlan_rx_add_vid = otx2_rfoe_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = otx2_rfoe_vlan_rx_kill,
+};
+
+static void otx2_rfoe_dump_rx_ft_cfg(struct otx2_rfoe_ndev_priv *priv)
+{
+ struct rx_ft_cfg *ft_cfg;
+ int idx;
+
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ pr_debug("rfoe=%d lmac=%d pkttype=%d flowid=%d mbt: idx=%d size=%d nbufs=%d iova=0x%llx jdt: idx=%d size=%d num_jd=%d iova=0x%llx\n",
+ priv->rfoe_num, priv->lmac_id, ft_cfg->pkt_type,
+ ft_cfg->flow_id, ft_cfg->mbt_idx, ft_cfg->buf_size,
+ ft_cfg->num_bufs, ft_cfg->mbt_iova_addr,
+ ft_cfg->jdt_idx, ft_cfg->jd_size, ft_cfg->num_jd,
+ ft_cfg->jdt_iova_addr);
+ }
+}
+
+static inline void otx2_rfoe_fill_rx_ft_cfg(struct otx2_rfoe_ndev_priv *priv,
+ struct bphy_netdev_comm_if *if_cfg)
+{
+ struct otx2_bphy_cdev_priv *cdev_priv = priv->cdev_priv;
+ struct bphy_netdev_rbuf_info *rbuf_info;
+ struct rx_ft_cfg *ft_cfg;
+ u64 jdt_cfg0, iova;
+ int idx;
+
+ /* RX flow table configuration */
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ rbuf_info = &if_cfg->rbuf_info[idx];
+ ft_cfg->pkt_type = rbuf_info->pkt_type;
+ ft_cfg->gp_int_num = rbuf_info->gp_int_num;
+ ft_cfg->flow_id = rbuf_info->flow_id;
+ ft_cfg->mbt_idx = rbuf_info->mbt_index;
+ ft_cfg->buf_size = rbuf_info->buf_size * 16;
+ ft_cfg->num_bufs = rbuf_info->num_bufs;
+ ft_cfg->mbt_iova_addr = rbuf_info->mbt_iova_addr;
+ iova = ft_cfg->mbt_iova_addr;
+ ft_cfg->mbt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ ft_cfg->jdt_idx = rbuf_info->jdt_index;
+ ft_cfg->jd_size = rbuf_info->jd_size * 8;
+ ft_cfg->num_jd = rbuf_info->num_jd;
+ ft_cfg->jdt_iova_addr = rbuf_info->jdt_iova_addr;
+ iova = ft_cfg->jdt_iova_addr;
+ ft_cfg->jdt_virt_addr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ spin_lock(&cdev_priv->mbt_lock);
+ writeq(ft_cfg->jdt_idx,
+ (priv->rfoe_reg_base +
+ RFOEX_RX_INDIRECT_INDEX_OFFSET(priv->rfoe_num)));
+ jdt_cfg0 = readq(priv->rfoe_reg_base +
+ RFOEX_RX_IND_JDT_CFG0(priv->rfoe_num));
+ spin_unlock(&cdev_priv->mbt_lock);
+ ft_cfg->jd_rd_offset = ((jdt_cfg0 >> 28) & 0xf) * 8;
+ ft_cfg->pkt_offset = (u8)((jdt_cfg0 >> 52) & 0x7);
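+ /* per the shifts above, JDT_CFG0 bits 31:28 give the JD read
+ * offset in 8-byte words and bits 54:52 the packet offset,
+ * consumed later in units of 16 bytes
+ */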
+ ft_cfg->priv = priv;
+ netif_napi_add(priv->netdev, &ft_cfg->napi,
+ otx2_rfoe_napi_poll,
+ NAPI_POLL_WEIGHT);
+ }
+}
+
+static void otx2_rfoe_fill_tx_job_entries(struct otx2_rfoe_ndev_priv *priv,
+ struct tx_job_queue_cfg *job_cfg,
+ struct bphy_netdev_tx_psm_cmd_info *tx_job,
+ int num_entries)
+{
+ struct tx_job_entry *job_entry;
+ u64 jd_cfg_iova, iova;
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ job_entry = &job_cfg->job_entries[i];
+ job_entry->job_cmd_lo = tx_job->low_cmd;
+ job_entry->job_cmd_hi = tx_job->high_cmd;
+ job_entry->jd_iova_addr = tx_job->jd_iova_addr;
+ iova = job_entry->jd_iova_addr;
+ job_entry->jd_ptr = otx2_iova_to_virt(priv->iommu_domain, iova);
+ jd_cfg_iova = *(u64 *)((u8 *)job_entry->jd_ptr + 8);
+ job_entry->jd_cfg_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ jd_cfg_iova);
+ job_entry->rd_dma_iova_addr = tx_job->rd_dma_iova_addr;
+ iova = job_entry->rd_dma_iova_addr;
+ job_entry->rd_dma_ptr = otx2_iova_to_virt(priv->iommu_domain,
+ iova);
+ pr_debug("job_cmd_lo=0x%llx job_cmd_hi=0x%llx jd_iova_addr=0x%llx rd_dma_iova_addr=%llx\n",
+ tx_job->low_cmd, tx_job->high_cmd,
+ tx_job->jd_iova_addr, tx_job->rd_dma_iova_addr);
+ tx_job++;
+ }
+ /* get psm queue id */
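+ /* bits 15:8 of the low command word carry the PSM queue id,
+ * presumably the queue field of struct psm_cmd_addjob_s
+ */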
+ job_entry = &job_cfg->job_entries[0];
+ job_cfg->psm_queue_id = (job_entry->job_cmd_lo >> 8) & 0xff;
+ job_cfg->q_idx = 0;
+ job_cfg->num_entries = num_entries;
+ spin_lock_init(&job_cfg->lock);
+}
+
+int otx2_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg)
+{
+ int i, intf_idx = 0, num_entries, lmac, idx, ret;
+ struct bphy_netdev_tx_psm_cmd_info *tx_info;
+ struct otx2_rfoe_drv_ctx *drv_ctx = NULL;
+ struct otx2_rfoe_ndev_priv *priv, *priv2;
+ struct bphy_netdev_rfoe_if *rfoe_cfg;
+ struct bphy_netdev_comm_if *if_cfg;
+ struct tx_job_queue_cfg *tx_cfg;
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ struct net_device *netdev;
+ struct rx_ft_cfg *ft_cfg;
+ u8 pkt_type_mask;
+
+ ptp_cfg = kzalloc(sizeof(*ptp_cfg), GFP_KERNEL);
+ if (!ptp_cfg)
+ return -ENOMEM;
+ timer_setup(&ptp_cfg->ptp_timer, otx2_rfoe_ptp_offset_timer, 0);
+ ptp_cfg->clk_cfg.clk_freq_ghz = PTP_CLK_FREQ_GHZ;
+ ptp_cfg->clk_cfg.clk_freq_div = PTP_CLK_FREQ_DIV;
+ spin_lock_init(&ptp_cfg->lock);
+
+ for (i = 0; i < MAX_RFOE_INTF; i++) {
+ priv2 = NULL;
+ rfoe_cfg = &cfg[i].rfoe_if_cfg;
+ pkt_type_mask = rfoe_cfg->pkt_type_mask;
+ for (lmac = 0; lmac < MAX_LMAC_PER_RFOE; lmac++) {
+ if_cfg = &rfoe_cfg->if_cfg[lmac];
+ /* check if lmac is valid */
+ if (!if_cfg->lmac_info.is_valid) {
+ dev_dbg(cdev->dev,
+ "rfoe%d lmac%d invalid\n", i, lmac);
+ continue;
+ }
+ netdev =
+ alloc_etherdev(sizeof(struct otx2_rfoe_ndev_priv));
+ if (!netdev) {
+ dev_err(cdev->dev,
+ "error allocating net device\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ memset(priv, 0, sizeof(*priv));
+ if (!priv2) {
+ priv->rfoe_common =
+ kzalloc(sizeof(struct rfoe_common_cfg),
+ GFP_KERNEL);
+ if (!priv->rfoe_common) {
+ dev_err(cdev->dev, "kzalloc failed\n");
+ free_netdev(netdev);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+ priv->rfoe_common->refcnt = 1;
+ }
+ spin_lock_init(&priv->lock);
+ priv->netdev = netdev;
+ priv->cdev_priv = cdev;
+ priv->msg_enable = netif_msg_init(-1, 0);
+ spin_lock_init(&priv->stats.lock);
+ priv->rfoe_num = if_cfg->lmac_info.rfoe_num;
+ priv->lmac_id = if_cfg->lmac_info.lane_num;
+ priv->if_type = cfg[i].if_type;
+ memcpy(priv->mac_addr, if_cfg->lmac_info.eth_addr,
+ ETH_ALEN);
+ if (is_valid_ether_addr(priv->mac_addr))
+ ether_addr_copy(netdev->dev_addr,
+ priv->mac_addr);
+ else
+ random_ether_addr(netdev->dev_addr);
+ priv->pdev = pci_get_device(OTX2_BPHY_PCI_VENDOR_ID,
+ OTX2_BPHY_PCI_DEVICE_ID,
+ NULL);
+ priv->iommu_domain =
+ iommu_get_domain_for_dev(&priv->pdev->dev);
+ priv->bphy_reg_base = bphy_reg_base;
+ priv->psm_reg_base = psm_reg_base;
+ priv->rfoe_reg_base = rfoe_reg_base;
+ priv->bcn_reg_base = bcn_reg_base;
+ priv->ptp_reg_base = ptp_reg_base;
+ priv->ptp_cfg = ptp_cfg;
+ ++(priv->ptp_cfg->refcnt);
+
+ /* Initialise PTP TX work queue */
+ INIT_WORK(&priv->ptp_tx_work, otx2_rfoe_ptp_tx_work);
+ INIT_WORK(&priv->ptp_queue_work,
+ otx2_rfoe_ptp_submit_work);
+
+ /* Initialise PTP skb list */
+ INIT_LIST_HEAD(&priv->ptp_skb_list.list);
+ priv->ptp_skb_list.count = 0;
+ timer_setup(&priv->tx_timer, otx2_rfoe_tx_timer_cb, 0);
+
+ priv->pkt_type_mask = pkt_type_mask;
+ otx2_rfoe_fill_rx_ft_cfg(priv, if_cfg);
+ otx2_rfoe_dump_rx_ft_cfg(priv);
+
+ /* TX PTP job configuration */
+ if (priv->pkt_type_mask & (1U << PACKET_TYPE_PTP)) {
+ tx_cfg = &priv->tx_ptp_job_cfg;
+ tx_info = &if_cfg->ptp_pkt_info[0];
+ num_entries = MAX_PTP_MSG_PER_LMAC;
+ otx2_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ }
+
+ /* TX ECPRI/OTH(PTP) job configuration */
+ if (!priv2 &&
+ ((priv->pkt_type_mask &
+ (1U << PACKET_TYPE_OTHER)) ||
+ (priv->pkt_type_mask &
+ (1U << PACKET_TYPE_ECPRI)))) {
+ /* RFOE 2 will have 2 LMACs */
+ num_entries = (priv->rfoe_num < 2) ?
+ MAX_OTH_MSG_PER_RFOE : 32;
+ tx_cfg = &priv->rfoe_common->tx_oth_job_cfg;
+ tx_info = &rfoe_cfg->oth_pkt_info[0];
+ otx2_rfoe_fill_tx_job_entries(priv, tx_cfg,
+ tx_info,
+ num_entries);
+ } else {
+ /* share rfoe_common data */
+ priv->rfoe_common = priv2->rfoe_common;
+ ++(priv->rfoe_common->refcnt);
+ }
+
+ /* keep last (rfoe + lmac) priv structure */
+ if (!priv2)
+ priv2 = priv;
+
+ intf_idx = (i * 4) + lmac;
+ snprintf(netdev->name, sizeof(netdev->name),
+ "rfoe%d", intf_idx);
+ netdev->netdev_ops = &otx2_rfoe_netdev_ops;
+ otx2_rfoe_set_ethtool_ops(netdev);
+ otx2_rfoe_ptp_init(priv);
+ netdev->watchdog_timeo = (15 * HZ);
+ netdev->mtu = 1500U;
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = 1500U;
+ ret = register_netdev(netdev);
+ if (ret < 0) {
+ dev_err(cdev->dev,
+ "failed to register net device %s\n",
+ netdev->name);
+ free_netdev(netdev);
+ ret = -ENODEV;
+ goto err_exit;
+ }
+ dev_dbg(cdev->dev, "net device %s registered\n",
+ netdev->name);
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ set_bit(RFOE_INTF_DOWN, &priv->state);
+ priv->link_state = LINK_STATE_UP;
+
+ /* initialize global ctx */
+ drv_ctx = &rfoe_drv_ctx[intf_idx];
+ drv_ctx->rfoe_num = priv->rfoe_num;
+ drv_ctx->lmac_id = priv->lmac_id;
+ drv_ctx->valid = 1;
+ drv_ctx->netdev = netdev;
+ drv_ctx->ft_cfg = &priv->rx_ft_cfg[0];
+
+ /* create debugfs entry */
+ otx2_rfoe_debugfs_create(drv_ctx);
+ }
+ }
+
+ return 0;
+
+err_exit:
+ for (i = 0; i < RFOE_MAX_INTF; i++) {
+ drv_ctx = &rfoe_drv_ctx[i];
+ if (drv_ctx->valid) {
+ otx2_rfoe_debugfs_remove(drv_ctx);
+ netdev = drv_ctx->netdev;
+ priv = netdev_priv(netdev);
+ otx2_rfoe_ptp_destroy(priv);
+ unregister_netdev(netdev);
+ for (idx = 0; idx < PACKET_TYPE_MAX; idx++) {
+ if (!(priv->pkt_type_mask & (1U << idx)))
+ continue;
+ ft_cfg = &priv->rx_ft_cfg[idx];
+ netif_napi_del(&ft_cfg->napi);
+ }
+ --(priv->rfoe_common->refcnt);
+ if (priv->rfoe_common->refcnt == 0)
+ kfree(priv->rfoe_common);
+ free_netdev(netdev);
+ drv_ctx->valid = 0;
+ }
+ }
+ del_timer_sync(&ptp_cfg->ptp_timer);
+ kfree(ptp_cfg);
+
+ return ret;
+}
+
+static void otx2_rfoe_debugfs_reader(char *buffer, size_t count, void *priv)
+{
+ struct otx2_rfoe_drv_ctx *ctx;
+ struct otx2_rfoe_ndev_priv *netdev;
+ u8 ptp_tx_in_progress;
+ unsigned int queued_ptp_reqs;
+ u8 queue_stopped, state_up;
+ u16 other_tx_psm_space, ptp_tx_psm_space, queue_id;
+ u64 regval;
+ const char *formatter;
+
+ ctx = priv;
+ netdev = netdev_priv(ctx->netdev);
+ ptp_tx_in_progress = test_bit(PTP_TX_IN_PROGRESS, &netdev->state);
+ queued_ptp_reqs = netdev->ptp_skb_list.count;
+ queue_stopped = netif_queue_stopped(ctx->netdev);
+ state_up = netdev->link_state;
+ formatter = otx2_rfoe_debugfs_get_formatter();
+
+ /* other tx psm space */
+ queue_id = netdev->rfoe_common->tx_oth_job_cfg.psm_queue_id;
+ regval = readq(netdev->psm_reg_base + PSM_QUEUE_SPACE(queue_id));
+ other_tx_psm_space = regval & 0x7FFF;
+
+ /* ptp tx psm space */
+ queue_id = netdev->tx_ptp_job_cfg.psm_queue_id;
+ regval = readq(netdev->psm_reg_base + PSM_QUEUE_SPACE(queue_id));
+ ptp_tx_psm_space = regval & 0x7FFF;
+
+ snprintf(buffer, count, formatter,
+ ptp_tx_in_progress,
+ queued_ptp_reqs,
+ queue_stopped,
+ state_up,
+ netdev->last_tx_jiffies,
+ netdev->last_tx_dropped_jiffies,
+ netdev->last_tx_ptp_jiffies,
+ netdev->last_tx_ptp_dropped_jiffies,
+ netdev->last_rx_jiffies,
+ netdev->last_rx_dropped_jiffies,
+ netdev->last_rx_ptp_jiffies,
+ netdev->last_rx_ptp_dropped_jiffies,
+ jiffies,
+ other_tx_psm_space,
+ ptp_tx_psm_space);
+}
+
+static const char *otx2_rfoe_debugfs_get_formatter(void)
+{
+ static const char *buffer_format = "ptp-tx-in-progress: %u\n"
+ "queued-ptp-reqs: %u\n"
+ "queue-stopped: %u\n"
+ "state-up: %u\n"
+ "last-tx-jiffies: %lu\n"
+ "last-tx-dropped-jiffies: %lu\n"
+ "last-tx-ptp-jiffies: %lu\n"
+ "last-tx-ptp-dropped-jiffies: %lu\n"
+ "last-rx-jiffies: %lu\n"
+ "last-rx-dropped-jiffies: %lu\n"
+ "last-rx-ptp-jiffies: %lu\n"
+ "last-rx-ptp-dropped-jiffies: %lu\n"
+ "current-jiffies: %lu\n"
+ "other-tx-psm-space: %u\n"
+ "ptp-tx-psm-space: %u\n";
+
+ return buffer_format;
+}
+
+static size_t otx2_rfoe_debugfs_get_buffer_size(void)
+{
+ static size_t buffer_size;
+
+ if (!buffer_size) {
+ const char *formatter = otx2_rfoe_debugfs_get_formatter();
+ u8 max_boolean = 1;
+ int max_ptp_req_count = max_ptp_req;
+ unsigned long max_jiffies = (unsigned long)-1;
+ u16 max_psm_space = (u16)-1;
+
+ buffer_size = snprintf(NULL, 0, formatter,
+ max_boolean,
+ max_ptp_req_count,
+ max_boolean,
+ max_boolean,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_jiffies,
+ max_psm_space,
+ max_psm_space);
+ ++buffer_size;
+ }
+
+ return buffer_size;
+}
+
+static void otx2_rfoe_debugfs_create(struct otx2_rfoe_drv_ctx *ctx)
+{
+ size_t buffer_size = otx2_rfoe_debugfs_get_buffer_size();
+
+ ctx->debugfs = otx2_bphy_debugfs_add_file(ctx->netdev->name,
+ buffer_size, ctx,
+ otx2_rfoe_debugfs_reader);
+}
+
+static void otx2_rfoe_debugfs_remove(struct otx2_rfoe_drv_ctx *ctx)
+{
+ if (ctx->debugfs)
+ otx2_bphy_debugfs_remove_file(ctx->debugfs);
+}
+
+void otx2_rfoe_set_link_state(struct net_device *netdev, u8 state)
+{
+ struct otx2_rfoe_ndev_priv *priv;
+
+ priv = netdev_priv(netdev);
+
+ spin_lock(&priv->lock);
+ if (priv->link_state != state) {
+ priv->link_state = state;
+ if (state == LINK_STATE_DOWN) {
+ netdev_info(netdev, "Link DOWN\n");
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else {
+ netdev_info(netdev, "Link UP\n");
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+ }
+ }
+ }
+ spin_unlock(&priv->lock);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h
new file mode 100644
index 000000000000..da26a77d3cc6
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _OTX2_RFOE_H_
+#define _OTX2_RFOE_H_
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/if_vlan.h>
+
+#include "otx2_bphy.h"
+#include "rfoe_common.h"
+
+#define RFOE_RX_INTR_SHIFT(a) (32 - ((a) + 1) * 3)
+#define RFOE_RX_INTR_MASK(a) (RFOE_RX_INTR_EN << \
+ RFOE_RX_INTR_SHIFT(a))
+#define RFOE_TX_PTP_INTR_MASK(a, b) (1UL << ((a) * 4 + (b)))
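+/* Rx GP_INT status layout implied by the macros above: each RFOE owns a
+ * 3-bit field packed from the top of the 32-bit word, i.e. rfoe0 uses
+ * bits 31:29, rfoe1 bits 28:26 and rfoe2 bits 25:23.
+ */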
+
+#define MAX_RFOE_INTF 3 /* Max RFOE instances */
+#define RFOE_MAX_INTF 10 /* 2 rfoe x 4 lmac + 1 rfoe x 2 lmac */
+#define PCI_SUBSYS_DEVID_OCTX2_95XXN 0xB400
+
+/* ethtool msg */
+#define OTX2_RFOE_MSG_DEFAULT (NETIF_MSG_DRV)
+
+/* PTP clock time operates by adding a constant increment every clock
+ * cycle. That increment is expressed (MIO_PTP_CLOCK_COMP) as a Q32.32
+ * number of nanoseconds (32 integer bits and 32 fractional bits). The
+ * value must be equal to 1/(PTP clock frequency in GHz). If the PTP clock
+ * freq is 1 GHz there is no issue, but for other input clock frequencies,
+ * for example 950 MHz (SLCK) or 153.6 MHz (bcn_clk/2), the
+ * MIO_PTP_CLOCK_COMP register value can't be expressed exactly and an
+ * error accumulates over time depending on the direction in which the
+ * PTP_CLOCK_COMP value is rounded. The accumulated error is around
+ * -70ps or +150ps per second in the 950 MHz case.
+ *
+ * To solve this issue, the driver calculates the PTP timestamps using
+ * the BCN clock as reference, as per the algorithm given below.
+ *
+ * Set PTP tick (= MIO_PTP_CLOCK_COMP) to 1.0 ns
+ * Sample once, at exactly the same time, BCN and PTP to (BCN0, PTP0).
+ * Calculate (applying BCN-to-PTP epoch difference and an OAM parameter
+ * secondaryBcnOffset)
+ * PTPbase[ns] = NanoSec(BCN0) + NanoSec(315964819[s]) - secondaryBcnOffset[ns]
+ * When reading packet timestamp (tick count) PTPn, convert it to nanoseconds.
+ * PTP pkt timestamp = PTPbase[ns] + (PTPn - PTP0) / (PTP Clock in GHz)
+ *
+ * The intermediate values generated need to be of picosecond precision to
+ * achieve PTP accuracy < 1ns. The calculations must not overflow a 64-bit
+ * value at any time. A timer adjusts the PTP and BCN base values
+ * periodically to avoid such overflow.
+ */
+#define PTP_CLK_FREQ_GHZ 95 /* Clock freq GHz dividend */
+#define PTP_CLK_FREQ_DIV 100 /* Clock freq GHz divisor */
+#define PTP_OFF_RESAMPLE_THRESH 1800 /* resample period in seconds */
+#define PICO_SEC_PER_NSEC 1000 /* pico seconds per nano sec */
+#define UTC_GPS_EPOCH_DIFF 315964819UL /* UTC - GPS epoch secs */
+
+/* global driver context */
+struct otx2_rfoe_drv_ctx {
+ u8 rfoe_num;
+ u8 lmac_id;
+ int valid;
+ struct net_device *netdev;
+ struct rx_ft_cfg *ft_cfg;
+ int tx_gpint_bit;
+ void *debugfs;
+};
+
+extern struct otx2_rfoe_drv_ctx rfoe_drv_ctx[RFOE_MAX_INTF];
+
+/* rx flow table configuration */
+struct rx_ft_cfg {
+ enum bphy_netdev_packet_type pkt_type; /* pkt_type for psw */
+ enum bphy_netdev_rx_gpint gp_int_num;
+ u16 flow_id; /* flow id */
+ u16 mbt_idx; /* mbt index */
+ u16 buf_size; /* mbt buf size */
+ u16 num_bufs; /* mbt num bufs */
+ u64 mbt_iova_addr;
+ void __iomem *mbt_virt_addr;
+ u16 jdt_idx; /* jdt index */
+ u8 jd_size; /* jd size */
+ u16 num_jd; /* num jd's */
+ u64 jdt_iova_addr;
+ void __iomem *jdt_virt_addr;
+ u8 jd_rd_offset; /* jd rd offset */
+ u8 pkt_offset;
+ struct napi_struct napi;
+ struct otx2_rfoe_ndev_priv *priv;
+};
+
+/* PTP clk freq in GHz represented as integer numbers.
+ * This information is passed to netdev by the ODP BPHY
+ * application via ioctl. The values are used in PTP
+ * timestamp calculation algorithm.
+ *
+ * For 950MHz PTP clock =0.95GHz, the values are:
+ * clk_freq_ghz = 95
+ * clk_freq_div = 100
+ *
+ * For 153.6MHz PTP clock =0.1536GHz, the values are:
+ * clk_freq_ghz = 1536
+ * clk_freq_div = 10000
+ *
+ */
+struct ptp_clk_cfg {
+ int clk_freq_ghz; /* ptp clk freq */
+ int clk_freq_div; /* ptp clk divisor */
+};
+
+struct bcn_sec_offset_cfg {
+ u8 rfoe_num;
+ u8 lmac_id;
+ s32 sec_bcn_offset;
+};
+
+struct ptp_bcn_ref {
+ u64 ptp0_ns; /* PTP nanosec */
+ u64 bcn0_n1_ns; /* BCN N1 nanosec */
+ u64 bcn0_n2_ps; /* BCN N2 picosec */
+};
+
+struct ptp_bcn_off_cfg {
+ struct ptp_bcn_ref old_ref;
+ struct ptp_bcn_ref new_ref;
+ struct ptp_clk_cfg clk_cfg;
+ struct timer_list ptp_timer;
+ int use_ptp_alg;
+ u8 refcnt;
+ /* protection lock for updating ref */
+ spinlock_t lock;
+};
+
+/* netdev priv */
+struct otx2_rfoe_ndev_priv {
+ u8 rfoe_num;
+ u8 lmac_id;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct otx2_bphy_cdev_priv *cdev_priv;
+ u32 msg_enable;
+ u32 ptp_ext_clk_rate;
+ void __iomem *bphy_reg_base;
+ void __iomem *psm_reg_base;
+ void __iomem *rfoe_reg_base;
+ void __iomem *bcn_reg_base;
+ void __iomem *ptp_reg_base;
+ struct iommu_domain *iommu_domain;
+ struct rx_ft_cfg rx_ft_cfg[PACKET_TYPE_MAX];
+ struct tx_job_queue_cfg tx_ptp_job_cfg;
+ struct rfoe_common_cfg *rfoe_common;
+ u8 pkt_type_mask;
+ /* priv lock */
+ spinlock_t lock;
+ int rx_hw_tstamp_en;
+ int tx_hw_tstamp_en;
+ struct sk_buff *ptp_tx_skb;
+ u16 ptp_job_tag;
+ struct timer_list tx_timer;
+ unsigned long state;
+ struct work_struct ptp_tx_work;
+ struct work_struct ptp_queue_work;
+ struct ptp_tx_skb_list ptp_skb_list;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
+ /* ptp lock */
+ struct mutex ptp_lock;
+ struct otx2_rfoe_stats stats;
+ u8 mac_addr[ETH_ALEN];
+ struct ptp_bcn_off_cfg *ptp_cfg;
+ s32 sec_bcn_offset;
+ int if_type;
+ u8 link_state;
+ unsigned long last_tx_jiffies;
+ unsigned long last_tx_ptp_jiffies;
+ unsigned long last_rx_jiffies;
+ unsigned long last_rx_ptp_jiffies;
+ unsigned long last_tx_dropped_jiffies;
+ unsigned long last_tx_ptp_dropped_jiffies;
+ unsigned long last_rx_dropped_jiffies;
+ unsigned long last_rx_ptp_dropped_jiffies;
+};
+
+void otx2_rfoe_rx_napi_schedule(int rfoe_num, u32 status);
+
+int otx2_rfoe_parse_and_init_intf(struct otx2_bphy_cdev_priv *cdev,
+ struct bphy_netdev_comm_intf_cfg *cfg);
+
+void otx2_bphy_rfoe_cleanup(void);
+
+void otx2_rfoe_disable_intf(int rfoe_num);
+
+/* ethtool */
+void otx2_rfoe_set_ethtool_ops(struct net_device *netdev);
+
+/* ptp */
+void otx2_rfoe_calc_ptp_ts(struct otx2_rfoe_ndev_priv *priv, u64 *ts);
+int otx2_rfoe_ptp_init(struct otx2_rfoe_ndev_priv *priv);
+void otx2_rfoe_ptp_destroy(struct otx2_rfoe_ndev_priv *priv);
+
+/* update carrier state */
+void otx2_rfoe_set_link_state(struct net_device *netdev, u8 state);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c
new file mode 100644
index 000000000000..d697c2e27bec
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ethtool.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "otx2_rfoe.h"
+#include "otx2_bphy_hw.h"
+
+static const char ethtool_stat_strings[][ETH_GSTRING_LEN] = {
+ "oth_rx_packets",
+ "ptp_rx_packets",
+ "ecpri_rx_packets",
+ "rx_bytes",
+ "oth_rx_dropped",
+ "ptp_rx_dropped",
+ "ecpri_rx_dropped",
+ "oth_tx_packets",
+ "ptp_tx_packets",
+ "ecpri_tx_packets",
+ "tx_bytes",
+ "oth_tx_dropped",
+ "ptp_tx_dropped",
+ "ecpri_tx_dropped",
+ "ptp_tx_hwtstamp_failures",
+ "EthIfInFrames",
+ "EthIfInOctets",
+ "EthIfOutFrames",
+ "EthIfOutOctets",
+ "EthIfInUnknownVlan",
+};
+
+static void otx2_rfoe_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ memcpy(data, *ethtool_stat_strings,
+ sizeof(ethtool_stat_strings));
+ break;
+ }
+}
+
+static int otx2_rfoe_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(ethtool_stat_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void otx2_rfoe_update_lmac_stats(struct otx2_rfoe_ndev_priv *priv)
+{
+ struct otx2_rfoe_stats *stats = &priv->stats;
+
+ stats->EthIfInFrames = readq(priv->rfoe_reg_base +
+ RFOEX_RX_CGX_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInOctets = readq(priv->rfoe_reg_base +
+ RFOEX_RX_CGX_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutFrames = readq(priv->rfoe_reg_base +
+ RFOEX_TX_PKT_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfOutOctets = readq(priv->rfoe_reg_base +
+ RFOEX_TX_OCTS_STAT(priv->rfoe_num,
+ priv->lmac_id));
+ stats->EthIfInUnknownVlan =
+ readq(priv->rfoe_reg_base +
+ RFOEX_RX_VLAN_DROP_STAT(priv->rfoe_num,
+ priv->lmac_id));
+}
+
+static void otx2_rfoe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ otx2_rfoe_update_lmac_stats(priv);
+ spin_lock(&priv->stats.lock);
+ memcpy(data, &priv->stats,
+ ARRAY_SIZE(ethtool_stat_strings) * sizeof(u64));
+ spin_unlock(&priv->stats.lock);
+}
+
+static void otx2_rfoe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *p)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ snprintf(p->driver, sizeof(p->driver), "otx2_rfoe {rfoe%d lmac%d}",
+ priv->rfoe_num, priv->lmac_id);
+ strlcpy(p->bus_info, "platform", sizeof(p->bus_info));
+}
+
+static int otx2_rfoe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static u32 otx2_rfoe_get_msglevel(struct net_device *netdev)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ return priv->msg_enable;
+}
+
+static void otx2_rfoe_set_msglevel(struct net_device *netdev, u32 level)
+{
+ struct otx2_rfoe_ndev_priv *priv = netdev_priv(netdev);
+
+ priv->msg_enable = level;
+}
+
+static const struct ethtool_ops otx2_rfoe_ethtool_ops = {
+ .get_drvinfo = otx2_rfoe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = otx2_rfoe_get_ts_info,
+ .get_strings = otx2_rfoe_get_strings,
+ .get_sset_count = otx2_rfoe_get_sset_count,
+ .get_ethtool_stats = otx2_rfoe_get_ethtool_stats,
+ .get_msglevel = otx2_rfoe_get_msglevel,
+ .set_msglevel = otx2_rfoe_set_msglevel,
+};
+
+void otx2_rfoe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &otx2_rfoe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c
new file mode 100644
index 000000000000..a9f58c3bd0ab
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/otx2_rfoe_ptp.c
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell BPHY RFOE PTP PHC support.
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include "otx2_rfoe.h"
+
+#define EXT_PTP_CLK_RATE (125 * 1000000) /* Ext PTP clk rate */
+
+static int otx2_rfoe_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->ptp_lock);
+ timecounter_adjtime(&priv->time_counter, delta);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int otx2_rfoe_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ bool neg_adj = false;
+ u64 comp, adj;
+ s64 ppb;
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. Typical convention is that it
+ * represents the number of nanoseconds between each cycle. In this
+ * convention the compensation value is a 64-bit fixed-point number
+ * where the upper 32 bits are nanoseconds and the lower 32 bits are
+ * fractions of a nanosecond.
+ * The scaled_ppm argument represents the ratio in "parts per million"
+ * by which the compensation value should be corrected.
+ * To calculate the new compensation value we use 64-bit fixed-point
+ * arithmetic on the following formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the basic compensation value calculated
+ * initially in the probe function.
+ */
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
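+ /* scaled_ppm carries a 16-bit binary fraction, so
+ * ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13,
+ * which is what the multiply and shift above compute (the
+ * leading +1 apparently biases the truncation upward)
+ */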
+
+ comp = ((u64)1000000000ull << 32) / priv->ptp_ext_clk_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ writeq(comp, priv->ptp_reg_base + MIO_PTP_CLOCK_COMP);
+
+ return 0;
+}
+
+static u64 otx2_rfoe_ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(cc, struct
+ otx2_rfoe_ndev_priv,
+ cycle_counter);
+
+ return readq(priv->ptp_reg_base + MIO_PTP_CLOCK_HI);
+}
+
+static int otx2_rfoe_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ u64 nsec;
+
+ mutex_lock(&priv->ptp_lock);
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN) {
+ nsec = readq(priv->ptp_reg_base + MIO_PTP_CLOCK_HI);
+ otx2_rfoe_calc_ptp_ts(priv, &nsec);
+ } else {
+ nsec = timecounter_read(&priv->time_counter);
+ }
+ mutex_unlock(&priv->ptp_lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int otx2_rfoe_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ u64 nsec;
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&priv->ptp_lock);
+ timecounter_init(&priv->time_counter, &priv->cycle_counter, nsec);
+ mutex_unlock(&priv->ptp_lock);
+
+ return 0;
+}
+
+static int otx2_rfoe_ptp_verify_pin(struct ptp_clock_info *ptp,
+ unsigned int pin,
+ enum ptp_pin_function func,
+ unsigned int chan)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static void otx2_rfoe_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(work, struct
+ otx2_rfoe_ndev_priv,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&priv->ptp_lock);
+ tstmp = readq(priv->ptp_reg_base + MIO_PTP_TIMESTAMP);
+ mutex_unlock(&priv->ptp_lock);
+
+ if (tstmp != priv->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = timecounter_cyc2time(&priv->time_counter, tstmp);
+ ptp_clock_event(priv->ptp_clock, &event);
+ priv->last_extts = tstmp;
+
+ new_thresh = tstmp % 500000000;
+ if (priv->thresh != new_thresh) {
+ mutex_lock(&priv->ptp_lock);
+ writeq(new_thresh,
+ priv->ptp_reg_base + MIO_PTP_PPS_THRESH_HI);
+ mutex_unlock(&priv->ptp_lock);
+ priv->thresh = new_thresh;
+ }
+ }
+ schedule_delayed_work(&priv->extts_work, msecs_to_jiffies(200));
+}
+
+static int otx2_rfoe_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct otx2_rfoe_ndev_priv *priv = container_of(ptp_info,
+ struct
+ otx2_rfoe_ndev_priv,
+ ptp_clock_info);
+ int pin = -1;
+
+ if (priv->pdev->subsystem_device == PCI_SUBSYS_DEVID_OCTX2_95XXN)
+ return -EOPNOTSUPP;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on)
+ schedule_delayed_work(&priv->extts_work,
+ msecs_to_jiffies(200));
+ else
+ cancel_delayed_work_sync(&priv->extts_work);
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct ptp_clock_info otx2_rfoe_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "RFOE PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 1,
+ .n_pins = 1,
+ .pps = 0,
+ .adjfine = otx2_rfoe_ptp_adjfine,
+ .adjtime = otx2_rfoe_ptp_adjtime,
+ .gettime64 = otx2_rfoe_ptp_gettime,
+ .settime64 = otx2_rfoe_ptp_settime,
+ .enable = otx2_rfoe_ptp_enable,
+ .verify = otx2_rfoe_ptp_verify_pin,
+};
+
+int otx2_rfoe_ptp_init(struct otx2_rfoe_ndev_priv *priv)
+{
+ struct cyclecounter *cc;
+ int err;
+
+ cc = &priv->cycle_counter;
+ cc->read = otx2_rfoe_ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&priv->time_counter, &priv->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+ snprintf(priv->extts_config.name, sizeof(priv->extts_config.name),
+ "RFOE TSTAMP");
+ priv->extts_config.index = 0;
+ priv->extts_config.func = PTP_PF_NONE;
+ priv->ptp_clock_info = otx2_rfoe_ptp_clock_info;
+ priv->ptp_ext_clk_rate = EXT_PTP_CLK_RATE;
+ snprintf(priv->ptp_clock_info.name, 16, "%s", priv->netdev->name);
+ priv->ptp_clock_info.pin_config = &priv->extts_config;
+ INIT_DELAYED_WORK(&priv->extts_work, otx2_rfoe_ptp_extts_check);
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_info,
+ &priv->pdev->dev);
+ if (IS_ERR_OR_NULL(priv->ptp_clock)) {
+ /* fetch the error before clearing the pointer:
+ * PTR_ERR(NULL) would wrongly report success
+ */
+ err = priv->ptp_clock ? PTR_ERR(priv->ptp_clock) : -ENODEV;
+ priv->ptp_clock = NULL;
+ return err;
+ }
+
+ mutex_init(&priv->ptp_lock);
+
+ return 0;
+}
+
+void otx2_rfoe_ptp_destroy(struct otx2_rfoe_ndev_priv *priv)
+{
+ ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h
new file mode 100644
index 000000000000..06ce9660988f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_bphy_netdev_comm_if.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 BPHY RFOE/CPRI Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _RFOE_BPHY_NETDEV_COMM_IF_H_
+#define _RFOE_BPHY_NETDEV_COMM_IF_H_
+
+#include <linux/etherdevice.h>
+#include "bphy_netdev_comm_if.h"
+
+/**
+ * @enum bphy_netdev_tx_gpint
+ * @brief GP_INT numbers for packet notification by netdev to BPHY.
+ *
+ */
+enum bphy_netdev_tx_gpint {
+ TX_GP_INT_RFOE0_LMAC0 = 32, //PSM_GPINT32,
+ TX_GP_INT_RFOE0_LMAC1 = 33, //PSM_GPINT33,
+ TX_GP_INT_RFOE0_LMAC2 = 34, //PSM_GPINT34,
+ TX_GP_INT_RFOE0_LMAC3 = 35, //PSM_GPINT35,
+
+ TX_GP_INT_RFOE1_LMAC0 = 36, //PSM_GPINT36,
+ TX_GP_INT_RFOE1_LMAC1 = 37, //PSM_GPINT37,
+ TX_GP_INT_RFOE1_LMAC2 = 38, //PSM_GPINT38,
+ TX_GP_INT_RFOE1_LMAC3 = 39, //PSM_GPINT39,
+
+ TX_GP_INT_RFOE2_LMAC0 = 40, //PSM_GPINT40,
+ TX_GP_INT_RFOE2_LMAC1 = 41, //PSM_GPINT41
+};
+
+/**
+ * @enum bphy_netdev_rx_gpint
+ * @brief GP_INT numbers for packet notification by BPHY to netdev.
+ *
+ */
+enum bphy_netdev_rx_gpint {
+ RX_GP_INT_RFOE0_PTP = 63, //PSM_GPINT63,
+ RX_GP_INT_RFOE0_ECPRI = 62, //PSM_GPINT62,
+ RX_GP_INT_RFOE0_GENERIC = 61, //PSM_GPINT61,
+
+ RX_GP_INT_RFOE1_PTP = 60, //PSM_GPINT60,
+ RX_GP_INT_RFOE1_ECPRI = 59, //PSM_GPINT59,
+ RX_GP_INT_RFOE1_GENERIC = 58, //PSM_GPINT58,
+
+ RX_GP_INT_RFOE2_PTP = 57, //PSM_GPINT57,
+ RX_GP_INT_RFOE2_ECPRI = 56, //PSM_GPINT56,
+ RX_GP_INT_RFOE2_GENERIC = 55, //PSM_GPINT55
+};
+
+/**
+ * @enum bphy_netdev_cpri_rx_gpint
+ * @brief GP_INT numbers for CPRI Ethernet packet Rx notification to netdev.
+ *
+ */
+enum bphy_netdev_cpri_rx_gpint {
+ RX_GP_INT_CPRI0_ETH = 45, //PSM_GPINT45,
+ RX_GP_INT_CPRI1_ETH = 46, //PSM_GPINT46,
+ RX_GP_INT_CPRI2_ETH = 47, //PSM_GPINT47
+};
+
+/**
+ * @struct bphy_netdev_intf_info
+ * @brief LMAC lane number, mac address and status information
+ *
+ */
+struct bphy_netdev_intf_info {
+ u8 rfoe_num;
+ u8 lane_num;
+ /* Source mac address */
+ u8 eth_addr[ETH_ALEN];
+ /* LMAC interface status */
+ u8 status; //0-DOWN, 1-UP
+ /* Configuration valid status; This interface shall be
+ * invalid if this field is set to 0
+ */
+ u8 is_valid;
+};
+
+/**
+ * @struct bphy_netdev_rbuf_info
+ * @brief Information about the packet ring buffer which shall be used to send
+ * the packets from BPHY to netdev.
+ *
+ */
+struct bphy_netdev_rbuf_info {
+ enum bphy_netdev_packet_type pkt_type;
+ /* gp_int = 0 can be treated as pkt type not enabled */
+ enum bphy_netdev_rx_gpint gp_int_num;
+ u16 flow_id;
+ u16 mbt_index;
+ /* Maximum number of buffers in the Ring/Pool */
+ u16 num_bufs;
+ /* MAX Buffer Size configured */
+ u16 buf_size; // TBC: 1536?
+ /* MBT buffer target memory */
+ u8 mbt_target_mem;
+ u8 reserved;
+ /* Buffers starting address */
+ u64 mbt_iova_addr;
+ u16 jdt_index;
+ /* Maximum number of JD buffers in the Ring/Pool */
+ u16 num_jd;
+ /* MAX JD size configured */
+ u8 jd_size;
+ /* JDT buffer target memory */
+ u8 jdt_target_mem;
+ /* Buffers starting address */
+ u64 jdt_iova_addr;
+};
+
+/**
+ * @brief
+ *
+ */
+struct bphy_netdev_tx_psm_cmd_info {
+ enum bphy_netdev_tx_gpint gp_int_num; /* Valid only for PTP messages */
+ u64 jd_iova_addr;
+ u64 rd_dma_iova_addr;
+ u64 low_cmd;
+ u64 high_cmd;
+};
+
+/**
+ * @struct bphy_netdev_comm_if
+ * @brief The communication interface definitions which would be used by
+ * the netdev and bphy application.
+ *
+ */
+struct bphy_netdev_comm_if {
+ struct bphy_netdev_intf_info lmac_info;
+ struct bphy_netdev_rbuf_info rbuf_info[PACKET_TYPE_MAX];
+ /* Defining a single array to handle both PTP and OTHER cmds info */
+ struct bphy_netdev_tx_psm_cmd_info ptp_pkt_info[MAX_PTP_MSG_PER_LMAC];
+};
+
+/**
+ * @struct bphy_netdev_cpri_if
+ * @brief communication interface structure definition to be used by
+ * BPHY and NETDEV applications for CPRI Interface.
+ *
+ */
+struct bphy_netdev_cpri_if {
+ u8 id; /* CPRI ID 0..2 */
+ u8 active_lane_mask; /* lane mask */
+ u8 ul_gp_int_num; /* UL GP INT NUM */
+ u8 ul_int_threshold; /* UL INT THRESHOLD */
+ u8 num_ul_buf; /* Num UL Buffers */
+ u8 num_dl_buf; /* Num DL Buffers */
+ u8 reserved[2];
+ u64 ul_buf_iova_addr;
+ u64 dl_buf_iova_addr;
+ u8 eth_addr[MAX_LANE_PER_CPRI][ETH_ALEN];
+};
+
+/**
+ * @struct bphy_netdev_rfoe_if
+ * @brief communication interface structure definition to be used by
+ * BPHY and NETDEV applications for RFOE Interface.
+ *
+ */
+struct bphy_netdev_rfoe_if {
+ /* Interface configuration */
+ struct bphy_netdev_comm_if if_cfg[MAX_LMAC_PER_RFOE];
+ /* TX JD cmds to send packets other than PTP;
+ * These are defined per RFoE and all LMAC can share
+ */
+ struct bphy_netdev_tx_psm_cmd_info oth_pkt_info[MAX_OTH_MSG_PER_RFOE];
+ /* Packet types for which the RX flows are configured. */
+ u8 pkt_type_mask;
+};
+
+/**
+ * @struct bphy_netdev_comm_intf_cfg
+ * @brief ODP-NETDEV communication interface definition structure to share
+ * the RX/TX interfaces information.
+ *
+ */
+struct bphy_netdev_comm_intf_cfg {
+ enum bphy_netdev_if_type if_type; /* 0 --> ETHERNET, 1 --> CPRI */
+ struct bphy_netdev_rfoe_if rfoe_if_cfg; /* RFOE INTF configuration */
+ struct bphy_netdev_cpri_if cpri_if_cfg; /* CPRI INTF configuration */
+};
+
+#endif //_RFOE_BPHY_NETDEV_COMM_IF_H_
diff --git a/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h
new file mode 100644
index 000000000000..6fb7c315bd0f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/bphy/rfoe_common.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell BPHY RFOE Netdev Driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#ifndef _OTX2_RFOE_COMMON_H_
+#define _OTX2_RFOE_COMMON_H_
+
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+
+#include "bphy_netdev_comm_if.h"
+
+/* PTP register offsets */
+#define MIO_PTP_CLOCK_HI 0x10
+#define MIO_PTP_TIMESTAMP 0x20
+#define MIO_PTP_PPS_THRESH_HI 0x58ULL
+#define MIO_PTP_CLOCK_COMP 0x18ULL
+
+/* max tx job entries */
+#define MAX_TX_JOB_ENTRIES 64
+
+/* GPINT(1) RFOE definitions */
+#define RX_PTP_INTR BIT(2) /* PTP packet intr */
+#define RX_ECPRI_INTR BIT(1) /* ECPRI packet intr */
+#define RX_GEN_INTR BIT(0) /* GENERIC packet intr */
+#define RFOE_RX_INTR_EN (RX_PTP_INTR | \
+ RX_ECPRI_INTR | \
+ RX_GEN_INTR)
+/* Interrupt processing definitions */
+#define INTR_TO_PKT_TYPE(a) (PACKET_TYPE_OTHER - (a))
+#define PKT_TYPE_TO_INTR(a) (1UL << (PACKET_TYPE_OTHER - (a)))
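+/* e.g. assuming PACKET_TYPE_PTP, PACKET_TYPE_ECPRI and PACKET_TYPE_OTHER
+ * are consecutive enum values, INTR_TO_PKT_TYPE() maps GP_INT bit 0
+ * (RX_GEN_INTR) to PACKET_TYPE_OTHER, bit 1 to PACKET_TYPE_ECPRI and
+ * bit 2 to PACKET_TYPE_PTP; PKT_TYPE_TO_INTR() is the inverse mapping.
+ */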
+
+enum state {
+ PTP_TX_IN_PROGRESS = 1,
+ RFOE_INTF_DOWN,
+};
+
+/* rfoe rx ind register configuration */
+struct otx2_rfoe_rx_ind_cfg {
+ u8 rfoe_num; /* rfoe idx */
+ u16 rx_ind_idx; /* RFOE(0..2)_RX_INDIRECT_INDEX */
+ u64 regoff; /* RFOE(0..2)_RX_IND_* reg offset */
+ u64 regval; /* input when write, output when read */
+#define OTX2_RFOE_RX_IND_READ 0
+#define OTX2_RFOE_RX_IND_WRITE 1
+ u8 dir; /* register access dir (read/write) */
+};
+
+/* tx job entry */
+struct tx_job_entry {
+ u64 job_cmd_lo;
+ u64 job_cmd_hi;
+ u64 jd_iova_addr;
+ u64 rd_dma_iova_addr;
+ void __iomem *jd_ptr;
+ void __iomem *rd_dma_ptr;
+ void __iomem *jd_cfg_ptr;
+};
+
+/* tx job queue */
+struct tx_job_queue_cfg {
+ u8 psm_queue_id;
+ struct tx_job_entry job_entries[MAX_TX_JOB_ENTRIES];
+ /* actual number of entries configured by ODP */
+ int num_entries;
+ /* queue index */
+ int q_idx;
+ /* lmac protection lock */
+ spinlock_t lock;
+};
+
+/* rfoe common (for all lmac's) */
+struct rfoe_common_cfg {
+ struct tx_job_queue_cfg tx_oth_job_cfg;
+ int rx_mbt_last_idx[PACKET_TYPE_MAX];
+ u16 nxt_buf[PACKET_TYPE_MAX];
+ u8 refcnt;
+ u8 rx_vlan_fwd_refcnt[VLAN_N_VID];
+};
+
+/* ptp pending skb list */
+struct ptp_tx_skb_list {
+ struct list_head list;
+ unsigned int count;
+};
+
+/* ptp skb list entry */
+struct ptp_tstamp_skb {
+ struct list_head list;
+ struct sk_buff *skb;
+};
+
+struct otx2_rfoe_stats {
+ /* rx */
+ u64 rx_packets; /* rx packets */
+ u64 ptp_rx_packets; /* ptp rx packets */
+ u64 ecpri_rx_packets; /* ecpri rx packets */
+ u64 rx_bytes; /* rx bytes count */
+ u64 rx_dropped; /* rx dropped */
+ u64 ptp_rx_dropped; /* ptp rx dropped */
+ u64 ecpri_rx_dropped; /* ecpri rx dropped */
+
+ /* tx */
+ u64 tx_packets; /* tx packets */
+ u64 ptp_tx_packets; /* ptp tx packets */
+ u64 ecpri_tx_packets; /* ecpri tx packets */
+ u64 tx_bytes; /* tx bytes count */
+ u64 tx_dropped; /* tx dropped */
+ u64 ptp_tx_dropped; /* ptp tx dropped */
+ u64 ecpri_tx_dropped; /* ecpri tx dropped */
+ u64 tx_hwtstamp_failures; /* ptp tx timestamp failures */
+
+ /* per LMAC stats */
+ u64 EthIfInFrames;
+ u64 EthIfInOctets;
+ u64 EthIfOutFrames;
+ u64 EthIfOutOctets;
+ u64 EthIfInUnknownVlan;
+
+ /* stats update lock */
+ spinlock_t lock;
+};
+
+struct otx2_rfoe_link_event {
+ u8 rfoe_num;
+ u8 lmac_id;
+ u8 link_state;
+};
+
+#endif