path: root/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Marvell. */

#include "otx2_cptpf.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"

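/*
 * cn10k_lmt_flush() triggers an LMTST: on arm64 it issues the LSE STEOR
 * (atomic EOR to memory) instruction against the LMT target address so
 * that the hardware picks up the command written to the LMT line.  On
 * other architectures it degrades to a plain assignment, serving only
 * as a stub (e.g. for compile testing).
 */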
#if defined(CONFIG_ARM64)
#define cn10k_lmt_flush(val, addr)			\
({							\
	__asm__ volatile(".cpu  generic+lse\n"		\
			 "steor %x[rf],[%[rs]]"		\
			 : [rf]"+r"(val)		\
			 : [rs]"r"(addr));		\
})
#else
#define cn10k_lmt_flush(val, addr)	({ addr = val; })
#endif

static struct cpt_hw_ops otx2_hw_ops = {
	.send_cmd = otx2_cpt_send_cmd,
	.cpt_get_compcode = otx2_cpt_get_compcode,
	.cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
};

static struct cpt_hw_ops cn10k_hw_ops = {
	.send_cmd = cn10k_cpt_send_cmd,
	.cpt_get_compcode = cn10k_cpt_get_compcode,
	.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
};

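/*
 * Copy one or more CPT instructions into the LF's LMTLINE and issue an
 * LMTST flush so the CPT hardware fetches and executes them.
 */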
void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			struct otx2_cptlf_info *lf)
{
	void __iomem *lmtline = lf->lmtline;
	u64 val = (lf->slot & 0x7FF);
	u64 tar_addr = 0;

	/* tar_addr<6:4> = Size of first LMTST - 1 in units of 128b. */
	tar_addr |= (__force u64)lf->ioreg |
		     (((OTX2_CPT_INST_SIZE/16) - 1) & 0x7) << 4;
	/*
	 * Make sure memory areas pointed to by CPT_INST_S
	 * are flushed before the instruction is sent to CPT
	 */
	dma_wmb();

	/* Copy CPT command to LMTLINE */
	memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
	cn10k_lmt_flush(val, tar_addr);
}

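/*
 * PF LMTST setup: read the LMTLINE base address from RVU_PF_LMTLINE_ADDR
 * and map the LMT region write-combined.  When the LMTST capability is
 * not present (OcteonTX2), fall back to the otx2_hw_ops send path.
 */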
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t size;
	u64 lmt_base;

	if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
		cptpf->lfs.ops = &otx2_hw_ops;
		return 0;
	}

	cptpf->lfs.ops = &cn10k_hw_ops;
	lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
	if (!lmt_base) {
		dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
		return -ENOMEM;
	}
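	/* LMT region size = mbox BAR length minus PF + VF mailbox space */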
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
	cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
	if (!cptpf->lfs.lmt_base) {
		dev_err(&pdev->dev,
			"Mapping of PF LMTLINE address failed\n");
		return -ENOMEM;
	}

	return 0;
}

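/*
 * VF LMTST setup: map the VF's entire mailbox BAR write-combined and use
 * it as the LMT base; without the LMTST capability fall back to the
 * otx2_hw_ops send path.
 */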
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	resource_size_t offset, size;

	if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) {
		cptvf->lfs.ops = &otx2_hw_ops;
		return 0;
	}

	cptvf->lfs.ops = &cn10k_hw_ops;
	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	/* Map VF LMTLINE region */
	cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
	if (!cptvf->lfs.lmt_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		return -ENOMEM;
	}

	return 0;
}