path: root/drivers/crypto/ccree/cc_ivgen.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <crypto/ctr.h>
#include "cc_driver.h"
#include "cc_ivgen.h"
#include "cc_request_mgr.h"
#include "cc_sram_mgr.h"
#include "cc_buffer_mgr.h"

/* The max. size of the pool *MUST* be <= SRAM total size */
#define CC_IVPOOL_SIZE 1024
/* The first 32B of the pool are dedicated to the
 * next encryption "key" & "IV" for pool regeneration
 */
#define CC_IVPOOL_META_SIZE (CC_AES_IV_SIZE + AES_KEYSIZE_128)
#define CC_IVPOOL_GEN_SEQ_LEN	4
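
/*
 * Pool layout implied by the definitions above and by cc_init_iv_sram()
 * (offsets in bytes):
 *
 *    0                16               32                        1024
 *    +----------------+----------------+------------ ... ---------+
 *    |  next CTR key  |  next CTR IV   |  IVs handed out by       |
 *    |  (16B)         |  (16B)         |  cc_get_iv()             |
 *    +----------------+----------------+------------ ... ---------+
 *    |<---- CC_IVPOOL_META_SIZE ------>|
 *
 * Each regeneration overwrites the whole pool, metadata included, with
 * fresh keystream, so every refill also provides the key/IV used for the
 * next refill.
 */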

/**
 * struct cc_ivgen_ctx - IV pool generation context
 * @pool:          SRAM start address of the IV pool in internal RAM
 * @ctr_key:       SRAM address of the pool's encryption key material
 * @ctr_iv:        SRAM address of the pool's counter IV
 * @next_iv_ofs:   offset of the next available IV in the pool
 * @pool_meta:     virtual address of the initial enc. key/IV
 * @pool_meta_dma: DMA address of the initial enc. key/IV
 */
struct cc_ivgen_ctx {
	cc_sram_addr_t pool;
	cc_sram_addr_t ctr_key;
	cc_sram_addr_t ctr_iv;
	u32 next_iv_ofs;
	u8 *pool_meta;
	dma_addr_t pool_meta_dma;
};

/*!
 * Generates CC_IVPOOL_SIZE random bytes by
 * encrypting zeros using AES-128-CTR.
 *
 * \param ivgen_ctx iv-pool context
 * \param iv_seq IN/OUT array of descriptors for the sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 *
 * \return int Zero for success, negative value otherwise.
 */
static int cc_gen_iv_pool(struct cc_ivgen_ctx *ivgen_ctx,
			  struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
	unsigned int idx = *iv_seq_len;

	if ((*iv_seq_len + CC_IVPOOL_GEN_SEQ_LEN) > CC_IVPOOL_SEQ_LEN) {
		/* The sequence will be longer than allowed */
		return -EINVAL;
	}
	/* Setup key */
	hw_desc_init(&iv_seq[idx]);
	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_key, AES_KEYSIZE_128);
	set_setup_mode(&iv_seq[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
	idx++;

	/* Setup cipher state */
	hw_desc_init(&iv_seq[idx]);
	set_din_sram(&iv_seq[idx], ivgen_ctx->ctr_iv, CC_AES_IV_SIZE);
	set_cipher_config0(&iv_seq[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&iv_seq[idx], S_DIN_to_AES);
	set_setup_mode(&iv_seq[idx], SETUP_LOAD_STATE1);
	set_key_size_aes(&iv_seq[idx], CC_AES_128_BIT_KEY_SIZE);
	set_cipher_mode(&iv_seq[idx], DRV_CIPHER_CTR);
	idx++;

	/* Perform dummy encrypt to skip first block */
	hw_desc_init(&iv_seq[idx]);
	set_din_const(&iv_seq[idx], 0, CC_AES_IV_SIZE);
	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_AES_IV_SIZE);
	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
	idx++;

	/* Generate IV pool */
	hw_desc_init(&iv_seq[idx]);
	set_din_const(&iv_seq[idx], 0, CC_IVPOOL_SIZE);
	set_dout_sram(&iv_seq[idx], ivgen_ctx->pool, CC_IVPOOL_SIZE);
	set_flow_mode(&iv_seq[idx], DIN_AES_DOUT);
	idx++;

	*iv_seq_len = idx; /* Update sequence length */

	/* queue ordering assures pool readiness */
	ivgen_ctx->next_iv_ofs = CC_IVPOOL_META_SIZE;

	return 0;
}

/*!
 * Generates the initial pool in SRAM.
 * This function should be invoked when resuming the driver.
 *
 * \param drvdata Driver's private context
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_init_iv_sram(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
	unsigned int iv_seq_len = 0;
	int rc;

	/* Generate initial enc. key/iv */
	get_random_bytes(ivgen_ctx->pool_meta, CC_IVPOOL_META_SIZE);

	/* The first 32B are reserved for the enc. key/IV */
	ivgen_ctx->ctr_key = ivgen_ctx->pool;
	ivgen_ctx->ctr_iv = ivgen_ctx->pool + AES_KEYSIZE_128;

	/* Copy the initial enc. key and IV to SRAM in a single descriptor */
	hw_desc_init(&iv_seq[iv_seq_len]);
	set_din_type(&iv_seq[iv_seq_len], DMA_DLLI, ivgen_ctx->pool_meta_dma,
		     CC_IVPOOL_META_SIZE, NS_BIT);
	set_dout_sram(&iv_seq[iv_seq_len], ivgen_ctx->pool,
		      CC_IVPOOL_META_SIZE);
	set_flow_mode(&iv_seq[iv_seq_len], BYPASS);
	iv_seq_len++;

	/* Generate initial pool */
	rc = cc_gen_iv_pool(ivgen_ctx, iv_seq, &iv_seq_len);
	if (rc)
		return rc;

	/* Fire-and-forget */
	return send_request_init(drvdata, iv_seq, iv_seq_len);
}

/*!
 * Frees the iv-pool and the ivgen context.
 *
 * \param drvdata Driver's private context
 */
void cc_ivgen_fini(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	struct device *device = &drvdata->plat_dev->dev;

	if (!ivgen_ctx)
		return;

	if (ivgen_ctx->pool_meta) {
		memset(ivgen_ctx->pool_meta, 0, CC_IVPOOL_META_SIZE);
		dma_free_coherent(device, CC_IVPOOL_META_SIZE,
				  ivgen_ctx->pool_meta,
				  ivgen_ctx->pool_meta_dma);
	}

	ivgen_ctx->pool = NULL_SRAM_ADDR;

	/* release "this" context */
	kfree(ivgen_ctx);
}

/*!
 * Allocates iv-pool and maps resources.
 * This function generates the first IV pool.
 *
 * \param drvdata Driver's private context
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_ivgen_init(struct cc_drvdata *drvdata)
{
	struct cc_ivgen_ctx *ivgen_ctx;
	struct device *device = &drvdata->plat_dev->dev;
	int rc;

	/* Allocate "this" context */
	ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
	if (!ivgen_ctx)
		return -ENOMEM;

	/* Allocate pool's header for initial enc. key/IV */
	ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
						  &ivgen_ctx->pool_meta_dma,
						  GFP_KERNEL);
	if (!ivgen_ctx->pool_meta) {
		dev_err(device, "Not enough memory to allocate DMA of pool_meta (%u B)\n",
			CC_IVPOOL_META_SIZE);
		rc = -ENOMEM;
		goto out;
	}
	/* Allocate IV pool in SRAM */
	ivgen_ctx->pool = cc_sram_alloc(drvdata, CC_IVPOOL_SIZE);
	if (ivgen_ctx->pool == NULL_SRAM_ADDR) {
		dev_err(device, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto out;
	}

	drvdata->ivgen_handle = ivgen_ctx;

	return cc_init_iv_sram(drvdata);

out:
	cc_ivgen_fini(drvdata);
	return rc;
}

/*!
 * Acquires an IV from the iv-pool.
 *
 * \param drvdata Driver's private context
 * \param iv_out_dma Array of DMA addresses to which the IV is written
 * \param iv_out_dma_len Number of entries in the iv_out_dma array
 *                       (additional elements of the array are ignored)
 * \param iv_out_size IV size; may be 8 or 16 bytes
 * \param iv_seq IN/OUT array of descriptors for the sequence
 * \param iv_seq_len IN/OUT pointer to the sequence length
 *
 * \return int Zero for success, negative value otherwise.
 */
int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
	      unsigned int iv_out_dma_len, unsigned int iv_out_size,
	      struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
	struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
	unsigned int idx = *iv_seq_len;
	struct device *dev = drvdata_to_dev(drvdata);
	unsigned int t;

	if (iv_out_size != CC_AES_IV_SIZE &&
	    iv_out_size != CTR_RFC3686_IV_SIZE) {
		return -EINVAL;
	}
	if ((iv_out_dma_len + 1) > CC_IVPOOL_SEQ_LEN) {
		/* The sequence will be longer than allowed */
		return -EINVAL;
	}

	/* Check that the number of requested IVs does not exceed the
	 * maximum number of supported DMA output addresses
	 */
	if (iv_out_dma_len > CC_MAX_IVGEN_DMA_ADDRESSES) {
		/* The sequence will be longer than allowed */
		return -EINVAL;
	}

	for (t = 0; t < iv_out_dma_len; t++) {
		/* Acquire IV from pool */
		hw_desc_init(&iv_seq[idx]);
		set_din_sram(&iv_seq[idx], (ivgen_ctx->pool +
					    ivgen_ctx->next_iv_ofs),
			     iv_out_size);
		set_dout_dlli(&iv_seq[idx], iv_out_dma[t], iv_out_size,
			      NS_BIT, 0);
		set_flow_mode(&iv_seq[idx], BYPASS);
		idx++;
	}

	/* The bypass operation is followed by the crypto sequence, so a
	 * dummy (no-DMA) descriptor is used as a memory barrier to ensure
	 * the bypass write transaction has completed
	 */
	hw_desc_init(&iv_seq[idx]);
	set_din_no_dma(&iv_seq[idx], 0, 0xfffff0);
	set_dout_no_dma(&iv_seq[idx], 0, 0, 1);
	idx++;

	*iv_seq_len = idx; /* update seq length */

	/* Update iv index */
	ivgen_ctx->next_iv_ofs += iv_out_size;

	if ((CC_IVPOOL_SIZE - ivgen_ctx->next_iv_ofs) < CC_AES_IV_SIZE) {
		dev_dbg(dev, "Pool exhausted, regenerating iv-pool\n");
		/* Pool is drained - regenerate it */
		return cc_gen_iv_pool(ivgen_ctx, iv_seq, iv_seq_len);
	}

	return 0;
}
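
/*
 * Illustrative only: a minimal sketch of how a caller (in the real driver,
 * the request manager) might consume this API. The variable names below are
 * hypothetical; only cc_get_iv() and the constants come from this driver.
 *
 *	struct cc_hw_desc desc[CC_IVPOOL_SEQ_LEN];
 *	unsigned int len = 0;
 *	dma_addr_t iv_dma[1] = { req_iv_dma_addr };	// hypothetical address
 *	int rc;
 *
 *	// Prepend descriptors that copy one 16-byte IV out of the SRAM pool;
 *	// the cipher descriptors for the actual request would follow in desc[].
 *	rc = cc_get_iv(drvdata, iv_dma, 1, CC_AES_IV_SIZE, desc, &len);
 *	if (rc)
 *		return rc;
 */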