aboutsummaryrefslogtreecommitdiffstats
path: root/recipes-connectivity/openssl/openssl-qoriq/qoriq/0017-cryptodev-add-support-for-aes-gcm-algorithm-offloadi.patch
blob: bd9e61ac0f112e12aca74e18eb404569860df561 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
From 11b55103463bac614e00d74e9f196ec4ec6bade1 Mon Sep 17 00:00:00 2001
From: Cristian Stoica <cristian.stoica@freescale.com>
Date: Mon, 16 Jun 2014 14:06:21 +0300
Subject: [PATCH 17/26] cryptodev: add support for aes-gcm algorithm offloading

Change-Id: I3b77dc5ef8b8f707309549244a02852d95b36168
Signed-off-by: Cristian Stoica <cristian.stoica@freescale.com>
Reviewed-on: http://git.am.freescale.net:8181/17226
---
 apps/speed.c                  |   6 +-
 crypto/engine/eng_cryptodev.c | 229 +++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 233 insertions(+), 2 deletions(-)

diff --git a/apps/speed.c b/apps/speed.c
index 9886ca3..099dede 100644
--- a/apps/speed.c
+++ b/apps/speed.c
@@ -224,7 +224,11 @@
 #endif
 
 #undef BUFSIZE
-#define BUFSIZE	((long)1024*8+1)
+/* The buffer overhead allows GCM tag at the end of the encrypted data. This
+   avoids buffer overflows from cryptodev since the Linux kernel GCM
+   implementation always adds the tag - unlike e_aes.c:aes_gcm_cipher()
+   which doesn't */
+#define BUFSIZE	((long)1024*8 + EVP_GCM_TLS_TAG_LEN)
 int run=0;
 
 static int mr=0;
diff --git a/crypto/engine/eng_cryptodev.c b/crypto/engine/eng_cryptodev.c
index 13d924f..4493490 100644
--- a/crypto/engine/eng_cryptodev.c
+++ b/crypto/engine/eng_cryptodev.c
@@ -78,8 +78,10 @@ struct dev_crypto_state {
 	struct session_op d_sess;
 	int d_fd;
 	unsigned char *aad;
-	unsigned int aad_len;
+	int aad_len;
 	unsigned int len;
+	unsigned char *iv;
+	int ivlen;
 
 #ifdef USE_CRYPTODEV_DIGESTS
 	char dummy_mac_key[HASH_MAX_LEN];
@@ -251,6 +253,7 @@ static struct {
 	{ CRYPTO_SKIPJACK_CBC,  NID_undef,        0,  0,  0},
 	{ CRYPTO_TLS10_AES_CBC_HMAC_SHA1, NID_aes_128_cbc_hmac_sha1, 16, 16, 20},
 	{ CRYPTO_TLS10_AES_CBC_HMAC_SHA1, NID_aes_256_cbc_hmac_sha1, 16, 32, 20},
+	{ CRYPTO_AES_GCM,       NID_aes_128_gcm,  16, 16, 0},
 	{ 0, NID_undef,	0, 0, 0},
 };
 
@@ -271,6 +274,19 @@ static struct {
 };
 #endif
 
+/* increment counter (64-bit int) by 1 */
+static void ctr64_inc(unsigned char *counter) {
+	int n=8;
+	unsigned char  c;
+
+	do {
+		--n;
+		c = counter[n];
+		++c;
+		counter[n] = c;
+		if (c) return;
+	} while (n);
+}
 /*
  * Return a fd if /dev/crypto seems usable, 0 otherwise.
  */
@@ -762,6 +778,197 @@ static int cryptodev_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
 	}
 }
 
+static int cryptodev_init_gcm_key(EVP_CIPHER_CTX *ctx,
+	const unsigned char *key, const unsigned char *iv, int enc)
+{
+	struct dev_crypto_state *state = ctx->cipher_data;
+	struct session_op *sess = &state->d_sess;
+	int cipher = -1, i;
+	if (!iv && !key)
+		return 1;
+
+	if (iv)
+		memcpy(ctx->iv, iv, ctx->cipher->iv_len);
+
+	for (i = 0; ciphers[i].id; i++)
+		if (ctx->cipher->nid == ciphers[i].nid &&
+		    ctx->cipher->iv_len <= ciphers[i].ivmax &&
+		    ctx->key_len == ciphers[i].keylen) {
+			cipher = ciphers[i].id;
+			break;
+		}
+
+	if (!ciphers[i].id) {
+		state->d_fd = -1;
+		return 0;
+	}
+
+	memset(sess, 0, sizeof(struct session_op));
+
+	if ((state->d_fd = get_dev_crypto()) < 0)
+		return 0;
+
+	sess->key = (unsigned char *) key;
+	sess->keylen = ctx->key_len;
+	sess->cipher = cipher;
+
+	if (ioctl(state->d_fd, CIOCGSESSION, sess) == -1) {
+		put_dev_crypto(state->d_fd);
+		state->d_fd = -1;
+		return 0;
+	}
+	return 1;
+}
+
+static int cryptodev_gcm_tls_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+		const unsigned char *in, size_t len)
+{
+	struct crypt_auth_op cryp = {0};
+	struct dev_crypto_state *state = ctx->cipher_data;
+	struct session_op *sess = &state->d_sess;
+	int rv = len;
+
+	if (EVP_CIPHER_CTX_ctrl(ctx, ctx->encrypt ?
+			EVP_CTRL_GCM_IV_GEN : EVP_CTRL_GCM_SET_IV_INV,
+			EVP_GCM_TLS_EXPLICIT_IV_LEN, out) <= 0)
+		return 0;
+
+	in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
+	out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
+	len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
+
+	if (ctx->encrypt) {
+		len -= EVP_GCM_TLS_TAG_LEN;
+	}
+	cryp.ses = sess->ses;
+	cryp.len = len;
+	cryp.src = (unsigned char*) in;
+	cryp.dst = out;
+	cryp.auth_src = state->aad;
+	cryp.auth_len = state->aad_len;
+	cryp.iv = ctx->iv;
+	cryp.op = ctx->encrypt ? COP_ENCRYPT : COP_DECRYPT;
+
+	if (ioctl(state->d_fd, CIOCAUTHCRYPT, &cryp) == -1) {
+		return 0;
+	}
+
+	if (ctx->encrypt)
+		ctr64_inc(state->iv + state->ivlen - 8);
+	else
+		rv = len - EVP_GCM_TLS_TAG_LEN;
+
+	return rv;
+}
+
+static int cryptodev_gcm_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
+		const unsigned char *in, size_t len)
+{
+	struct crypt_auth_op cryp;
+	struct dev_crypto_state *state = ctx->cipher_data;
+	struct session_op *sess = &state->d_sess;
+
+	if (state->d_fd < 0)
+		return 0;
+
+	if ((len % ctx->cipher->block_size) != 0)
+		return 0;
+
+	if (state->aad_len >= 0)
+		return cryptodev_gcm_tls_cipher(ctx, out, in, len);
+
+	memset(&cryp, 0, sizeof(cryp));
+
+	cryp.ses = sess->ses;
+	cryp.len = len;
+	cryp.src = (unsigned char*) in;
+	cryp.dst = out;
+	cryp.auth_src = NULL;
+	cryp.auth_len = 0;
+	cryp.iv = ctx->iv;
+	cryp.op = ctx->encrypt ? COP_ENCRYPT : COP_DECRYPT;
+
+	if (ioctl(state->d_fd, CIOCAUTHCRYPT, &cryp) == -1) {
+		return 0;
+	}
+
+	return len;
+}
+
+static int cryptodev_gcm_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
+		void *ptr)
+{
+	struct dev_crypto_state *state = ctx->cipher_data;
+	switch (type) {
+	case EVP_CTRL_INIT:
+	{
+		state->ivlen = ctx->cipher->iv_len;
+		state->iv = ctx->iv;
+		state->aad_len = -1;
+		return 1;
+	}
+	case EVP_CTRL_GCM_SET_IV_FIXED:
+	{
+		/* Special case: -1 length restores whole IV */
+		if (arg == -1)
+			{
+			memcpy(state->iv, ptr, state->ivlen);
+			return 1;
+			}
+		/* Fixed field must be at least 4 bytes and invocation field
+		 * at least 8.
+		 */
+		if ((arg < 4) || (state->ivlen - arg) < 8)
+			return 0;
+		if (arg)
+			memcpy(state->iv, ptr, arg);
+		if (ctx->encrypt &&
+			RAND_bytes(state->iv + arg, state->ivlen - arg) <= 0)
+			return 0;
+		return 1;
+	}
+	case EVP_CTRL_AEAD_TLS1_AAD:
+	{
+		unsigned int len;
+		if (arg != 13)
+			return 0;
+
+		memcpy(ctx->buf, ptr, arg);
+		len=ctx->buf[arg-2] << 8 | ctx->buf[arg-1];
+
+		/* Correct length for explicit IV */
+		len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
+
+		/* If decrypting correct for tag too */
+		if (!ctx->encrypt)
+			len -= EVP_GCM_TLS_TAG_LEN;
+
+		ctx->buf[arg-2] = len >> 8;
+		ctx->buf[arg-1] = len & 0xff;
+
+		state->aad = ctx->buf;
+		state->aad_len = arg;
+		state->len = len;
+
+		/* Extra padding: tag appended to record */
+		return EVP_GCM_TLS_TAG_LEN;
+	}
+	case EVP_CTRL_GCM_SET_IV_INV:
+	{
+		if (ctx->encrypt)
+			return 0;
+		memcpy(state->iv + state->ivlen - arg, ptr, arg);
+		return 1;
+	}
+	case EVP_CTRL_GCM_IV_GEN:
+		if (arg <= 0 || arg > state->ivlen)
+			arg = state->ivlen;
+		memcpy(ptr, state->iv + state->ivlen - arg, arg);
+		return 1;
+	default:
+		return -1;
+	}
+}
 /*
  * libcrypto EVP stuff - this is how we get wired to EVP so the engine
  * gets called when libcrypto requests a cipher NID.
@@ -901,6 +1108,23 @@ const EVP_CIPHER cryptodev_aes_256_cbc_hmac_sha1 = {
 	cryptodev_cbc_hmac_sha1_ctrl,
 	NULL
 };
+
+const EVP_CIPHER cryptodev_aes_128_gcm = {
+	NID_aes_128_gcm,
+	1, 16, 12,
+	EVP_CIPH_GCM_MODE | EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_DEFAULT_ASN1 \
+	| EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
+	| EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT,
+	cryptodev_init_gcm_key,
+	cryptodev_gcm_cipher,
+	cryptodev_cleanup,
+	sizeof(struct dev_crypto_state),
+	EVP_CIPHER_set_asn1_iv,
+	EVP_CIPHER_get_asn1_iv,
+	cryptodev_gcm_ctrl,
+	NULL
+};
+
 /*
  * Registered by the ENGINE when used to find out how to deal with
  * a particular NID in the ENGINE. this says what we'll do at the
@@ -944,6 +1168,9 @@ cryptodev_engine_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
 	case NID_aes_256_cbc_hmac_sha1:
 		*cipher = &cryptodev_aes_256_cbc_hmac_sha1;
 		break;
+	case NID_aes_128_gcm:
+		*cipher = &cryptodev_aes_128_gcm;
+		break;
 	default:
 		*cipher = NULL;
 		break;
-- 
2.3.5