author     Jakub Kicinski <jakub.kicinski@netronome.com>    2019-04-25 17:35:10 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-05-05 14:42:40 +0200
commit     d0771bd41c27e08af7bb7bb95e3c9450633770f7 (patch)
tree       b6d835fdddad240c58503abef7352b3917495d33
parent     dd424182bc2d5af26b67f5885c182a02d00a18a6 (diff)
net/tls: fix copy to fragments in reencrypt
[ Upstream commit eb3d38d5adb520435d4e4af32529ccb13ccc9935 ]

Fragments may contain data from other records so we have to account for that when we calculate the destination and max length of copy we can perform.

Note that 'offset' is the offset within the message, so it can't be passed as offset within the frag.. Here skb_store_bits() would have realised the call is wrong and simply not copy data.

Fixes: 4799ac81e52a ("tls: Add rx inline crypto offload")
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
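
The arithmetic the patch introduces can be illustrated outside the kernel. The following is a minimal user-space sketch, not kernel code: struct frag, copy_back() and min_int() are hypothetical stand-ins for the skb fragment chain, the patched copy-back loop and min_t(), and msg_offset/data_len mirror rxm->offset and rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE. It shows the point the fix makes: pos tracks where the current fragment starts within the chain, so the store offset inside a fragment is offset - pos rather than the message offset, and each copy is clamped to the record's plaintext.

/* Illustration only: a hypothetical user-space model of the patched
 * copy-back loop, not the kernel code itself.  'struct frag' stands in
 * for an skb fragment, msg_offset for rxm->offset, data_len for
 * rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE.
 */
#include <stdio.h>
#include <string.h>

struct frag {
        char data[64];
        int  len;               /* bytes this fragment holds       */
        int  decrypted;         /* mirrors skb_iter->decrypted     */
};

static int min_int(int a, int b) { return a < b ? a : b; }

/* Copy data_len plaintext bytes from buf back into the fragments,
 * where msg_offset is the record's offset within the fragment chain.
 */
static void copy_back(struct frag *frags, int nfrags,
                      const char *buf, int msg_offset, int data_len)
{
        int offset = msg_offset;        /* offset within the whole chain      */
        int pos = 0;                    /* start of current frag in the chain */
        int i;

        for (i = 0; i < nfrags; i++) {
                struct frag *f = &frags[i];
                int frag_pos, copy;

                if (pos + f->len <= offset)             /* frag before record */
                        goto done_with_frag;
                if (pos >= data_len + msg_offset)       /* past the plaintext */
                        break;

                frag_pos = offset - pos;                /* offset *within* frag */
                copy = min_int(f->len - frag_pos,
                               data_len + msg_offset - offset);

                if (f->decrypted)
                        memcpy(f->data + frag_pos, buf, copy);

                offset += copy;
                buf += copy;
done_with_frag:
                pos += f->len;
        }
}

int main(void)
{
        /* Two 16-byte fragments; the record starts 8 bytes into the first. */
        struct frag frags[2] = {
                { .len = 16, .decrypted = 1 },
                { .len = 16, .decrypted = 1 },
        };
        char plaintext[24];

        memset(plaintext, 'A', sizeof(plaintext));
        copy_back(frags, 2, plaintext, 8, sizeof(plaintext));

        /* First frag receives bytes at offset 8..15, second frag all 16. */
        printf("frag0[8]=%c frag1[0]=%c frag1[15]=%c\n",
               frags[0].data[8], frags[1].data[0], frags[1].data[15]);
        return 0;
}

In this sketch the second fragment is written at offset 0 even though the message offset at that point is 16; before the fix, that message offset would have been passed as the offset within the 16-byte fragment, pointing past its end, which is why skb_store_bits() refused to copy.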
-rw-r--r--  net/tls/tls_device.c | 29
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 8538ee22a582..f4a19eac975d 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -569,7 +569,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
 {
         struct strp_msg *rxm = strp_msg(skb);
-        int err = 0, offset = rxm->offset, copy, nsg;
+        int err = 0, offset = rxm->offset, copy, nsg, data_len, pos;
         struct sk_buff *skb_iter, *unused;
         struct scatterlist sg[1];
         char *orig_buf, *buf;
@@ -600,9 +600,10 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
         else
                 err = 0;
 
+        data_len = rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+
         if (skb_pagelen(skb) > offset) {
-                copy = min_t(int, skb_pagelen(skb) - offset,
-                             rxm->full_len - TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+                copy = min_t(int, skb_pagelen(skb) - offset, data_len);
 
                 if (skb->decrypted)
                         skb_store_bits(skb, offset, buf, copy);
@@ -611,16 +612,30 @@ static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
                 buf += copy;
         }
 
+        pos = skb_pagelen(skb);
         skb_walk_frags(skb, skb_iter) {
-                copy = min_t(int, skb_iter->len,
-                             rxm->full_len - offset + rxm->offset -
-                             TLS_CIPHER_AES_GCM_128_TAG_SIZE);
+                int frag_pos;
+
+                /* Practically all frags must belong to msg if reencrypt
+                 * is needed with current strparser and coalescing logic,
+                 * but strparser may "get optimized", so let's be safe.
+                 */
+                if (pos + skb_iter->len <= offset)
+                        goto done_with_frag;
+                if (pos >= data_len + rxm->offset)
+                        break;
+
+                frag_pos = offset - pos;
+                copy = min_t(int, skb_iter->len - frag_pos,
+                             data_len + rxm->offset - offset);
 
                 if (skb_iter->decrypted)
-                        skb_store_bits(skb_iter, offset, buf, copy);
+                        skb_store_bits(skb_iter, frag_pos, buf, copy);
 
                 offset += copy;
                 buf += copy;
+done_with_frag:
+                pos += skb_iter->len;
         }
 
 free_buf:
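
As a further illustration of the two guards added in the last hunk (again in terms of the hypothetical copy_back() sketch above, not the kernel itself): with four 16-byte fragments, msg_offset = 20 and data_len = 24, the first fragment satisfies pos + len <= offset and is skipped via done_with_frag, since it only holds data that precedes the record, while the last fragment satisfies pos >= data_len + msg_offset and the walk breaks, leaving the bytes that belong to the authentication tag and to any following record untouched, which is the situation the commit message describes.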