Diffstat (limited to 'net/tls/tls_sw.c')
-rw-r--r--   net/tls/tls_sw.c   108
1 file changed, 87 insertions(+), 21 deletions(-)
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 98483c75f095..eff098d15e56 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -203,10 +203,12 @@ static void tls_decrypt_done(struct crypto_async_request *req, int err)
kfree(aead_req);
+ spin_lock_bh(&ctx->decrypt_compl_lock);
pending = atomic_dec_return(&ctx->decrypt_pending);
- if (!pending && READ_ONCE(ctx->async_notify))
+ if (!pending && ctx->async_notify)
complete(&ctx->async_wait.completion);
+ spin_unlock_bh(&ctx->decrypt_compl_lock);
}
static int tls_do_decryption(struct sock *sk,
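The hunk above (and the matching tls_encrypt_done() hunk below) replaces the lock-free READ_ONCE()/smp_store_mb() pairing with a spinlock taken around the decrement-and-signal step, so a completion callback can no longer race with a sender that is in the middle of setting async_notify. A minimal sketch of the callback-side pattern, using hypothetical names (demo_ctx, demo_done) rather than the tls_sw.c structures:

struct demo_ctx {
        spinlock_t compl_lock;        /* orders 'pending' vs. 'async_notify' */
        atomic_t pending;             /* in-flight async crypto requests     */
        bool async_notify;            /* a submitter is waiting for zero     */
        struct completion done;
};

static void demo_done(struct demo_ctx *c)
{
        int pending;

        spin_lock_bh(&c->compl_lock);
        pending = atomic_dec_return(&c->pending);
        /* Only the callback that drops the count to zero, and only while a
         * submitter has announced it is waiting, fires the completion.
         */
        if (!pending && c->async_notify)
                complete(&c->done);
        spin_unlock_bh(&c->compl_lock);
}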
@@ -464,10 +466,12 @@ static void tls_encrypt_done(struct crypto_async_request *req, int err)
ready = true;
}
+ spin_lock_bh(&ctx->encrypt_compl_lock);
pending = atomic_dec_return(&ctx->encrypt_pending);
- if (!pending && READ_ONCE(ctx->async_notify))
+ if (!pending && ctx->async_notify)
complete(&ctx->async_wait.completion);
+ spin_unlock_bh(&ctx->encrypt_compl_lock);
if (!ready)
return;
@@ -677,12 +681,32 @@ static int tls_push_record(struct sock *sk, int flags,
split_point = msg_pl->apply_bytes;
split = split_point && split_point < msg_pl->sg.size;
+ if (unlikely((!split &&
+ msg_pl->sg.size +
+ prot->overhead_size > msg_en->sg.size) ||
+ (split &&
+ split_point +
+ prot->overhead_size > msg_en->sg.size))) {
+ split = true;
+ split_point = msg_en->sg.size;
+ }
if (split) {
rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
split_point, prot->overhead_size,
&orig_end);
if (rc < 0)
return rc;
+ /* This can happen if above tls_split_open_record allocates
+ * a single large encryption buffer instead of two smaller
+ * ones. In this case adjust pointers and continue without
+ * split.
+ */
+ if (!msg_pl->sg.size) {
+ tls_merge_open_record(sk, rec, tmp, orig_end);
+ msg_pl = &rec->msg_plaintext;
+ msg_en = &rec->msg_encrypted;
+ split = false;
+ }
sk_msg_trim(sk, msg_en, msg_pl->sg.size +
prot->overhead_size);
}
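The added check above forces a split whenever the open record plus the per-record crypto overhead would no longer fit in the ciphertext buffer that was already allocated, and the tls_merge_open_record() fallback undoes the split if it left the plaintext side empty. A small stand-alone illustration of the clamp arithmetic, with made-up sizes (the real values come from msg_pl, msg_en and prot->overhead_size):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        unsigned int pl_size = 16384;   /* queued plaintext bytes              */
        unsigned int en_size = 16400;   /* allocated ciphertext bytes          */
        unsigned int overhead = 29;     /* assumed record header + nonce + tag */
        unsigned int split_point = 0;   /* 0: no BPF apply_bytes split pending */
        bool split = split_point && split_point < pl_size;

        /* Same test as the hunk: if the record plus overhead overflows the
         * encrypted buffer, force a split at the end of that buffer.
         */
        if ((!split && pl_size + overhead > en_size) ||
            (split && split_point + overhead > en_size)) {
                split = true;
                split_point = en_size;
        }
        printf("split=%d split_point=%u\n", split, split_point);
        return 0;
}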
@@ -704,6 +728,12 @@ static int tls_push_record(struct sock *sk, int flags,
sg_mark_end(sk_msg_elem(msg_pl, i));
}
+ if (msg_pl->sg.end < msg_pl->sg.start) {
+ sg_chain(&msg_pl->sg.data[msg_pl->sg.start],
+ MAX_SKB_FRAGS - msg_pl->sg.start + 1,
+ msg_pl->sg.data);
+ }
+
i = msg_pl->sg.start;
sg_chain(rec->sg_aead_in, 2, &msg_pl->sg.data[i]);
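The new branch above handles the case where the plaintext sk_msg ring has wrapped (sg.end < sg.start): the slots from sg.start to the end of the data[] array are chained back to data[0], so the AEAD walks the whole record as one logical scatterlist. A rough sketch of the sg_chain() mechanism outside the sk_msg context (array sizes are illustrative, not the kernel code itself):

static void demo_chain_wrapped(void)
{
        struct scatterlist head[4];   /* entries from sg.start to the array end */
        struct scatterlist tail[4];   /* wrapped entries starting at data[0]    */

        sg_init_table(head, 4);
        sg_init_table(tail, 4);

        /* sg_chain() turns the last slot of 'head' into a link entry that
         * points at 'tail', so a walker such as the AEAD request sees one
         * contiguous list across the wrap-around.
         */
        sg_chain(head, 4, tail);
}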
@@ -751,7 +781,7 @@ static int tls_push_record(struct sock *sk, int flags,
static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
bool full_record, u8 record_type,
- size_t *copied, int flags)
+ ssize_t *copied, int flags)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
@@ -767,10 +797,13 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
psock = sk_psock_get(sk);
if (!psock || !policy) {
err = tls_push_record(sk, flags, record_type);
- if (err) {
+ if (err && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
+ err = -sk->sk_err;
}
+ if (psock)
+ sk_psock_put(sk, psock);
return err;
}
more_data:
@@ -778,10 +811,7 @@ more_data:
if (psock->eval == __SK_NONE) {
delta = msg->sg.size;
psock->eval = sk_psock_msg_verdict(sk, psock, msg);
- if (delta < msg->sg.size)
- delta -= msg->sg.size;
- else
- delta = 0;
+ delta -= msg->sg.size;
}
if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
!enospc && !full_record) {
@@ -796,9 +826,10 @@ more_data:
switch (psock->eval) {
case __SK_PASS:
err = tls_push_record(sk, flags, record_type);
- if (err < 0) {
+ if (err && sk->sk_err == EBADMSG) {
*copied -= sk_msg_free(sk, msg);
tls_free_open_rec(sk);
+ err = -sk->sk_err;
goto out_err;
}
break;
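Both call sites above now tear down the open record only when the socket error is EBADMSG, i.e. encryption itself failed; other return values from tls_push_record() (for instance an async operation still in flight, or a transient failure) must leave the record in place so the send can continue later. A demo-only helper mirroring that pattern (not a function in tls_sw.c):

static int demo_push_and_classify(struct sock *sk, struct sk_msg *msg,
                                  int flags, u8 record_type, ssize_t *copied)
{
        int err = tls_push_record(sk, flags, record_type);

        /* EBADMSG on the socket means encryption failed, so the half-built
         * record is unusable and must be freed; anything else keeps the
         * record around for a retry or for async completion.
         */
        if (err && sk->sk_err == EBADMSG) {
                *copied -= sk_msg_free(sk, msg);
                tls_free_open_rec(sk);
                err = -sk->sk_err;
        }
        return err;
}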
@@ -888,7 +919,8 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
unsigned char record_type = TLS_RECORD_TYPE_DATA;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool eor = !(msg->msg_flags & MSG_MORE);
- size_t try_to_copy, copied = 0;
+ size_t try_to_copy;
+ ssize_t copied = 0;
struct sk_msg *msg_pl, *msg_en;
struct tls_rec *rec;
int required_size;
@@ -898,6 +930,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
int num_zc = 0;
int orig_size;
int ret = 0;
+ int pending;
if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
return -EOPNOTSUPP;
@@ -1064,13 +1097,19 @@ trim_sgl:
goto send_end;
} else if (num_zc) {
/* Wait for pending encryptions to get completed */
- smp_store_mb(ctx->async_notify, true);
+ spin_lock_bh(&ctx->encrypt_compl_lock);
+ ctx->async_notify = true;
- if (atomic_read(&ctx->encrypt_pending))
+ pending = atomic_read(&ctx->encrypt_pending);
+ spin_unlock_bh(&ctx->encrypt_compl_lock);
+ if (pending)
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
else
reinit_completion(&ctx->async_wait.completion);
+ /* There can be no concurrent accesses, since we have no
+ * pending encrypt operations
+ */
WRITE_ONCE(ctx->async_notify, false);
if (ctx->async_wait.err) {
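This is the sender-side half of the locking change from the tls_encrypt_done() hunk: async_notify is set and the in-flight counter is sampled under the same lock, so the last callback cannot slip through between the two steps and leave the sender waiting forever. A sketch of that handshake, reusing the hypothetical demo_ctx from the earlier sketch (the kernel waits via crypto_wait_req() on ctx->async_wait; the sketch substitutes a plain completion wait):

static void demo_wait_for_async(struct demo_ctx *c)
{
        int pending;

        spin_lock_bh(&c->compl_lock);
        c->async_notify = true;
        pending = atomic_read(&c->pending);
        spin_unlock_bh(&c->compl_lock);

        if (pending)
                wait_for_completion(&c->done);   /* last callback will signal */
        else
                reinit_completion(&c->done);     /* nothing in flight; rearm  */

        /* No requests can be pending here, so a plain store is sufficient. */
        c->async_notify = false;
}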
@@ -1090,7 +1129,7 @@ send_end:
release_sock(sk);
mutex_unlock(&tls_ctx->tx_lock);
- return copied ? copied : ret;
+ return copied > 0 ? copied : ret;
}
static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
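The return statement above now guards on copied > 0 because copied became an ssize_t that bpf_exec_tx_verdict() may drive negative when it frees a record; with the old unsigned size_t and the plain copied ? copied : ret test, a negative balance would wrap and be reported as a byte count instead of the error in ret. The same change is applied to tls_sw_do_sendpage() below. Illustrative arithmetic only (the values are made up):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
        ssize_t copied = 100;    /* bytes accepted before the failure         */
        int ret = -EBADMSG;      /* the error the caller should actually see  */

        copied -= 4096;          /* record freed after an encryption error    */

        /* 'copied > 0' is false, so the error in 'ret' is returned; with an
         * unsigned size_t the subtraction would wrap to a huge value and
         * 'copied ? copied : ret' would report it as a success.
         */
        printf("%zd\n", copied > 0 ? copied : (ssize_t)ret);
        return 0;
}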
@@ -1104,7 +1143,7 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
struct sk_msg *msg_pl;
struct tls_rec *rec;
int num_async = 0;
- size_t copied = 0;
+ ssize_t copied = 0;
bool full_record;
int record_room;
int ret = 0;
@@ -1206,7 +1245,7 @@ wait_for_memory:
}
sendpage_end:
ret = sk_stream_error(sk, flags, ret);
- return copied ? copied : ret;
+ return copied > 0 ? copied : ret;
}
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
@@ -1698,7 +1737,9 @@ int tls_sw_recvmsg(struct sock *sk,
long timeo;
bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
bool is_peek = flags & MSG_PEEK;
+ bool bpf_strp_enabled;
int num_async = 0;
+ int pending;
flags |= nonblock;
@@ -1707,6 +1748,7 @@ int tls_sw_recvmsg(struct sock *sk,
psock = sk_psock_get(sk);
lock_sock(sk);
+ bpf_strp_enabled = sk_psock_strp_enabled(psock);
/* Process pending decrypted records. It must be non-zero-copy */
err = process_rx_list(ctx, msg, &control, &cmsg, 0, len, false,
@@ -1760,11 +1802,12 @@ int tls_sw_recvmsg(struct sock *sk,
if (to_decrypt <= len && !is_kvec && !is_peek &&
ctx->control == TLS_RECORD_TYPE_DATA &&
- prot->version != TLS_1_3_VERSION)
+ prot->version != TLS_1_3_VERSION &&
+ !bpf_strp_enabled)
zc = true;
/* Do not use async mode if record is non-data */
- if (ctx->control == TLS_RECORD_TYPE_DATA)
+ if (ctx->control == TLS_RECORD_TYPE_DATA && !bpf_strp_enabled)
async_capable = ctx->async_capable;
else
async_capable = false;
@@ -1814,6 +1857,19 @@ int tls_sw_recvmsg(struct sock *sk,
goto pick_next_record;
if (!zc) {
+ if (bpf_strp_enabled) {
+ err = sk_psock_tls_strp_read(psock, skb);
+ if (err != __SK_PASS) {
+ rxm->offset = rxm->offset + rxm->full_len;
+ rxm->full_len = 0;
+ if (err == __SK_DROP)
+ consume_skb(skb);
+ ctx->recv_pkt = NULL;
+ __strp_unpause(&ctx->strp);
+ continue;
+ }
+ }
+
if (rxm->full_len > len) {
retain_skb = true;
chunk = len;
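When an sk_skb (strparser) BPF program is attached to the socket, the hunks above disable both zero-copy and async decryption so that every record is decrypted into an skb the program can inspect, and any verdict other than __SK_PASS consumes the record before it reaches the TLS reader. A demo-only helper mirroring the added branch (not a function in tls_sw.c):

static bool demo_bpf_filter_record(struct sk_psock *psock, struct sk_buff *skb,
                                   struct strp_msg *rxm)
{
        int verdict = sk_psock_tls_strp_read(psock, skb);

        if (verdict == __SK_PASS)
                return true;               /* deliver to recvmsg() as usual */

        /* Redirected or dropped by BPF: skip the payload so it is never
         * copied to this reader, and free the skb only on a drop.
         */
        rxm->offset += rxm->full_len;
        rxm->full_len = 0;
        if (verdict == __SK_DROP)
                consume_skb(skb);
        return false;
}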
@@ -1861,8 +1917,11 @@ pick_next_record:
recv_end:
if (num_async) {
/* Wait for all previously submitted records to be decrypted */
- smp_store_mb(ctx->async_notify, true);
- if (atomic_read(&ctx->decrypt_pending)) {
+ spin_lock_bh(&ctx->decrypt_compl_lock);
+ ctx->async_notify = true;
+ pending = atomic_read(&ctx->decrypt_pending);
+ spin_unlock_bh(&ctx->decrypt_compl_lock);
+ if (pending) {
err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
if (err) {
/* one of async decrypt failed */
@@ -1874,6 +1933,10 @@ recv_end:
} else {
reinit_completion(&ctx->async_wait.completion);
}
+
+ /* There can be no concurrent accesses, since we have no
+ * pending decrypt operations
+ */
WRITE_ONCE(ctx->async_notify, false);
/* Drain records from the rx_list & copy if required */
@@ -2054,8 +2117,9 @@ static void tls_data_ready(struct sock *sk)
strp_data_ready(&ctx->strp);
psock = sk_psock_get(sk);
- if (psock && !list_empty(&psock->ingress_msg)) {
- ctx->saved_data_ready(sk);
+ if (psock) {
+ if (!list_empty(&psock->ingress_msg))
+ ctx->saved_data_ready(sk);
sk_psock_put(sk, psock);
}
}
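The restructuring above fixes a reference leak: sk_psock_put() is now called whenever sk_psock_get() returned a psock, not only when its ingress queue happened to be non-empty. The get/check/put discipline it restores, shown in isolation with comments:

        psock = sk_psock_get(sk);          /* takes a reference (may be NULL) */
        if (psock) {
                if (!list_empty(&psock->ingress_msg))
                        ctx->saved_data_ready(sk);
                sk_psock_put(sk, psock);   /* always pair the get with a put */
        }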
@@ -2218,6 +2282,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
if (tx) {
crypto_init_wait(&sw_ctx_tx->async_wait);
+ spin_lock_init(&sw_ctx_tx->encrypt_compl_lock);
crypto_info = &ctx->crypto_send.info;
cctx = &ctx->tx;
aead = &sw_ctx_tx->aead_send;
@@ -2226,6 +2291,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
sw_ctx_tx->tx_work.sk = sk;
} else {
crypto_init_wait(&sw_ctx_rx->async_wait);
+ spin_lock_init(&sw_ctx_rx->decrypt_compl_lock);
crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
skb_queue_head_init(&sw_ctx_rx->rx_list);
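The two spin_lock_init() calls above initialize the completion locks used by the encrypt and decrypt paths at the top of this diff. The fields themselves are not part of this file; presumably they are declared as spinlock_t members of struct tls_sw_context_tx and struct tls_sw_context_rx in include/net/tls.h in the same series, roughly as sketched (assumed declarations, not shown in this diff):

struct tls_sw_context_tx {
        struct crypto_wait async_wait;
        spinlock_t encrypt_compl_lock;   /* guards encrypt_pending vs. async_notify */
        /* ... remaining members unchanged ... */
};

struct tls_sw_context_rx {
        struct crypto_wait async_wait;
        spinlock_t decrypt_compl_lock;   /* guards decrypt_pending vs. async_notify */
        /* ... remaining members unchanged ... */
};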