/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 * Copyright (c) 2016-2017, Lance Chao <lancerchao@fb.com>. All rights reserved.
 * Copyright (c) 2016, Fridolin Pokorny <fridolin.pokorny@gmail.com>. All rights reserved.
 * Copyright (c) 2016, Nikos Mavrogiannopoulos <nmav@gnutls.org>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched/signal.h>
#include <linux/module.h>
#include <crypto/aead.h>

#include <net/strparser.h>
#include <net/tls.h>

#define MAX_IV_SIZE	TLS_CIPHER_AES_GCM_128_IV_SIZE

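/* Decrypt a single TLS record from @sgin into @sgout (which may alias
 * @sgin for in-place decryption) using the socket's receive AEAD
 * transform, waiting synchronously for the crypto layer to complete.
 */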
static int tls_do_decryption(struct sock *sk,
			     struct scatterlist *sgin,
			     struct scatterlist *sgout,
			     char *iv_recv,
			     size_t data_len,
			     struct aead_request *aead_req)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	int ret;

	aead_request_set_tfm(aead_req, ctx->aead_recv);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, sgin, sgout,
			       data_len + tls_ctx->rx.tag_size,
			       (u8 *)iv_recv);
	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
	return ret;
}

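/* Shrink the scatterlist @sg so it holds exactly @target_size bytes,
 * releasing the pages and socket memory charges of any fully trimmed
 * tail entries.
 */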
static void trim_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size, int target_size)
{
	int i = *sg_num_elem - 1;
	int trim = *sg_size - target_size;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	*sg_size = target_size;
	while (trim >= sg[i].length) {
		trim -= sg[i].length;
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
		i--;

		if (i < 0)
			goto out;
	}

	sg[i].length -= trim;
	sk_mem_uncharge(sk, trim);

out:
	*sg_num_elem = i + 1;
}

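/* Trim both the plaintext and encrypted scatterlists back to
 * @target_size plaintext bytes; the encrypted list keeps extra room for
 * the per-record overhead (header plus tag).
 */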
static void trim_both_sgl(struct sock *sk, int target_size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	trim_sg(sk, ctx->sg_plaintext_data,
		&ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size,
		target_size);

	if (target_size > 0)
		target_size += tls_ctx->tx.overhead_size;

	trim_sg(sk, ctx->sg_encrypted_data,
		&ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size,
		target_size);
}

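/* Grow the ciphertext scatterlist to hold @len bytes. On -ENOSPC the
 * array of sg entries is exhausted, so mark every element as in use
 * before returning the error to the caller.
 */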
static int alloc_encrypted_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len,
			 ctx->sg_encrypted_data, 0,
			 &ctx->sg_encrypted_num_elem,
			 &ctx->sg_encrypted_size, 0);

	if (rc == -ENOSPC)
		ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);

	return rc;
}

static int alloc_plaintext_sg(struct sock *sk, int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int rc = 0;

	rc = sk_alloc_sg(sk, len, ctx->sg_plaintext_data, 0,
			 &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
			 tls_ctx->pending_open_record_frags);

	if (rc == -ENOSPC)
		ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);

	return rc;
}

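/* Release every page referenced by @sg, return the charged memory to
 * the socket, and reset the element count and byte size to zero.
 */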
static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}

static void tls_free_both_sg(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	free_sg(sk, ctx->sg_encrypted_data, &ctx->sg_encrypted_num_elem,
		&ctx->sg_encrypted_size);

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);
}

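/* Encrypt the pending plaintext into the encrypted scatterlist. The
 * first ciphertext entry is temporarily advanced past the record header
 * so the AEAD output lands after the prepended TLS header, then restored
 * once the request completes.
 */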
static int tls_do_encryption(struct tls_context *tls_ctx,
			     struct tls_sw_context_tx *ctx,
			     struct aead_request *aead_req,
			     size_t data_len)
{
	int rc;

	ctx->sg_encrypted_data[0].offset += tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length -= tls_ctx->tx.prepend_size;

	aead_request_set_tfm(aead_req, ctx->aead_send);
	aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE);
	aead_request_set_crypt(aead_req, ctx->sg_aead_in, ctx->sg_aead_out,
			       data_len, tls_ctx->tx.iv);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &ctx->async_wait);

	rc = crypto_wait_req(crypto_aead_encrypt(aead_req), &ctx->async_wait);

	ctx->sg_encrypted_data[0].offset -= tls_ctx->tx.prepend_size;
	ctx->sg_encrypted_data[0].length += tls_ctx->tx.prepend_size;

	return rc;
}

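/* Seal the currently open record: build the AAD and the TLS record
 * header, encrypt the plaintext, hand the ciphertext to the transport
 * via tls_push_sg() and advance the record sequence number.
 */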
static int tls_push_record(struct sock *sk, int flags,
			   unsigned char record_type)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct aead_request *req;
	int rc;

	req = aead_request_alloc(ctx->aead_send, sk->sk_allocation);
	if (!req)
		return -ENOMEM;

	sg_mark_end(ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem - 1);
	sg_mark_end(ctx->sg_encrypted_data + ctx->sg_encrypted_num_elem - 1);

	tls_make_aad(ctx->aad_space, ctx->sg_plaintext_size,
		     tls_ctx->tx.rec_seq, tls_ctx->tx.rec_seq_size,
		     record_type);

	tls_fill_prepend(tls_ctx,
			 page_address(sg_page(&ctx->sg_encrypted_data[0])) +
			 ctx->sg_encrypted_data[0].offset,
			 ctx->sg_plaintext_size, record_type);

	tls_ctx->pending_open_record_frags = 0;
	set_bit(TLS_PENDING_CLOSED_RECORD, &tls_ctx->flags);

	rc = tls_do_encryption(tls_ctx, ctx, req, ctx->sg_plaintext_size);
	if (rc < 0) {
		/* If we are called from write_space and
		 * we fail, we need to set this SOCK_NOSPACE
		 * to trigger another write_space in the future.
		 */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		goto out_req;
	}

	free_sg(sk, ctx->sg_plaintext_data, &ctx->sg_plaintext_num_elem,
		&ctx->sg_plaintext_size);

	ctx->sg_encrypted_num_elem = 0;
	ctx->sg_encrypted_size = 0;

	/* Only pass through MSG_DONTWAIT and MSG_NOSIGNAL flags */
	rc = tls_push_sg(sk, tls_ctx, ctx->sg_encrypted_data, 0, flags);
	if (rc < 0 && rc != -EAGAIN)
		tls_err_abort(sk, EBADMSG);

	tls_advance_record_sn(sk, &tls_ctx->tx);
out_req:
	aead_request_free(req);
	return rc;
}

static int tls_sw_push_pending_record(struct sock *sk, int flags)
{
	return tls_push_record(sk, flags, TLS_RECORD_TYPE_DATA);
}

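/* Map up to @length bytes of user memory from @from directly into the
 * scatterlist @to without copying. Pages are pinned with
 * iov_iter_get_pages() and optionally charged to the socket; on failure
 * the iterator is reverted so the caller can fall back to copying.
 */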
static int zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      int length, int *pages_used,
			      unsigned int *size_used,
			      struct scatterlist *to, int to_max_pages,
			      bool charge)
{
	struct page *pages[MAX_SKB_FRAGS];

	size_t offset;
	ssize_t copied, use;
	int i = 0;
	unsigned int size = *size_used;
	int num_elem = *pages_used;
	int rc = 0;
	int maxpages;

	while (length > 0) {
		i = 0;
		maxpages = to_max_pages - num_elem;
		if (maxpages == 0) {
			rc = -EFAULT;
			goto out;
		}
		copied = iov_iter_get_pages(from, pages,
					    length,
					    maxpages, &offset);
		if (copied <= 0) {
			rc = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);

		length -= copied;
		size += copied;
		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);

			sg_set_page(&to[num_elem],
				    pages[i], use, offset);
			sg_unmark_end(&to[num_elem]);
			if (charge)
				sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;

			++i;
			++num_elem;
		}
	}

	/* Mark the end in the last sg entry if newly added */
	if (num_elem > *pages_used)
		sg_mark_end(&to[num_elem - 1]);
out:
	if (rc)
		iov_iter_revert(from, size - *size_used);
	*size_used = size;
	*pages_used = num_elem;

	return rc;
}

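/* Copy data from the user iterator into the already-allocated plaintext
 * fragments, beginning with the first fragment that is not yet part of
 * the open record, and count each filled fragment in
 * pending_open_record_frags.
 */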
static int memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     int bytes)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct scatterlist *sg = ctx->sg_plaintext_data;
	int copy, i, rc = 0;

	for (i = tls_ctx->pending_open_record_frags;
	     i < ctx->sg_plaintext_num_elem; ++i) {
		copy = sg[i].length;
		if (copy_from_iter(
				page_address(sg_page(&sg[i])) + sg[i].offset,
				copy, from) != copy) {
			rc = -EFAULT;
			goto out;
		}
		bytes -= copy;

		++tls_ctx->pending_open_record_frags;

		if (!bytes)
			break;
	}

out:
	return rc;
}

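/* sendmsg() for a software TLS socket: batch user data into records of
 * at most TLS_MAX_PAYLOAD_SIZE, preferring the zero-copy path when a
 * record is full or MSG_MORE is not set, and push each completed record.
 */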
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret;
	int required_size;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	bool eor = !(msg->msg_flags & MSG_MORE);
	size_t try_to_copy, copied = 0;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	int record_room;
	bool full_record;
	int orig_size;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -ENOTSUPP;

	lock_sock(sk);

	ret = tls_complete_pending_work(sk, tls_ctx, msg->msg_flags, &timeo);
	if (ret)
		goto send_end;

	if (unlikely(msg->msg_controllen)) {
		ret = tls_proccess_cmsg(sk, msg, &record_type);
		if (ret)
			goto send_end;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto send_end;
		}

		orig_size = ctx->sg_plaintext_size;
		full_record = false;
		try_to_copy = msg_data_left(msg);
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		if (try_to_copy >= record_room) {
			try_to_copy = record_room;
			full_record = true;
		}

		required_size = ctx->sg_plaintext_size + try_to_copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_encrypted:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_encrypted_size;
			full_record = true;
		}
		if (!is_kvec && (full_record || eor)) {
			ret = zerocopy_from_iter(sk, &msg->msg_iter,
				try_to_copy, &ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				ctx->sg_plaintext_data,
				ARRAY_SIZE(ctx->sg_plaintext_data),
				true);
			if (ret)
				goto fallback_to_reg_send;

			copied += try_to_copy;
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret)
				goto send_end;
			continue;

fallback_to_reg_send:
			trim_sg(sk, ctx->sg_plaintext_data,
				&ctx->sg_plaintext_num_elem,
				&ctx->sg_plaintext_size,
				orig_size);
		}

		required_size = ctx->sg_plaintext_size + try_to_copy;
alloc_plaintext:
		ret = alloc_plaintext_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust try_to_copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			try_to_copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;

			trim_sg(sk, ctx->sg_encrypted_data,
				&ctx->sg_encrypted_num_elem,
				&ctx->sg_encrypted_size,
				ctx->sg_plaintext_size +
				tls_ctx->tx.overhead_size);
		}

		ret = memcopy_from_iter(sk, &msg->msg_iter, try_to_copy);
		if (ret)
			goto trim_sgl;

		copied += try_to_copy;
		if (full_record || eor) {
push_record:
			ret = tls_push_record(sk, msg->msg_flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto send_end;
			}
		}

		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
trim_sgl:
			trim_both_sgl(sk, orig_size);
			goto send_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		if (ctx->sg_encrypted_size < required_size)
			goto alloc_encrypted;

		goto alloc_plaintext;
	}

send_end:
	ret = sk_stream_error(sk, msg->msg_flags, ret);

	release_sock(sk);
	return copied ? copied : ret;
}

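/* sendpage() for a software TLS socket: reference the caller's page
 * directly in the plaintext scatterlist (no copy) and push a record once
 * it is full, at EOR, or when the scatterlist runs out of slots.
 */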
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	int ret;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	size_t orig_size = size;
	unsigned char record_type = TLS_RECORD_TYPE_DATA;
	struct scatterlist *sg;
	bool full_record;
	int record_room;

	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
		      MSG_SENDPAGE_NOTLAST))
		return -ENOTSUPP;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	ret = tls_complete_pending_work(sk, tls_ctx, flags, &timeo);
	if (ret)
		goto sendpage_end;

	/* Call the sk_stream functions to manage the sndbuf mem. */
	while (size > 0) {
		size_t copy, required_size;

		if (sk->sk_err) {
			ret = -sk->sk_err;
			goto sendpage_end;
		}

		full_record = false;
		record_room = TLS_MAX_PAYLOAD_SIZE - ctx->sg_plaintext_size;
		copy = size;
		if (copy >= record_room) {
			copy = record_room;
			full_record = true;
		}
		required_size = ctx->sg_plaintext_size + copy +
				tls_ctx->tx.overhead_size;

		if (!sk_stream_memory_free(sk))
			goto wait_for_sndbuf;
alloc_payload:
		ret = alloc_encrypted_sg(sk, required_size);
		if (ret) {
			if (ret != -ENOSPC)
				goto wait_for_memory;

			/* Adjust copy according to the amount that was
			 * actually allocated. The difference is due
			 * to max sg elements limit
			 */
			copy -= required_size - ctx->sg_plaintext_size;
			full_record = true;
		}

		get_page(page);
		sg = ctx->sg_plaintext_data + ctx->sg_plaintext_num_elem;
		sg_set_page(sg, page, copy, offset);
		sg_unmark_end(sg);

		ctx->sg_plaintext_num_elem++;

		sk_mem_charge(sk, copy);
		offset += copy;
		size -= copy;
		ctx->sg_plaintext_size += copy;
		tls_ctx->pending_open_record_frags = ctx->sg_plaintext_num_elem;

		if (full_record || eor ||
		    ctx->sg_plaintext_num_elem ==
		    ARRAY_SIZE(ctx->sg_plaintext_data)) {
push_record:
			ret = tls_push_record(sk, flags, record_type);
			if (ret) {
				if (ret == -ENOMEM)
					goto wait_for_memory;

				goto sendpage_end;
			}
		}
		continue;
wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret) {
			trim_both_sgl(sk, ctx->sg_plaintext_size);
			goto sendpage_end;
		}

		if (tls_is_pending_closed_record(tls_ctx))
			goto push_record;

		goto alloc_payload;
	}

sendpage_end:
	if (orig_size > size)
		ret = orig_size - size;
	else
		ret = sk_stream_error(sk, flags, ret);

	release_sock(sk);
	return ret;
}

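/* Wait until the strparser has delivered a complete record
 * (ctx->recv_pkt) or the socket hits an error, shutdown, timeout or a
 * pending signal, in which case NULL is returned and *err is set.
 */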
static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
				     long timeo, int *err)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct sk_buff *skb;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	while (!(skb = ctx->recv_pkt)) {
		if (sk->sk_err) {
			*err = sock_error(sk);
			return NULL;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return NULL;

		if (sock_flag(sk, SOCK_DONE))
			return NULL;

		if ((flags & MSG_DONTWAIT) || !timeo) {
			*err = -EAGAIN;
			return NULL;
		}

		add_wait_queue(sk_sleep(sk), &wait);
		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		sk_wait_event(sk, &timeo, ctx->recv_pkt != skb, &wait);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		remove_wait_queue(sk_sleep(sk), &wait);

		/* Handle signals */
		if (signal_pending(current)) {
			*err = sock_intr_errno(timeo);
			return NULL;
		}
	}

	return skb;
}

/* This function decrypts the input skb into either out_iov, out_sg, or
 * the skb's own buffers. The input parameter 'zc' indicates whether
 * zero-copy mode should be tried. With zero-copy mode, either out_iov or
 * out_sg must be non-NULL. If both out_iov and out_sg are NULL, the
 * decryption happens in place in the skb's buffers, i.e. zero-copy gets
 * disabled and 'zc' is updated.
 */

static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *out_iov,
			    struct scatterlist *out_sg,
			    int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
	struct aead_request *aead_req;
	struct sk_buff *unused;
	u8 *aad, *iv, *mem = NULL;
	struct scatterlist *sgin = NULL;
	struct scatterlist *sgout = NULL;
	const int data_len = rxm->full_len - tls_ctx->rx.overhead_size;

	if (*zc && (out_iov || out_sg)) {
		if (out_iov)
			n_sgout = iov_iter_npages(out_iov, INT_MAX) + 1;
		else
			n_sgout = sg_nents(out_sg);
	} else {
		n_sgout = 0;
		*zc = false;
	}

	n_sgin = skb_cow_data(skb, 0, &unused);
	if (n_sgin < 1)
		return -EBADMSG;

	/* Increment to accommodate AAD */
	n_sgin = n_sgin + 1;

	nsg = n_sgin + n_sgout;

	aead_size = sizeof(*aead_req) + crypto_aead_reqsize(ctx->aead_recv);
	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
	mem_size = mem_size + TLS_AAD_SPACE_SIZE;
	mem_size = mem_size + crypto_aead_ivsize(ctx->aead_recv);

	/* Allocate a single block of memory which contains
	 * aead_req || sgin[] || sgout[] || aad || iv.
	 * This order achieves correct alignment for aead_req, sgin, sgout.
	 */
	mem = kmalloc(mem_size, sk->sk_allocation);
	if (!mem)
		return -ENOMEM;

	/* Segment the allocated memory */
	aead_req = (struct aead_request *)mem;
	sgin = (struct scatterlist *)(mem + aead_size);
	sgout = sgin + n_sgin;
	aad = (u8 *)(sgout + n_sgout);
	iv = aad + TLS_AAD_SPACE_SIZE;

	/* Prepare IV */
	err = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
			    iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			    tls_ctx->rx.iv_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}
	memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);

	/* Prepare AAD */
	tls_make_aad(aad, rxm->full_len - tls_ctx->rx.overhead_size,
		     tls_ctx->rx.rec_seq, tls_ctx->rx.rec_seq_size,
		     ctx->control);

	/* Prepare sgin */
	sg_init_table(sgin, n_sgin);
	sg_set_buf(&sgin[0], aad, TLS_AAD_SPACE_SIZE);
	err = skb_to_sgvec(skb, &sgin[1],
			   rxm->offset + tls_ctx->rx.prepend_size,
			   rxm->full_len - tls_ctx->rx.prepend_size);
	if (err < 0) {
		kfree(mem);
		return err;
	}

	if (n_sgout) {
		if (out_iov) {
			sg_init_table(sgout, n_sgout);
			sg_set_buf(&sgout[0], aad, TLS_AAD_SPACE_SIZE);

			*chunk = 0;
			err = zerocopy_from_iter(sk, out_iov, data_len, &pages,
						 chunk, &sgout[1],
						 (n_sgout - 1), false);
			if (err < 0)
				goto fallback_to_reg_recv;
		} else if (out_sg) {
			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
		} else {
			goto fallback_to_reg_recv;
		}
	} else {
fallback_to_reg_recv:
		sgout = sgin;
		pages = 0;
		*chunk = 0;
		*zc = false;
	}

	/* Prepare and submit AEAD request */
	err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);

	/* Release the pages in case iov was mapped to pages */
	for (; pages > 0; pages--)
		put_page(sg_page(&sgout[pages]));

	kfree(mem);
	return err;
}

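/* Decrypt the record held in @skb if it has not been decrypted yet, then
 * strip the TLS header and tag from the strparser's view of the message
 * and advance the receive-side record sequence number.
 */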
static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
			      struct iov_iter *dest, int *chunk, bool *zc)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);
	int err = 0;

#ifdef CONFIG_TLS_DEVICE
	err = tls_device_decrypted(sk, skb);
	if (err < 0)
		return err;
#endif
	if (!ctx->decrypted) {
		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
		if (err < 0)
			return err;
	} else {
		*zc = false;
	}

	rxm->offset += tls_ctx->rx.prepend_size;
	rxm->full_len -= tls_ctx->rx.overhead_size;
	tls_advance_record_sn(sk, &tls_ctx->rx);
	ctx->decrypted = true;
	ctx->saved_data_ready(sk);

	return err;
}

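/* Decrypt the record in @skb directly into the caller-supplied
 * scatterlist @sgout, always attempting the zero-copy path.
 */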
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
		struct scatterlist *sgout)
{
	bool zc = true;
	int chunk;

	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc);
}

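/* Consume @len bytes of the current record. Returns true when the whole
 * record has been eaten, in which case the skb is freed and the
 * strparser is unpaused so it can parse the next record.
 */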
static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
			       unsigned int len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = strp_msg(skb);

	if (len < rxm->full_len) {
		rxm->offset += len;
		rxm->full_len -= len;

		return false;
	}

	/* Finished with message */
	ctx->recv_pkt = NULL;
	kfree_skb(skb);
	__strp_unpause(&ctx->strp);

	return true;
}

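/* recvmsg() for a software TLS socket: deliver decrypted record payload
 * to userspace, reporting the record type via a TLS_GET_RECORD_TYPE
 * control message and decrypting directly into the user buffer when the
 * zero-copy conditions allow it.
 */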
int tls_sw_recvmsg(struct sock *sk,
		   struct msghdr *msg,
		   size_t len,
		   int nonblock,
		   int flags,
		   int *addr_len)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	unsigned char control;
	struct strp_msg *rxm;
	struct sk_buff *skb;
	ssize_t copied = 0;
	bool cmsg = false;
	int target, err = 0;
	long timeo;
	bool is_kvec = msg->msg_iter.type & ITER_KVEC;

	flags |= nonblock;

	if (unlikely(flags & MSG_ERRQUEUE))
		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	do {
		bool zc = false;
		int chunk = 0;

		skb = tls_wait_data(sk, flags, timeo, &err);
		if (!skb)
			goto recv_end;

		rxm = strp_msg(skb);
		if (!cmsg) {
			int cerr;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(ctx->control), &ctx->control);
			cmsg = true;
			control = ctx->control;
			if (ctx->control != TLS_RECORD_TYPE_DATA) {
				if (cerr || msg->msg_flags & MSG_CTRUNC) {
					err = -EIO;
					goto recv_end;
				}
			}
		} else if (control != ctx->control) {
			goto recv_end;
		}

		if (!ctx->decrypted) {
			int to_copy = rxm->full_len - tls_ctx->rx.overhead_size;

			if (!is_kvec && to_copy <= len &&
			    likely(!(flags & MSG_PEEK)))
				zc = true;

			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
						 &chunk, &zc);
			if (err < 0) {
				tls_err_abort(sk, EBADMSG);
				goto recv_end;
			}
			ctx->decrypted = true;
		}

		if (!zc) {
			chunk = min_t(unsigned int, rxm->full_len, len);
			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
						    chunk);
			if (err < 0)
				goto recv_end;
		}

		copied += chunk;
		len -= chunk;
		if (likely(!(flags & MSG_PEEK))) {
			u8 control = ctx->control;

			if (tls_sw_advance_skb(sk, skb, chunk)) {
				/* Return full control message to
				 * userspace before trying to parse
				 * another message type
				 */
				msg->msg_flags |= MSG_EOR;
				if (control != TLS_RECORD_TYPE_DATA)
					goto recv_end;
			}
		} else {
			/* MSG_PEEK right now cannot look beyond current skb
			 * from strparser, meaning we cannot advance skb here
			 * and thus unpause strparser since we'd lose original
			 * one.
			 */
			break;
		}

		/* If we have a new message from strparser, continue now. */
		if (copied >= target && !ctx->recv_pkt)
			break;
	} while (len);

recv_end:
	release_sock(sk);
	return copied ? : err;
}

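/* splice_read() for a software TLS socket: decrypt in place and splice
 * the payload of a single data record into @pipe; control records are
 * not supported on this path.
 */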
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	struct strp_msg *rxm = NULL;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	ssize_t copied = 0;
	int err = 0;
	long timeo;
	int chunk;
	bool zc = false;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	skb = tls_wait_data(sk, flags, timeo, &err);
	if (!skb)
		goto splice_read_end;

	/* splice does not support reading control messages */
	if (ctx->control != TLS_RECORD_TYPE_DATA) {
		err = -ENOTSUPP;
		goto splice_read_end;
	}

	if (!ctx->decrypted) {
		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc);

		if (err < 0) {
			tls_err_abort(sk, EBADMSG);
			goto splice_read_end;
		}
		ctx->decrypted = true;
	}
	rxm = strp_msg(skb);

	chunk = min_t(unsigned int, rxm->full_len, len);
	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
	if (copied < 0)
		goto splice_read_end;

	if (likely(!(flags & MSG_PEEK)))
		tls_sw_advance_skb(sk, skb, copied);

splice_read_end:
	release_sock(sk);
	return copied ? : err;
}

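/* poll() wrapper: take POLLOUT/POLLHUP from the underlying transport
 * socket, but report readability based on whether a parsed record is
 * queued in recv_pkt.
 */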
unsigned int tls_sw_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	unsigned int ret;
	struct sock *sk = sock->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	/* Grab POLLOUT and POLLHUP from the underlying socket */
	ret = ctx->sk_poll(file, sock, wait);

	/* Clear POLLIN bits, and set based on recv_pkt */
	ret &= ~(POLLIN | POLLRDNORM);
	if (ctx->recv_pkt)
		ret |= POLLIN | POLLRDNORM;

	return ret;
}

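/* strparser parse_msg callback: read the TLS record header, validate the
 * protocol version and payload length bounds, and return the total
 * record length (header plus payload); returns 0 to wait for more data.
 */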
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
	char header[TLS_HEADER_SIZE + MAX_IV_SIZE];
	struct strp_msg *rxm = strp_msg(skb);
	size_t cipher_overhead;
	size_t data_len = 0;
	int ret;

	/* Verify that we have a full TLS header, or wait for more data */
	if (rxm->offset + tls_ctx->rx.prepend_size > skb->len)
		return 0;

	/* Sanity-check size of on-stack buffer. */
	if (WARN_ON(tls_ctx->rx.prepend_size > sizeof(header))) {
		ret = -EINVAL;
		goto read_failure;
	}

	/* Linearize header to local buffer */
	ret = skb_copy_bits(skb, rxm->offset, header, tls_ctx->rx.prepend_size);

	if (ret < 0)
		goto read_failure;

	ctx->control = header[0];

	data_len = ((header[4] & 0xFF) | (header[3] << 8));

	cipher_overhead = tls_ctx->rx.tag_size + tls_ctx->rx.iv_size;

	if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead) {
		ret = -EMSGSIZE;
		goto read_failure;
	}
	if (data_len < cipher_overhead) {
		ret = -EBADMSG;
		goto read_failure;
	}

	if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
	    header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
		ret = -EINVAL;
		goto read_failure;
	}

#ifdef CONFIG_TLS_DEVICE
	handle_device_resync(strp->sk, TCP_SKB_CB(skb)->seq + rxm->offset,
			     *(u64 *)tls_ctx->rx.rec_seq);
#endif
	return data_len + TLS_HEADER_SIZE;

read_failure:
	tls_err_abort(strp->sk, ret);

	return ret;
}

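/* strparser rcv_msg callback: stash the completed record and pause the
 * strparser until the record is consumed, then notify the original
 * data_ready callback.
 */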
static void tls_queue(struct strparser *strp, struct sk_buff *skb)
{
	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	ctx->decrypted = false;

	ctx->recv_pkt = skb;
	strp_pause(strp);

	ctx->saved_data_ready(strp->sk);
}

static void tls_data_ready(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	strp_data_ready(&ctx->strp);
}

void tls_sw_free_resources_tx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	crypto_free_aead(ctx->aead_send);
	tls_free_both_sg(sk);

	kfree(ctx);
}

void tls_sw_release_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;
		crypto_free_aead(ctx->aead_recv);
		strp_stop(&ctx->strp);
		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_data_ready = ctx->saved_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
		release_sock(sk);
		strp_done(&ctx->strp);
		lock_sock(sk);
	}
}

void tls_sw_free_resources_rx(struct sock *sk)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	tls_sw_release_resources_rx(sk);

	kfree(ctx);
}

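/* Set up software TLS crypto state for one direction (tx or rx):
 * allocate the per-direction context, derive sizes from the cipher
 * parameters, copy the IV and record sequence, allocate and key the
 * AEAD transform, and (for rx) attach the strparser to the socket.
 */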
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
	struct tls_crypto_info *crypto_info;
	struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
	struct tls_sw_context_tx *sw_ctx_tx = NULL;
	struct tls_sw_context_rx *sw_ctx_rx = NULL;
	struct cipher_context *cctx;
	struct crypto_aead **aead;
	struct strp_callbacks cb;
	u16 nonce_size, tag_size, iv_size, rec_seq_size;
	char *iv, *rec_seq;
	int rc = 0;

	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	if (tx) {
		if (!ctx->priv_ctx_tx) {
			sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL);
			if (!sw_ctx_tx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_tx = sw_ctx_tx;
		} else {
			sw_ctx_tx =
				(struct tls_sw_context_tx *)ctx->priv_ctx_tx;
		}
	} else {
		if (!ctx->priv_ctx_rx) {
			sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL);
			if (!sw_ctx_rx) {
				rc = -ENOMEM;
				goto out;
			}
			ctx->priv_ctx_rx = sw_ctx_rx;
		} else {
			sw_ctx_rx =
				(struct tls_sw_context_rx *)ctx->priv_ctx_rx;
		}
	}

	if (tx) {
		crypto_init_wait(&sw_ctx_tx->async_wait);
		crypto_info = &ctx->crypto_send.info;
		cctx = &ctx->tx;
		aead = &sw_ctx_tx->aead_send;
	} else {
		crypto_init_wait(&sw_ctx_rx->async_wait);
		crypto_info = &ctx->crypto_recv.info;
		cctx = &ctx->rx;
		aead = &sw_ctx_rx->aead_recv;
	}

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
		iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
		iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
		rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
		rec_seq =
		 ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
		gcm_128_info =
			(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	}
	default:
		rc = -EINVAL;
		goto free_priv;
	}

	/* Sanity-check the IV size for stack allocations. */
	if (iv_size > MAX_IV_SIZE || nonce_size > MAX_IV_SIZE) {
		rc = -EINVAL;
		goto free_priv;
	}

	cctx->prepend_size = TLS_HEADER_SIZE + nonce_size;
	cctx->tag_size = tag_size;
	cctx->overhead_size = cctx->prepend_size + cctx->tag_size;
	cctx->iv_size = iv_size;
	cctx->iv = kmalloc(iv_size + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
			   GFP_KERNEL);
	if (!cctx->iv) {
		rc = -ENOMEM;
		goto free_priv;
	}
	memcpy(cctx->iv, gcm_128_info->salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv, iv_size);
	cctx->rec_seq_size = rec_seq_size;
	cctx->rec_seq = kmemdup(rec_seq, rec_seq_size, GFP_KERNEL);
	if (!cctx->rec_seq) {
		rc = -ENOMEM;
		goto free_iv;
	}

	if (sw_ctx_tx) {
		sg_init_table(sw_ctx_tx->sg_encrypted_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_encrypted_data));
		sg_init_table(sw_ctx_tx->sg_plaintext_data,
			      ARRAY_SIZE(sw_ctx_tx->sg_plaintext_data));

		sg_init_table(sw_ctx_tx->sg_aead_in, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_in[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_in[1]);
		sg_chain(sw_ctx_tx->sg_aead_in, 2,
			 sw_ctx_tx->sg_plaintext_data);
		sg_init_table(sw_ctx_tx->sg_aead_out, 2);
		sg_set_buf(&sw_ctx_tx->sg_aead_out[0], sw_ctx_tx->aad_space,
			   sizeof(sw_ctx_tx->aad_space));
		sg_unmark_end(&sw_ctx_tx->sg_aead_out[1]);
		sg_chain(sw_ctx_tx->sg_aead_out, 2,
			 sw_ctx_tx->sg_encrypted_data);
	}

	if (!*aead) {
		*aead = crypto_alloc_aead("gcm(aes)", 0, 0);
		if (IS_ERR(*aead)) {
			rc = PTR_ERR(*aead);
			*aead = NULL;
			goto free_rec_seq;
		}
	}

	ctx->push_pending_record = tls_sw_push_pending_record;

	rc = crypto_aead_setkey(*aead, gcm_128_info->key,
				TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	if (rc)
		goto free_aead;

	rc = crypto_aead_setauthsize(*aead, cctx->tag_size);
	if (rc)
		goto free_aead;

	if (sw_ctx_rx) {
		/* Set up strparser */
		memset(&cb, 0, sizeof(cb));
		cb.rcv_msg = tls_queue;
		cb.parse_msg = tls_read_size;

		strp_init(&sw_ctx_rx->strp, sk, &cb);

		write_lock_bh(&sk->sk_callback_lock);
		sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
		sk->sk_data_ready = tls_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);

		sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;

		strp_check_rcv(&sw_ctx_rx->strp);
	}

	goto out;

free_aead:
	crypto_free_aead(*aead);
	*aead = NULL;
free_rec_seq:
	kfree(cctx->rec_seq);
	cctx->rec_seq = NULL;
free_iv:
	kfree(cctx->iv);
	cctx->iv = NULL;
free_priv:
	if (tx) {
		kfree(ctx->priv_ctx_tx);
		ctx->priv_ctx_tx = NULL;
	} else {
		kfree(ctx->priv_ctx_rx);
		ctx->priv_ctx_rx = NULL;
	}
out:
	return rc;
}