// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta (atul.gupta@chelsio.com)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/tls.h>

#include "chtls.h"
#include "chtls_cm.h"

#define DRV_NAME "chtls"

/*
 * chtls device management
 * maintains a list of the chtls devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);

static DEFINE_MUTEX(notify_mutex);
static RAW_NOTIFIER_HEAD(listen_notify_list);
static struct proto chtls_cpl_prot;
struct request_sock_ops chtls_rsk_ops;
static uint send_page_order = (14 - PAGE_SHIFT < 0) ? 0 : 14 - PAGE_SHIFT;

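/*
 * Listen state notifications are distributed over a raw notifier
 * chain; every caller serializes on notify_mutex, so no further
 * locking is needed inside the chain itself.
 */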
static void register_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_register(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

static void unregister_listen_notifier(struct notifier_block *nb)
{
	mutex_lock(&notify_mutex);
	raw_notifier_chain_unregister(&listen_notify_list, nb);
	mutex_unlock(&notify_mutex);
}

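/*
 * Notifier callback: start or stop hardware listening for the socket
 * described by the chtls_listen payload, then free the payload.
 */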
static int listen_notify_handler(struct notifier_block *this,
				 unsigned long event, void *data)
{
	struct chtls_listen *clisten;
	int ret = NOTIFY_DONE;

	clisten = (struct chtls_listen *)data;

	switch (event) {
	case CHTLS_LISTEN_START:
		ret = chtls_listen_start(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	case CHTLS_LISTEN_STOP:
		chtls_listen_stop(clisten->cdev, clisten->sk);
		kfree(clisten);
		break;
	}
	return ret;
}

static struct notifier_block listen_notifier = {
	.notifier_call = listen_notify_handler
};

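/*
 * Backlog receive for listening sockets: ordinary TCP segments (whose
 * transport header differs from the network header) go to
 * tcp_v4_do_rcv(); CPL messages queued by this driver have both
 * header offsets reset to the same position and are passed to the
 * handler stashed in the skb control block.
 */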
static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb_transport_header(skb) != skb_network_header(skb)))
		return tcp_v4_do_rcv(sk, skb);
	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
	return 0;
}

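/*
 * Hand a listening socket to the hardware. Only TCP sockets that are
 * not bound to a loopback address qualify; the actual setup runs
 * through the listen notifier chain under notify_mutex.
 */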
static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;
	int err;

	if (sk->sk_protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	if (sk->sk_family == PF_INET &&
	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
		return -EADDRNOTAVAIL;

	sk->sk_backlog_rcv = listen_backlog_rcv;
	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return -ENOMEM;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	err = raw_notifier_call_chain(&listen_notify_list,
				      CHTLS_LISTEN_START, clisten);
	mutex_unlock(&notify_mutex);
	return err;
}

static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
	struct chtls_listen *clisten;

	if (sk->sk_protocol != IPPROTO_TCP)
		return;

	clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
	if (!clisten)
		return;
	clisten->cdev = cdev;
	clisten->sk = sk;
	mutex_lock(&notify_mutex);
	raw_notifier_call_chain(&listen_notify_list,
				CHTLS_LISTEN_STOP, clisten);
	mutex_unlock(&notify_mutex);
}

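/* Return 1 if any port on this adapter advertises inline TLS record
 * offload (NETIF_F_HW_TLS_RECORD).
 */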
static int chtls_inline_feature(struct tls_device *dev)
{
	struct net_device *netdev;
	struct chtls_dev *cdev;
	int i;

	cdev = to_chtls_dev(dev);

	for (i = 0; i < cdev->lldi->nports; i++) {
		netdev = cdev->ports[i];
		if (netdev->features & NETIF_F_HW_TLS_RECORD)
			return 1;
	}
	return 0;
}

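/*
 * tls_device hash/unhash hooks: add a listening socket to, or remove
 * it from, the hardware listen tables. Non-listening sockets need no
 * work here.
 */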
static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		return chtls_start_listen(cdev, sk);
	return 0;
}

static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
	struct chtls_dev *cdev = to_chtls_dev(dev);

	if (sk->sk_state == TCP_LISTEN)
		chtls_stop_listen(cdev, sk);
}

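/*
 * Release everything chtls_uld_add() allocated for an adapter and
 * unregister it from the TLS core.
 */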
static void chtls_free_uld(struct chtls_dev *cdev)
{
	int i;

	tls_unregister_device(&cdev->tlsdev);
	kvfree(cdev->kmap.addr);
	idr_destroy(&cdev->hwtid_idr);
	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
		kfree_skb(cdev->rspq_skb_cache[i]);
	kfree(cdev->lldi);
	kfree_skb(cdev->askb);
	kfree(cdev);
}

static inline void chtls_dev_release(struct kref *kref)
{
	struct chtls_dev *cdev;
	struct tls_device *dev;

	dev = container_of(kref, struct tls_device, kref);
	cdev = to_chtls_dev(dev);
	chtls_free_uld(cdev);
}

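/*
 * Register the adapter with the TLS core as "chtls" plus the name of
 * its first port, wiring up the feature/hash/unhash/release hooks.
 */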
static void chtls_register_dev(struct chtls_dev *cdev)
{
	struct tls_device *tlsdev = &cdev->tlsdev;

	strlcpy(tlsdev->name, "chtls", TLS_DEVICE_NAME_MAX);
	strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
		TLS_DEVICE_NAME_MAX);
	tlsdev->feature = chtls_inline_feature;
	tlsdev->hash = chtls_create_hash;
	tlsdev->unhash = chtls_destroy_hash;
	tlsdev->release = chtls_dev_release;
	kref_init(&tlsdev->kref);
	tls_register_device(tlsdev);
	cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}

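/*
 * Work handler for CPL messages whose processing was deferred from
 * atomic context. The queue lock is dropped around each handler call
 * so the handlers are free to sleep.
 */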
static void process_deferq(struct work_struct *task_param)
{
	struct chtls_dev *cdev = container_of(task_param,
					      struct chtls_dev, deferq_task);
	struct sk_buff *skb;

	spin_lock_bh(&cdev->deferq.lock);
	while ((skb = __skb_dequeue(&cdev->deferq)) != NULL) {
		spin_unlock_bh(&cdev->deferq.lock);
		DEFERRED_SKB_CB(skb)->handler(cdev, skb);
		spin_lock_bh(&cdev->deferq.lock);
	}
	spin_unlock_bh(&cdev->deferq.lock);
}

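/*
 * Preallocate a zeroed skb holding a bare TCP header, kept on the
 * device for later use by the CPL handling code.
 */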
static int chtls_get_skb(struct chtls_dev *cdev)
{
	cdev->askb = alloc_skb(sizeof(struct tcphdr), GFP_KERNEL);
	if (!cdev->askb)
		return -ENOMEM;

	skb_put(cdev->askb, sizeof(struct tcphdr));
	skb_reset_transport_header(cdev->askb);
	memset(cdev->askb->data, 0, cdev->askb->len);
	return 0;
}

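/*
 * ULD add callback: allocate the per-adapter chtls device along with
 * a private copy of the LLD info, the preallocated skbs and the
 * deferred-work queue, then link it into the global device list.
 */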
static void *chtls_uld_add(const struct cxgb4_lld_info *info)
{
	struct cxgb4_lld_info *lldi;
	struct chtls_dev *cdev;
	int i, j;

	cdev = kzalloc(sizeof(*cdev) + info->nports *
		       (sizeof(struct net_device *)), GFP_KERNEL);
	if (!cdev)
		goto out;

	lldi = kzalloc(sizeof(*lldi), GFP_KERNEL);
	if (!lldi)
		goto out_lldi;

	if (chtls_get_skb(cdev))
		goto out_skb;

	*lldi = *info;
	cdev->lldi = lldi;
	cdev->pdev = lldi->pdev;
	cdev->tids = lldi->tids;
	cdev->ports = lldi->ports;
	cdev->mtus = lldi->mtus;
	cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
		     << FW_VIID_PFN_S;

	for (i = 0; i < (1 << RSPQ_HASH_BITS); i++) {
		unsigned int size = 64 - sizeof(struct rsp_ctrl) - 8;

		cdev->rspq_skb_cache[i] = __alloc_skb(size,
						      gfp_any(), 0,
						      lldi->nodeid);
		if (unlikely(!cdev->rspq_skb_cache[i]))
			goto out_rspq_skb;
	}

	idr_init(&cdev->hwtid_idr);
	INIT_WORK(&cdev->deferq_task, process_deferq);
	spin_lock_init(&cdev->listen_lock);
	spin_lock_init(&cdev->idr_lock);
	cdev->send_page_order = min_t(uint, get_order(32768),
				      send_page_order);
	cdev->max_host_sndbuf = 48 * 1024;

	if (lldi->vr->key.size)
		if (chtls_init_kmap(cdev, lldi))
			goto out_rspq_skb;

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list, &cdev_list);
	mutex_unlock(&cdev_mutex);

	return cdev;
out_rspq_skb:
	for (j = 0; j < i; j++)
		kfree_skb(cdev->rspq_skb_cache[j]);
	kfree_skb(cdev->askb);
out_skb:
	kfree(lldi);
out_lldi:
	kfree(cdev);
out:
	return NULL;
}

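/* At module unload, drop the final reference on every device that
 * made it to CHTLS_CDEV_STATE_UP.
 */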
static void chtls_free_all_uld(void)
{
	struct chtls_dev *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
		if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
			list_del(&cdev->list);
			kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		}
	}
	mutex_unlock(&cdev_mutex);
}

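/*
 * ULD state-change callback: register with the TLS core once the
 * adapter comes up, and drop our reference when it detaches.
 */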
static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct chtls_dev *cdev = handle;

	switch (new_state) {
	case CXGB4_STATE_UP:
		chtls_register_dev(cdev);
		break;
	case CXGB4_STATE_DOWN:
		break;
	case CXGB4_STATE_START_RECOVERY:
		break;
	case CXGB4_STATE_DETACH:
		mutex_lock(&cdev_mutex);
		list_del(&cdev->list);
		mutex_unlock(&cdev_mutex);
		kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
		break;
	default:
		break;
	}
	return 0;
}

static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
					  const __be64 *rsp,
					  u32 pktshift)
{
	struct sk_buff *skb;

	/* Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once driver synthesizes cpl_pass_accept_req the skb will go
	 * through the regular cpl_pass_accept_req processing in TOM.
	 */
	skb = alloc_skb(size_add(gl->tot_len,
				 sizeof(struct cpl_pass_accept_req)) -
			pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;
	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)
		  - pktshift);
	/* For now we will copy cpl_rx_pkt in the skb */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_rx_pkt));
	skb_copy_to_linear_data_offset(skb, sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);

	return skb;
}

static int chtls_recv_packet(struct chtls_dev *cdev,
			     const struct pkt_gl *gl, const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;
	int ret;

	skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);
	if (!skb)
		return -ENOMEM;

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);

	return 0;
}

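/*
 * Handle a CPL message that arrived without gather-list data. A small
 * hash-indexed skb cache is tried first: if the cached skb is linear,
 * unshared and uncloned, it is trimmed and reused rather than
 * allocating a fresh skb for every response.
 */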
static int chtls_recv_rsp(struct chtls_dev *cdev, const __be64 *rsp)
{
	unsigned long rspq_bin;
	unsigned int opcode;
	struct sk_buff *skb;
	unsigned int len;
	int ret;

	len = 64 - sizeof(struct rsp_ctrl) - 8;
	opcode = *(u8 *)rsp;

	rspq_bin = hash_ptr((void *)rsp, RSPQ_HASH_BITS);
	skb = cdev->rspq_skb_cache[rspq_bin];
	if (skb && !skb_is_nonlinear(skb) &&
	    !skb_shared(skb) && !skb_cloned(skb)) {
		refcount_inc(&skb->users);
		if (refcount_read(&skb->users) == 2) {
			__skb_trim(skb, 0);
			if (skb_tailroom(skb) >= len)
				goto copy_out;
		}
		refcount_dec(&skb->users);
	}
	skb = alloc_skb(len, GFP_ATOMIC);
	if (unlikely(!skb))
		return -ENOMEM;

copy_out:
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, rsp, len);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	ret = chtls_handlers[opcode](cdev, skb);

	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
	return 0;
}

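/*
 * Handle a CPL message delivered with gather-list data: push the RSS
 * header back in front of the payload and dispatch on the opcode.
 */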
static void chtls_recv(struct chtls_dev *cdev,
		       struct sk_buff **skbs, const __be64 *rsp)
{
	struct sk_buff *skb = *skbs;
	unsigned int opcode;
	int ret;

	opcode = *(u8 *)rsp;

	__skb_push(skb, sizeof(struct rss_header));
	skb_copy_to_linear_data(skb, rsp, sizeof(struct rss_header));

	ret = chtls_handlers[opcode](cdev, skb);
	if (ret & CPL_RET_BUF_DONE)
		kfree_skb(skb);
}

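/*
 * Main ULD receive entry point. CPL_RX_PKT is copied into an skb
 * sized for the cpl_pass_accept_req that will be synthesized from it
 * (see copy_gl_to_skb_pkt()); everything else is dispatched with or
 * without gather-list data as appropriate.
 */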
static int chtls_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *gl)
{
	struct chtls_dev *cdev = handle;
	unsigned int opcode;
	struct sk_buff *skb;

	opcode = *(u8 *)rsp;

	if (unlikely(opcode == CPL_RX_PKT)) {
		if (chtls_recv_packet(cdev, gl, rsp) < 0)
			goto nomem;
		return 0;
	}

	if (!gl)
		return chtls_recv_rsp(cdev, rsp);

#define RX_PULL_LEN 128
	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
	if (unlikely(!skb))
		goto nomem;
	chtls_recv(cdev, &skb, rsp);
	return 0;

nomem:
	return -ENOMEM;
}

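/*
 * getsockopt(SOL_TLS): only the TLS version (1.2) is reported back;
 * the remaining crypto_info fields are left zeroed.
 */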
static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
			       int __user *optlen)
{
	struct tls_crypto_info crypto_info = { 0 };

	crypto_info.version = TLS_1_2_VERSION;
	if (copy_to_user(optval, &crypto_info, sizeof(struct tls_crypto_info)))
		return -EFAULT;
	return 0;
}

static int chtls_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->getsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_getsockopt(sk, optval, optlen);
}

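/*
 * setsockopt(SOL_TLS): validate the user-supplied crypto_info
 * (TLS 1.2 with AES-GCM-128 is the only supported combination), copy
 * the full cipher description into the connection state and hand the
 * key to chtls_setkey() for programming into the adapter.
 */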
static int do_chtls_setsockopt(struct sock *sk, int optname,
			       char __user *optval, unsigned int optlen)
{
	struct tls_crypto_info *crypto_info, tmp_crypto_info;
	struct chtls_sock *csk;
	int keylen;
	int rc = 0;

	csk = rcu_dereference_sk_user_data(sk);

	if (!optval || optlen < sizeof(*crypto_info)) {
		rc = -EINVAL;
		goto out;
	}

	rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* check version */
	if (tmp_crypto_info.version != TLS_1_2_VERSION) {
		rc = -ENOTSUPP;
		goto out;
	}

	crypto_info = (struct tls_crypto_info *)&csk->tlshws.crypto_info;

	switch (tmp_crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		/* Obtain version and type from previous copy */
		crypto_info[0] = tmp_crypto_info;
		/* Now copy the following data */
		rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
				    optval + sizeof(*crypto_info),
				    sizeof(struct tls12_crypto_info_aes_gcm_128)
				    - sizeof(*crypto_info));

		if (rc) {
			rc = -EFAULT;
			goto out;
		}

		keylen = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
		rc = chtls_setkey(csk, keylen, optname);
		break;
	}
	default:
		rc = -EINVAL;
		goto out;
	}
out:
	return rc;
}

static int chtls_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct tls_context *ctx = tls_get_ctx(sk);

	if (level != SOL_TLS)
		return ctx->sk_proto->setsockopt(sk, level,
						 optname, optval, optlen);

	return do_chtls_setsockopt(sk, optname, optval, optlen);
}

static struct cxgb4_uld_info chtls_uld_info = {
	.name = DRV_NAME,
	.nrxq = MAX_ULD_QSETS,
	.ntxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chtls_uld_add,
	.state_change = chtls_uld_state_change,
	.rx_handler = chtls_uld_rx_handler,
};

void chtls_install_cpl_ops(struct sock *sk)
{
	sk->sk_prot = &chtls_cpl_prot;
}

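/*
 * Build the offloaded proto ops from tcp_prot, overriding only the
 * entry points that must go through the chtls data path.
 */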
static void __init chtls_init_ulp_ops(void)
{
	chtls_cpl_prot = tcp_prot;
	chtls_init_rsk_ops(&chtls_cpl_prot, &chtls_rsk_ops,
			   &tcp_prot, PF_INET);
	chtls_cpl_prot.close = chtls_close;
	chtls_cpl_prot.disconnect = chtls_disconnect;
	chtls_cpl_prot.destroy = chtls_destroy_sock;
	chtls_cpl_prot.shutdown = chtls_shutdown;
	chtls_cpl_prot.sendmsg = chtls_sendmsg;
	chtls_cpl_prot.sendpage = chtls_sendpage;
	chtls_cpl_prot.recvmsg = chtls_recvmsg;
	chtls_cpl_prot.setsockopt = chtls_setsockopt;
	chtls_cpl_prot.getsockopt = chtls_getsockopt;
}

static int __init chtls_register(void)
{
	chtls_init_ulp_ops();
	register_listen_notifier(&listen_notifier);
	cxgb4_register_uld(CXGB4_ULD_TLS, &chtls_uld_info);
	return 0;
}

static void __exit chtls_unregister(void)
{
	unregister_listen_notifier(&listen_notifier);
	chtls_free_all_uld();
	cxgb4_unregister_uld(CXGB4_ULD_TLS);
}

module_init(chtls_register);
module_exit(chtls_unregister);

MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);