// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 * - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */

#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);

static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};

int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);

struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
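
/* Illustration (userspace, not part of this file): applications select SMC
 * by creating their socket with AF_SMC; the protocol argument picks one of
 * the two protos registered above. The numeric values below are assumptions
 * taken from include/linux/socket.h and net/smc/smc.h of this kernel tree
 * and are repeated here only to keep the sketch self-contained:
 *
 *	#include <sys/socket.h>
 *
 *	#define AF_SMC		43	// include/linux/socket.h
 *	#define SMCPROTO_SMC	0	// IPv4, backed by smc_proto
 *	#define SMCPROTO_SMC6	1	// IPv6, backed by smc_proto6
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *	// fd now behaves like a TCP stream socket, with SMC acceleration
 *	// when available and transparent fallback to TCP otherwise.
 */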

static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	smc->clcsock->file->private_data = smc->sk.sk_socket;
	smc->clcsock->file = NULL;
}

static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		smc_sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			if (sk->sk_state != SMC_LISTEN &&
			    sk->sk_state != SMC_INIT)
				sock_put(sk); /* passive closing */
			if (sk->sk_state == SMC_LISTEN) {
				/* wake up clcsock accept */
				rc = kernel_sock_shutdown(smc->clcsock,
							  SHUT_RDWR);
			}
			sk->sk_state = SMC_CLOSED;
			sk->sk_state_change(sk);
		}
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}

static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);

	if (cancel_work_sync(&smc->connect_work))
		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* final sock_put */
out:
	return rc;
}

static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}

static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);

	return sk;
}

static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}

static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}

/* register a new rmb, send confirm_rkey msg to register with peer */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
		       bool conf_rkey)
{
	if (!rmb_desc->wr_reg) {
		/* register memory region for new rmb */
		if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
			rmb_desc->regerr = 1;
			return -EFAULT;
		}
		rmb_desc->wr_reg = 1;
	}
	if (!conf_rkey)
		return 0;
	/* exchange confirm_rkey msg with peer */
	if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	return 0;
}

static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}

	if (link->llc_confirm_rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive ADD LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
	}

	/* send add link reject message, only one link supported for now */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}

static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->rmbe_size);

	smc->conn.peer_rmbe_idx = clc->rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->dmbe_size);

	smc->conn.peer_rmbe_idx = clc->dmbe_idx;
	smc->conn.peer_token = clc->token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->qpn);
	memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->psn);
	link->peer_mtu = clc->qp_mtu;
}

static void smc_switch_to_fallback(struct smc_sock *smc)
{
	wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
	wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
	unsigned long flags;

	smc->use_fallback = true;
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
		smc->clcsock->wq.fasync_list =
			smc->sk.sk_socket->wq.fasync_list;

		/* There may be some entries remaining in
		 * smc socket->wq, which should be removed
		 * to clcsocket->wq during the fallback.
		 */
		spin_lock_irqsave(&smc_wait->lock, flags);
		spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
		list_splice_init(&smc_wait->head, &clc_wait->head);
		spin_unlock(&clc_wait->lock);
		spin_unlock_irqrestore(&smc_wait->lock, flags);
	}
}
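
/* After smc_switch_to_fallback() the socket file's private_data points at
 * the internal TCP (CLC) socket, so subsequent syscalls on the fd reach
 * TCP directly; smc_restore_fallback_changes() reverts this mapping on
 * release. The wait queue splice above keeps existing poll/epoll waiters
 * armed across the switch.
 */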

/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	smc_switch_to_fallback(smc);
	smc->fallback_rsn = reason_code;
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
{
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code);
		if (rc < 0) {
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}

/* abort connecting */
static int smc_connect_abort(struct smc_sock *smc, int reason_code,
			     int local_contact)
{
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(smc->conn.lgr);
	if (smc->conn.lgr->is_smcd)
		/* there is only one lgr role for SMC-D; use server lock */
		mutex_unlock(&smc_server_lgr_pending);
	else
		mutex_unlock(&smc_client_lgr_pending);

	smc_conn_free(&smc->conn);
	smc->connect_nonblock = 0;
	return reason_code;
}

/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}

/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev)
		return SMC_CLC_DECL_NOSMCDDEV;
	return 0;
}

/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev, ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}

/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
					struct smc_init_info *ini)
{
	if (!is_smcd)
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev, ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc, int smc_type,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, smc_type, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT,
				CLC_WAIT_TIME);
}

/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	struct smc_link *link;
	int reason_code = 0;

	ini->is_smcd = false;
	ini->ib_lcl = &aclc->lcl;
	ini->ib_clcqpn = ntoh24(aclc->qpn);
	ini->srv_first_contact = aclc->hdr.flag;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, aclc);

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
					 ini->cln_first_contact);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, aclc))
		return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
					 ini->cln_first_contact);

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
						 ini->cln_first_contact);
	} else {
		if (smc_reg_rmb(link, smc->conn.rmb_desc, true))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
						 ini->cln_first_contact);
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc);
	if (reason_code)
		return smc_connect_abort(smc, reason_code,
					 ini->cln_first_contact);

	smc_tx_init(smc);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(smc);
		if (reason_code)
			return smc_connect_abort(smc, reason_code,
						 ini->cln_first_contact);
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}

/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	ini->is_smcd = true;
	ini->ism_gid = aclc->gid;
	ini->srv_first_contact = aclc->hdr.flag;

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	if (smc_buf_create(smc, true))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
					 ini->cln_first_contact);

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	rc = smc_clc_send_confirm(smc);
	if (rc)
		return smc_connect_abort(smc, rc, ini->cln_first_contact);
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}

/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	bool ism_supported = false, rdma_supported = false;
	struct smc_clc_msg_accept_confirm aclc;
	struct smc_init_info ini = {0};
	int smc_type;
	int rc = 0;

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, &ini))
		return smc_connect_decline_fallback(smc,
						    SMC_CLC_DECL_GETVLANERR);

	/* check if there is an ism device available */
	if (!smc_find_ism_device(smc, &ini) &&
	    !smc_connect_ism_vlan_setup(smc, &ini)) {
		/* ISM is supported for this connection */
		ism_supported = true;
		smc_type = SMC_TYPE_D;
	}

	/* check if there is a rdma device available */
	if (!smc_find_rdma_device(smc, &ini)) {
		/* RDMA is supported for this connection */
		rdma_supported = true;
		if (ism_supported)
			smc_type = SMC_TYPE_B; /* both */
		else
			smc_type = SMC_TYPE_R; /* only RDMA */
	}

	/* if neither ISM nor RDMA are supported, fallback */
	if (!rdma_supported && !ism_supported)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, smc_type, &aclc, &ini);
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
		return smc_connect_decline_fallback(smc, rc);
	}

	/* depending on previous steps, connect using rdma or ism */
	if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
		rc = smc_connect_rdma(smc, &aclc, &ini);
	else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
		rc = smc_connect_ism(smc, &aclc, &ini);
	else
		rc = SMC_CLC_DECL_MODEUNSUPP;
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
		return smc_connect_decline_fallback(smc, rc);
	}

	smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
	return 0;
}
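
/* Overview of the client-side CLC handshake driven by __smc_connect():
 * three messages are exchanged inband on the TCP connection before any
 * RDMA or ISM traffic flows (a sketch; message types as in smc_clc.h):
 *
 *	client                             server
 *	  | -------- SMC Proposal ------> |	smc_connect_clc()
 *	  | <-------- SMC Accept -------- |	smc_connect_clc()
 *	  | -------- SMC Confirm -------> |	smc_clc_send_confirm()
 *
 * A nonzero reason code along the way results in an inband decline plus
 * TCP fallback where possible; hard errors (reason_code < 0) close the
 * socket without fallback.
 */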

static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
		   (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
		     (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (rc == -ECONNREFUSED)
			smc->sk.sk_err = ECONNREFUSED;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		sock_put(&smc->sk); /* passive closing */
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}

static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;

	if (smc->use_fallback)
		goto out;
	sock_hold(&smc->sk); /* sock put in passive closing */
	if (flags & O_NONBLOCK) {
		if (schedule_work(&smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}
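
/* Illustration (userspace, not part of this file): a non-blocking connect
 * on an AF_SMC socket returns -EINPROGRESS while connect_work runs in the
 * background; completion is observed via poll(), exactly as with TCP.
 * Constants as in the sketch after smc_proto6 above:
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <errno.h>
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd p = { .fd = fd, .events = POLLOUT };
 *		poll(&p, 1, -1);
 *		// writable once SMC or fallback setup completed, see the
 *		// sk_write_space calls in smc_connect_work() above
 *	}
 */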

static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		smc_sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}

/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink () */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}

/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* final sock_put */
}

static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}

	if (link->llc_confirm_resp_rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* send ADD LINK request to client over the RoCE fabric */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	/* receive ADD LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
	}

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}

/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}

/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;
	smc_conn_free(&new_smc->conn);

	smc_listen_out(new_smc);
}

/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_contact)
{
	/* RDMA setup failed, switch back to TCP */
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(new_smc->conn.lgr);
	if (reason_code < 0) { /* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	smc_conn_free(&new_smc->conn);
	smc_switch_to_fallback(new_smc);
	new_smc->fallback_rsn = reason_code;
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}

/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}

/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}

/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd;
	int rc;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	ini->ism_gid = pclc_smcd->gid;
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Check if peer can be reached via ISM device */
	if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
			    new_smc->conn.lgr->vlan_id,
			    new_smc->conn.lgr->smcd)) {
		if (ini->cln_first_contact == SMC_FIRST_CONTACT)
			smc_lgr_forget(new_smc->conn.lgr);
		smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_SMCDNOTALK;
	}

	/* Create send and receive buffers */
	if (smc_buf_create(new_smc, true)) {
		if (ini->cln_first_contact == SMC_FIRST_CONTACT)
			smc_lgr_forget(new_smc->conn.lgr);
		smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_MEM;
	}

	return 0;
}

/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	if (local_contact != SMC_FIRST_CONTACT) {
		if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}

/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
	int reason_code = 0;

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto decline;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto decline;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code)
			goto decline;
	}
	return 0;

decline:
	smc_listen_decline(new_smc, reason_code, local_contact);
	return reason_code;
}

/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm cclc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info ini = {0};
	bool ism_supported = false;
	u8 buf[SMC_CLC_MAX_LEN];
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		smc_switch_to_fallback(new_smc);
		new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
		smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	pclc = (struct smc_clc_msg_proposal *)&buf;
	rc = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	/* check for matching IP prefix and subnet length */
	rc = smc_listen_prfx_check(new_smc, pclc);
	if (rc)
		goto out_decl;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, &ini)) {
		rc = SMC_CLC_DECL_GETVLANERR;
		goto out_decl;
	}

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* check if ISM is available */
	if (pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) {
		ini.is_smcd = true; /* prepare ISM check */
		rc = smc_find_ism_device(new_smc, &ini);
		if (!rc)
			rc = smc_listen_ism_init(new_smc, pclc, &ini);
		if (!rc)
			ism_supported = true;
		else if (pclc->hdr.path == SMC_TYPE_D)
			goto out_unlock; /* skip RDMA and decline */
	}

	/* check if RDMA is available */
	if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
		/* prepare RDMA check */
		ini.is_smcd = false;
		ini.ism_dev = NULL;
		ini.ib_lcl = &pclc->lcl;
		rc = smc_find_rdma_device(new_smc, &ini);
		if (rc) {
			/* no RDMA device found */
			if (pclc->hdr.path == SMC_TYPE_B)
				/* neither ISM nor RDMA device found */
				rc = SMC_CLC_DECL_NOSMCDEV;
			goto out_unlock;
		}
		rc = smc_listen_rdma_init(new_smc, &ini);
		if (rc)
			goto out_unlock;
		rc = smc_listen_rdma_reg(new_smc, ini.cln_first_contact);
		if (rc)
			goto out_unlock;
	}

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, ini.cln_first_contact);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ism_supported)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	rc = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ism_supported)
			goto out_unlock;
		goto out_decl;
	}

	/* finish worker */
	if (!ism_supported) {
		rc = smc_listen_rdma_finish(new_smc, &cclc,
					    ini.cln_first_contact);
		mutex_unlock(&smc_server_lgr_pending);
		if (rc)
			return;
	}
	smc_conn_save_peer_info(new_smc, &cclc);
	smc_listen_out_connected(new_smc);
	return;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini.cln_first_contact);
}

static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!schedule_work(&new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}

static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
	    smc->connect_nonblock)
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;
	sock_hold(sk); /* sock_hold in tcp_listen_worker */
	if (!schedule_work(&smc->tcp_listen_work))
		sock_put(sk);

out:
	release_sock(sk);
	return rc;
}
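
/* Illustration (userspace, not part of this file): the passive side looks
 * exactly like TCP; kernel-side, smc_tcp_listen_work() above accepts CLC
 * connections and smc_listen_work() completes the handshake per child.
 * Constants as in the sketch after smc_proto6 above:
 *
 *	int lfd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *	bind(lfd, (struct sockaddr *)&sa, sizeof(sa));
 *	listen(lfd, 128);
 *	int cfd = accept(lfd, NULL, NULL);
 *	// cfd is an SMC socket, or a TCP-fallback one if the peer did
 *	// not signal SMC capability in the TCP handshake
 */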

static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
					 MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}

static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	/* SMC does not support connect with fastopen */
	if (msg->msg_flags & MSG_FASTOPEN) {
		/* not connected yet, fallback */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
			goto out;
		}
	} else if ((sk->sk_state != SMC_ACTIVE) &&
		   (sk->sk_state != SMC_APPCLOSEWAIT1) &&
		   (sk->sk_state != SMC_INIT)) {
		rc = -EPIPE;
		goto out;
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}

static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}

static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work()*/
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				if (sk->sk_state != SMC_INIT) {
					/* Race breaker the same way as tcp_poll(). */
					smp_mb__after_atomic();
					if (atomic_read(&smc->conn.sndbuf_space))
						mask |= EPOLLOUT | EPOLLWRNORM;
				}
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}

static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	bool do_shutdown = true;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int old_state;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK) {
			sk->sk_state = SMC_CLOSED;
			sock_put(sk);
		}
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		old_state = sk->sk_state;
		rc = smc_close_active(smc);
		if (old_state == SMC_ACTIVE &&
		    sk->sk_state == SMC_PEERCLOSEWAIT1)
			do_shutdown = false;
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (do_shutdown && smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}

static int smc_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	if (level == SOL_TCP && optname == TCP_ULP)
		return -EOPNOTSUPP;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
					   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk->sk_error_report(sk);
	}

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);
	if (rc || smc->use_fallback)
		goto out;
	switch (optname) {
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (val)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (!val)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
out:
	release_sock(sk);

	return rc;
}

static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}

static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}
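
/* Illustration (userspace, not part of this file): the queue-size ioctls
 * behave as on TCP; for a native SMC connection the answers above are
 * derived from the RMB fill level and the connection cursors:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int inq, outq;
 *	ioctl(fd, SIOCINQ, &inq);	// bytes available to read
 *	ioctl(fd, SIOCOUTQ, &outq);	// bytes not sent + not acked
 */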

static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}
out:
	release_sock(sk);

	return rc;
}

/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};

static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}

static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};

static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = smc_pnet_init();
	if (rc)
		goto out_pernet_subsys;

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_pnet:
	smc_pnet_exit();
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}

static void __exit smc_exit(void)
{
	smc_core_exit();
	static_branch_disable(&tcp_have_smc);
	smc_ib_unregister_client();
	sock_unregister(PF_SMC);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	unregister_pernet_subsys(&smc_net_ops);
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);