/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

/*
 * Compare a local endpoint to an address.  Return -ve, 0 or +ve to indicate
 * less than, same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

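	/* This uses the GNU "a ?: b" extension: the first non-zero
	 * difference - transport type, then length, then family - decides
	 * the ordering.
	 */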
	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
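		/* Two counters guard the endpoint: "usage" is the object's
		 * reference count and "active_users" counts users that keep
		 * the transport socket open.  Both start at 1 on behalf of
		 * the caller.
		 */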
		atomic_set(&local->usage, 1);
		atomic_set(&local->active_users, 1);
		local->rxnet = rxnet;
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		local->srx.srx_service = 0;
		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with the endpoint mutex (rxnet->local_mutex) held
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct sock *usk;
	int ret, opt;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* set the socket up */
	usk = local->socket->sk;
	inet_sk(usk)->mc_loop = 0;

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(usk);

	rcu_assign_sk_user_data(usk, local);

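	/* Hook the UDP encapsulation receive path so that the UDP stack
	 * delivers incoming packets straight to rxrpc_input_packet().
	 */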
	udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
	udp_sk(usk)->encap_rcv = rxrpc_input_packet;
	udp_sk(usk)->encap_destroy = NULL;
	udp_sk(usk)->gro_receive = NULL;
	udp_sk(usk)->gro_complete = NULL;

	udp_encap_enable();
#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
	if (local->srx.transport.family == AF_INET6)
		udpv6_encap_enable();
#endif
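	/* Route ICMP/ICMPv6 errors raised against this socket to
	 * rxrpc_error_report() instead of the default handler.
	 */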
	usk->sk_error_report = rxrpc_error_report;

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	switch (local->srx.transport.family) {
	case AF_INET6:
		/* we want to receive ICMPv6 errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IPV6_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* Fall through and set IPv4 options too otherwise we don't get
		 * errors from IPv4 packets sent through the IPv6 socket.
		 */

	case AF_INET:
		/* we want to receive ICMP errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IP_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* We want receive timestamps. */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
					(char *)&opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}
		break;

	default:
		BUG();
	}

	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

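	/* The walk relies on the list being ordered by the comparison key:
	 * entries that sort before the address are skipped and the first one
	 * that sorts after it ends the search, leaving cursor marking where
	 * a new endpoint would go.
	 */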
	for (cursor = rxnet->local_endpoints.next;
	     cursor != &rxnet->local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_use_local(local))
			break;

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

	if (cursor != &rxnet->local_endpoints)
		list_replace_init(cursor, &local->link);
	else
		list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	if (local)
		call_rcu(&local->rcu, rxrpc_local_rcu);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}
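
/*
 * Example (hypothetical caller, for illustration only): a socket being bound
 * might obtain its transport endpoint like so:
 *
 *	local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
 *	if (IS_ERR(local))
 *		return PTR_ERR(local);
 *	rx->local = local;
 */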

/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_inc_return(&local->usage);
	trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	if (local) {
		int n = atomic_fetch_add_unless(&local->usage, 1, 0);

		if (n > 0)
			trace_rxrpc_local(local->debug_id, rxrpc_local_got,
					  n + 1, here);
		else
			local = NULL;
	}
	return local;
}

/*
 * Queue a local endpoint and pass the caller's reference to the work item.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = local->debug_id;
	int n = atomic_read(&local->usage);

	if (rxrpc_queue_work(&local->processor))
		trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
	else
		rxrpc_put_local(local);
}

/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (local) {
		n = atomic_dec_return(&local->usage);
		trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);

		if (n == 0)
			call_rcu(&local->rcu, rxrpc_local_rcu);
	}
}

/*
 * Start using a local endpoint.
 */
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
{
	unsigned int au;

	local = rxrpc_get_local_maybe(local);
	if (!local)
		return NULL;

	au = atomic_fetch_add_unless(&local->active_users, 1, 0);
	if (au == 0) {
		rxrpc_put_local(local);
		return NULL;
	}

	return local;
}
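
/*
 * Example (illustrative only): a user that pins the endpoint for a while is
 * expected to pair the calls:
 *
 *	if (rxrpc_use_local(local)) {
 *		... use the transport socket ...
 *		rxrpc_unuse_local(local);
 *	}
 */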

/*
 * Cease using a local endpoint.  Once the number of active users reaches 0, we
 * start the closure of the transport in the work processor.
 */
void rxrpc_unuse_local(struct rxrpc_local *local)
{
	unsigned int au;

	if (local) {
		au = atomic_dec_return(&local->active_users);
		if (au == 0)
			rxrpc_queue_local(local);
		else
			rxrpc_put_local(local);
	}
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	mutex_lock(&rxnet->local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);
}

/*
 * Process events on an endpoint.  The work item carries a ref which
 * we must release.
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
			  atomic_read(&local->usage), NULL);

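	/* Tear down the transport once the last active user is gone;
	 * otherwise keep draining the queues until both are empty.
	 */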
	do {
		again = false;
		if (atomic_read(&local->active_users) == 0) {
			rxrpc_local_destroyer(local);
			break;
		}

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);

	rxrpc_put_local(local);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		list_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}