#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/af_unix.h>
#include <net/tcp_states.h>

#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
	RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))

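/*
 * Report the address the socket is bound to as a UNIX_DIAG_NAME
 * attribute, skipping the leading sun_family field of sockaddr_un.
 */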
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_address *addr = unix_sk(sk)->addr;
	char *s;

	if (addr) {
		s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
		memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}

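/*
 * For filesystem-bound sockets, report the inode number and device of
 * the backing filesystem object as a UNIX_DIAG_VFS attribute.
 */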
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
	struct dentry *dentry = unix_sk(sk)->path.dentry;
	struct unix_diag_vfs *uv;

	if (dentry) {
		uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
		uv->udiag_vfs_ino = dentry->d_inode->i_ino;
		uv->udiag_vfs_dev = dentry->d_sb->s_dev;
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}

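/*
 * Report the inode number of the connected peer socket, if any,
 * as a UNIX_DIAG_PEER attribute.
 */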
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
{
	struct sock *peer;
	int ino;

	peer = unix_peer_get(sk);
	if (peer) {
		unix_state_lock(peer);
		ino = sock_i_ino(peer);
		unix_state_unlock(peer);
		sock_put(peer);

		RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
	}

	return 0;
rtattr_failure:
	return -EMSGSIZE;
}

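/*
 * For listening sockets, report the inode numbers of the sockets
 * sitting in the accept queue (pending incoming connections) as a
 * UNIX_DIAG_ICONS attribute.
 */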
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
	struct sk_buff *skb;
	u32 *buf;
	int i;

	if (sk->sk_state == TCP_LISTEN) {
		spin_lock(&sk->sk_receive_queue.lock);
		buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
				sk->sk_receive_queue.qlen * sizeof(u32));
		i = 0;
		skb_queue_walk(&sk->sk_receive_queue, skb) {
			struct sock *req, *peer;

			req = skb->sk;
			/*
			 * The state lock is outer for the same sk's
			 * queue lock. With the other's queue locked it's
			 * OK to lock the state.
			 */
			unix_state_lock_nested(req);
			peer = unix_sk(req)->peer;
			buf[i++] = (peer ? sock_i_ino(peer) : 0);
			unix_state_unlock(req);
		}
		spin_unlock(&sk->sk_receive_queue.lock);
	}

	return 0;

rtattr_failure:
	spin_unlock(&sk->sk_receive_queue.lock);
	return -EMSGSIZE;
}

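/*
 * Queue lengths: listeners report the accept-queue length and backlog
 * limit, other sockets report unix_inq_len()/unix_outq_len().
 */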
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
	struct unix_diag_rqlen *rql;

	rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));

	if (sk->sk_state == TCP_LISTEN) {
		rql->udiag_rqueue = sk->sk_receive_queue.qlen;
		rql->udiag_wqueue = sk->sk_max_ack_backlog;
	} else {
		rql->udiag_rqueue = (__u32)unix_inq_len(sk);
		rql->udiag_wqueue = (__u32)unix_outq_len(sk);
	}

	return 0;

rtattr_failure:
	return -EMSGSIZE;
}

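/*
 * Fill one netlink reply message for @sk, attaching the optional
 * attributes requested through req->udiag_show.
 */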
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 pid, u32 seq, u32 flags, int sk_ino)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct nlmsghdr *nlh;
	struct unix_diag_msg *rep;

	nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
	nlh->nlmsg_flags = flags;

	rep = NLMSG_DATA(nlh);

	rep->udiag_family = AF_UNIX;
	rep->udiag_type = sk->sk_type;
	rep->udiag_state = sk->sk_state;
	rep->pad = 0;
	rep->udiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rep->udiag_cookie);

	if ((req->udiag_show & UDIAG_SHOW_NAME) &&
	    sk_diag_dump_name(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_VFS) &&
	    sk_diag_dump_vfs(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_PEER) &&
	    sk_diag_dump_peer(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
	    sk_diag_dump_icons(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
	    sk_diag_show_rqlen(sk, skb))
		goto nlmsg_failure;

	if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
		goto nlmsg_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

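/*
 * Skip sockets whose inode is (still) zero, i.e. ones with no
 * struct socket attached; otherwise fill a reply message.
 */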
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
		u32 pid, u32 seq, u32 flags)
{
	int sk_ino;

	unix_state_lock(sk);
	sk_ino = sock_i_ino(sk);
	unix_state_unlock(sk);

	if (!sk_ino)
		return 0;

	return sk_diag_fill(sk, skb, req, pid, seq, flags, sk_ino);
}

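/*
 * Dump callback: walk the unix socket hash table, resuming from the
 * slot/offset saved in cb->args[] by the previous pass.
 */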
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct unix_diag_req *req;
	int num, s_num, slot, s_slot;

	req = NLMSG_DATA(cb->nlh);

	s_slot = cb->args[0];
	num = s_num = cb->args[1];

	spin_lock(&unix_table_lock);
	for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
		struct sock *sk;
		struct hlist_node *node;

		num = 0;
		sk_for_each(sk, node, &unix_socket_table[slot]) {
			if (num < s_num)
				goto next;
			if (!(req->udiag_states & (1 << sk->sk_state)))
				goto next;
			if (sk_diag_dump(sk, skb, req,
					 NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next:
			num++;
		}
	}
done:
	spin_unlock(&unix_table_lock);
	cb->args[0] = slot;
	cb->args[1] = num;

	return skb->len;
}

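/*
 * Linear search of the whole hash table by inode number; returns the
 * matching socket with a reference held, or NULL.
 */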
static struct sock *unix_lookup_by_ino(int ino)
{
	int i;
	struct sock *sk;

	spin_lock(&unix_table_lock);
	for (i = 0; i <= UNIX_HASH_SIZE; i++) {
		struct hlist_node *node;

		sk_for_each(sk, node, &unix_socket_table[i])
			if (ino == sock_i_ino(sk)) {
				sock_hold(sk);
				spin_unlock(&unix_table_lock);

				return sk;
			}
	}

	spin_unlock(&unix_table_lock);
	return NULL;
}

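/*
 * Answer a non-dump request for a single socket, retrying with a
 * larger reply skb (up to PAGE_SIZE) if the attributes do not fit.
 */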
static int unix_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       struct unix_diag_req *req)
{
	int err = -EINVAL;
	struct sock *sk;
	struct sk_buff *rep;
	unsigned int extra_len;

	if (req->udiag_ino == 0)
		goto out_nosk;

	sk = unix_lookup_by_ino(req->udiag_ino);
	err = -ENOENT;
	if (sk == NULL)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->udiag_cookie);
	if (err)
		goto out;

	extra_len = 256;
again:
	err = -ENOMEM;
	rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
			GFP_KERNEL);
	if (!rep)
		goto out;

	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
			   nlh->nlmsg_seq, 0, req->udiag_ino);
	if (err < 0) {
		kfree_skb(rep);
		extra_len += 256;
		if (extra_len >= PAGE_SIZE)
			goto out;

		goto again;
	}
	err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;
out:
	if (sk)
		sock_put(sk);
out_nosk:
	return err;
}

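/*
 * Entry point from the sock_diag core: start a dump for NLM_F_DUMP
 * requests, otherwise answer for the one socket asked about.
 */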
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct unix_diag_req);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = unix_diag_dump,
		};
		return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
	} else
		return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
}

static struct sock_diag_handler unix_diag_handler = {
	.family = AF_UNIX,
	.dump = unix_diag_handler_dump,
};

static int __init unix_diag_init(void)
{
	return sock_diag_register(&unix_diag_handler);
}

static void __exit unix_diag_exit(void)
{
	sock_diag_unregister(&unix_diag_handler);
}

module_init(unix_diag_init);
module_exit(unix_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 1 /* AF_LOCAL */);