1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <rdma/ib_user_verbs.h>
7#include <rdma/ib_verbs.h>
8#include <rdma/uverbs_types.h>
9#include <rdma/uverbs_ioctl.h>
10#include <rdma/mlx5_user_ioctl_cmds.h>
11#include <rdma/mlx5_user_ioctl_verbs.h>
12#include <rdma/ib_umem.h>
13#include <rdma/uverbs_std_types.h>
14#include <linux/mlx5/driver.h>
15#include <linux/mlx5/fs.h>
16#include "mlx5_ib.h"
17#include <linux/xarray.h>
18
19#define UVERBS_MODULE_NAME mlx5_ib
20#include <rdma/uverbs_named_ioctl.h>
21
22static void dispatch_event_fd(struct list_head *fd_list, const void *data);
23
24enum devx_obj_flags {
25 DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
26 DEVX_OBJ_FLAGS_DCT = 1 << 1,
27 DEVX_OBJ_FLAGS_CQ = 1 << 2,
28};
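
/*
 * Descriptive summary of the flags above (derived from the create and
 * cleanup paths later in this file):
 *  - DEVX_OBJ_FLAGS_INDIRECT_MKEY: a KLM/KSM mkey that was added to the
 *    ODP mkey table; it must be removed from that table (with SRCU
 *    synchronization) before the object is destroyed.
 *  - DEVX_OBJ_FLAGS_DCT: the object is torn down via mlx5_core_destroy_dct().
 *  - DEVX_OBJ_FLAGS_CQ: the CQ was created via mlx5_core_create_cq() so that
 *    its completion events can be dispatched to subscribers (devx_cq_comp()).
 */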
29
30struct devx_async_data {
31 struct mlx5_ib_dev *mdev;
32 struct list_head list;
33 struct ib_uobject *fd_uobj;
34 struct mlx5_async_work cb_work;
35 u16 cmd_out_len;
36 /* must be last field in this structure */
37 struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
38};
39
40struct devx_async_event_data {
41 struct list_head list; /* headed in ev_file->event_list */
42 struct mlx5_ib_uapi_devx_async_event_hdr hdr;
43};
44
45/* first level XA value data structure */
46struct devx_event {
47 struct xarray object_ids; /* second XA level, Key = object id */
48 struct list_head unaffiliated_list;
49};
50
51/* second level XA value data structure */
52struct devx_obj_event {
53 struct rcu_head rcu;
54 struct list_head obj_sub_list;
55};
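
/*
 * Event dispatch layout (a descriptive sketch, derived from the structures
 * above and the subscription code below):
 *
 *   devx_event_table.event_xa[event_num]  -> struct devx_event
 *       ->unaffiliated_list               -> devx_event_subscription entries
 *       ->object_ids[obj_id]              -> struct devx_obj_event
 *           ->obj_sub_list                -> devx_event_subscription entries
 *
 * Readers traverse these lists under RCU (see dispatch_event_fd() callers),
 * while updaters serialize on devx_event_table->event_xa_lock.
 */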
56
57struct devx_event_subscription {
58 struct list_head file_list; /* headed in ev_file->
59 * subscribed_events_list
60 */
61 struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
62 * devx_obj_event->obj_sub_list
63 */
64 struct list_head obj_list; /* headed in devx_object */
65 struct list_head event_list; /* headed in ev_file->event_list or in
66 * temp list via subscription
67 */
68
69 u8 is_cleaned:1;
70 u32 xa_key_level1;
71 u32 xa_key_level2;
72 struct rcu_head rcu;
73 u64 cookie;
74 struct devx_async_event_file *ev_file;
75 struct file *filp; /* Upon hot unplug we need direct access to the file */
76 struct eventfd_ctx *eventfd;
77};
78
79struct devx_async_event_file {
80 struct ib_uobject uobj;
81 /* Head of events that are subscribed to this FD */
82 struct list_head subscribed_events_list;
83 spinlock_t lock;
84 wait_queue_head_t poll_wait;
85 struct list_head event_list;
86 struct mlx5_ib_dev *dev;
87 u8 omit_data:1;
88 u8 is_overflow_err:1;
89 u8 is_destroyed:1;
90};
91
92#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
93struct devx_obj {
94 struct mlx5_ib_dev *ib_dev;
95 u64 obj_id;
96 u32 dinlen; /* destroy inbox length */
97 u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
98 u32 flags;
99 union {
100 struct mlx5_ib_devx_mr devx_mr;
101 struct mlx5_core_dct core_dct;
102 struct mlx5_core_cq core_cq;
103 };
104 struct list_head event_sub; /* holds devx_event_subscription entries */
105};
106
107struct devx_umem {
108 struct mlx5_core_dev *mdev;
109 struct ib_umem *umem;
110 u32 page_offset;
111 int page_shift;
112 int ncont;
113 u32 dinlen;
114 u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
115};
116
117struct devx_umem_reg_cmd {
118 void *in;
119 u32 inlen;
120 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
121};
122
123static struct mlx5_ib_ucontext *
124devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
125{
126 return to_mucontext(ib_uverbs_get_ucontext(attrs));
127}
128
129int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
130{
131 u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
132 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
133 void *uctx;
134 int err;
135 u16 uid;
136 u32 cap = 0;
137
138 /* 0 means not supported */
139 if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
140 return -EINVAL;
141
142 uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
143 if (is_user && capable(CAP_NET_RAW) &&
144 (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
145 cap |= MLX5_UCTX_CAP_RAW_TX;
146 if (is_user && capable(CAP_SYS_RAWIO) &&
147 (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
148 MLX5_UCTX_CAP_INTERNAL_DEV_RES))
149 cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
150
151 MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
152 MLX5_SET(uctx, uctx, cap, cap);
153
154 err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
155 if (err)
156 return err;
157
158 uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
159 return uid;
160}
161
162void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
163{
164 u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
165 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
166
167 MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
168 MLX5_SET(destroy_uctx_in, in, uid, uid);
169
170 mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
171}
172
173bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
174{
175 struct devx_obj *devx_obj = obj;
176 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
177
178 switch (opcode) {
179 case MLX5_CMD_OP_DESTROY_TIR:
180 *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
181 *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
182 obj_id);
183 return true;
184
185 case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
186 *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
187 *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
188 table_id);
189 return true;
190 default:
191 return false;
192 }
193}
194
195bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id)
196{
197 struct devx_obj *devx_obj = obj;
198 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
199
200 if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
201 *counter_id = MLX5_GET(dealloc_flow_counter_in,
202 devx_obj->dinbox,
203 flow_counter_id);
204 return true;
205 }
206
207 return false;
208}
209
210static bool is_legacy_unaffiliated_event_num(u16 event_num)
211{
212 switch (event_num) {
213 case MLX5_EVENT_TYPE_PORT_CHANGE:
214 return true;
215 default:
216 return false;
217 }
218}
219
220static bool is_legacy_obj_event_num(u16 event_num)
221{
222 switch (event_num) {
223 case MLX5_EVENT_TYPE_PATH_MIG:
224 case MLX5_EVENT_TYPE_COMM_EST:
225 case MLX5_EVENT_TYPE_SQ_DRAINED:
226 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
227 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
228 case MLX5_EVENT_TYPE_CQ_ERROR:
229 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
230 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
231 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
232 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
233 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
234 case MLX5_EVENT_TYPE_DCT_DRAINED:
235 case MLX5_EVENT_TYPE_COMP:
236 case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
237 case MLX5_EVENT_TYPE_XRQ_ERROR:
238 return true;
239 default:
240 return false;
241 }
242}
243
244static u16 get_legacy_obj_type(u16 opcode)
245{
246 switch (opcode) {
247 case MLX5_CMD_OP_CREATE_RQ:
248 return MLX5_EVENT_QUEUE_TYPE_RQ;
249 case MLX5_CMD_OP_CREATE_QP:
250 return MLX5_EVENT_QUEUE_TYPE_QP;
251 case MLX5_CMD_OP_CREATE_SQ:
252 return MLX5_EVENT_QUEUE_TYPE_SQ;
253 case MLX5_CMD_OP_CREATE_DCT:
254 return MLX5_EVENT_QUEUE_TYPE_DCT;
255 default:
256 return 0;
257 }
258}
259
260static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
261{
262 u16 opcode;
263
264 opcode = (obj->obj_id >> 32) & 0xffff;
265
266 if (is_legacy_obj_event_num(event_num))
267 return get_legacy_obj_type(opcode);
268
269 switch (opcode) {
270 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
271 return (obj->obj_id >> 48);
272 case MLX5_CMD_OP_CREATE_RQ:
273 return MLX5_OBJ_TYPE_RQ;
274 case MLX5_CMD_OP_CREATE_QP:
275 return MLX5_OBJ_TYPE_QP;
276 case MLX5_CMD_OP_CREATE_SQ:
277 return MLX5_OBJ_TYPE_SQ;
278 case MLX5_CMD_OP_CREATE_DCT:
279 return MLX5_OBJ_TYPE_DCT;
280 case MLX5_CMD_OP_CREATE_TIR:
281 return MLX5_OBJ_TYPE_TIR;
282 case MLX5_CMD_OP_CREATE_TIS:
283 return MLX5_OBJ_TYPE_TIS;
284 case MLX5_CMD_OP_CREATE_PSV:
285 return MLX5_OBJ_TYPE_PSV;
286 case MLX5_CMD_OP_CREATE_MKEY:
287 return MLX5_OBJ_TYPE_MKEY;
288 case MLX5_CMD_OP_CREATE_RMP:
289 return MLX5_OBJ_TYPE_RMP;
290 case MLX5_CMD_OP_CREATE_XRC_SRQ:
291 return MLX5_OBJ_TYPE_XRC_SRQ;
292 case MLX5_CMD_OP_CREATE_XRQ:
293 return MLX5_OBJ_TYPE_XRQ;
294 case MLX5_CMD_OP_CREATE_RQT:
295 return MLX5_OBJ_TYPE_RQT;
296 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
297 return MLX5_OBJ_TYPE_FLOW_COUNTER;
298 case MLX5_CMD_OP_CREATE_CQ:
299 return MLX5_OBJ_TYPE_CQ;
300 default:
301 return 0;
302 }
303}
304
305static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
306{
307 switch (event_type) {
308 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
309 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
310 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
311 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
312 case MLX5_EVENT_TYPE_PATH_MIG:
313 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
314 case MLX5_EVENT_TYPE_COMM_EST:
315 case MLX5_EVENT_TYPE_SQ_DRAINED:
316 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
317 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
318 return eqe->data.qp_srq.type;
319 case MLX5_EVENT_TYPE_CQ_ERROR:
320 case MLX5_EVENT_TYPE_XRQ_ERROR:
321 return 0;
322 case MLX5_EVENT_TYPE_DCT_DRAINED:
323 case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
324 return MLX5_EVENT_QUEUE_TYPE_DCT;
325 default:
326 return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
327 }
328}
329
330static u32 get_dec_obj_id(u64 obj_id)
331{
332 return (obj_id & 0xffffffff);
333}
334
335/*
336 * As the obj_id in the firmware is not globally unique, the object type
337 * must be considered when checking for a valid object id.
338 * For that, the opcode of the creator command is encoded as part of the obj_id.
339 */
340static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
341{
342 return ((u64)opcode << 32) | obj_id;
343}
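
/*
 * Illustrative layout of the encoded 64-bit id (a sketch derived from
 * get_enc_obj_id() above and get_dec_obj_type()/get_dec_obj_id()):
 *
 *   bits 63..48: object type (general objects only, folded into "opcode")
 *   bits 47..32: opcode of the creating command
 *   bits 31..0 : firmware object id (e.g. cqn, qpn, tirn)
 *
 * For example, a TIR is encoded as
 *   ((u64)MLX5_CMD_OP_CREATE_TIR << 32) | tirn
 */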
344
345static u64 devx_get_obj_id(const void *in)
346{
347 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
348 u64 obj_id;
349
350 switch (opcode) {
351 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
352 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
353 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
354 MLX5_GET(general_obj_in_cmd_hdr, in,
355 obj_type) << 16,
356 MLX5_GET(general_obj_in_cmd_hdr, in,
357 obj_id));
358 break;
359 case MLX5_CMD_OP_QUERY_MKEY:
360 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
361 MLX5_GET(query_mkey_in, in,
362 mkey_index));
363 break;
364 case MLX5_CMD_OP_QUERY_CQ:
365 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
366 MLX5_GET(query_cq_in, in, cqn));
367 break;
368 case MLX5_CMD_OP_MODIFY_CQ:
369 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
370 MLX5_GET(modify_cq_in, in, cqn));
371 break;
372 case MLX5_CMD_OP_QUERY_SQ:
373 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
374 MLX5_GET(query_sq_in, in, sqn));
375 break;
376 case MLX5_CMD_OP_MODIFY_SQ:
377 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
378 MLX5_GET(modify_sq_in, in, sqn));
379 break;
380 case MLX5_CMD_OP_QUERY_RQ:
381 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
382 MLX5_GET(query_rq_in, in, rqn));
383 break;
384 case MLX5_CMD_OP_MODIFY_RQ:
385 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
386 MLX5_GET(modify_rq_in, in, rqn));
387 break;
388 case MLX5_CMD_OP_QUERY_RMP:
389 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
390 MLX5_GET(query_rmp_in, in, rmpn));
391 break;
392 case MLX5_CMD_OP_MODIFY_RMP:
393 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
394 MLX5_GET(modify_rmp_in, in, rmpn));
395 break;
396 case MLX5_CMD_OP_QUERY_RQT:
397 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
398 MLX5_GET(query_rqt_in, in, rqtn));
399 break;
400 case MLX5_CMD_OP_MODIFY_RQT:
401 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
402 MLX5_GET(modify_rqt_in, in, rqtn));
403 break;
404 case MLX5_CMD_OP_QUERY_TIR:
405 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
406 MLX5_GET(query_tir_in, in, tirn));
407 break;
408 case MLX5_CMD_OP_MODIFY_TIR:
409 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
410 MLX5_GET(modify_tir_in, in, tirn));
411 break;
412 case MLX5_CMD_OP_QUERY_TIS:
413 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
414 MLX5_GET(query_tis_in, in, tisn));
415 break;
416 case MLX5_CMD_OP_MODIFY_TIS:
417 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
418 MLX5_GET(modify_tis_in, in, tisn));
419 break;
420 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
421 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
422 MLX5_GET(query_flow_table_in, in,
423 table_id));
424 break;
425 case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
426 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
427 MLX5_GET(modify_flow_table_in, in,
428 table_id));
429 break;
430 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
431 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
432 MLX5_GET(query_flow_group_in, in,
433 group_id));
434 break;
435 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
436 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
437 MLX5_GET(query_fte_in, in,
438 flow_index));
439 break;
440 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
441 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
442 MLX5_GET(set_fte_in, in, flow_index));
443 break;
444 case MLX5_CMD_OP_QUERY_Q_COUNTER:
445 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
446 MLX5_GET(query_q_counter_in, in,
447 counter_set_id));
448 break;
449 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
450 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
451 MLX5_GET(query_flow_counter_in, in,
452 flow_counter_id));
453 break;
454 case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
455 obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
456 MLX5_GET(general_obj_in_cmd_hdr, in,
457 obj_id));
458 break;
459 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
460 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
461 MLX5_GET(query_scheduling_element_in,
462 in, scheduling_element_id));
463 break;
464 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
465 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
466 MLX5_GET(modify_scheduling_element_in,
467 in, scheduling_element_id));
468 break;
469 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
470 obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
471 MLX5_GET(add_vxlan_udp_dport_in, in,
472 vxlan_udp_port));
473 break;
474 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
475 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
476 MLX5_GET(query_l2_table_entry_in, in,
477 table_index));
478 break;
479 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
480 obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
481 MLX5_GET(set_l2_table_entry_in, in,
482 table_index));
483 break;
484 case MLX5_CMD_OP_QUERY_QP:
485 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
486 MLX5_GET(query_qp_in, in, qpn));
487 break;
488 case MLX5_CMD_OP_RST2INIT_QP:
489 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
490 MLX5_GET(rst2init_qp_in, in, qpn));
491 break;
492 case MLX5_CMD_OP_INIT2INIT_QP:
493 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
494 MLX5_GET(init2init_qp_in, in, qpn));
495 break;
496 case MLX5_CMD_OP_INIT2RTR_QP:
497 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
498 MLX5_GET(init2rtr_qp_in, in, qpn));
499 break;
500 case MLX5_CMD_OP_RTR2RTS_QP:
501 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
502 MLX5_GET(rtr2rts_qp_in, in, qpn));
503 break;
504 case MLX5_CMD_OP_RTS2RTS_QP:
505 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
506 MLX5_GET(rts2rts_qp_in, in, qpn));
507 break;
508 case MLX5_CMD_OP_SQERR2RTS_QP:
509 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
510 MLX5_GET(sqerr2rts_qp_in, in, qpn));
511 break;
512 case MLX5_CMD_OP_2ERR_QP:
513 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
514 MLX5_GET(qp_2err_in, in, qpn));
515 break;
516 case MLX5_CMD_OP_2RST_QP:
517 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
518 MLX5_GET(qp_2rst_in, in, qpn));
519 break;
520 case MLX5_CMD_OP_QUERY_DCT:
521 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
522 MLX5_GET(query_dct_in, in, dctn));
523 break;
524 case MLX5_CMD_OP_QUERY_XRQ:
525 case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
526 case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
527 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
528 MLX5_GET(query_xrq_in, in, xrqn));
529 break;
530 case MLX5_CMD_OP_QUERY_XRC_SRQ:
531 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
532 MLX5_GET(query_xrc_srq_in, in,
533 xrc_srqn));
534 break;
535 case MLX5_CMD_OP_ARM_XRC_SRQ:
536 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
537 MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
538 break;
539 case MLX5_CMD_OP_QUERY_SRQ:
540 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
541 MLX5_GET(query_srq_in, in, srqn));
542 break;
543 case MLX5_CMD_OP_ARM_RQ:
544 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
545 MLX5_GET(arm_rq_in, in, srq_number));
546 break;
547 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
548 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
549 MLX5_GET(drain_dct_in, in, dctn));
550 break;
551 case MLX5_CMD_OP_ARM_XRQ:
552 case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
553 case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
554 case MLX5_CMD_OP_MODIFY_XRQ:
555 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
556 MLX5_GET(arm_xrq_in, in, xrqn));
557 break;
558 case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
559 obj_id = get_enc_obj_id
560 (MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
561 MLX5_GET(query_packet_reformat_context_in,
562 in, packet_reformat_id));
563 break;
564 default:
565 obj_id = 0;
566 }
567
568 return obj_id;
569}
570
571static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
572 struct ib_uobject *uobj, const void *in)
573{
574 struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
575 u64 obj_id = devx_get_obj_id(in);
576
577 if (!obj_id)
578 return false;
579
580 switch (uobj_get_object_id(uobj)) {
581 case UVERBS_OBJECT_CQ:
582 return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
583 to_mcq(uobj->object)->mcq.cqn) ==
584 obj_id;
585
586 case UVERBS_OBJECT_SRQ:
587 {
588 struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
589 u16 opcode;
590
591 switch (srq->common.res) {
592 case MLX5_RES_XSRQ:
593 opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
594 break;
595 case MLX5_RES_XRQ:
596 opcode = MLX5_CMD_OP_CREATE_XRQ;
597 break;
598 default:
599 if (!dev->mdev->issi)
600 opcode = MLX5_CMD_OP_CREATE_SRQ;
601 else
602 opcode = MLX5_CMD_OP_CREATE_RMP;
603 }
604
605 return get_enc_obj_id(opcode,
606 to_msrq(uobj->object)->msrq.srqn) ==
607 obj_id;
608 }
609
610 case UVERBS_OBJECT_QP:
611 {
612 struct mlx5_ib_qp *qp = to_mqp(uobj->object);
613 enum ib_qp_type qp_type = qp->ibqp.qp_type;
614
615 if (qp_type == IB_QPT_RAW_PACKET ||
616 (qp->flags & MLX5_IB_QP_UNDERLAY)) {
617 struct mlx5_ib_raw_packet_qp *raw_packet_qp =
618 &qp->raw_packet_qp;
619 struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
620 struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
621
622 return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
623 rq->base.mqp.qpn) == obj_id ||
624 get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
625 sq->base.mqp.qpn) == obj_id ||
626 get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
627 rq->tirn) == obj_id ||
628 get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
629 sq->tisn) == obj_id);
630 }
631
632 if (qp_type == MLX5_IB_QPT_DCT)
633 return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
634 qp->dct.mdct.mqp.qpn) == obj_id;
635
636 return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
637 qp->ibqp.qp_num) == obj_id;
638 }
639
640 case UVERBS_OBJECT_WQ:
641 return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
642 to_mrwq(uobj->object)->core_qp.qpn) ==
643 obj_id;
644
645 case UVERBS_OBJECT_RWQ_IND_TBL:
646 return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
647 to_mrwq_ind_table(uobj->object)->rqtn) ==
648 obj_id;
649
650 case MLX5_IB_OBJECT_DEVX_OBJ:
651 return ((struct devx_obj *)uobj->object)->obj_id == obj_id;
652
653 default:
654 return false;
655 }
656}
657
658static void devx_set_umem_valid(const void *in)
659{
660 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
661
662 switch (opcode) {
663 case MLX5_CMD_OP_CREATE_MKEY:
664 MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
665 break;
666 case MLX5_CMD_OP_CREATE_CQ:
667 {
668 void *cqc;
669
670 MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
671 cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
672 MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
673 break;
674 }
675 case MLX5_CMD_OP_CREATE_QP:
676 {
677 void *qpc;
678
679 qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
680 MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
681 MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
682 break;
683 }
684
685 case MLX5_CMD_OP_CREATE_RQ:
686 {
687 void *rqc, *wq;
688
689 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
690 wq = MLX5_ADDR_OF(rqc, rqc, wq);
691 MLX5_SET(wq, wq, dbr_umem_valid, 1);
692 MLX5_SET(wq, wq, wq_umem_valid, 1);
693 break;
694 }
695
696 case MLX5_CMD_OP_CREATE_SQ:
697 {
698 void *sqc, *wq;
699
700 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
701 wq = MLX5_ADDR_OF(sqc, sqc, wq);
702 MLX5_SET(wq, wq, dbr_umem_valid, 1);
703 MLX5_SET(wq, wq, wq_umem_valid, 1);
704 break;
705 }
706
707 case MLX5_CMD_OP_MODIFY_CQ:
708 MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
709 break;
710
711 case MLX5_CMD_OP_CREATE_RMP:
712 {
713 void *rmpc, *wq;
714
715 rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
716 wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
717 MLX5_SET(wq, wq, dbr_umem_valid, 1);
718 MLX5_SET(wq, wq, wq_umem_valid, 1);
719 break;
720 }
721
722 case MLX5_CMD_OP_CREATE_XRQ:
723 {
724 void *xrqc, *wq;
725
726 xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
727 wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
728 MLX5_SET(wq, wq, dbr_umem_valid, 1);
729 MLX5_SET(wq, wq, wq_umem_valid, 1);
730 break;
731 }
732
733 case MLX5_CMD_OP_CREATE_XRC_SRQ:
734 {
735 void *xrc_srqc;
736
737 MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
738 xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
739 xrc_srq_context_entry);
740 MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
741 break;
742 }
743
744 default:
745 return;
746 }
747}
748
749static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
750{
751 *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
752
753 switch (*opcode) {
754 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
755 case MLX5_CMD_OP_CREATE_MKEY:
756 case MLX5_CMD_OP_CREATE_CQ:
757 case MLX5_CMD_OP_ALLOC_PD:
758 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
759 case MLX5_CMD_OP_CREATE_RMP:
760 case MLX5_CMD_OP_CREATE_SQ:
761 case MLX5_CMD_OP_CREATE_RQ:
762 case MLX5_CMD_OP_CREATE_RQT:
763 case MLX5_CMD_OP_CREATE_TIR:
764 case MLX5_CMD_OP_CREATE_TIS:
765 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
766 case MLX5_CMD_OP_CREATE_FLOW_TABLE:
767 case MLX5_CMD_OP_CREATE_FLOW_GROUP:
768 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
769 case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
770 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
771 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
772 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
773 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
774 case MLX5_CMD_OP_CREATE_QP:
775 case MLX5_CMD_OP_CREATE_SRQ:
776 case MLX5_CMD_OP_CREATE_XRC_SRQ:
777 case MLX5_CMD_OP_CREATE_DCT:
778 case MLX5_CMD_OP_CREATE_XRQ:
779 case MLX5_CMD_OP_ATTACH_TO_MCG:
780 case MLX5_CMD_OP_ALLOC_XRCD:
781 return true;
782 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
783 {
784 u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
785 if (op_mod == 0)
786 return true;
787 return false;
788 }
789 case MLX5_CMD_OP_CREATE_PSV:
790 {
791 u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
792
793 if (num_psv == 1)
794 return true;
795 return false;
796 }
797 default:
798 return false;
799 }
800}
801
802static bool devx_is_obj_modify_cmd(const void *in)
803{
804 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
805
806 switch (opcode) {
807 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
808 case MLX5_CMD_OP_MODIFY_CQ:
809 case MLX5_CMD_OP_MODIFY_RMP:
810 case MLX5_CMD_OP_MODIFY_SQ:
811 case MLX5_CMD_OP_MODIFY_RQ:
812 case MLX5_CMD_OP_MODIFY_RQT:
813 case MLX5_CMD_OP_MODIFY_TIR:
814 case MLX5_CMD_OP_MODIFY_TIS:
815 case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
816 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
817 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
818 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
819 case MLX5_CMD_OP_RST2INIT_QP:
820 case MLX5_CMD_OP_INIT2RTR_QP:
821 case MLX5_CMD_OP_INIT2INIT_QP:
822 case MLX5_CMD_OP_RTR2RTS_QP:
823 case MLX5_CMD_OP_RTS2RTS_QP:
824 case MLX5_CMD_OP_SQERR2RTS_QP:
825 case MLX5_CMD_OP_2ERR_QP:
826 case MLX5_CMD_OP_2RST_QP:
827 case MLX5_CMD_OP_ARM_XRC_SRQ:
828 case MLX5_CMD_OP_ARM_RQ:
829 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
830 case MLX5_CMD_OP_ARM_XRQ:
831 case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
832 case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
833 case MLX5_CMD_OP_MODIFY_XRQ:
834 return true;
835 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
836 {
837 u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
838
839 if (op_mod == 1)
840 return true;
841 return false;
842 }
843 default:
844 return false;
845 }
846}
847
848static bool devx_is_obj_query_cmd(const void *in)
849{
850 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
851
852 switch (opcode) {
853 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
854 case MLX5_CMD_OP_QUERY_MKEY:
855 case MLX5_CMD_OP_QUERY_CQ:
856 case MLX5_CMD_OP_QUERY_RMP:
857 case MLX5_CMD_OP_QUERY_SQ:
858 case MLX5_CMD_OP_QUERY_RQ:
859 case MLX5_CMD_OP_QUERY_RQT:
860 case MLX5_CMD_OP_QUERY_TIR:
861 case MLX5_CMD_OP_QUERY_TIS:
862 case MLX5_CMD_OP_QUERY_Q_COUNTER:
863 case MLX5_CMD_OP_QUERY_FLOW_TABLE:
864 case MLX5_CMD_OP_QUERY_FLOW_GROUP:
865 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
866 case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
867 case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
868 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
869 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
870 case MLX5_CMD_OP_QUERY_QP:
871 case MLX5_CMD_OP_QUERY_SRQ:
872 case MLX5_CMD_OP_QUERY_XRC_SRQ:
873 case MLX5_CMD_OP_QUERY_DCT:
874 case MLX5_CMD_OP_QUERY_XRQ:
875 case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
876 case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
877 case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
878 return true;
879 default:
880 return false;
881 }
882}
883
884static bool devx_is_whitelist_cmd(void *in)
885{
886 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
887
888 switch (opcode) {
889 case MLX5_CMD_OP_QUERY_HCA_CAP:
890 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
891 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
892 return true;
893 default:
894 return false;
895 }
896}
897
898static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
899{
900 if (devx_is_whitelist_cmd(cmd_in)) {
901 struct mlx5_ib_dev *dev;
902
903 if (c->devx_uid)
904 return c->devx_uid;
905
906 dev = to_mdev(c->ibucontext.device);
907 if (dev->devx_whitelist_uid)
908 return dev->devx_whitelist_uid;
909
910 return -EOPNOTSUPP;
911 }
912
913 if (!c->devx_uid)
914 return -EINVAL;
915
916 return c->devx_uid;
917}
918
919static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
920{
921 u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
922
923 /* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
924 if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
925 MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
926 (opcode >= MLX5_CMD_OP_GENERAL_START &&
927 opcode < MLX5_CMD_OP_GENERAL_END))
928 return true;
929
930 switch (opcode) {
931 case MLX5_CMD_OP_QUERY_HCA_CAP:
932 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
933 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
934 case MLX5_CMD_OP_QUERY_VPORT_STATE:
935 case MLX5_CMD_OP_QUERY_ADAPTER:
936 case MLX5_CMD_OP_QUERY_ISSI:
937 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
938 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
939 case MLX5_CMD_OP_QUERY_VNIC_ENV:
940 case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
941 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
942 case MLX5_CMD_OP_NOP:
943 case MLX5_CMD_OP_QUERY_CONG_STATUS:
944 case MLX5_CMD_OP_QUERY_CONG_PARAMS:
945 case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
946 case MLX5_CMD_OP_QUERY_LAG:
947 return true;
948 default:
949 return false;
950 }
951}
952
953static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
954 struct uverbs_attr_bundle *attrs)
955{
956 struct mlx5_ib_ucontext *c;
957 struct mlx5_ib_dev *dev;
958 int user_vector;
959 int dev_eqn;
960 unsigned int irqn;
961 int err;
962
963 if (uverbs_copy_from(&user_vector, attrs,
964 MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
965 return -EFAULT;
966
967 c = devx_ufile2uctx(attrs);
968 if (IS_ERR(c))
969 return PTR_ERR(c);
970 dev = to_mdev(c->ibucontext.device);
971
972 err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
973 if (err < 0)
974 return err;
975
976 if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
977 &dev_eqn, sizeof(dev_eqn)))
978 return -EFAULT;
979
980 return 0;
981}
982
983/*
984 * Security note:
985 * The hardware protection mechanism works like this: each device object that
986 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
987 * the device specification manual) upon its creation. Then, upon doorbell,
988 * hardware fetches the object context for which the doorbell was rung, and
989 * validates that the UAR through which the DB was rung matches the UAR ID
990 * of the object.
991 * If there is no match, the doorbell is silently ignored by the hardware. Of
992 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
993 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
994 * mailboxes (except tagging them with UID), we expose its UAR ID to the
995 * user, so it can embed it in these objects in the expected specification
996 * format. So the only thing a user can do is hurt itself by creating a
997 * QP/SQ/CQ with a UAR ID other than its own; in that case other users
998 * may ring a doorbell on its objects.
999 * The consequence is that another user can schedule a QP/SQ
1000 * of the buggy user for execution (just insert it into the hardware schedule
1001 * queue or arm its CQ for event generation); no further harm is expected.
1002 */
1003static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
1004 struct uverbs_attr_bundle *attrs)
1005{
1006 struct mlx5_ib_ucontext *c;
1007 struct mlx5_ib_dev *dev;
1008 u32 user_idx;
1009 s32 dev_idx;
1010
1011 c = devx_ufile2uctx(attrs);
1012 if (IS_ERR(c))
1013 return PTR_ERR(c);
1014 dev = to_mdev(c->ibucontext.device);
1015
1016 if (uverbs_copy_from(&user_idx, attrs,
1017 MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
1018 return -EFAULT;
1019
1020 dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
1021 if (dev_idx < 0)
1022 return dev_idx;
1023
1024 if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
1025 &dev_idx, sizeof(dev_idx)))
1026 return -EFAULT;
1027
1028 return 0;
1029}
1030
1031static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
1032 struct uverbs_attr_bundle *attrs)
1033{
1034 struct mlx5_ib_ucontext *c;
1035 struct mlx5_ib_dev *dev;
1036 void *cmd_in = uverbs_attr_get_alloced_ptr(
1037 attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
1038 int cmd_out_len = uverbs_attr_get_len(attrs,
1039 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
1040 void *cmd_out;
1041 int err;
1042 int uid;
1043
1044 c = devx_ufile2uctx(attrs);
1045 if (IS_ERR(c))
1046 return PTR_ERR(c);
1047 dev = to_mdev(c->ibucontext.device);
1048
1049 uid = devx_get_uid(c, cmd_in);
1050 if (uid < 0)
1051 return uid;
1052
1053 /* Only a whitelist of general HCA commands is allowed for this method. */
1054 if (!devx_is_general_cmd(cmd_in, dev))
1055 return -EINVAL;
1056
1057 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1058 if (IS_ERR(cmd_out))
1059 return PTR_ERR(cmd_out);
1060
1061 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1062 err = mlx5_cmd_exec(dev->mdev, cmd_in,
1063 uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
1064 cmd_out, cmd_out_len);
1065 if (err)
1066 return err;
1067
1068 return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
1069 cmd_out_len);
1070}
1071
1072static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
1073 u32 *dinlen,
1074 u32 *obj_id)
1075{
1076 u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
1077 u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
1078
1079 *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
1080 *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
1081
1082 MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
1083 MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
1084
1085 switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
1086 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
1087 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
1088 MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
1089 break;
1090
1091 case MLX5_CMD_OP_CREATE_UMEM:
1092 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1093 MLX5_CMD_OP_DESTROY_UMEM);
1094 break;
1095 case MLX5_CMD_OP_CREATE_MKEY:
1096 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
1097 break;
1098 case MLX5_CMD_OP_CREATE_CQ:
1099 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
1100 break;
1101 case MLX5_CMD_OP_ALLOC_PD:
1102 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
1103 break;
1104 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
1105 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1106 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
1107 break;
1108 case MLX5_CMD_OP_CREATE_RMP:
1109 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
1110 break;
1111 case MLX5_CMD_OP_CREATE_SQ:
1112 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
1113 break;
1114 case MLX5_CMD_OP_CREATE_RQ:
1115 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
1116 break;
1117 case MLX5_CMD_OP_CREATE_RQT:
1118 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
1119 break;
1120 case MLX5_CMD_OP_CREATE_TIR:
1121 *obj_id = MLX5_GET(create_tir_out, out, tirn);
1122 MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
1123 MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
1124 break;
1125 case MLX5_CMD_OP_CREATE_TIS:
1126 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
1127 break;
1128 case MLX5_CMD_OP_ALLOC_Q_COUNTER:
1129 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1130 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
1131 break;
1132 case MLX5_CMD_OP_CREATE_FLOW_TABLE:
1133 *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
1134 *obj_id = MLX5_GET(create_flow_table_out, out, table_id);
1135 MLX5_SET(destroy_flow_table_in, din, other_vport,
1136 MLX5_GET(create_flow_table_in, in, other_vport));
1137 MLX5_SET(destroy_flow_table_in, din, vport_number,
1138 MLX5_GET(create_flow_table_in, in, vport_number));
1139 MLX5_SET(destroy_flow_table_in, din, table_type,
1140 MLX5_GET(create_flow_table_in, in, table_type));
1141 MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
1142 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1143 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
1144 break;
1145 case MLX5_CMD_OP_CREATE_FLOW_GROUP:
1146 *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
1147 *obj_id = MLX5_GET(create_flow_group_out, out, group_id);
1148 MLX5_SET(destroy_flow_group_in, din, other_vport,
1149 MLX5_GET(create_flow_group_in, in, other_vport));
1150 MLX5_SET(destroy_flow_group_in, din, vport_number,
1151 MLX5_GET(create_flow_group_in, in, vport_number));
1152 MLX5_SET(destroy_flow_group_in, din, table_type,
1153 MLX5_GET(create_flow_group_in, in, table_type));
1154 MLX5_SET(destroy_flow_group_in, din, table_id,
1155 MLX5_GET(create_flow_group_in, in, table_id));
1156 MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
1157 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1158 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
1159 break;
1160 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
1161 *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
1162 *obj_id = MLX5_GET(set_fte_in, in, flow_index);
1163 MLX5_SET(delete_fte_in, din, other_vport,
1164 MLX5_GET(set_fte_in, in, other_vport));
1165 MLX5_SET(delete_fte_in, din, vport_number,
1166 MLX5_GET(set_fte_in, in, vport_number));
1167 MLX5_SET(delete_fte_in, din, table_type,
1168 MLX5_GET(set_fte_in, in, table_type));
1169 MLX5_SET(delete_fte_in, din, table_id,
1170 MLX5_GET(set_fte_in, in, table_id));
1171 MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
1172 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1173 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
1174 break;
1175 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
1176 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1177 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
1178 break;
1179 case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
1180 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1181 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
1182 break;
1183 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
1184 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1185 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
1186 break;
1187 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
1188 *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
1189 *obj_id = MLX5_GET(create_scheduling_element_out, out,
1190 scheduling_element_id);
1191 MLX5_SET(destroy_scheduling_element_in, din,
1192 scheduling_hierarchy,
1193 MLX5_GET(create_scheduling_element_in, in,
1194 scheduling_hierarchy));
1195 MLX5_SET(destroy_scheduling_element_in, din,
1196 scheduling_element_id, *obj_id);
1197 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1198 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
1199 break;
1200 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
1201 *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
1202 *obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
1203 MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
1204 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1205 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
1206 break;
1207 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
1208 *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
1209 *obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
1210 MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
1211 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1212 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
1213 break;
1214 case MLX5_CMD_OP_CREATE_QP:
1215 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
1216 break;
1217 case MLX5_CMD_OP_CREATE_SRQ:
1218 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
1219 break;
1220 case MLX5_CMD_OP_CREATE_XRC_SRQ:
1221 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1222 MLX5_CMD_OP_DESTROY_XRC_SRQ);
1223 break;
1224 case MLX5_CMD_OP_CREATE_DCT:
1225 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
1226 break;
1227 case MLX5_CMD_OP_CREATE_XRQ:
1228 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
1229 break;
1230 case MLX5_CMD_OP_ATTACH_TO_MCG:
1231 *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
1232 MLX5_SET(detach_from_mcg_in, din, qpn,
1233 MLX5_GET(attach_to_mcg_in, in, qpn));
1234 memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
1235 MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
1236 MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
1237 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
1238 break;
1239 case MLX5_CMD_OP_ALLOC_XRCD:
1240 MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
1241 break;
1242 case MLX5_CMD_OP_CREATE_PSV:
1243 MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
1244 MLX5_CMD_OP_DESTROY_PSV);
1245 MLX5_SET(destroy_psv_in, din, psvn,
1246 MLX5_GET(create_psv_out, out, psv0_index));
1247 break;
1248 default:
1249 /* The entry must match one of the cases in devx_is_obj_create_cmd() */
1250 WARN_ON(true);
1251 break;
1252 }
1253}
1254
1255static int devx_handle_mkey_indirect(struct devx_obj *obj,
1256 struct mlx5_ib_dev *dev,
1257 void *in, void *out)
1258{
1259 struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
1260 struct mlx5_core_mkey *mkey;
1261 void *mkc;
1262 u8 key;
1263
1264 mkey = &devx_mr->mmkey;
1265 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1266 key = MLX5_GET(mkc, mkc, mkey_7_0);
1267 mkey->key = mlx5_idx_to_mkey(
1268 MLX5_GET(create_mkey_out, out, mkey_index)) | key;
1269 mkey->type = MLX5_MKEY_INDIRECT_DEVX;
1270 mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
1271 mkey->size = MLX5_GET64(mkc, mkc, len);
1272 mkey->pd = MLX5_GET(mkc, mkc, pd);
1273 devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
1274
1275 return xa_err(xa_store(&dev->mdev->priv.mkey_table,
1276 mlx5_base_mkey(mkey->key), mkey, GFP_KERNEL));
1277}
1278
1279static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
1280 struct devx_obj *obj,
1281 void *in, int in_len)
1282{
1283 int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
1284 MLX5_FLD_SZ_BYTES(create_mkey_in,
1285 memory_key_mkey_entry);
1286 void *mkc;
1287 u8 access_mode;
1288
1289 if (in_len < min_len)
1290 return -EINVAL;
1291
1292 mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
1293
1294 access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
1295 access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
1296
1297 if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
1298 access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
1299 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
1300 obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
1301 return 0;
1302 }
1303
1304 MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
1305 return 0;
1306}
1307
1308static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
1309 struct devx_event_subscription *sub)
1310{
1311 struct devx_event *event;
1312 struct devx_obj_event *xa_val_level2;
1313
1314 if (sub->is_cleaned)
1315 return;
1316
1317 sub->is_cleaned = 1;
1318 list_del_rcu(&sub->xa_list);
1319
1320 if (list_empty(&sub->obj_list))
1321 return;
1322
1323 list_del_rcu(&sub->obj_list);
1324 /* check whether key level 1 for this obj_sub_list is empty */
1325 event = xa_load(&dev->devx_event_table.event_xa,
1326 sub->xa_key_level1);
1327 WARN_ON(!event);
1328
1329 xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
1330 if (list_empty(&xa_val_level2->obj_sub_list)) {
1331 xa_erase(&event->object_ids,
1332 sub->xa_key_level2);
1333 kfree_rcu(xa_val_level2, rcu);
1334 }
1335}
1336
1337static int devx_obj_cleanup(struct ib_uobject *uobject,
1338 enum rdma_remove_reason why,
1339 struct uverbs_attr_bundle *attrs)
1340{
1341 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1342 struct mlx5_devx_event_table *devx_event_table;
1343 struct devx_obj *obj = uobject->object;
1344 struct devx_event_subscription *sub_entry, *tmp;
1345 struct mlx5_ib_dev *dev;
1346 int ret;
1347
1348 dev = mlx5_udata_to_mdev(&attrs->driver_udata);
1349 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
1350 /*
1351 * pagefault_single_data_segment() issues commands against
1352 * the mmkey; we must wait for it to stop before freeing the
1353 * mkey, as another allocation could get the same mkey number.
1354 */
1355 xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
1356 mlx5_base_mkey(obj->devx_mr.mmkey.key));
1357 synchronize_srcu(&dev->mr_srcu);
1358 }
1359
1360 if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1361 ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
1362 else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1363 ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1364 else
1365 ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
1366 obj->dinlen, out, sizeof(out));
1367 if (ib_is_destroy_retryable(ret, why, uobject))
1368 return ret;
1369
1370 devx_event_table = &dev->devx_event_table;
1371
1372 mutex_lock(&devx_event_table->event_xa_lock);
1373 list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
1374 devx_cleanup_subscription(dev, sub_entry);
1375 mutex_unlock(&devx_event_table->event_xa_lock);
1376
1377 kfree(obj);
1378 return ret;
1379}
1380
1381static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
1382{
1383 struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
1384 struct mlx5_devx_event_table *table;
1385 struct devx_event *event;
1386 struct devx_obj_event *obj_event;
1387 u32 obj_id = mcq->cqn;
1388
1389 table = &obj->ib_dev->devx_event_table;
1390 rcu_read_lock();
1391 event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
1392 if (!event)
1393 goto out;
1394
1395 obj_event = xa_load(&event->object_ids, obj_id);
1396 if (!obj_event)
1397 goto out;
1398
1399 dispatch_event_fd(&obj_event->obj_sub_list, eqe);
1400out:
1401 rcu_read_unlock();
1402}
1403
1404static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
1405 struct uverbs_attr_bundle *attrs)
1406{
1407 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1408 int cmd_out_len = uverbs_attr_get_len(attrs,
1409 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
1410 int cmd_in_len = uverbs_attr_get_len(attrs,
1411 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
1412 void *cmd_out;
1413 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1414 attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
1415 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1416 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1417 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1418 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
1419 struct devx_obj *obj;
1420 u16 obj_type = 0;
1421 int err;
1422 int uid;
1423 u32 obj_id;
1424 u16 opcode;
1425
1426 if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1427 return -EINVAL;
1428
1429 uid = devx_get_uid(c, cmd_in);
1430 if (uid < 0)
1431 return uid;
1432
1433 if (!devx_is_obj_create_cmd(cmd_in, &opcode))
1434 return -EINVAL;
1435
1436 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1437 if (IS_ERR(cmd_out))
1438 return PTR_ERR(cmd_out);
1439
1440 obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
1441 if (!obj)
1442 return -ENOMEM;
1443
1444 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1445 if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
1446 err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
1447 if (err)
1448 goto obj_free;
1449 } else {
1450 devx_set_umem_valid(cmd_in);
1451 }
1452
1453 if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1454 obj->flags |= DEVX_OBJ_FLAGS_DCT;
1455 err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
1456 cmd_in, cmd_in_len,
1457 cmd_out, cmd_out_len);
1458 } else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
1459 obj->flags |= DEVX_OBJ_FLAGS_CQ;
1460 obj->core_cq.comp = devx_cq_comp;
1461 err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
1462 cmd_in, cmd_in_len, cmd_out,
1463 cmd_out_len);
1464 } else {
1465 err = mlx5_cmd_exec(dev->mdev, cmd_in,
1466 cmd_in_len,
1467 cmd_out, cmd_out_len);
1468 }
1469
1470 if (err)
1471 goto obj_free;
1472
1473 uobj->object = obj;
1474 INIT_LIST_HEAD(&obj->event_sub);
1475 obj->ib_dev = dev;
1476 devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
1477 &obj_id);
1478 WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
1479
1480 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
1481 if (err)
1482 goto obj_destroy;
1483
1484 if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
1485 obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
1486 obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
1487
1488 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
1489 err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
1490 if (err)
1491 goto obj_destroy;
1492 }
1493 return 0;
1494
1495obj_destroy:
1496 if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1497 mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
1498 else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
1499 mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
1500 else
1501 mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
1502 sizeof(out));
1503obj_free:
1504 kfree(obj);
1505 return err;
1506}
1507
1508static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
1509 struct uverbs_attr_bundle *attrs)
1510{
1511 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
1512 int cmd_out_len = uverbs_attr_get_len(attrs,
1513 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
1514 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1515 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
1516 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1517 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1518 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1519 void *cmd_out;
1520 int err;
1521 int uid;
1522
1523 if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1524 return -EINVAL;
1525
1526 uid = devx_get_uid(c, cmd_in);
1527 if (uid < 0)
1528 return uid;
1529
1530 if (!devx_is_obj_modify_cmd(cmd_in))
1531 return -EINVAL;
1532
1533 if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1534 return -EINVAL;
1535
1536 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1537 if (IS_ERR(cmd_out))
1538 return PTR_ERR(cmd_out);
1539
1540 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1541 devx_set_umem_valid(cmd_in);
1542
1543 err = mlx5_cmd_exec(mdev->mdev, cmd_in,
1544 uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
1545 cmd_out, cmd_out_len);
1546 if (err)
1547 return err;
1548
1549 return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
1550 cmd_out, cmd_out_len);
1551}
1552
1553static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
1554 struct uverbs_attr_bundle *attrs)
1555{
1556 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
1557 int cmd_out_len = uverbs_attr_get_len(attrs,
1558 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
1559 struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
1560 MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
1561 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1562 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1563 void *cmd_out;
1564 int err;
1565 int uid;
1566 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1567
1568 if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1569 return -EINVAL;
1570
1571 uid = devx_get_uid(c, cmd_in);
1572 if (uid < 0)
1573 return uid;
1574
1575 if (!devx_is_obj_query_cmd(cmd_in))
1576 return -EINVAL;
1577
1578 if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1579 return -EINVAL;
1580
1581 cmd_out = uverbs_zalloc(attrs, cmd_out_len);
1582 if (IS_ERR(cmd_out))
1583 return PTR_ERR(cmd_out);
1584
1585 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1586 err = mlx5_cmd_exec(mdev->mdev, cmd_in,
1587 uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
1588 cmd_out, cmd_out_len);
1589 if (err)
1590 return err;
1591
1592 return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
1593 cmd_out, cmd_out_len);
1594}
1595
1596struct devx_async_event_queue {
1597 spinlock_t lock;
1598 wait_queue_head_t poll_wait;
1599 struct list_head event_list;
1600 atomic_t bytes_in_use;
1601 u8 is_destroyed:1;
1602};
1603
1604struct devx_async_cmd_event_file {
1605 struct ib_uobject uobj;
1606 struct devx_async_event_queue ev_queue;
1607 struct mlx5_async_ctx async_ctx;
1608};
1609
1610static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
1611{
1612 spin_lock_init(&ev_queue->lock);
1613 INIT_LIST_HEAD(&ev_queue->event_list);
1614 init_waitqueue_head(&ev_queue->poll_wait);
1615 atomic_set(&ev_queue->bytes_in_use, 0);
1616 ev_queue->is_destroyed = 0;
1617}
1618
1619static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
1620 struct uverbs_attr_bundle *attrs)
1621{
1622 struct devx_async_cmd_event_file *ev_file;
1623
1624 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1625 attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
1626 struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
1627
1628 ev_file = container_of(uobj, struct devx_async_cmd_event_file,
1629 uobj);
1630 devx_init_event_queue(&ev_file->ev_queue);
1631 mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
1632 return 0;
1633}
1634
1635static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
1636 struct uverbs_attr_bundle *attrs)
1637{
1638 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1639 attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
1640 struct devx_async_event_file *ev_file;
1641 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1642 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1643 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1644 u32 flags;
1645 int err;
1646
1647 err = uverbs_get_flags32(&flags, attrs,
1648 MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
1649 MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
1650
1651 if (err)
1652 return err;
1653
1654 ev_file = container_of(uobj, struct devx_async_event_file,
1655 uobj);
1656 spin_lock_init(&ev_file->lock);
1657 INIT_LIST_HEAD(&ev_file->event_list);
1658 init_waitqueue_head(&ev_file->poll_wait);
1659 if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
1660 ev_file->omit_data = 1;
1661 INIT_LIST_HEAD(&ev_file->subscribed_events_list);
1662 ev_file->dev = dev;
1663 get_device(&dev->ib_dev.dev);
1664 return 0;
1665}
1666
1667static void devx_query_callback(int status, struct mlx5_async_work *context)
1668{
1669 struct devx_async_data *async_data =
1670 container_of(context, struct devx_async_data, cb_work);
1671 struct ib_uobject *fd_uobj = async_data->fd_uobj;
1672 struct devx_async_cmd_event_file *ev_file;
1673 struct devx_async_event_queue *ev_queue;
1674 unsigned long flags;
1675
1676 ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1677 uobj);
1678 ev_queue = &ev_file->ev_queue;
1679
1680 spin_lock_irqsave(&ev_queue->lock, flags);
1681 list_add_tail(&async_data->list, &ev_queue->event_list);
1682 spin_unlock_irqrestore(&ev_queue->lock, flags);
1683
1684 wake_up_interruptible(&ev_queue->poll_wait);
1685 fput(fd_uobj->object);
1686}
1687
1688#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
1689
1690static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
1691 struct uverbs_attr_bundle *attrs)
1692{
1693 void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
1694 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
1695 struct ib_uobject *uobj = uverbs_attr_get_uobject(
1696 attrs,
1697 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
1698 u16 cmd_out_len;
1699 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1700 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1701 struct ib_uobject *fd_uobj;
1702 int err;
1703 int uid;
1704 struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
1705 struct devx_async_cmd_event_file *ev_file;
1706 struct devx_async_data *async_data;
1707
1708 if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
1709 return -EINVAL;
1710
1711 uid = devx_get_uid(c, cmd_in);
1712 if (uid < 0)
1713 return uid;
1714
1715 if (!devx_is_obj_query_cmd(cmd_in))
1716 return -EINVAL;
1717
1718 err = uverbs_get_const(&cmd_out_len, attrs,
1719 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
1720 if (err)
1721 return err;
1722
1723 if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
1724 return -EINVAL;
1725
1726 fd_uobj = uverbs_attr_get_uobject(attrs,
1727 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
1728 if (IS_ERR(fd_uobj))
1729 return PTR_ERR(fd_uobj);
1730
1731 ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
1732 uobj);
1733
1734 if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
1735 MAX_ASYNC_BYTES_IN_USE) {
1736 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1737 return -EAGAIN;
1738 }
1739
1740 async_data = kvzalloc(struct_size(async_data, hdr.out_data,
1741 cmd_out_len), GFP_KERNEL);
1742 if (!async_data) {
1743 err = -ENOMEM;
1744 goto sub_bytes;
1745 }
1746
1747 err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
1748 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
1749 if (err)
1750 goto free_async;
1751
1752 async_data->cmd_out_len = cmd_out_len;
1753 async_data->mdev = mdev;
1754 async_data->fd_uobj = fd_uobj;
1755
1756 get_file(fd_uobj->object);
1757 MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
1758 err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
1759 uverbs_attr_get_len(attrs,
1760 MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
1761 async_data->hdr.out_data,
1762 async_data->cmd_out_len,
1763 devx_query_callback, &async_data->cb_work);
1764
1765 if (err)
1766 goto cb_err;
1767
1768 return 0;
1769
1770cb_err:
1771 fput(fd_uobj->object);
1772free_async:
1773 kvfree(async_data);
1774sub_bytes:
1775 atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
1776 return err;
1777}
1778
1779static void
1780subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
1781 u32 key_level1,
1782 bool is_level2,
1783 u32 key_level2)
1784{
1785 struct devx_event *event;
1786 struct devx_obj_event *xa_val_level2;
1787
1788 /* Level 1 is valid for future use, no need to free */
1789 if (!is_level2)
1790 return;
1791
1792 event = xa_load(&devx_event_table->event_xa, key_level1);
1793 WARN_ON(!event);
1794
1795 xa_val_level2 = xa_load(&event->object_ids,
1796 key_level2);
1797 if (list_empty(&xa_val_level2->obj_sub_list)) {
1798 xa_erase(&event->object_ids,
1799 key_level2);
1800 kfree_rcu(xa_val_level2, rcu);
1801 }
1802}
1803
1804static int
1805subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
1806 u32 key_level1,
1807 bool is_level2,
1808 u32 key_level2)
1809{
1810 struct devx_obj_event *obj_event;
1811 struct devx_event *event;
1812 int err;
1813
1814 event = xa_load(&devx_event_table->event_xa, key_level1);
1815 if (!event) {
1816 event = kzalloc(sizeof(*event), GFP_KERNEL);
1817 if (!event)
1818 return -ENOMEM;
1819
1820 INIT_LIST_HEAD(&event->unaffiliated_list);
1821 xa_init(&event->object_ids);
1822
1823 err = xa_insert(&devx_event_table->event_xa,
1824 key_level1,
1825 event,
1826 GFP_KERNEL);
1827 if (err) {
1828 kfree(event);
1829 return err;
1830 }
1831 }
1832
1833 if (!is_level2)
1834 return 0;
1835
1836 obj_event = xa_load(&event->object_ids, key_level2);
1837 if (!obj_event) {
1838 obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
1839 if (!obj_event)
1840 /* Level 1 is valid for future use, no need to free */
1841 return -ENOMEM;
1842
1843 err = xa_insert(&event->object_ids,
1844 key_level2,
1845 obj_event,
1846 GFP_KERNEL);
1847 if (err) {
1848 kfree(obj_event);
1849 return err;
1850 }
1851 INIT_LIST_HEAD(&obj_event->obj_sub_list);
1852 }
1853
1854 return 0;
1855}
1856
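/*
 * For devices without the user event capability, validate the requested
 * event numbers against the hard-coded legacy lists: affiliated events when
 * an object is supplied, unaffiliated events otherwise.
 */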
1857static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
1858 struct devx_obj *obj)
1859{
1860 int i;
1861
1862 for (i = 0; i < num_events; i++) {
1863 if (obj) {
1864 if (!is_legacy_obj_event_num(event_type_num_list[i]))
1865 return false;
1866 } else if (!is_legacy_unaffiliated_event_num(
1867 event_type_num_list[i])) {
1868 return false;
1869 }
1870 }
1871
1872 return true;
1873}
1874
1875#define MAX_SUPP_EVENT_NUM 255
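/*
 * Validate the requested event numbers against the bitmasks of user
 * affiliated/unaffiliated events reported by the device (event_cap), or
 * fall back to the legacy check when the capability is absent.  Event 0
 * (CQ completion) is always accepted for affiliated subscriptions.
 */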
1876static bool is_valid_events(struct mlx5_core_dev *dev,
1877 int num_events, u16 *event_type_num_list,
1878 struct devx_obj *obj)
1879{
1880 __be64 *aff_events;
1881 __be64 *unaff_events;
1882 int mask_entry;
1883 int mask_bit;
1884 int i;
1885
1886 if (MLX5_CAP_GEN(dev, event_cap)) {
1887 aff_events = MLX5_CAP_DEV_EVENT(dev,
1888 user_affiliated_events);
1889 unaff_events = MLX5_CAP_DEV_EVENT(dev,
1890 user_unaffiliated_events);
1891 } else {
1892 return is_valid_events_legacy(num_events, event_type_num_list,
1893 obj);
1894 }
1895
1896 for (i = 0; i < num_events; i++) {
1897 if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
1898 return false;
1899
1900 mask_entry = event_type_num_list[i] / 64;
1901 mask_bit = event_type_num_list[i] % 64;
1902
1903 if (obj) {
1904 /* CQ completion */
1905 if (event_type_num_list[i] == 0)
1906 continue;
1907
1908 if (!(be64_to_cpu(aff_events[mask_entry]) &
1909 (1ull << mask_bit)))
1910 return false;
1911
1912 continue;
1913 }
1914
1915 if (!(be64_to_cpu(unaff_events[mask_entry]) &
1916 (1ull << mask_bit)))
1917 return false;
1918 }
1919
1920 return true;
1921}
1922
1923#define MAX_NUM_EVENTS 16
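/*
 * Subscribe an async event FD to up to MAX_NUM_EVENTS event types, either
 * affiliated to a devx object or unaffiliated.  The flow is two-phased:
 * first all XA entries and subscription objects are allocated on a local
 * list under event_xa_lock, then, once nothing can fail, the subscriptions
 * are linked into the FD, XA and object lists.  A redirect eventfd (FD_NUM)
 * and a cookie are mutually exclusive.
 */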
1924static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
1925 struct uverbs_attr_bundle *attrs)
1926{
1927 struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
1928 attrs,
1929 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
1930 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1931 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1932 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1933 struct ib_uobject *fd_uobj;
1934 struct devx_obj *obj = NULL;
1935 struct devx_async_event_file *ev_file;
1936 struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
1937 u16 *event_type_num_list;
1938 struct devx_event_subscription *event_sub, *tmp_sub;
1939 struct list_head sub_list;
1940 int redirect_fd;
1941 bool use_eventfd = false;
1942 int num_events;
1943 int num_alloc_xa_entries = 0;
1944 u16 obj_type = 0;
1945 u64 cookie = 0;
1946 u32 obj_id = 0;
1947 int err;
1948 int i;
1949
1950 if (!c->devx_uid)
1951 return -EINVAL;
1952
1953 if (!IS_ERR(devx_uobj)) {
1954 obj = (struct devx_obj *)devx_uobj->object;
1955 if (obj)
1956 obj_id = get_dec_obj_id(obj->obj_id);
1957 }
1958
1959 fd_uobj = uverbs_attr_get_uobject(attrs,
1960 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
1961 if (IS_ERR(fd_uobj))
1962 return PTR_ERR(fd_uobj);
1963
1964 ev_file = container_of(fd_uobj, struct devx_async_event_file,
1965 uobj);
1966
1967 if (uverbs_attr_is_valid(attrs,
1968 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
1969 err = uverbs_copy_from(&redirect_fd, attrs,
1970 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
1971 if (err)
1972 return err;
1973
1974 use_eventfd = true;
1975 }
1976
1977 if (uverbs_attr_is_valid(attrs,
1978 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
1979 if (use_eventfd)
1980 return -EINVAL;
1981
1982 err = uverbs_copy_from(&cookie, attrs,
1983 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
1984 if (err)
1985 return err;
1986 }
1987
1988 num_events = uverbs_attr_ptr_get_array_size(
1989 attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
1990 sizeof(u16));
1991
1992 if (num_events < 0)
1993 return num_events;
1994
1995 if (num_events > MAX_NUM_EVENTS)
1996 return -EINVAL;
1997
1998 event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
1999 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
2000
2001 if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
2002 return -EINVAL;
2003
2004 INIT_LIST_HEAD(&sub_list);
2005
2006 /* Serialize against concurrent subscriptions to the same XA entries so
2007 * that all of them can succeed.
2008 */
2009 mutex_lock(&devx_event_table->event_xa_lock);
2010 for (i = 0; i < num_events; i++) {
2011 u32 key_level1;
2012
2013 if (obj)
2014 obj_type = get_dec_obj_type(obj,
2015 event_type_num_list[i]);
2016 key_level1 = event_type_num_list[i] | obj_type << 16;
2017
2018 err = subscribe_event_xa_alloc(devx_event_table,
2019 key_level1,
2020 obj,
2021 obj_id);
2022 if (err)
2023 goto err;
2024
2025 num_alloc_xa_entries++;
2026 event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
2027 if (!event_sub) {
2028 err = -ENOMEM;
2029 goto err;
2030 }
2031
2032 list_add_tail(&event_sub->event_list, &sub_list);
2033 if (use_eventfd) {
2034 event_sub->eventfd =
2035 eventfd_ctx_fdget(redirect_fd);
2036
2037 if (IS_ERR(event_sub->eventfd)) {
2038 err = PTR_ERR(event_sub->eventfd);
2039 event_sub->eventfd = NULL;
2040 goto err;
2041 }
2042 }
2043
2044 event_sub->cookie = cookie;
2045 event_sub->ev_file = ev_file;
2046 event_sub->filp = fd_uobj->object;
2047 /* May be needed when cleaning up the devx object/subscription */
2048 event_sub->xa_key_level1 = key_level1;
2049 event_sub->xa_key_level2 = obj_id;
2050 INIT_LIST_HEAD(&event_sub->obj_list);
2051 }
2052
2053 /* Once all the allocations and the XA data insertions are done, we can
2054 * go ahead and add all the subscriptions to the relevant lists without
2055 * risk of failure.
2056 */
2057 list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2058 struct devx_event *event;
2059 struct devx_obj_event *obj_event;
2060
2061 list_del_init(&event_sub->event_list);
2062
2063 spin_lock_irq(&ev_file->lock);
2064 list_add_tail_rcu(&event_sub->file_list,
2065 &ev_file->subscribed_events_list);
2066 spin_unlock_irq(&ev_file->lock);
2067
2068 event = xa_load(&devx_event_table->event_xa,
2069 event_sub->xa_key_level1);
2070 WARN_ON(!event);
2071
2072 if (!obj) {
2073 list_add_tail_rcu(&event_sub->xa_list,
2074 &event->unaffiliated_list);
2075 continue;
2076 }
2077
2078 obj_event = xa_load(&event->object_ids, obj_id);
2079 WARN_ON(!obj_event);
2080 list_add_tail_rcu(&event_sub->xa_list,
2081 &obj_event->obj_sub_list);
2082 list_add_tail_rcu(&event_sub->obj_list,
2083 &obj->event_sub);
2084 }
2085
2086 mutex_unlock(&devx_event_table->event_xa_lock);
2087 return 0;
2088
2089err:
2090 list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2091 list_del(&event_sub->event_list);
2092
2093 subscribe_event_xa_dealloc(devx_event_table,
2094 event_sub->xa_key_level1,
2095 obj,
2096 obj_id);
2097
2098 if (event_sub->eventfd)
2099 eventfd_ctx_put(event_sub->eventfd);
2100
2101 kfree(event_sub);
2102 }
2103
2104 mutex_unlock(&devx_event_table->event_xa_lock);
2105 return err;
2106}
2107
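/*
 * Pin the user memory described by the ADDR/LEN/ACCESS attributes and derive
 * the page layout (page_shift, ncont, page_offset) that is later used to
 * build the CREATE_UMEM command.
 */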
2108static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
2109 struct uverbs_attr_bundle *attrs,
2110 struct devx_umem *obj)
2111{
2112 u64 addr;
2113 size_t size;
2114 u32 access;
2115 int npages;
2116 int err;
2117 u32 page_mask;
2118
2119 if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
2120 uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
2121 return -EFAULT;
2122
2123 err = uverbs_get_flags32(&access, attrs,
2124 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2125 IB_ACCESS_LOCAL_WRITE |
2126 IB_ACCESS_REMOTE_WRITE |
2127 IB_ACCESS_REMOTE_READ);
2128 if (err)
2129 return err;
2130
2131 err = ib_check_mr_access(access);
2132 if (err)
2133 return err;
2134
2135 obj->umem = ib_umem_get(&attrs->driver_udata, addr, size, access, 0);
2136 if (IS_ERR(obj->umem))
2137 return PTR_ERR(obj->umem);
2138
2139 mlx5_ib_cont_pages(obj->umem, obj->umem->address,
2140 MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
2141 &obj->page_shift, &obj->ncont, NULL);
2142
2143 if (!npages) {
2144 ib_umem_release(obj->umem);
2145 return -EINVAL;
2146 }
2147
2148 page_mask = (1 << obj->page_shift) - 1;
2149 obj->page_offset = obj->umem->address & page_mask;
2150
2151 return 0;
2152}
2153
2154static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
2155 struct devx_umem *obj,
2156 struct devx_umem_reg_cmd *cmd)
2157{
2158 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2159 (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
2160 cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2161 return PTR_ERR_OR_ZERO(cmd->in);
2162}
2163
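/*
 * Fill the CREATE_UMEM command: opcode, number of MTT entries, log page size
 * relative to MLX5_ADAPTER_PAGE_SHIFT, page offset, and the MTT array
 * populated with read (and, if the umem is writable, write) permissions.
 */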
2164static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
2165 struct devx_umem *obj,
2166 struct devx_umem_reg_cmd *cmd)
2167{
2168 void *umem;
2169 __be64 *mtt;
2170
2171 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2172 mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2173
2174 MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2175 MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
2176 MLX5_SET(umem, umem, log_page_size, obj->page_shift -
2177 MLX5_ADAPTER_PAGE_SHIFT);
2178 MLX5_SET(umem, umem, page_offset, obj->page_offset);
2179 mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
2180 (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
2181 MLX5_IB_MTT_READ);
2182}
2183
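/*
 * Register a user memory region as a device UMEM object: pin the pages,
 * build and execute CREATE_UMEM on behalf of the DEVX uid, record the
 * matching destroy command in the uobject and return the UMEM id to user
 * space.
 */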
2184static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
2185 struct uverbs_attr_bundle *attrs)
2186{
2187 struct devx_umem_reg_cmd cmd;
2188 struct devx_umem *obj;
2189 struct ib_uobject *uobj = uverbs_attr_get_uobject(
2190 attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2191 u32 obj_id;
2192 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2193 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2194 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
2195 int err;
2196
2197 if (!c->devx_uid)
2198 return -EINVAL;
2199
2200 obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
2201 if (!obj)
2202 return -ENOMEM;
2203
2204 err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
2205 if (err)
2206 goto err_obj_free;
2207
2208 err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
2209 if (err)
2210 goto err_umem_release;
2211
2212 devx_umem_reg_cmd_build(dev, obj, &cmd);
2213
2214 MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
2215 err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
2216 sizeof(cmd.out));
2217 if (err)
2218 goto err_umem_release;
2219
2220 obj->mdev = dev->mdev;
2221 uobj->object = obj;
2222 devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
2223 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
2224 if (err)
2225 goto err_umem_destroy;
2226
2227 return 0;
2228
2229err_umem_destroy:
2230 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
2231err_umem_release:
2232 ib_umem_release(obj->umem);
2233err_obj_free:
2234 kfree(obj);
2235 return err;
2236}
2237
2238static int devx_umem_cleanup(struct ib_uobject *uobject,
2239 enum rdma_remove_reason why,
2240 struct uverbs_attr_bundle *attrs)
2241{
2242 struct devx_umem *obj = uobject->object;
2243 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
2244 int err;
2245
2246 err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
2247 if (ib_is_destroy_retryable(err, why, uobject))
2248 return err;
2249
2250 ib_umem_release(obj->umem);
2251 kfree(obj);
2252 return 0;
2253}
2254
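/*
 * An event is unaffiliated if it is not tied to a specific object.  Devices
 * with event_cap report this through the user_unaffiliated_events bitmask;
 * otherwise the legacy classification is used.
 */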
2255static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
2256 unsigned long event_type)
2257{
2258 __be64 *unaff_events;
2259 int mask_entry;
2260 int mask_bit;
2261
2262 if (!MLX5_CAP_GEN(dev, event_cap))
2263 return is_legacy_unaffiliated_event_num(event_type);
2264
2265 unaff_events = MLX5_CAP_DEV_EVENT(dev,
2266 user_unaffiliated_events);
2267 WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
2268
2269 mask_entry = event_type / 64;
2270 mask_bit = event_type % 64;
2271
2272 if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
2273 return false;
2274
2275 return true;
2276}
2277
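/*
 * Extract the object number carried by the EQE: QP/SRQ, XRQ, DCT or CQ
 * number depending on the event type, or the generic obj_id field of the
 * affiliated event header for all other events.
 */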
2278static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
2279{
2280 struct mlx5_eqe *eqe = data;
2281 u32 obj_id = 0;
2282
2283 switch (event_type) {
2284 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
2285 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
2286 case MLX5_EVENT_TYPE_PATH_MIG:
2287 case MLX5_EVENT_TYPE_COMM_EST:
2288 case MLX5_EVENT_TYPE_SQ_DRAINED:
2289 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
2290 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
2291 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
2292 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2293 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
2294 obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
2295 break;
2296 case MLX5_EVENT_TYPE_XRQ_ERROR:
2297 obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
2298 break;
2299 case MLX5_EVENT_TYPE_DCT_DRAINED:
2300 case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
2301 obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
2302 break;
2303 case MLX5_EVENT_TYPE_CQ_ERROR:
2304 obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2305 break;
2306 default:
2307 obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
2308 break;
2309 }
2310
2311 return obj_id;
2312}
2313
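/*
 * Queue an event on the subscriber's FD.  In omit_data mode only the
 * subscription itself (i.e. its cookie) is queued, and at most once at a
 * time; otherwise a copy of the EQE is allocated with GFP_ATOMIC and, on
 * allocation failure, the FD is marked with an overflow error instead.
 */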
2314static int deliver_event(struct devx_event_subscription *event_sub,
2315 const void *data)
2316{
2317 struct devx_async_event_file *ev_file;
2318 struct devx_async_event_data *event_data;
2319 unsigned long flags;
2320
2321 ev_file = event_sub->ev_file;
2322
2323 if (ev_file->omit_data) {
2324 spin_lock_irqsave(&ev_file->lock, flags);
2325 if (!list_empty(&event_sub->event_list)) {
2326 spin_unlock_irqrestore(&ev_file->lock, flags);
2327 return 0;
2328 }
2329
2330 list_add_tail(&event_sub->event_list, &ev_file->event_list);
2331 spin_unlock_irqrestore(&ev_file->lock, flags);
2332 wake_up_interruptible(&ev_file->poll_wait);
2333 return 0;
2334 }
2335
2336 event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
2337 GFP_ATOMIC);
2338 if (!event_data) {
2339 spin_lock_irqsave(&ev_file->lock, flags);
2340 ev_file->is_overflow_err = 1;
2341 spin_unlock_irqrestore(&ev_file->lock, flags);
2342 return -ENOMEM;
2343 }
2344
2345 event_data->hdr.cookie = event_sub->cookie;
2346 memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
2347
2348 spin_lock_irqsave(&ev_file->lock, flags);
2349 list_add_tail(&event_data->list, &ev_file->event_list);
2350 spin_unlock_irqrestore(&ev_file->lock, flags);
2351 wake_up_interruptible(&ev_file->poll_wait);
2352
2353 return 0;
2354}
2355
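/*
 * Walk an RCU-protected list of subscriptions and deliver the event to each
 * of them, holding a reference on the event file for the duration of the
 * delivery.  Subscriptions that registered an eventfd are only signalled.
 */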
2356static void dispatch_event_fd(struct list_head *fd_list,
2357 const void *data)
2358{
2359 struct devx_event_subscription *item;
2360
2361 list_for_each_entry_rcu(item, fd_list, xa_list) {
2362 if (!get_file_rcu(item->filp))
2363 continue;
2364
2365 if (item->eventfd) {
2366 eventfd_signal(item->eventfd, 1);
2367 fput(item->filp);
2368 continue;
2369 }
2370
2371 deliver_event(item, data);
2372 fput(item->filp);
2373 }
2374}
2375
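/*
 * Notifier invoked for device async events.  Subscribers are looked up in
 * the event XA by (event type, object type) and, for affiliated events, by
 * the object id extracted from the EQE.  This path may run in atomic
 * context, hence the RCU read lock here and the GFP_ATOMIC allocation in
 * deliver_event().
 */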
2376static int devx_event_notifier(struct notifier_block *nb,
2377 unsigned long event_type, void *data)
2378{
2379 struct mlx5_devx_event_table *table;
2380 struct mlx5_ib_dev *dev;
2381 struct devx_event *event;
2382 struct devx_obj_event *obj_event;
2383 u16 obj_type = 0;
2384 bool is_unaffiliated;
2385 u32 obj_id;
2386
2387 /* Explicitly filter out kernel events that may occur frequently */
2388 if (event_type == MLX5_EVENT_TYPE_CMD ||
2389 event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
2390 return NOTIFY_OK;
2391
2392 table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
2393 dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
2394 is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
2395
2396 if (!is_unaffiliated)
2397 obj_type = get_event_obj_type(event_type, data);
2398
2399 rcu_read_lock();
2400 event = xa_load(&table->event_xa, event_type | (obj_type << 16));
2401 if (!event) {
2402 rcu_read_unlock();
2403 return NOTIFY_DONE;
2404 }
2405
2406 if (is_unaffiliated) {
2407 dispatch_event_fd(&event->unaffiliated_list, data);
2408 rcu_read_unlock();
2409 return NOTIFY_OK;
2410 }
2411
2412 obj_id = devx_get_obj_id_from_event(event_type, data);
2413 obj_event = xa_load(&event->object_ids, obj_id);
2414 if (!obj_event) {
2415 rcu_read_unlock();
2416 return NOTIFY_DONE;
2417 }
2418
2419 dispatch_event_fd(&obj_event->obj_sub_list, data);
2420
2421 rcu_read_unlock();
2422 return NOTIFY_OK;
2423}
2424
2425void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
2426{
2427 struct mlx5_devx_event_table *table = &dev->devx_event_table;
2428
2429 xa_init(&table->event_xa);
2430 mutex_init(&table->event_xa_lock);
2431 MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
2432 mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
2433}
2434
2435void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
2436{
2437 struct mlx5_devx_event_table *table = &dev->devx_event_table;
2438 struct devx_event_subscription *sub, *tmp;
2439 struct devx_event *event;
2440 void *entry;
2441 unsigned long id;
2442
2443 mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
2444 mutex_lock(&dev->devx_event_table.event_xa_lock);
2445 xa_for_each(&table->event_xa, id, entry) {
2446 event = entry;
2447 list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
2448 xa_list)
2449 devx_cleanup_subscription(dev, sub);
2450 kfree(entry);
2451 }
2452 mutex_unlock(&dev->devx_event_table.event_xa_lock);
2453 xa_destroy(&table->event_xa);
2454}
2455
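/*
 * read() on the async command FD returns one completed command per call:
 * the async header followed by cmd_out_len bytes of command output.  The
 * caller's buffer must be large enough for the whole event, otherwise
 * -ENOSPC is returned.
 */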
2456static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
2457 size_t count, loff_t *pos)
2458{
2459 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2460 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2461 struct devx_async_data *event;
2462 int ret = 0;
2463 size_t eventsz;
2464
2465 spin_lock_irq(&ev_queue->lock);
2466
2467 while (list_empty(&ev_queue->event_list)) {
2468 spin_unlock_irq(&ev_queue->lock);
2469
2470 if (filp->f_flags & O_NONBLOCK)
2471 return -EAGAIN;
2472
2473 if (wait_event_interruptible(
2474 ev_queue->poll_wait,
2475 (!list_empty(&ev_queue->event_list) ||
2476 ev_queue->is_destroyed))) {
2477 return -ERESTARTSYS;
2478 }
2479
2480 if (list_empty(&ev_queue->event_list) &&
2481 ev_queue->is_destroyed)
2482 return -EIO;
2483
2484 spin_lock_irq(&ev_queue->lock);
2485 }
2486
2487 event = list_entry(ev_queue->event_list.next,
2488 struct devx_async_data, list);
2489 eventsz = event->cmd_out_len +
2490 sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
2491
2492 if (eventsz > count) {
2493 spin_unlock_irq(&ev_queue->lock);
2494 return -ENOSPC;
2495 }
2496
2497 list_del(ev_queue->event_list.next);
2498 spin_unlock_irq(&ev_queue->lock);
2499
2500 if (copy_to_user(buf, &event->hdr, eventsz))
2501 ret = -EFAULT;
2502 else
2503 ret = eventsz;
2504
2505 atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
2506 kvfree(event);
2507 return ret;
2508}
2509
2510static int devx_async_cmd_event_close(struct inode *inode, struct file *filp)
2511{
2512 struct ib_uobject *uobj = filp->private_data;
2513 struct devx_async_cmd_event_file *comp_ev_file = container_of(
2514 uobj, struct devx_async_cmd_event_file, uobj);
2515 struct devx_async_data *entry, *tmp;
2516
2517 spin_lock_irq(&comp_ev_file->ev_queue.lock);
2518 list_for_each_entry_safe(entry, tmp,
2519 &comp_ev_file->ev_queue.event_list, list)
2520 kvfree(entry);
2521 spin_unlock_irq(&comp_ev_file->ev_queue.lock);
2522
2523 uverbs_close_fd(filp);
2524 return 0;
2525}
2526
2527static __poll_t devx_async_cmd_event_poll(struct file *filp,
2528 struct poll_table_struct *wait)
2529{
2530 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2531 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2532 __poll_t pollflags = 0;
2533
2534 poll_wait(filp, &ev_queue->poll_wait, wait);
2535
2536 spin_lock_irq(&ev_queue->lock);
2537 if (ev_queue->is_destroyed)
2538 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2539 else if (!list_empty(&ev_queue->event_list))
2540 pollflags = EPOLLIN | EPOLLRDNORM;
2541 spin_unlock_irq(&ev_queue->lock);
2542
2543 return pollflags;
2544}
2545
2546static const struct file_operations devx_async_cmd_event_fops = {
2547 .owner = THIS_MODULE,
2548 .read = devx_async_cmd_event_read,
2549 .poll = devx_async_cmd_event_poll,
2550 .release = devx_async_cmd_event_close,
2551 .llseek = no_llseek,
2552};
2553
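/*
 * read() on the async event FD returns one event per call: just the 64-bit
 * cookie in omit_data mode, or the event header followed by the raw EQE
 * otherwise.  A buffer too small for the event yields -EINVAL, and a prior
 * allocation failure is reported once as -EOVERFLOW.
 */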
2554static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
2555 size_t count, loff_t *pos)
2556{
2557 struct devx_async_event_file *ev_file = filp->private_data;
2558 struct devx_event_subscription *event_sub;
2559 struct devx_async_event_data *event;
2560 int ret = 0;
2561 size_t eventsz;
2562 bool omit_data;
2563 void *event_data;
2564
2565 omit_data = ev_file->omit_data;
2566
2567 spin_lock_irq(&ev_file->lock);
2568
2569 if (ev_file->is_overflow_err) {
2570 ev_file->is_overflow_err = 0;
2571 spin_unlock_irq(&ev_file->lock);
2572 return -EOVERFLOW;
2573 }
2574
2575 if (ev_file->is_destroyed) {
2576 spin_unlock_irq(&ev_file->lock);
2577 return -EIO;
2578 }
2579
2580 while (list_empty(&ev_file->event_list)) {
2581 spin_unlock_irq(&ev_file->lock);
2582
2583 if (filp->f_flags & O_NONBLOCK)
2584 return -EAGAIN;
2585
2586 if (wait_event_interruptible(ev_file->poll_wait,
2587 (!list_empty(&ev_file->event_list) ||
2588 ev_file->is_destroyed))) {
2589 return -ERESTARTSYS;
2590 }
2591
2592 spin_lock_irq(&ev_file->lock);
2593 if (ev_file->is_destroyed) {
2594 spin_unlock_irq(&ev_file->lock);
2595 return -EIO;
2596 }
2597 }
2598
2599 if (omit_data) {
2600 event_sub = list_first_entry(&ev_file->event_list,
2601 struct devx_event_subscription,
2602 event_list);
2603 eventsz = sizeof(event_sub->cookie);
2604 event_data = &event_sub->cookie;
2605 } else {
2606 event = list_first_entry(&ev_file->event_list,
2607 struct devx_async_event_data, list);
2608 eventsz = sizeof(struct mlx5_eqe) +
2609 sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
2610 event_data = &event->hdr;
2611 }
2612
2613 if (eventsz > count) {
2614 spin_unlock_irq(&ev_file->lock);
2615 return -EINVAL;
2616 }
2617
2618 if (omit_data)
2619 list_del_init(&event_sub->event_list);
2620 else
2621 list_del(&event->list);
2622
2623 spin_unlock_irq(&ev_file->lock);
2624
2625 if (copy_to_user(buf, event_data, eventsz))
2626 /* This points to an application issue, not a kernel concern */
2627 ret = -EFAULT;
2628 else
2629 ret = eventsz;
2630
2631 if (!omit_data)
2632 kfree(event);
2633 return ret;
2634}
2635
2636static __poll_t devx_async_event_poll(struct file *filp,
2637 struct poll_table_struct *wait)
2638{
2639 struct devx_async_event_file *ev_file = filp->private_data;
2640 __poll_t pollflags = 0;
2641
2642 poll_wait(filp, &ev_file->poll_wait, wait);
2643
2644 spin_lock_irq(&ev_file->lock);
2645 if (ev_file->is_destroyed)
2646 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2647 else if (!list_empty(&ev_file->event_list))
2648 pollflags = EPOLLIN | EPOLLRDNORM;
2649 spin_unlock_irq(&ev_file->lock);
2650
2651 return pollflags;
2652}
2653
2654static int devx_async_event_close(struct inode *inode, struct file *filp)
2655{
2656 struct devx_async_event_file *ev_file = filp->private_data;
2657 struct devx_event_subscription *event_sub, *event_sub_tmp;
2658 struct devx_async_event_data *entry, *tmp;
2659 struct mlx5_ib_dev *dev = ev_file->dev;
2660
2661 mutex_lock(&dev->devx_event_table.event_xa_lock);
2662 /* delete the subscriptions which are related to this FD */
2663 list_for_each_entry_safe(event_sub, event_sub_tmp,
2664 &ev_file->subscribed_events_list, file_list) {
2665 devx_cleanup_subscription(dev, event_sub);
2666 if (event_sub->eventfd)
2667 eventfd_ctx_put(event_sub->eventfd);
2668
2669 list_del_rcu(&event_sub->file_list);
2670 /* subscription may not be used by the read API any more */
2671 kfree_rcu(event_sub, rcu);
2672 }
2673
2674 mutex_unlock(&dev->devx_event_table.event_xa_lock);
2675
2676 /* Free any pending event allocations */
2677 if (!ev_file->omit_data) {
2678 spin_lock_irq(&ev_file->lock);
2679 list_for_each_entry_safe(entry, tmp,
2680 &ev_file->event_list, list)
2681 kfree(entry); /* no further reads can occur */
2682 spin_unlock_irq(&ev_file->lock);
2683 }
2684
2685 uverbs_close_fd(filp);
2686 put_device(&dev->ib_dev.dev);
2687 return 0;
2688}
2689
2690static const struct file_operations devx_async_event_fops = {
2691 .owner = THIS_MODULE,
2692 .read = devx_async_event_read,
2693 .poll = devx_async_event_poll,
2694 .release = devx_async_event_close,
2695 .llseek = no_llseek,
2696};
2697
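/*
 * Hot-unplug handlers: called when the uobject is torn down while the file
 * may still be open.  They mark the queue/file as destroyed so that readers
 * and pollers see EIO/EPOLLRDHUP, and wake up waiters (the command FD only
 * does so on a driver-initiated removal).
 */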
2698static int devx_hot_unplug_async_cmd_event_file(struct ib_uobject *uobj,
2699 enum rdma_remove_reason why)
2700{
2701 struct devx_async_cmd_event_file *comp_ev_file =
2702 container_of(uobj, struct devx_async_cmd_event_file,
2703 uobj);
2704 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2705
2706 spin_lock_irq(&ev_queue->lock);
2707 ev_queue->is_destroyed = 1;
2708 spin_unlock_irq(&ev_queue->lock);
2709
2710 if (why == RDMA_REMOVE_DRIVER_REMOVE)
2711 wake_up_interruptible(&ev_queue->poll_wait);
2712
2713 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2714 return 0;
2715}
2716
2717static int devx_hot_unplug_async_event_file(struct ib_uobject *uobj,
2718 enum rdma_remove_reason why)
2719{
2720 struct devx_async_event_file *ev_file =
2721 container_of(uobj, struct devx_async_event_file,
2722 uobj);
2723
2724 spin_lock_irq(&ev_file->lock);
2725 ev_file->is_destroyed = 1;
2726 spin_unlock_irq(&ev_file->lock);
2727
2728 wake_up_interruptible(&ev_file->poll_wait);
2729 return 0;
2730}
2731
2732DECLARE_UVERBS_NAMED_METHOD(
2733 MLX5_IB_METHOD_DEVX_UMEM_REG,
2734 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
2735 MLX5_IB_OBJECT_DEVX_UMEM,
2736 UVERBS_ACCESS_NEW,
2737 UA_MANDATORY),
2738 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
2739 UVERBS_ATTR_TYPE(u64),
2740 UA_MANDATORY),
2741 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
2742 UVERBS_ATTR_TYPE(u64),
2743 UA_MANDATORY),
2744 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2745 enum ib_access_flags),
2746 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
2747 UVERBS_ATTR_TYPE(u32),
2748 UA_MANDATORY));
2749
2750DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2751 MLX5_IB_METHOD_DEVX_UMEM_DEREG,
2752 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
2753 MLX5_IB_OBJECT_DEVX_UMEM,
2754 UVERBS_ACCESS_DESTROY,
2755 UA_MANDATORY));
2756
2757DECLARE_UVERBS_NAMED_METHOD(
2758 MLX5_IB_METHOD_DEVX_QUERY_EQN,
2759 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
2760 UVERBS_ATTR_TYPE(u32),
2761 UA_MANDATORY),
2762 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
2763 UVERBS_ATTR_TYPE(u32),
2764 UA_MANDATORY));
2765
2766DECLARE_UVERBS_NAMED_METHOD(
2767 MLX5_IB_METHOD_DEVX_QUERY_UAR,
2768 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
2769 UVERBS_ATTR_TYPE(u32),
2770 UA_MANDATORY),
2771 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
2772 UVERBS_ATTR_TYPE(u32),
2773 UA_MANDATORY));
2774
2775DECLARE_UVERBS_NAMED_METHOD(
2776 MLX5_IB_METHOD_DEVX_OTHER,
2777 UVERBS_ATTR_PTR_IN(
2778 MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
2779 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2780 UA_MANDATORY,
2781 UA_ALLOC_AND_COPY),
2782 UVERBS_ATTR_PTR_OUT(
2783 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
2784 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2785 UA_MANDATORY));
2786
2787DECLARE_UVERBS_NAMED_METHOD(
2788 MLX5_IB_METHOD_DEVX_OBJ_CREATE,
2789 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
2790 MLX5_IB_OBJECT_DEVX_OBJ,
2791 UVERBS_ACCESS_NEW,
2792 UA_MANDATORY),
2793 UVERBS_ATTR_PTR_IN(
2794 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
2795 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2796 UA_MANDATORY,
2797 UA_ALLOC_AND_COPY),
2798 UVERBS_ATTR_PTR_OUT(
2799 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
2800 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2801 UA_MANDATORY));
2802
2803DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2804 MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
2805 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
2806 MLX5_IB_OBJECT_DEVX_OBJ,
2807 UVERBS_ACCESS_DESTROY,
2808 UA_MANDATORY));
2809
2810DECLARE_UVERBS_NAMED_METHOD(
2811 MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
2812 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
2813 UVERBS_IDR_ANY_OBJECT,
2814 UVERBS_ACCESS_READ,
2815 UA_MANDATORY),
2816 UVERBS_ATTR_PTR_IN(
2817 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
2818 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2819 UA_MANDATORY,
2820 UA_ALLOC_AND_COPY),
2821 UVERBS_ATTR_PTR_OUT(
2822 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
2823 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2824 UA_MANDATORY));
2825
2826DECLARE_UVERBS_NAMED_METHOD(
2827 MLX5_IB_METHOD_DEVX_OBJ_QUERY,
2828 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2829 UVERBS_IDR_ANY_OBJECT,
2830 UVERBS_ACCESS_READ,
2831 UA_MANDATORY),
2832 UVERBS_ATTR_PTR_IN(
2833 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2834 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2835 UA_MANDATORY,
2836 UA_ALLOC_AND_COPY),
2837 UVERBS_ATTR_PTR_OUT(
2838 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
2839 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2840 UA_MANDATORY));
2841
2842DECLARE_UVERBS_NAMED_METHOD(
2843 MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
2844 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2845 UVERBS_IDR_ANY_OBJECT,
2846 UVERBS_ACCESS_READ,
2847 UA_MANDATORY),
2848 UVERBS_ATTR_PTR_IN(
2849 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2850 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2851 UA_MANDATORY,
2852 UA_ALLOC_AND_COPY),
2853 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
2854 u16, UA_MANDATORY),
2855 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
2856 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2857 UVERBS_ACCESS_READ,
2858 UA_MANDATORY),
2859 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
2860 UVERBS_ATTR_TYPE(u64),
2861 UA_MANDATORY));
2862
2863DECLARE_UVERBS_NAMED_METHOD(
2864 MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
2865 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
2866 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2867 UVERBS_ACCESS_READ,
2868 UA_MANDATORY),
2869 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
2870 MLX5_IB_OBJECT_DEVX_OBJ,
2871 UVERBS_ACCESS_READ,
2872 UA_OPTIONAL),
2873 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2874 UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
2875 UA_MANDATORY,
2876 UA_ALLOC_AND_COPY),
2877 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
2878 UVERBS_ATTR_TYPE(u64),
2879 UA_OPTIONAL),
2880 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
2881 UVERBS_ATTR_TYPE(u32),
2882 UA_OPTIONAL));
2883
2884DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
2885 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
2886 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
2887 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
2888 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
2889
2890DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
2891 UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
2892 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
2893 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
2894 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
2895 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
2896 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
2897
2898DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
2899 UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
2900 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
2901 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
2902
2903
2904DECLARE_UVERBS_NAMED_METHOD(
2905 MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
2906 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
2907 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2908 UVERBS_ACCESS_NEW,
2909 UA_MANDATORY));
2910
2911DECLARE_UVERBS_NAMED_OBJECT(
2912 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2913 UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
2914 devx_hot_unplug_async_cmd_event_file,
2915 &devx_async_cmd_event_fops, "[devx_async_cmd]",
2916 O_RDONLY),
2917 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
2918
2919DECLARE_UVERBS_NAMED_METHOD(
2920 MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
2921 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
2922 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2923 UVERBS_ACCESS_NEW,
2924 UA_MANDATORY),
2925 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
2926 enum mlx5_ib_uapi_devx_create_event_channel_flags,
2927 UA_MANDATORY));
2928
2929DECLARE_UVERBS_NAMED_OBJECT(
2930 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2931 UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
2932 devx_hot_unplug_async_event_file,
2933 &devx_async_event_fops, "[devx_async_event]",
2934 O_RDONLY),
2935 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
2936
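/*
 * DEVX is exposed only when the log_max_uctx capability is non-zero; the
 * DEVX uAPI object trees are registered only under this condition.
 */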
2937static bool devx_is_supported(struct ib_device *device)
2938{
2939 struct mlx5_ib_dev *dev = to_mdev(device);
2940
2941 return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
2942}
2943
2944const struct uapi_definition mlx5_ib_devx_defs[] = {
2945 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2946 MLX5_IB_OBJECT_DEVX,
2947 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2948 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2949 MLX5_IB_OBJECT_DEVX_OBJ,
2950 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2951 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2952 MLX5_IB_OBJECT_DEVX_UMEM,
2953 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2954 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2955 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2956 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2957 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2958 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2959 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2960 {},
2961};