1 /****************************************************************************
2  ****************************************************************************
3  ***
4  ***   This header was automatically generated from a Linux kernel header
5  ***   of the same name, to make information necessary for userspace to
6  ***   call into the kernel available to libc.  It contains only constants,
7  ***   structures, and macros generated from the original header, and thus,
8  ***   contains no copyrightable information.
9  ***
10  ***   To edit the content of this header, modify the corresponding
11  ***   source file (e.g. under external/kernel-headers/original/) then
12  ***   run bionic/libc/kernel/tools/update_all.py
13  ***
14  ***   Any manual change here will be lost the next time this script will
15  ***   be run. You've been warned!
16  ***
17  ****************************************************************************
18  ****************************************************************************/
19 #ifndef MLX5_ABI_USER_H
20 #define MLX5_ABI_USER_H
21 #include <linux/types.h>
22 #include <linux/if_ether.h>
23 #include <rdma/ib_user_ioctl_verbs.h>
/*
 * QP creation flag bits (userspace -> kernel). Each constant occupies a
 * distinct bit so values can be OR'ed together; presumably carried in
 * mlx5_ib_create_qp.flags below -- confirm against the driver.
 */
enum {
  MLX5_QP_FLAG_SIGNATURE = 1 << 0,
  MLX5_QP_FLAG_SCATTER_CQE = 1 << 1,
  MLX5_QP_FLAG_TUNNEL_OFFLOADS = 1 << 2,
  MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
  MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
  MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
  MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
  MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
  MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
  MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE = 1 << 9,
  MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
};
/* SRQ creation flag bits (see mlx5_ib_create_srq.flags). */
enum {
  MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
};
/* WQ creation flag bits (see mlx5_ib_create_wq.flags). */
enum {
  MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
};
/* uverbs ABI version negotiated between this header and the mlx5 driver. */
#define MLX5_IB_UVERBS_ABI_VERSION 1
/*
 * Original (v1) user-context allocation request: how many blue-flame
 * registers (bfregs) userspace wants in total and how many of those
 * should be low-latency ones.
 */
struct mlx5_ib_alloc_ucontext_req {
  __u32 total_num_bfregs;
  __u32 num_low_latency_bfregs;
};
/*
 * Userspace library capability bits, reported via
 * mlx5_ib_alloc_ucontext_req_v2.lib_caps (a 64-bit field, hence the
 * __u64 casts on the shifted values).
 */
enum mlx5_lib_caps {
  MLX5_LIB_CAP_4K_UAR = (__u64) 1 << 0,
  MLX5_LIB_CAP_DYN_UAR = (__u64) 1 << 1,
};
/* Flag bits for mlx5_ib_alloc_ucontext_req_v2.flags. */
enum mlx5_ib_alloc_uctx_v2_flags {
  MLX5_IB_ALLOC_UCTX_DEVX = 1 << 0,
};
/*
 * Extended (v2) user-context allocation request. Extends the v1 request
 * with flags, a comp_mask for future growth, the max CQE version the
 * library understands, and the library capability bits (mlx5_lib_caps).
 * reserved0/1/2 pad the layout; lib_caps is explicitly 8-byte aligned.
 */
struct mlx5_ib_alloc_ucontext_req_v2 {
  __u32 total_num_bfregs;
  __u32 num_low_latency_bfregs;
  __u32 flags;
  __u32 comp_mask;
  __u8 max_cqe_version;
  __u8 reserved0;
  __u16 reserved1;
  __u32 reserved2;
  __aligned_u64 lib_caps;
};
/*
 * Bits for mlx5_ib_alloc_ucontext_resp.comp_mask indicating which of the
 * optional response fields the kernel actually filled in.
 */
enum mlx5_ib_alloc_ucontext_resp_mask {
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
  MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1,
};
/*
 * Bits for mlx5_ib_alloc_ucontext_resp.cmds_supp_uhw: which verbs
 * commands support an extra user hardware (UHW) data blob.
 */
enum mlx5_user_cmds_supp_uhw {
  MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
  MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
};
/*
 * Minimum inline mode values for mlx5_ib_alloc_ucontext_resp.eth_min_inline.
 * Plain enumerators (0..4), not bit flags.
 */
enum mlx5_user_inline_mode {
  MLX5_USER_INLINE_MODE_NA,
  MLX5_USER_INLINE_MODE_NONE,
  MLX5_USER_INLINE_MODE_L2,
  MLX5_USER_INLINE_MODE_IP,
  MLX5_USER_INLINE_MODE_TCP_UDP,
};
/* Bits for mlx5_ib_alloc_ucontext_resp.flow_action_flags (ESP/AES-GCM). */
enum {
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
  MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};
/*
 * Kernel -> userspace response to user-context allocation: device limits
 * and per-context resources. comp_mask uses
 * mlx5_ib_alloc_ucontext_resp_mask bits to flag which optional trailing
 * fields are valid; response_length supports the extensible-response
 * scheme (older kernels return a shorter struct).
 */
struct mlx5_ib_alloc_ucontext_resp {
  __u32 qp_tab_size;
  __u32 bf_reg_size;
  __u32 tot_bfregs;
  __u32 cache_line_size;
  __u16 max_sq_desc_sz;
  __u16 max_rq_desc_sz;
  __u32 max_send_wqebb;
  __u32 max_recv_wr;
  __u32 max_srq_recv_wr;
  __u16 num_ports;
  __u16 flow_action_flags;
  __u32 comp_mask;
  __u32 response_length;
  __u8 cqe_version;
  __u8 cmds_supp_uhw;
  __u8 eth_min_inline;
  __u8 clock_info_versions;
  __aligned_u64 hca_core_clock_offset;
  __u32 log_uar_size;
  __u32 num_uars_per_page;
  __u32 num_dyn_bfregs;
  __u32 dump_fill_mkey;
};
/* Response to PD allocation: the protection domain number. */
struct mlx5_ib_alloc_pd_resp {
  __u32 pdn;
};
/* TSO capabilities reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_tso_caps {
  __u32 max_tso;
  __u32 supported_qpts;
};
/*
 * RSS capabilities: supported hash-field mask (mlx5_rx_hash_fields bits)
 * and hash function (mlx5_rx_hash_function_flags). reserved[7] pads the
 * struct to an 8-byte multiple.
 */
struct mlx5_ib_rss_caps {
  __aligned_u64 rx_hash_fields_mask;
  __u8 rx_hash_function;
  __u8 reserved[7];
};
/* Supported CQE compression residue formats (bit flags). */
enum mlx5_ib_cqe_comp_res_format {
  MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
  MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
  MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};
/* CQE compression capabilities reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_cqe_comp_caps {
  __u32 max_num;
  __u32 supported_format;
};
/* Bits for mlx5_packet_pacing_caps.cap_flags. */
enum mlx5_ib_packet_pacing_cap_flags {
  MLX5_IB_PP_SUPPORT_BURST = 1 << 0,
};
/*
 * Packet pacing (rate limiting) capabilities: supported QP rate range,
 * QP types, and cap_flags (mlx5_ib_packet_pacing_cap_flags).
 * reserved[3] pads cap_flags out to a 4-byte boundary.
 */
struct mlx5_packet_pacing_caps {
  __u32 qp_rate_limit_min;
  __u32 qp_rate_limit_max;
  __u32 supported_qpts;
  __u8 cap_flags;
  __u8 reserved[3];
};
/* Multi-packet WQE capability bits; bit 0 is reserved. */
enum mlx5_ib_mpw_caps {
  MPW_RESERVED = 1 << 0,
  MLX5_IB_ALLOW_MPW = 1 << 1,
  MLX5_IB_SUPPORT_EMPW = 1 << 2,
};
/* Software parsing offload capability bits. */
enum mlx5_ib_sw_parsing_offloads {
  MLX5_IB_SW_PARSING = 1 << 0,
  MLX5_IB_SW_PARSING_CSUM = 1 << 1,
  MLX5_IB_SW_PARSING_LSO = 1 << 2,
};
/* SW parsing capabilities reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_sw_parsing_caps {
  __u32 sw_parsing_offloads;
  __u32 supported_qpts;
};
/*
 * Striding RQ capabilities: log2 bounds for stride size and strides per
 * WQE, plus the QP types that support it. reserved keeps 8-byte sizing.
 */
struct mlx5_ib_striding_rq_caps {
  __u32 min_single_stride_log_num_of_bytes;
  __u32 max_single_stride_log_num_of_bytes;
  __u32 min_single_wqe_log_num_of_strides;
  __u32 max_single_wqe_log_num_of_strides;
  __u32 supported_qpts;
  __u32 reserved;
};
/* Bits for mlx5_ib_query_device_resp.flags. */
enum mlx5_ib_query_dev_resp_flags {
  MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
  MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
  MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
  MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};
/* Tunnel offload capability bits (mlx5_ib_query_device_resp.tunnel_offloads_caps). */
enum mlx5_ib_tunnel_offloads {
  MLX5_IB_TUNNELED_OFFLOADS_VXLAN = 1 << 0,
  MLX5_IB_TUNNELED_OFFLOADS_GRE = 1 << 1,
  MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
  MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
  MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
/*
 * mlx5-specific extension of the query-device response, aggregating the
 * capability sub-structs defined above. response_length supports the
 * extensible-response scheme; trailing reserved keeps the size even.
 */
struct mlx5_ib_query_device_resp {
  __u32 comp_mask;
  __u32 response_length;
  struct mlx5_ib_tso_caps tso_caps;
  struct mlx5_ib_rss_caps rss_caps;
  struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
  struct mlx5_packet_pacing_caps packet_pacing_caps;
  __u32 mlx5_ib_support_multi_pkt_send_wqes;
  __u32 flags;
  struct mlx5_ib_sw_parsing_caps sw_parsing_caps;
  struct mlx5_ib_striding_rq_caps striding_rq_caps;
  __u32 tunnel_offloads_caps;
  __u32 reserved;
};
/* Bits for mlx5_ib_create_cq.flags. */
enum mlx5_ib_create_cq_flags {
  MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD = 1 << 0,
  MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX = 1 << 1,
};
/*
 * CQ creation request: userspace buffer and doorbell addresses, CQE
 * size, CQE compression settings, and (when the UAR_PAGE_INDEX flag is
 * set) an explicit UAR page index.
 */
struct mlx5_ib_create_cq {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 cqe_size;
  __u8 cqe_comp_en;
  __u8 cqe_comp_res_format;
  __u16 flags;
  __u16 uar_page_index;
  __u16 reserved0;
  __u32 reserved1;
};
/* CQ creation response: the CQ number. */
struct mlx5_ib_create_cq_resp {
  __u32 cqn;
  __u32 reserved;
};
/* CQ resize request: new buffer address and CQE size. */
struct mlx5_ib_resize_cq {
  __aligned_u64 buf_addr;
  __u16 cqe_size;
  __u16 reserved0;
  __u32 reserved1;
};
/*
 * SRQ creation request: buffer/doorbell addresses, flags
 * (MLX5_SRQ_FLAG_*), and a user index (uidx).
 */
struct mlx5_ib_create_srq {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 flags;
  __u32 reserved0;
  __u32 uidx;
  __u32 reserved1;
};
/* SRQ creation response: the SRQ number. */
struct mlx5_ib_create_srq_resp {
  __u32 srqn;
  __u32 reserved;
};
/*
 * QP creation request: buffer/doorbell addresses, work-queue geometry,
 * flags (MLX5_QP_FLAG_*), user index, and blue-flame register index.
 * The trailing anonymous union overlays the SQ buffer address with an
 * access key -- which member applies presumably depends on the QP type
 * flags (e.g. DCT); confirm against the driver.
 */
struct mlx5_ib_create_qp {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 sq_wqe_count;
  __u32 rq_wqe_count;
  __u32 rq_wqe_shift;
  __u32 flags;
  __u32 uidx;
  __u32 bfreg_index;
  union {
    __aligned_u64 sq_buf_addr;
    __aligned_u64 access_key;
  };
};
/* RX hash function selector bits (mlx5_ib_rss_caps / create_qp_rss). */
enum mlx5_rx_hash_function_flags {
  MLX5_RX_HASH_FUNC_TOEPLITZ = 1 << 0,
};
/*
 * RX hash field bits; note MLX5_RX_HASH_INNER uses bit 31 (written with
 * 1UL to avoid shifting into the sign bit of a plain int).
 */
enum mlx5_rx_hash_fields {
  MLX5_RX_HASH_SRC_IPV4 = 1 << 0,
  MLX5_RX_HASH_DST_IPV4 = 1 << 1,
  MLX5_RX_HASH_SRC_IPV6 = 1 << 2,
  MLX5_RX_HASH_DST_IPV6 = 1 << 3,
  MLX5_RX_HASH_SRC_PORT_TCP = 1 << 4,
  MLX5_RX_HASH_DST_PORT_TCP = 1 << 5,
  MLX5_RX_HASH_SRC_PORT_UDP = 1 << 6,
  MLX5_RX_HASH_DST_PORT_UDP = 1 << 7,
  MLX5_RX_HASH_IPSEC_SPI = 1 << 8,
  MLX5_RX_HASH_INNER = (1UL << 31),
};
/*
 * RSS QP creation request: selected hash fields (mlx5_rx_hash_fields),
 * hash function (mlx5_rx_hash_function_flags), and a hash key of up to
 * 128 bytes (rx_key_len gives the valid length).
 */
struct mlx5_ib_create_qp_rss {
  __aligned_u64 rx_hash_fields_mask;
  __u8 rx_hash_function;
  __u8 rx_key_len;
  __u8 reserved[6];
  __u8 rx_hash_key[128];
  __u32 comp_mask;
  __u32 flags;
};
/*
 * Bits for mlx5_ib_create_qp_resp.comp_mask: which optional response
 * fields (tirn/tisn/rqn/sqn/tir_icm_addr) the kernel filled in.
 */
enum mlx5_ib_create_qp_resp_mask {
  MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
  MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
  MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2,
  MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3,
  MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR = 1UL << 4,
};
/*
 * QP creation response: the allocated bfreg index plus optional object
 * numbers gated by comp_mask (mlx5_ib_create_qp_resp_mask).
 */
struct mlx5_ib_create_qp_resp {
  __u32 bfreg_index;
  __u32 reserved;
  __u32 comp_mask;
  __u32 tirn;
  __u32 tisn;
  __u32 rqn;
  __u32 sqn;
  __u32 reserved1;
  __u64 tir_icm_addr;
};
/* Memory window allocation request; num_klms sizes the KLM list. */
struct mlx5_ib_alloc_mw {
  __u32 comp_mask;
  __u8 num_klms;
  __u8 reserved1;
  __u16 reserved2;
};
/* Bits for mlx5_ib_create_wq.comp_mask. */
enum mlx5_ib_create_wq_mask {
  MLX5_IB_CREATE_WQ_STRIDING_RQ = (1 << 0),
};
/*
 * WQ creation request: buffer/doorbell addresses, RQ geometry, flags
 * (MLX5_WQ_FLAG_*), and -- when MLX5_IB_CREATE_WQ_STRIDING_RQ is set in
 * comp_mask -- the striding-RQ parameters.
 */
struct mlx5_ib_create_wq {
  __aligned_u64 buf_addr;
  __aligned_u64 db_addr;
  __u32 rq_wqe_count;
  __u32 rq_wqe_shift;
  __u32 user_index;
  __u32 flags;
  __u32 comp_mask;
  __u32 single_stride_log_num_of_bytes;
  __u32 single_wqe_log_num_of_strides;
  __u32 two_byte_shift_en;
};
/*
 * AH creation response: the resolved destination MAC address.
 * reserved[6] pads ETH_ALEN (6) bytes out to an 8-byte multiple.
 */
struct mlx5_ib_create_ah_resp {
  __u32 response_length;
  __u8 dmac[ETH_ALEN];
  __u8 reserved[6];
};
/* Burst shaping parameters for packet pacing (see MLX5_IB_PP_SUPPORT_BURST). */
struct mlx5_ib_burst_info {
  __u32 max_burst_sz;
  __u16 typical_pkt_sz;
  __u16 reserved;
};
/* mlx5-specific modify-QP request: optional burst-info payload. */
struct mlx5_ib_modify_qp {
  __u32 comp_mask;
  struct mlx5_ib_burst_info burst_info;
  __u32 reserved;
};
/* Modify-QP response: the DCT number (for DCT-type QPs). */
struct mlx5_ib_modify_qp_resp {
  __u32 response_length;
  __u32 dctn;
};
/* WQ creation response (extensible via response_length). */
struct mlx5_ib_create_wq_resp {
  __u32 response_length;
  __u32 reserved;
};
/* RWQ indirection table creation response. */
struct mlx5_ib_create_rwq_ind_tbl_resp {
  __u32 response_length;
  __u32 reserved;
};
/* mlx5-specific modify-WQ request; comp_mask reserved for extensions. */
struct mlx5_ib_modify_wq {
  __u32 comp_mask;
  __u32 reserved;
};
/*
 * Clock-translation page shared by the kernel (mapped via
 * MLX5_IB_MMAP_CLOCK_INFO): parameters for converting HCA cycle counts
 * to nanoseconds. `sign` presumably acts as a seqlock-style generation
 * counter guarding concurrent kernel updates (see
 * MLX5_IB_CLOCK_INFO_KERNEL_UPDATING) -- confirm against the driver.
 */
struct mlx5_ib_clock_info {
  __u32 sign;
  __u32 resv;
  __aligned_u64 nsec;
  __aligned_u64 cycles;
  __aligned_u64 frac;
  __u32 mult;
  __u32 shift;
  __aligned_u64 mask;
  __aligned_u64 overflow_period;
};
/*
 * mmap offset commands for the mlx5 device file. Values are part of the
 * ABI; note 4 is intentionally unassigned here.
 */
enum mlx5_ib_mmap_cmd {
  MLX5_IB_MMAP_REGULAR_PAGE = 0,
  MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
  MLX5_IB_MMAP_WC_PAGE = 2,
  MLX5_IB_MMAP_NC_PAGE = 3,
  MLX5_IB_MMAP_CORE_CLOCK = 5,
  MLX5_IB_MMAP_ALLOC_WC = 6,
  MLX5_IB_MMAP_CLOCK_INFO = 7,
  MLX5_IB_MMAP_DEVICE_MEM = 8,
};
/* Flag set in mlx5_ib_clock_info.sign while the kernel is updating it. */
enum {
  MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
};
/* Supported clock-info page layout version. */
enum {
  MLX5_IB_CLOCK_INFO_V1 = 0,
};
/* One flow-counter binding: counter description and its index. */
struct mlx5_ib_flow_counters_desc {
  __u32 description;
  __u32 index;
};
/*
 * Flow-counter data block: a user pointer (RDMA_UAPI_PTR keeps the field
 * 64-bit on all ABIs) to `ncounters` descriptor entries.
 */
struct mlx5_ib_flow_counters_data {
  RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
  __u32 ncounters;
  __u32 reserved;
};
/*
 * mlx5-specific create-flow payload: `ncounters_data` counter data
 * blocks in the trailing C99 flexible array member.
 */
struct mlx5_ib_create_flow {
  __u32 ncounters_data;
  __u32 reserved;
  struct mlx5_ib_flow_counters_data data[];
};
378 #endif
379