/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__

#ifndef LINUX_REMOVE
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif
#endif

#ifndef __EXTRACT__LINUX__

enum ecore_roce_ll2_tx_dest
{
	ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	ECORE_ROCE_LL2_TX_DEST_MAX
};

/* HW/FW RoCE Limitations (external. For internal see ecore_roce.h) */
/* CNQ size limitation:
 * The CNQ size should be set to twice the number of CQs, since for each CQ
 * one element may be inserted into the CNQ and another element is used per
 * CQ to accommodate a possible race in the arm mechanism.
 * The FW supports a CNQ of at most 64k-1, which can be a problem: the number
 * of QPs can reach 32k, giving 64k CQs and hence 128k CNQ elements.
 * Fortunately, the FW can buffer CNQ elements to avoid an overflow, at the
 * expense of performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */
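
/* Illustrative sizing helper (a sketch, not part of the original API): per
 * the comment above, a driver might size its CNQ at twice the CQ count,
 * clamped to the FW limit. The helper name is hypothetical and the u32
 * typedef is assumed from the surrounding ecore headers.
 */
static inline u32 ecore_rdma_example_cnq_size(u32 num_cqs)
{
	u32 size = 2 * num_cqs;

	return (size > ECORE_RDMA_MAX_CNQ_SIZE) ? ECORE_RDMA_MAX_CNQ_SIZE
						: size;
}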

/* RDMA interface */

enum ecore_roce_qp_state {
	ECORE_ROCE_QP_STATE_RESET, /* Reset */
	ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
	ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
	ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
	ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
	ECORE_ROCE_QP_STATE_ERR,   /* Error */
	ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};

enum ecore_rdma_qp_type {
	ECORE_RDMA_QP_TYPE_RC,
	ECORE_RDMA_QP_TYPE_XRC_INI,
	ECORE_RDMA_QP_TYPE_XRC_TGT,
	ECORE_RDMA_QP_TYPE_INVAL = 0xffff,
};

enum ecore_rdma_tid_type
{
	ECORE_RDMA_TID_REGISTERED_MR,
	ECORE_RDMA_TID_FMR,
	ECORE_RDMA_TID_MW_TYPE1,
	ECORE_RDMA_TID_MW_TYPE2A
};

typedef
void (*affiliated_event_t)(void	*context,
			   u8	fw_event_code,
			   void	*fw_handle);

typedef
void (*unaffiliated_event_t)(void *context,
			     u8   event_code);

struct ecore_rdma_events {
	void			*context;
	affiliated_event_t	affiliated_event;
	unaffiliated_event_t	unaffiliated_event;
};

struct ecore_rdma_device {
	/* Vendor specific information */
	u32	vendor_id;
	u32	vendor_part_id;
	u32	hw_ver;
	u64	fw_ver;

	u64	node_guid; /* node GUID */
	u64	sys_image_guid; /* System image GUID */

	u8	max_cnq;
	u8	max_sge; /* The maximum number of scatter/gather entries
			  * per Work Request supported
			  */
	u8	max_srq_sge; /* The maximum number of scatter/gather entries
			      * per Work Request supported for SRQ
			      */
	u16	max_inline;
	u32	max_wqe; /* The maximum number of outstanding work
			  * requests on any Work Queue supported
			  */
	u32	max_srq_wqe; /* The maximum number of outstanding work
			      * requests on any Work Queue supported for SRQ
			      */
	u8	max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
					     * & atomic operation that can be
					     * outstanding per QP
					     */

	u8	max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
					    * initiation of RDMA Read
					    * & atomic operations
					    */
	u64	max_dev_resp_rd_atomic_resc;
	u32	max_cq;
	u32	max_qp;
	u32	max_srq; /* Maximum number of SRQs */
	u32	max_mr; /* Maximum number of MRs supported by this device */
	u64	max_mr_size; /* Size (in bytes) of the largest contiguous memory
			      * block that can be registered by this device
			      */
	u32	max_cqe;
	u32	max_mw; /* The maximum number of memory windows supported */
	u32	max_fmr;
	u32	max_mr_mw_fmr_pbl;
	u64	max_mr_mw_fmr_size;
	u32	max_pd; /* The maximum number of protection domains supported */
	u32	max_ah;
	u8	max_pkey;
	u16	max_srq_wr; /* Maximum number of WRs per SRQ */
	u8	max_stats_queues; /* Maximum number of statistics queues */
	u32	dev_caps;

	/* Ability to support RNR-NAK generation */
#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK				0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
	/* Ability to support port shutdown */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT			1
	/* Ability to support the port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT		2
	/* Ability to support the port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT		3
	/* Ability to support a system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT			4
	/* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT			5
	/* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT			6
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT			7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT			8
	/* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK			0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT			9
	/* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT			11
	/* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT		12
	/* Ability to support a block list physical buffer list */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT			13
	/* Ability to support zero-based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK				0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT				14
	/* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
	/* Ability to support loopback on QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT			16
	u64	page_size_caps;
	u8	dev_ack_delay;
	u32	reserved_lkey; /* Value of reserved L_key */
	u32	bad_pkey_counter; /* Bad P_key counter support indicator */
	struct ecore_rdma_events events;
};

enum ecore_port_state {
	ECORE_RDMA_PORT_UP,
	ECORE_RDMA_PORT_DOWN,
};

enum ecore_roce_capability {
	ECORE_ROCE_V1	= 1 << 0,
	ECORE_ROCE_V2	= 1 << 1,
};

struct ecore_rdma_port {
	enum ecore_port_state port_state;
	int	link_speed;
	u64	max_msg_size;
	u8	source_gid_table_len;
	void	*source_gid_table_ptr;
	u8	pkey_table_len;
	void	*pkey_table_ptr;
	u32	pkey_bad_counter;
	enum ecore_roce_capability capability;
};

struct ecore_rdma_cnq_params
{
	u8  num_pbl_pages; /* Number of pages in the PBL allocated
			    * for this queue
			    */
	u64 pbl_ptr; /* Address to the first entry of the queue PBL */
};

/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum ecore_rdma_cq_mode {
	ECORE_RDMA_CQ_MODE_16_BITS,
	ECORE_RDMA_CQ_MODE_32_BITS,
};
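
/* Illustrative sketch (not part of the original API): picking the CQ mode
 * from the native word size, per the comment above. The helper name is
 * hypothetical.
 */
static inline enum ecore_rdma_cq_mode ecore_rdma_example_cq_mode(void)
{
	return (sizeof(void *) == 8) ? ECORE_RDMA_CQ_MODE_32_BITS
				     : ECORE_RDMA_CQ_MODE_16_BITS;
}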

struct ecore_roce_dcqcn_params {
	u8	notification_point;
	u8	reaction_point;

	/* fields for notification point */
	u32	cnp_send_timeout;
	u8	cnp_dscp;
	u8	cnp_vlan_priority;

	/* fields for reaction point */
	u32	rl_bc_rate;  /* Byte Counter Limit. */
	u32	rl_max_rate; /* Maximum rate in Mbps resolution */
	u32	rl_r_ai;     /* Active increase rate */
	u32	rl_r_hai;    /* Hyper active increase rate */
	u32	dcqcn_gd;    /* Alpha denominator */
	u32	dcqcn_k_us;  /* Alpha update interval */
	u32	dcqcn_timeout_us;
};

struct ecore_rdma_glob_cfg {
	/* global tunables affecting all QPs created after they are
	 * set.
	 */
	u8 vlan_pri_en;
	u8 vlan_pri;
	u8 ecn_en;
	u8 ecn;
	u8 dscp_en;
	u8 dscp;
};

#ifndef LINUX_REMOVE
#define ECORE_RDMA_DCSP_BIT_MASK			0x01
#define ECORE_RDMA_DCSP_EN_BIT_MASK			0x02
#define ECORE_RDMA_ECN_BIT_MASK				0x04
#define ECORE_RDMA_ECN_EN_BIT_MASK			0x08
#define ECORE_RDMA_VLAN_PRIO_BIT_MASK		0x10
#define ECORE_RDMA_VLAN_PRIO_EN_BIT_MASK	0x20

enum _ecore_status_t
ecore_rdma_set_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *in_params,
			u32 glob_cfg_bits);

enum _ecore_status_t
ecore_rdma_get_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *out_params);
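
/* Example (an illustrative sketch, not from the original header): enabling
 * ECN marking globally, assuming p_hwfn is a valid hwfn context:
 *
 *	struct ecore_rdma_glob_cfg cfg = { 0 };
 *
 *	cfg.ecn_en = 1;
 *	cfg.ecn = 1;
 *	rc = ecore_rdma_set_glob_cfg(p_hwfn, &cfg,
 *				     ECORE_RDMA_ECN_BIT_MASK |
 *				     ECORE_RDMA_ECN_EN_BIT_MASK);
 */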
#endif /* LINUX_REMOVE */

#ifdef CONFIG_ECORE_IWARP

#define ECORE_IWARP_MAX_LIS_BACKLOG		(256)

#define ECORE_MPA_RTR_TYPE_NONE		0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND	(1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE	(1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ	(1 << 2)

enum ecore_mpa_rev {
	ECORE_MPA_REV1,
	ECORE_MPA_REV2,
};

struct ecore_iwarp_params {
	u32				rcv_wnd_size;
	u16				ooo_num_rx_bufs;
#define ECORE_IWARP_TS_EN (1 << 0)
#define ECORE_IWARP_DA_EN (1 << 1)
	u8				flags;
	u8				crc_needed;
	enum ecore_mpa_rev		mpa_rev;
	u8				mpa_rtr;
	u8				mpa_peer2peer;
};

#endif /* CONFIG_ECORE_IWARP */

struct ecore_roce_params {
	enum ecore_rdma_cq_mode		cq_mode;
	struct ecore_roce_dcqcn_params	dcqcn_params;
	u8				ll2_handle; /* required for UD QPs */
};

struct ecore_rdma_start_in_params {
	struct ecore_rdma_events	*events;
	struct ecore_rdma_cnq_params	cnq_pbl_list[128];
	u8				desired_cnq;
	u16				max_mtu;
	u8				mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_params	iwarp;
#endif
	struct ecore_roce_params	roce;
};

struct ecore_rdma_add_user_out_params {
	/* output variables (given to miniport) */
	u16	dpi;
	u64	dpi_addr;
	u64	dpi_phys_addr;
	u32	dpi_size;
	u16	wid_count;
};

enum roce_mode
{
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

/* An ECORE GID can be used as an IPv4/6 address in RoCE v2 */
union ecore_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};
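
/* Example (illustrative only): for RoCE v2 over IPv4 only the ipv4_addr
 * member is meaningful, while IPv6 uses the full 16 bytes, e.g.:
 *
 *	union ecore_gid gid = { { 0 } };
 *
 *	gid.ipv4_addr = ipv4_be;	// caller-provided address; the byte
 *					// order is an assumption here
 */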

struct ecore_rdma_register_tid_in_params {
	/* input variables (given by miniport) */
	u32	itid; /* index only, 18 bits long, lkey = itid << 8 | key */
	enum ecore_rdma_tid_type tid_type;
	u8	key;
	u16	pd;
	bool	local_read;
	bool	local_write;
	bool	remote_read;
	bool	remote_write;
	bool	remote_atomic;
	bool	mw_bind;
	u64	pbl_ptr;
	bool	pbl_two_level;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the MR pages
				    */
	u8	page_size_log; /* for the MR pages */
	u32	fbo;
	u64	length; /* only lower 40 bits are valid */
	u64	vaddr;
	bool	zbva;
	bool	phy_mr;
	bool	dma_mr;

	/* DIF related fields */
	bool	dif_enabled;
	u64	dif_error_addr;
	u64	dif_runt_addr;
};
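
/* Illustrative helper (a sketch, not part of the original API): composes an
 * lkey from the itid and key fields per the layout documented above,
 * lkey = itid << 8 | key. The helper name is hypothetical.
 */
static inline u32 ecore_rdma_example_lkey(u32 itid, u8 key)
{
	return (itid << 8) | key;
}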

/* Returns the CQ CID or zero in case of failure */
struct ecore_rdma_create_cq_in_params {
	/* input variables (given by miniport) */
	u32	cq_handle_lo; /* CQ handle to be written in CNQ */
	u32	cq_handle_hi;
	u32	cq_size;
	u16	dpi;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
	u8	cnq_id;
	u16	int_timeout;
};

struct ecore_rdma_create_srq_in_params {
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;

	/* XRC related only */
	bool is_xrc;
	u16 xrcd_id;
	u32 cq_cid;
	bool reserved_key_en;
};

struct ecore_rdma_destroy_cq_in_params {
	/* input variables (given by miniport) */
	u16 icid;
};

struct ecore_rdma_destroy_cq_out_params {
	/* output variables, provided to the upper layer */

	/* Sequence number of completion notification sent for the CQ on
	 * the associated CNQ
	 */
	u16	num_cq_notif;
};
#endif /* __EXTRACT__LINUX__ */

struct ecore_rdma_resize_cq_in_params {
	/* input variables (given by miniport) */

	u16	icid;
	u32	cq_size;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
};

#ifndef __EXTRACT__LINUX__

struct ecore_rdma_create_qp_in_params {
	/* input variables (given by miniport) */
	u32	qp_handle_lo; /* QP handle to be written in CQE */
	u32	qp_handle_hi;
	u32	qp_handle_async_lo; /* QP handle to be written in async event */
	u32	qp_handle_async_hi;
	bool	use_srq;
	bool	signal_all;
	bool	fmr_and_reserved_lkey;
	u16	pd;
	u16	dpi;
	u16	sq_cq_id;
	u16	sq_num_pages;
	u64	sq_pbl_ptr;	/* Not relevant for iWARP */
	u8	max_sq_sges;
	u16	rq_cq_id;
	u16	rq_num_pages;
	u64	rq_pbl_ptr;	/* Not relevant for iWARP */
	u16	srq_id;
	u8	stats_queue;
	enum ecore_rdma_qp_type	qp_type;
	u16	xrcd_id;
};

struct ecore_rdma_create_qp_out_params {
	/* output variables (given to miniport) */
	u32		qp_id;
	u16		icid;
	void		*rq_pbl_virt;
	dma_addr_t	rq_pbl_phys;
	void		*sq_pbl_virt;
	dma_addr_t	sq_pbl_phys;
};

struct ecore_rdma_modify_qp_in_params {
	/* input variables (given by miniport) */
	u32		modify_flags;
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

	enum ecore_roce_qp_state	new_state;
	u16		pkey;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	u32		dest_qp;
	u16		mtu;
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u32		flow_label; /* ignored in IPv4 */
	union ecore_gid	sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid	dgid; /* GRH DGID; IPv4/6 Destination IP */
	u16		udp_src_port; /* RoCEv2 only */

	u16		vlan_id;

	u32		rq_psn;
	u32		sq_psn;
	u8		max_rd_atomic_resp;
	u8		max_rd_atomic_req;
	u32		ack_timeout;
	u8		retry_cnt;
	u8		rnr_retry_cnt;
	u8		min_rnr_nak_timer;
	bool		sqd_async;
	u8		remote_mac_addr[6];
	u8		local_mac_addr[6];
	bool		use_local_mac;
	enum roce_mode	roce_mode;
};
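
/* Example (an illustrative sketch): transitioning a QP to RTS while updating
 * the SQ PSN; modify_flags selects which of the fields above are applied.
 * rdma_cxt, qp and psn are assumed to come from the caller:
 *
 *	struct ecore_rdma_modify_qp_in_params p = { 0 };
 *
 *	p.new_state = ECORE_ROCE_QP_STATE_RTS;
 *	p.sq_psn = psn;
 *	p.modify_flags =
 *		(1 << ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT) |
 *		(1 << ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT);
 *	rc = ecore_rdma_modify_qp(rdma_cxt, qp, &p);
 */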

struct ecore_rdma_query_qp_out_params {
	/* output variables (given to miniport) */
	enum ecore_roce_qp_state	state;
	u32		rq_psn; /* responder */
	u32		sq_psn; /* requester */
	bool		draining; /* send queue is draining */
	u16		mtu;
	u32		dest_qp;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
	u32		flow_label; /* ignored in IPv4 */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u32		timeout;
	u8		rnr_retry;
	u8		retry_cnt;
	u8		min_rnr_nak_timer;
	u16		pkey_index;
	u8		max_rd_atomic;
	u8		max_dest_rd_atomic;
	bool		sqd_async;
};

struct ecore_rdma_destroy_qp_out_params {
	u32		sq_cq_prod;
	u32		rq_cq_prod;
};

struct ecore_rdma_create_srq_out_params {
	u16 srq_id;
};

struct ecore_rdma_destroy_srq_in_params {
	u16 srq_id;
	bool is_xrc;
};

struct ecore_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
	bool is_xrc;
};
#endif /* __EXTRACT__LINUX__ */

struct ecore_rdma_resize_cq_out_params {
	/* output variables, provided to the upper layer */
	u32 prod; /* CQ producer value on old PBL */
	u32 cons; /* CQ consumer value on old PBL */
};

struct ecore_rdma_resize_cnq_in_params {
	/* input variables (given by miniport) */
	u32	cnq_id;
	u32	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CNQ pages
				    */
	u64	pbl_ptr;
};

#ifndef __EXTRACT__LINUX__
struct ecore_rdma_stats_out_params {
	u64	sent_bytes;
	u64	sent_pkts;
	u64	rcv_bytes;
	u64	rcv_pkts;

	/* RoCE only */
	u64	icrc_errors;		/* wraps at 32 bits */
	u64	retransmit_events;	/* wraps at 32 bits */
	u64	silent_drops;		/* wraps at 16 bits */
	u64	rnr_nacks_sent;		/* wraps at 16 bits */

	/* RoCE DCQCN */
	u64	ecn_pkt_rcv;
	u64	cnp_pkt_rcv;
	u64	cnp_pkt_sent;

	/* iWARP only */
	u64	iwarp_tx_fast_rxmit_cnt;
	u64	iwarp_tx_slow_start_cnt;
	u64	unalign_rx_comp;
};

struct ecore_rdma_counters_out_params {
	u64	pd_count;
	u64	max_pd;
	u64	dpi_count;
	u64	max_dpi;
	u64	cq_count;
	u64	max_cq;
	u64	qp_count;
	u64	max_qp;
	u64	tid_count;
	u64	max_tid;
	u64	srq_count;
	u64	max_srq;
	u64	xrc_srq_count;
	u64	max_xrc_srq;
	u64	xrcd_count;
	u64	max_xrcd;
};
#endif /* __EXTRACT__LINUX__ */

enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
		    struct ecore_rdma_add_user_out_params *out_params);

enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
		    u16	*pd);

enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
		     u32 *tid);

enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
		     struct ecore_rdma_create_cq_in_params *params,
		     u16 *icid);

/* Returns a pointer to the responder's CID, which is also a pointer to the
 * ecore_qp_params struct. Returns NULL in case of failure.
 */
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
		     struct ecore_rdma_create_qp_in_params  *in_params,
		     struct ecore_rdma_create_qp_out_params *out_params);
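
/* Example (an illustrative sketch): create/destroy pairing, assuming
 * in_params, out_params and destroy_out were prepared by the caller:
 *
 *	struct ecore_rdma_qp *qp;
 *
 *	qp = ecore_rdma_create_qp(rdma_cxt, &in_params, &out_params);
 *	if (qp == NULL)
 *		goto err;
 *	...
 *	rc = ecore_rdma_destroy_qp(rdma_cxt, qp, &destroy_out);
 */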

enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
			  u32		tid);

enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
		      struct ecore_rdma_destroy_cq_in_params  *in_params,
		      struct ecore_rdma_destroy_cq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
		      struct ecore_rdma_qp *qp,
		      struct ecore_rdma_destroy_qp_out_params *out_params);

enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);

void
ecore_rdma_free_pd(void *rdma_cxt,
		   u16	pd);

enum _ecore_status_t
ecore_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id);

void
ecore_rdma_free_xrcd(void  *rdma_cxt, u16 xrcd_id);

void
ecore_rdma_free_tid(void *rdma_cxt,
		    u32	tid);

enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
		     struct ecore_rdma_qp *qp,
		     struct ecore_rdma_modify_qp_in_params *params);

struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
		    struct ecore_rdma_qp		  *qp,
		    struct ecore_rdma_query_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params);

void ecore_rdma_remove_user(void *rdma_cxt,
			    u16		dpi);

enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
		      struct ecore_rdma_resize_cnq_in_params *in_params);

/* Returns the CQ CID or zero in case of failure */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
		     struct ecore_rdma_resize_cq_in_params  *in_params,
		     struct ecore_rdma_resize_cq_out_params *out_params);

/* Before calling rdma_start, the upper layer (VBD/qed) should fill in the
 * page size and MTU in the hwfn context.
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
		 struct ecore_rdma_start_in_params *params);
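
/* Example (an illustrative sketch) of a start sequence, assuming params was
 * zero-initialized and filled by the caller, and that the page size and MTU
 * were set in the hwfn context beforehand, per the comment above:
 *
 *	rc = ecore_rdma_start(p_hwfn, &params);
 *	if (rc != ECORE_SUCCESS)
 *		goto err;
 */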

enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
		       struct ecore_rdma_stats_out_params *out_params);

enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_params);

u32 ecore_rdma_get_sb_id(struct ecore_hwfn *p_hwfn, u32 rel_sb_id);

#ifndef LINUX_REMOVE
u32 ecore_rdma_query_cau_timer_res(void);
#endif

void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
		      struct ecore_rdma_create_srq_in_params *in_params,
		      struct ecore_rdma_create_srq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
		       struct ecore_rdma_destroy_srq_in_params *in_params);

enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
		      struct ecore_rdma_modify_srq_in_params *in_params);

#ifdef CONFIG_ECORE_IWARP

/* iWARP API */

#ifndef __EXTRACT__LINUX__

enum ecore_iwarp_event_type {
	ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
					     * (ACK on MPA response)
					     */
	ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP, /* Passive side will drop
					      * MPA requests
					      */
	ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	ECORE_IWARP_EVENT_DISCONNECT,
	ECORE_IWARP_EVENT_CLOSE,
	/* Slow/Error path events start from here */
	ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_ERROR_EVENTS_START = ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_EVENT_RQ_EMPTY,
	ECORE_IWARP_EVENT_LLP_TIMEOUT,
	ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	ECORE_IWARP_EVENT_CQ_OVERFLOW,
	ECORE_IWARP_EVENT_QP_CATASTROPHIC,
	ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
	ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};

enum ecore_tcp_ip_version
{
	ECORE_TCP_IPV4,
	ECORE_TCP_IPV6,
};

struct ecore_iwarp_cm_info {
	enum ecore_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_cm_event_params {
	enum ecore_iwarp_event_type event;
	const struct ecore_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler)(void *context,
				   struct ecore_iwarp_cm_event_params *event);
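
/* Example (an illustrative sketch) of an event handler; a real handler would
 * dispatch on event->event and, for ECORE_IWARP_EVENT_MPA_REQUEST, save
 * event->ep_context for a later ecore_iwarp_accept()/ecore_iwarp_reject():
 *
 *	static int my_iwarp_event_cb(void *context,
 *				     struct ecore_iwarp_cm_event_params *event)
 *	{
 *		switch (event->event) {
 *		case ECORE_IWARP_EVENT_MPA_REQUEST:
 *			// stash event->ep_context, then accept or reject
 *			break;
 *		default:
 *			break;
 *		}
 *		return 0;
 *	}
 */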

/* Active side connect flow:
 * The upper layer driver calls ecore_iwarp_connect. The function is
 * blocking, i.e. it returns after the TCP connection is established. After
 * the MPA connection is established, the ECORE_IWARP_EVENT_ACTIVE_COMPLETE
 * event will be passed to the upper layer driver using the event_cb passed
 * in ecore_iwarp_connect_in. Information about the established connection
 * will be initialized in the event data. An illustrative example follows
 * ecore_iwarp_connect_out below.
 */
struct ecore_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct ecore_rdma_qp *qp;
	struct ecore_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
};

struct ecore_iwarp_connect_out {
	void *ep_context;
};
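
/* Example (an illustrative sketch) of an active connect, assuming qp and
 * cm_info were prepared by the caller and my_iwarp_event_cb is the handler
 * sketched above:
 *
 *	struct ecore_iwarp_connect_in in = { 0 };
 *	struct ecore_iwarp_connect_out out;
 *
 *	in.event_cb = my_iwarp_event_cb;
 *	in.cb_context = my_ctx;
 *	in.qp = qp;
 *	in.cm_info = cm_info;
 *	rc = ecore_iwarp_connect(rdma_cxt, &in, &out);
 */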

/* Passive side connect flow:
 * The upper layer driver calls ecore_iwarp_create_listen. Once a SYN packet
 * arrives that matches an IP/port being listened on, ecore will offload the
 * TCP connection. After an MPA request is received on the offloaded
 * connection, the ECORE_IWARP_EVENT_MPA_REQUEST event will be sent to the
 * upper layer driver using the event_cb passed below, with the event data
 * placed in the event parameter. After the upper layer driver processes the
 * event, ecore_iwarp_accept or ecore_iwarp_reject should be called to
 * continue MPA negotiation. Once negotiation is complete, the
 * ECORE_IWARP_EVENT_PASSIVE_COMPLETE event will be passed to the event_cb
 * passed originally in the ecore_iwarp_listen_in structure. An illustrative
 * example follows ecore_iwarp_listen_out below.
 */
struct ecore_iwarp_listen_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* Max num of pending incoming connection requests */
	enum ecore_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct ecore_iwarp_listen_out {
	void *handle; /* to be sent to destroy */
};
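
/* Example (an illustrative sketch) of setting up a passive-side listener on
 * an IPv4 address; my_ipv4_addr and the port value are hypothetical:
 *
 *	struct ecore_iwarp_listen_in in = { 0 };
 *	struct ecore_iwarp_listen_out out;
 *
 *	in.event_cb = my_iwarp_event_cb;
 *	in.cb_context = my_ctx;
 *	in.max_backlog = ECORE_IWARP_MAX_LIS_BACKLOG;
 *	in.ip_version = ECORE_TCP_IPV4;
 *	in.ip_addr[0] = my_ipv4_addr;
 *	in.port = 7000;
 *	rc = ecore_iwarp_create_listen(rdma_cxt, &in, &out);
 */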

struct ecore_iwarp_accept_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	struct ecore_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_reject_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	const void *private_data;
	u16 private_data_len;
};

struct ecore_iwarp_send_rtr_in {
	void *ep_context;
};

struct ecore_iwarp_tcp_abort_in {
	void *ep_context;
};

#endif /* __EXTRACT__LINUX__ */

enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
		    struct ecore_iwarp_connect_in *iparams,
		    struct ecore_iwarp_connect_out *oparams);

enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
			  struct ecore_iwarp_listen_in *iparams,
			  struct ecore_iwarp_listen_out *oparams);

enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
		   struct ecore_iwarp_accept_in *iparams);

enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
		   struct ecore_iwarp_reject_in *iparams);

enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

enum _ecore_status_t
ecore_iwarp_pause_listen(void *rdma_cxt, void *handle, bool pause, bool comp);

#endif /* CONFIG_ECORE_IWARP */

#endif /* __ECORE_RDMA_API_H__ */