/*
 * Copyright (C) 2005 - 2008 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#ifndef __hwlib_h__
#define __hwlib_h__

#include <linux/module.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include "regmap.h"		/* srcgen array map output */

#include "asyncmesg.h"
#include "fwcmd_opcodes.h"
#include "post_codes.h"
#include "fwcmd_mcc.h"

#include "fwcmd_types_bmap.h"
#include "fwcmd_common_bmap.h"
#include "fwcmd_eth_bmap.h"
#include "bestatus.h"
/*
 * Macros for reading/writing protection domain or CSR registers
 * in BladeEngine.
 */
#define PD_READ(fo, field)	ioread32((fo)->db_va + \
		offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)

#define PD_WRITE(fo, field, val) iowrite32(val, (fo)->db_va + \
		offsetof(struct BE_PROTECTION_DOMAIN_DBMAP_AMAP, field)/8)

#define CSR_READ(fo, field)	ioread32((fo)->csr_va + \
		offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)

#define CSR_WRITE(fo, field, val)	iowrite32(val, (fo)->csr_va + \
		offsetof(struct BE_BLADE_ENGINE_CSRMAP_AMAP, field)/8)

#define PCICFG0_READ(fo, field)	ioread32((fo)->pci_va + \
		offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)

#define PCICFG0_WRITE(fo, field, val)	iowrite32(val, (fo)->pci_va + \
		offsetof(struct BE_PCICFG0_CSRMAP_AMAP, field)/8)

#define PCICFG1_READ(fo, field)	ioread32((fo)->pci_va + \
		offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)

#define PCICFG1_WRITE(fo, field, val)	iowrite32(val, (fo)->pci_va + \
		offsetof(struct BE_PCICFG1_CSRMAP_AMAP, field)/8)
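
/*
 * Usage sketch (illustrative only): ring a doorbell through the
 * protection-domain map, e.g.
 *
 *	PD_WRITE(pfob, some_doorbell, value);
 *
 * where "some_doorbell" stands in for a field of
 * struct BE_PROTECTION_DOMAIN_DBMAP_AMAP from the generated map headers
 * and "pfob" is a struct be_function_object pointer.
 */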

#ifdef BE_DEBUG
#define ASSERT(c)       BUG_ON(!(c))
#else
#define ASSERT(c)       do {} while (0)
#endif

/* debug levels */
enum BE_DEBUG_LEVELS {
	DL_ALWAYS = 0,		/* cannot be masked */
	DL_ERR = 0x1,		/* errors that should never happen */
	DL_WARN = 0x2,		/* something questionable;
				   recoverable errors */
	DL_NOTE = 0x4,		/* infrequent, important debug info */
	DL_INFO = 0x8,		/* debug information */
	DL_VERBOSE = 0x10,	/* detailed info, such as buffer traces */
	BE_DL_MIN_VALUE = 0x1,	/* the minimum level value used */
	BE_DL_MAX_VALUE = 0x80	/* the highest level value used */
};

extern unsigned int trace_level;

#define TRACE(lm, fmt, args...)  do {				\
		if (trace_level & lm) {				\
			printk(KERN_NOTICE "BE: %s:%d " fmt "\n", \
			__FILE__ , __LINE__ , ## args);		\
		}						\
	} while (0)

static inline unsigned int be_trace_set_level(unsigned int level)
{
	unsigned int old_level = trace_level;
	trace_level = level;
	return old_level;
}

#define be_trace_get_level() 	trace_level
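
/*
 * A minimal sketch (not part of the driver API) of how the trace
 * facility is intended to be used: temporarily widen the trace mask,
 * emit a warning-level message, then restore the previous mask.
 */
static inline void be_trace_usage_example(int status)
{
	unsigned int old_level = be_trace_set_level(DL_ERR | DL_WARN);

	TRACE(DL_WARN, "unexpected completion status %d", status);
	be_trace_set_level(old_level);
}
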
/*
 * Returns number of pages spanned by the size of data
 * starting at the given address.
 */
#define PAGES_SPANNED(_address, _size) \
   ((u32)((((size_t)(_address) & (PAGE_SIZE - 1)) + \
		(_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(_addr_) ((size_t)(_addr_) & (PAGE_SIZE-1))
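
/*
 * Worked example (illustrative only): with 4 KiB pages, a 6000-byte
 * buffer whose virtual address falls 100 bytes into a page gives
 * OFFSET_IN_PAGE(va) == 100 and
 * PAGES_SPANNED(va, 6000) == (100 + 6000 + 4095) >> 12 == 2.
 */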

/*
 * Circular subtract.
 * Returns a - b assuming a circular number system, where a and b are
 * in the range [0, max-1]. If a == b, zero is returned, so the
 * highest value possible from this subtraction is max-1.
 */
static inline u32 be_subc(u32 a, u32 b, u32 max)
{
	ASSERT(a <= max && b <= max);
	ASSERT(max > 0);
	return a >= b ? (a - b) : (max - b + a);
}

static inline u32 be_addc(u32 a, u32 b, u32 max)
{
	ASSERT(a < max);
	ASSERT(max > 0);
	return (max - a > b) ? (a + b) : (b + a - max);
}
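
/*
 * Worked example (illustrative, not part of the driver API): with a
 * ring of 8 entries, be_addc(6, 3, 8) wraps to 1, and the circular
 * distance back to index 6 is be_subc(1, 6, 8) == 3.
 */
static inline u32 be_circular_index_example(void)
{
	u32 pidx = be_addc(6, 3, 8);	/* (6 + 3) mod 8 == 1 */

	return be_subc(pidx, 6, 8);	/* == 3 */
}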

/* descriptor for physically contiguous memory used for a ring */
struct ring_desc {
	u32 length;	/* length in bytes */
	void *va; 	/* virtual address */
	u64 pa;		/* bus address */
};

/*
 * This structure stores information about a ring shared between hardware
 * and software.  Each ring is allocated by the driver in the uncached
 * extension and mapped into BladeEngine's unified table.
 */
struct mp_ring {
	u32 pages;		/* queue size in pages */
	u32 id;			/* queue id assigned by beklib */
	u32 num;		/* number of elements in queue */
	u32 cidx;		/* consumer index */
	u32 pidx;		/* producer index -- not used by most rings */
	u32 itemSize;		/* size in bytes of one object */

	void *va;		/* The virtual address of the ring.
				   This should be last to allow 32 & 64
				   bit debugger extensions to work. */
};

/*----------- amap bit field get / set macros and functions -----*/
/*
 * Structures defined in the map header files (under fw/amap/) with names
 * of the form BE_<name>_AMAP are pseudo structures with members
 * of type u8. These structures are templates that are used in
 * conjunction with the structures named <name>_AMAP to calculate the
 * bit masks and bit offsets needed to get or set bit fields in
 * structures. The <name>_AMAP structures are arrays of 32-bit words
 * and have the correct size.  The following macros provide convenient
 * ways to get and set the various members in the structures without
 * using structures with bit fields.
 * Always use the AMAP_GET_BITS_PTR and AMAP_SET_BITS_PTR macros
 * to extract and set the various members.
 */

/*
 * Returns a bit mask for the register that is NOT shifted into location.
 * That means return values always look like: 0x1, 0xFF, 0x7FF, etc...
 */
static inline u32 amap_mask(u32 bit_size)
{
	return bit_size == 32 ? 0xFFFFFFFF : (1u << bit_size) - 1;
}

#define AMAP_BIT_MASK(_struct_, field)       \
	amap_mask(AMAP_BIT_SIZE(_struct_, field))

/*
 * non-optimized set bits function. First clears the bits and then assigns them.
 * This does not require knowledge of the particular DWORD you are setting.
 * e.g. AMAP_SET_BITS_PTR (struct, field1, &contextMemory, 123);
 */
static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *)ptr;
	*(dw + dw_offset) &= ~(mask << offset);
	*(dw + dw_offset) |= (mask & value) << offset;
}

#define AMAP_SET_BITS_PTR(_struct_, field, _structPtr_, val)	\
	amap_set(_structPtr_, AMAP_WORD_OFFSET(_struct_, field),\
		AMAP_BIT_MASK(_struct_, field),			\
		AMAP_BIT_OFFSET(_struct_, field), val)
/*
 * Non-optimized routine that gets the bits without knowing the correct DWORD.
 * e.g. fieldValue = AMAP_GET_BITS_PTR (struct, field1, &contextMemory);
 */
static inline u32
amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *)ptr;
	return mask & (*(dw + dw_offset) >> offset);
}
#define AMAP_GET_BITS_PTR(_struct_, field, _structPtr_)			\
	amap_get(_structPtr_, AMAP_WORD_OFFSET(_struct_, field),	\
		AMAP_BIT_MASK(_struct_, field),				\
		AMAP_BIT_OFFSET(_struct_, field))

/* Returns 0-31 representing bit offset within a DWORD of a bitfield. */
#define AMAP_BIT_OFFSET(_struct_, field)                  \
	(offsetof(struct BE_ ## _struct_ ## _AMAP, field) % 32)

/* Returns 0-n representing DWORD offset of bitfield within the structure. */
#define AMAP_WORD_OFFSET(_struct_, field)  \
		  (offsetof(struct BE_ ## _struct_ ## _AMAP, field)/32)

/* Returns size of bitfield in bits. */
#define AMAP_BIT_SIZE(_struct_, field) \
		sizeof(((struct BE_ ## _struct_ ## _AMAP*)0)->field)
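
/*
 * Usage sketch (illustrative; "EXAMPLE" and "foo" are hypothetical names):
 * for a map template "struct BE_EXAMPLE_AMAP" with a bit field "foo",
 * a caller would write
 *
 *	AMAP_SET_BITS_PTR(EXAMPLE, foo, &ctxt, 3);
 *	val = AMAP_GET_BITS_PTR(EXAMPLE, foo, &ctxt);
 *
 * where "ctxt" is the corresponding struct EXAMPLE_AMAP (an array of
 * 32-bit words) that is actually shared with hardware.
 */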

struct be_mcc_wrb_response_copy {
	u16 length;		/* bytes in response */
	u16 fwcmd_offset;	/* offset within the wrb of the response */
	void *va;		/* user's va to copy response into */
};
typedef void (*mcc_wrb_cqe_callback) (void *context, int status,
				struct MCC_WRB_AMAP *optional_wrb);
struct be_mcc_wrb_context {

	mcc_wrb_cqe_callback internal_cb;	/* Function to call on
						completion */
	void *internal_cb_context;	/* Parameter to pass
						   to completion function */

	mcc_wrb_cqe_callback cb;	/* Function to call on completion */
	void *cb_context;	/* Parameter to pass to completion function */

	int *users_final_status;	/* pointer to a local
						variable for synchronous
						commands */
	struct MCC_WRB_AMAP *wrb;	/* pointer to original wrb for embedded
						commands only */
	struct list_head next;	/* links context structs together in
				   free list */

	struct be_mcc_wrb_response_copy copy;	/* Optional parameters to copy
					   embedded response to user's va */

#if defined(BE_DEBUG)
	u16 subsystem, opcode;	/* Track this FWCMD for debug builds. */
	struct MCC_WRB_AMAP *ring_wrb;
	u32 consumed_count;
#endif
};

/*
 * Represents a function object for network or storage.  This is used
 * to manage per-function resources like MCC CQs, etc.
 */
struct be_function_object {

	u32 magic;		/* magic for detecting memory corruption */

	/* PCI BAR mapped addresses */
	u8 __iomem *csr_va;	/* CSR */
	u8 __iomem *db_va;	/* Door Bell */
	u8 __iomem *pci_va;	/* PCI config space */
	u32 emulate;		/* if set, MPU is not available.
				  Emulate everything.     */
	u32 pend_queue_driving;	/* if set, drive the queued WRBs
				   after releasing the WRB lock */

	spinlock_t post_lock;	/* lock for verifying one thread posting wrbs */
	spinlock_t cq_lock;	/* lock for verifying one thread
				   processing cq */
	spinlock_t mcc_context_lock;	/* lock for protecting mcc
					   context free list */
	unsigned long post_irq;
	unsigned long cq_irq;

	u32 type;
	u32 pci_function_number;

	struct be_mcc_object *mcc;	/* mcc rings. */

	struct {
		struct MCC_MAILBOX_AMAP *va;	/* VA to the mailbox */
		u64 pa;	/* PA to the mailbox */
		u32 length;	/* byte length of mailbox */

		/* One default context struct used for posting at
		 * least one MCC_WRB
		 */
		struct be_mcc_wrb_context default_context;
		bool default_context_allocated;
	} mailbox;

	struct {
		/* Wake-on-LAN configuration */
		u32 wol_bitmask;	/* bits 0-3 are set if the
					   corresponding index is enabled */
	} config;

	struct BE_FIRMWARE_CONFIG fw_config;
};

/*
 * Represents an event queue.
 */
struct be_eq_object {
	u32 magic;
	atomic_t ref_count;

	struct be_function_object *parent_function;

	struct list_head eq_list;
	struct list_head cq_list_head;

	u32 eq_id;
	void *cb_context;
};

/*
 * Manages a completion queue.
 */
struct be_cq_object {
	u32 magic;
	atomic_t ref_count;

	struct be_function_object *parent_function;
	struct be_eq_object *eq_object;

	struct list_head cq_list;
	struct list_head cqlist_for_eq;

	void *va;
	u32 num_entries;

	void *cb_context;

	u32 cq_id;
};

/*
 * Manages an ethernet send queue.
 */
struct be_ethsq_object {
	u32 magic;

	struct list_head list;

	struct be_function_object *parent_function;
	struct be_cq_object *cq_object;
	u32 bid;
};

/*
 * Manages an ethernet receive queue.
 */
struct be_ethrq_object {
	u32 magic;
	struct list_head list;
	struct be_function_object *parent_function;
	u32 rid;
	struct be_cq_object *cq_object;
	struct be_cq_object *rss_cq_object[4];
};

/*
 * Manages an MCC.
 */
typedef void (*mcc_async_event_callback) (void *context, u32 event_code,
				void *event);
struct be_mcc_object {
	u32 magic;

	struct be_function_object *parent_function;
	struct list_head mcc_list;

	struct be_cq_object *cq_object;

	/* Async event callback for MCC CQ. */
	mcc_async_event_callback async_cb;
	void *async_context;

	struct {
		struct be_mcc_wrb_context *base;
		u32 num;
		struct list_head list_head;
	} wrb_context;

	struct {
		struct ring_desc *rd;
		struct mp_ring ring;
	} sq;

	struct {
		struct mp_ring ring;
	} cq;

	u32 processing;		/* flag indicating that one thread
				   is processing CQ */
	u32 rearm;		/* doorbell rearm setting to make sure
				   the active processing thread rearms
				   the CQ if any of the threads
				   requested it */

	struct list_head backlog;
	u32 backlog_length;
	u32 driving_backlog;
	u32 consumed_index;
};


/* Queue context header -- the required software information for
 * queueing a WRB.
 */
struct be_queue_driver_context {
	mcc_wrb_cqe_callback internal_cb;	/* Function to call on
						   completion */
	void *internal_cb_context;	/* Parameter to pass
						   to completion function */

	mcc_wrb_cqe_callback cb;	/* Function to call on completion */
	void *cb_context;	/* Parameter to pass to completion function */

	struct be_mcc_wrb_response_copy copy;	/* Optional parameters to copy
					   embedded response to user's va */
	void *optional_fwcmd_va;
	struct list_head list;
	u32 bytes;
};

/*
 * Common MCC WRB header that all commands require.
 */
struct be_mcc_wrb_header {
	u8 rsvd[offsetof(struct BE_MCC_WRB_AMAP, payload)/8];
};

/*
 * All non-embedded commands supported by hwlib functions allow only
 * 1 SGE.  This queue context handles them all.
 */
struct be_nonembedded_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct MCC_SGE_AMAP sge[1];
};

/*
 * ------------------------------------------------------------------------
 *  This section contains the specific queue struct for each command.
 *  The user could always provide a be_generic_q_ctxt but this is a
 *  rather large struct.  By using the specific struct, memory consumption
 *  can be reduced.
 * ------------------------------------------------------------------------
 */

struct be_link_status_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_NTWK_LINK_STATUS_QUERY fwcmd;
};

struct be_multicast_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_NTWK_MULTICAST_SET fwcmd;
};


struct be_vlan_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_NTWK_VLAN_CONFIG fwcmd;
};

struct be_promiscuous_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_ETH_PROMISCUOUS fwcmd;
};

struct be_force_failover_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_FORCE_FAILOVER fwcmd;
};


struct be_rxf_filter_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_NTWK_RX_FILTER fwcmd;
};

struct be_eq_modify_delay_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct FWCMD_COMMON_MODIFY_EQ_DELAY fwcmd;
};

/*
 * The generic context is the largest size that would be required.
 * It is the software context plus an entire WRB.
 */
struct be_generic_q_ctxt {
	struct be_queue_driver_context context;
	struct be_mcc_wrb_header wrb_header;
	struct MCC_WRB_PAYLOAD_AMAP payload;
};

/*
 * Types for the BE_QUEUE_CONTEXT object.
 */
#define BE_QUEUE_INVALID	(0)
#define BE_QUEUE_LINK_STATUS	(0xA006)
#define BE_QUEUE_ETH_STATS	(0xA007)
#define BE_QUEUE_TPM_STATS	(0xA008)
#define BE_QUEUE_TCP_STATS	(0xA009)
#define BE_QUEUE_MULTICAST	(0xA00A)
#define BE_QUEUE_VLAN		(0xA00B)
#define BE_QUEUE_RSS		(0xA00C)
#define BE_QUEUE_FORCE_FAILOVER	(0xA00D)
#define BE_QUEUE_PROMISCUOUS	(0xA00E)
#define BE_QUEUE_WAKE_ON_LAN	(0xA00F)
#define BE_QUEUE_NOP		(0xA010)

/* --- BE_FUNCTION_ENUM --- */
#define BE_FUNCTION_TYPE_ISCSI          (0)
#define BE_FUNCTION_TYPE_NETWORK        (1)
#define BE_FUNCTION_TYPE_ARM            (2)

/* --- BE_ETH_TX_RING_TYPE_ENUM --- */
#define BE_ETH_TX_RING_TYPE_FORWARDING  (1) 	/* Ether ring for forwarding */
#define BE_ETH_TX_RING_TYPE_STANDARD    (2)	/* Ether ring for sending */
						/* network packets. */
#define BE_ETH_TX_RING_TYPE_BOUND       (3)	/* Ethernet ring for sending */
						/* network packets, bound */
						/* to a physical port. */
/*
 * ----------------------------------------------------------------------
 *   API MACROS
 * ----------------------------------------------------------------------
 */
#define BE_FWCMD_NAME(_short_name_)     struct FWCMD_##_short_name_
#define BE_OPCODE_NAME(_short_name_)    OPCODE_##_short_name_
#define BE_SUBSYSTEM_NAME(_short_name_) SUBSYSTEM_##_short_name_


#define BE_PREPARE_EMBEDDED_FWCMD(_pfob_, _wrb_, _short_name_)	\
	((BE_FWCMD_NAME(_short_name_) *)				\
	be_function_prepare_embedded_fwcmd(_pfob_, _wrb_,	\
		sizeof(BE_FWCMD_NAME(_short_name_)),		\
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
		BE_OPCODE_NAME(_short_name_),				\
		BE_SUBSYSTEM_NAME(_short_name_)));

#define BE_PREPARE_NONEMBEDDED_FWCMD(_pfob_, _wrb_, _iva_, _ipa_, _short_name_)\
	((BE_FWCMD_NAME(_short_name_) *)				\
	be_function_prepare_nonembedded_fwcmd(_pfob_, _wrb_, (_iva_), (_ipa_), \
		sizeof(BE_FWCMD_NAME(_short_name_)),		\
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.request), \
		FIELD_SIZEOF(BE_FWCMD_NAME(_short_name_), params.response), \
		BE_OPCODE_NAME(_short_name_),				\
		BE_SUBSYSTEM_NAME(_short_name_)));
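
/*
 * Usage sketch (illustrative; it assumes the COMMON_GET_FW_VERSION FWCMD
 * from the generated fwcmd headers):
 *
 *	wrb = be_function_peek_mcc_wrb(pfob);
 *	fwcmd = BE_PREPARE_EMBEDDED_FWCMD(pfob, wrb, COMMON_GET_FW_VERSION);
 *
 * "fwcmd" then points at a struct FWCMD_COMMON_GET_FW_VERSION laid out
 * inside the WRB, ready for its request parameters to be filled in
 * before the WRB is posted.
 */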

int be_function_object_create(u8 __iomem *csr_va, u8 __iomem *db_va,
	u8 __iomem *pci_va, u32 function_type, struct ring_desc *mailbox_rd,
	  struct be_function_object *pfob);

int be_function_object_destroy(struct be_function_object *pfob);
int be_function_cleanup(struct be_function_object *pfob);


int be_function_get_fw_version(struct be_function_object *pfob,
	struct FWCMD_COMMON_GET_FW_VERSION_RESPONSE_PAYLOAD *fw_version,
	mcc_wrb_cqe_callback cb, void *cb_context);


int be_eq_modify_delay(struct be_function_object *pfob,
		   u32 num_eq, struct be_eq_object **eq_array,
		   u32 *eq_delay_array, mcc_wrb_cqe_callback cb,
		   void *cb_context,
		   struct be_eq_modify_delay_q_ctxt *q_ctxt);



int be_eq_create(struct be_function_object *pfob,
	     struct ring_desc *rd, u32 eqe_size, u32 num_entries,
	     u32 watermark, u32 timer_delay, struct be_eq_object *eq_object);

int be_eq_destroy(struct be_eq_object *eq);

int be_cq_create(struct be_function_object *pfob,
	struct ring_desc *rd, u32 length,
	bool solicited_eventable, bool no_delay,
	u32 wm_thresh, struct be_eq_object *eq_object,
	struct be_cq_object *cq_object);

int be_cq_destroy(struct be_cq_object *cq);

int be_mcc_ring_create(struct be_function_object *pfob,
		   struct ring_desc *rd, u32 length,
		   struct be_mcc_wrb_context *context_array,
		   u32 num_context_entries,
		   struct be_cq_object *cq, struct be_mcc_object *mcc);
int be_mcc_ring_destroy(struct be_mcc_object *mcc_object);

int be_mcc_process_cq(struct be_mcc_object *mcc_object, bool rearm);

int be_mcc_add_async_event_callback(struct be_mcc_object *mcc_object,
		mcc_async_event_callback cb, void *cb_context);

int be_pci_soft_reset(struct be_function_object *pfob);


int be_drive_POST(struct be_function_object *pfob);


int be_eth_sq_create(struct be_function_object *pfob,
		struct ring_desc *rd, u32 length_in_bytes,
		u32 type, u32 ulp, struct be_cq_object *cq_object,
		struct be_ethsq_object *eth_sq);

struct be_eth_sq_parameters {
	u32 port;
	u32 rsvd0[2];
};

int be_eth_sq_create_ex(struct be_function_object *pfob,
		    struct ring_desc *rd, u32 length_in_bytes,
		    u32 type, u32 ulp, struct be_cq_object *cq_object,
		    struct be_eth_sq_parameters *ex_parameters,
		    struct be_ethsq_object *eth_sq);
int be_eth_sq_destroy(struct be_ethsq_object *eth_sq);

int be_eth_set_flow_control(struct be_function_object *pfob,
			bool txfc_enable, bool rxfc_enable);

int be_eth_get_flow_control(struct be_function_object *pfob,
			bool *txfc_enable, bool *rxfc_enable);
int be_eth_set_qos(struct be_function_object *pfob, u32 max_bps, u32 max_pps);

int be_eth_get_qos(struct be_function_object *pfob, u32 *max_bps, u32 *max_pps);

int be_eth_set_frame_size(struct be_function_object *pfob,
		      u32 *tx_frame_size, u32 *rx_frame_size);

int be_eth_rq_create(struct be_function_object *pfob,
		 struct ring_desc *rd, struct be_cq_object *cq_object,
		 struct be_cq_object *bcmc_cq_object,
		 struct be_ethrq_object *eth_rq);

int be_eth_rq_destroy(struct be_ethrq_object *eth_rq);

int be_eth_rq_destroy_options(struct be_ethrq_object *eth_rq, bool flush,
		mcc_wrb_cqe_callback cb, void *cb_context);
int be_eth_rq_set_frag_size(struct be_function_object *pfob,
		u32 new_frag_size_bytes, u32 *actual_frag_size_bytes);
int be_eth_rq_get_frag_size(struct be_function_object *pfob,
						u32 *frag_size_bytes);

void *be_function_prepare_embedded_fwcmd(struct be_function_object *pfob,
		   struct MCC_WRB_AMAP *wrb,
		   u32 payload_length, u32 request_length,
		   u32 response_length, u32 opcode, u32 subsystem);
void *be_function_prepare_nonembedded_fwcmd(struct be_function_object *pfob,
	struct MCC_WRB_AMAP *wrb, void *fwcmd_header_va, u64 fwcmd_header_pa,
	u32 payload_length, u32 request_length, u32 response_length,
	u32 opcode, u32 subsystem);


struct MCC_WRB_AMAP *
be_function_peek_mcc_wrb(struct be_function_object *pfob);

int be_rxf_mac_address_read_write(struct be_function_object *pfob,
	      bool port1, bool mac1, bool mgmt,
	      bool write, bool permanent, u8 *mac_address,
	      mcc_wrb_cqe_callback cb,
	      void *cb_context);

int be_rxf_multicast_config(struct be_function_object *pfob,
			bool promiscuous, u32 num, u8 *mac_table,
			mcc_wrb_cqe_callback cb,
			void *cb_context,
			struct be_multicast_q_ctxt *q_ctxt);

int be_rxf_vlan_config(struct be_function_object *pfob,
	   bool promiscuous, u32 num, u16 *vlan_tag_array,
	   mcc_wrb_cqe_callback cb, void *cb_context,
	   struct be_vlan_q_ctxt *q_ctxt);


int be_rxf_link_status(struct be_function_object *pfob,
		   struct BE_LINK_STATUS *link_status,
		   mcc_wrb_cqe_callback cb,
		   void *cb_context,
		   struct be_link_status_q_ctxt *q_ctxt);


int be_rxf_query_eth_statistics(struct be_function_object *pfob,
		struct FWCMD_ETH_GET_STATISTICS *va_for_fwcmd,
		u64 pa_for_fwcmd, mcc_wrb_cqe_callback cb,
		void *cb_context,
		struct be_nonembedded_q_ctxt *q_ctxt);

int be_rxf_promiscuous(struct be_function_object *pfob,
		   bool enable_port0, bool enable_port1,
		   mcc_wrb_cqe_callback cb, void *cb_context,
		   struct be_promiscuous_q_ctxt *q_ctxt);


int be_rxf_filter_config(struct be_function_object *pfob,
		     struct NTWK_RX_FILTER_SETTINGS *settings,
		     mcc_wrb_cqe_callback cb,
		     void *cb_context,
		     struct be_rxf_filter_q_ctxt *q_ctxt);

/*
 * ------------------------------------------------------
 *  internal functions used by hwlib
 * ------------------------------------------------------
 */


int be_function_ring_destroy(struct be_function_object *pfob,
		       u32 id, u32 ring_type, mcc_wrb_cqe_callback cb,
		       void *cb_context,
		       mcc_wrb_cqe_callback internal_cb,
		       void *internal_callback_context);

int be_function_post_mcc_wrb(struct be_function_object *pfob,
		struct MCC_WRB_AMAP *wrb,
		struct be_generic_q_ctxt *q_ctxt,
		mcc_wrb_cqe_callback cb, void *cb_context,
		mcc_wrb_cqe_callback internal_cb,
		void *internal_cb_context, void *optional_fwcmd_va,
		struct be_mcc_wrb_response_copy *response_copy);

int be_function_queue_mcc_wrb(struct be_function_object *pfob,
			  struct be_generic_q_ctxt *q_ctxt);

/*
 * ------------------------------------------------------
 *  MCC QUEUE
 * ------------------------------------------------------
 */

int be_mpu_init_mailbox(struct be_function_object *pfob, struct ring_desc *rd);


struct MCC_WRB_AMAP *
_be_mpu_peek_ring_wrb(struct be_mcc_object *mcc, bool driving_queue);

struct be_mcc_wrb_context *
_be_mcc_allocate_wrb_context(struct be_function_object *pfob);

void _be_mcc_free_wrb_context(struct be_function_object *pfob,
			 struct be_mcc_wrb_context *context);

int _be_mpu_post_wrb_mailbox(struct be_function_object *pfob,
	 struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);

int _be_mpu_post_wrb_ring(struct be_mcc_object *mcc,
	struct MCC_WRB_AMAP *wrb, struct be_mcc_wrb_context *wrb_context);

void be_drive_mcc_wrb_queue(struct be_mcc_object *mcc);


/*
 * ------------------------------------------------------
 *  Ring Sizes
 * ------------------------------------------------------
 */
static inline u32 be_ring_encoding_to_length(u32 encoding, u32 object_size)
{

	ASSERT(encoding != 1);	/* 1 is rsvd */
	ASSERT(encoding < 16);
	ASSERT(object_size > 0);

	if (encoding == 0)	/* 32k deep */
		encoding = 16;

	return (1 << (encoding - 1)) * object_size;
}

static inline
u32 be_ring_length_to_encoding(u32 length_in_bytes, u32 object_size)
{

	u32 count, encoding;

	ASSERT(object_size > 0);
	ASSERT(length_in_bytes % object_size == 0);

	count = length_in_bytes / object_size;

	ASSERT(count > 1);
	ASSERT(count <= 32 * 1024);
	ASSERT(length_in_bytes <= 8 * PAGE_SIZE); /* max ring size in UT */

	encoding = __ilog2_u32(count) + 1;

	if (encoding == 16)
		encoding = 0;	/* 32k deep */

	return encoding;
}
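
/*
 * Worked example (illustrative, not part of the driver API): a ring of
 * 256 entries of 16-byte objects is 4096 bytes long and uses ring
 * encoding 9, since 1 << (9 - 1) == 256.
 */
static inline u32 be_ring_encoding_example(void)
{
	u32 encoding = be_ring_length_to_encoding(256 * 16, 16);	/* 9 */

	return be_ring_encoding_to_length(encoding, 16);	/* 4096 */
}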

void be_rd_to_pa_list(struct ring_desc *rd, struct PHYS_ADDR *pa_list,
						u32 max_num);
#endif /* __hwlib_h__ */