# Chelsio T6 HASHFILTER configuration file.
#
# Copyright (C) 2014-2017 Chelsio Communications. All rights reserved.
#
# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
# TO ADAPTERS.

# This file provides the default, power-on configuration for 2-port T6-based
# adapters shipped from the factory. These defaults are designed to address
# the needs of the vast majority of Terminator customers. The basic idea is to
# have a default configuration which allows a customer to plug a Terminator
# adapter in and have it work regardless of OS, driver or application except in
# the most unusual and/or demanding customer applications.
#
# Many of the Terminator resources which are described by this configuration
# are finite. This requires balancing the configuration/operation needs of
# device drivers across OSes and a large number of customer applications.
#
# Some of the more important resources to allocate and their constraints are:
#   1. Virtual Interfaces: 256.
#   2. Ingress Queues with Free Lists: 1024.
#   3. Egress Queues: 128K.
#   4. MSI-X Vectors: 1088.
#   5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
#      address matching on Ingress Packets.
#
# Some of the important OS/Driver resource needs are:
#   6. Some OS Drivers will manage all resources through a single Physical
#      Function (currently PF4 but it could be any Physical Function).
#   7. Some OS Drivers will manage different ports and functions (NIC,
#      storage, etc.) on different Physical Functions. For example, NIC
#      functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
#
# Some of the customer application needs which need to be accommodated:
#   8. Some customers will want to support large CPU count systems with
#      good scaling. Thus, we'll need to accommodate a number of
#      Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
#      to be involved per port and per application function. For example,
#      in the case where all ports and application functions will be
#      managed via a single Unified PF and we want to accommodate scaling up
#      to 8 CPUs, we would want:
#
#          2 ports *
#          3 application functions (NIC, FCoE, iSCSI) per port *
#          16 Ingress Queue/MSI-X Vectors per application function
#
#      for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
#      (Plus a few for Firmware Event Queues, etc.)
#
#   9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
#      Machines to directly access T6 functionality via SR-IOV Virtual Functions
#      and "PCI Device Passthrough" -- this is especially true for the NIC
#      application functionality.

# Global configuration settings.

[global]
rss_glb_config_mode = basicvirtual
rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp

pl_timeout_value = 200                  # the timeout value in units of us

# The following Scatter Gather Engine (SGE) settings assume a 4KB Host
# Page Size and a 64B L1 Cache Line Size. They program the
# EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
# If a Master PF Driver finds itself on a machine with different
# parameters, then the Master PF Driver is responsible for initializing
# these parameters to appropriate values.
#
# Notes:
#  1. The Free List Buffer Sizes below are raw and the firmware will
#     round them up to the Ingress Padding Boundary.
#  2. The SGE Timer Values below are expressed in microseconds.
#     The firmware will convert these values to Core Clock Ticks when
#     it processes the configuration parameters.

reg[0x1008] = 0x40800/0x21c70           # SGE_CONTROL
reg[0x100c] = 0x22222222                # SGE_HOST_PAGE_SIZE
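#
# A worked sketch of the Host Page Size encoding above (assuming the usual
# T4/T5/T6 layout, where SGE_HOST_PAGE_SIZE carries a 4-bit field per PF
# holding log2(Host Page Size) - 10): with 4KB pages each field is
# 12 - 10 = 2, giving 0x22222222 for all eight PFs. A hypothetical host
# with 64KB pages would instead need:
#
# reg[0x100c] = 0x66666666              # SGE_HOST_PAGE_SIZE, 64KB (16 - 10 = 6)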
reg[0x10a0] = 0x01040810                # SGE_INGRESS_RX_THRESHOLD
reg[0x1044] = 4096                      # SGE_FL_BUFFER_SIZE0
reg[0x1048] = 65536                     # SGE_FL_BUFFER_SIZE1
reg[0x104c] = 1536                      # SGE_FL_BUFFER_SIZE2
reg[0x1050] = 9024                      # SGE_FL_BUFFER_SIZE3
reg[0x1054] = 9216                      # SGE_FL_BUFFER_SIZE4
reg[0x1058] = 2048                      # SGE_FL_BUFFER_SIZE5
reg[0x105c] = 128                       # SGE_FL_BUFFER_SIZE6
reg[0x1060] = 8192                      # SGE_FL_BUFFER_SIZE7
reg[0x1064] = 16384                     # SGE_FL_BUFFER_SIZE8
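#
# Per Note 1 above, the firmware rounds these raw Free List Buffer Sizes up
# to the Ingress Padding Boundary (64B here). For example, a hypothetical raw
# value of 9000 would be rounded up to 9024; the sizes above are already
# multiples of 64 and are used as-is.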

sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
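#
# Per Note 2 above, the firmware converts these microsecond values to Core
# Clock Ticks. As a worked example with a hypothetical 500 MHz Core Clock,
# the first entry would become 5us * 500 ticks/us = 2500 ticks.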
reg[0x10c4] = 0x20000000/0x20000000     # GK_CONTROL, enable 5th thread

# Set the SGE Doorbell Queue Timer "tick" to 5us and initialize
# the Timer Table to a default set of values (which are multiples
# of the Timer Tick).
sge_dbq_timertick = 5
sge_dbq_timer = 1, 2, 3, 5, 7, 9, 12, 16
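#
# With the 5us tick above, the table multipliers 1, 2, 3, 5, 7, 9, 12, 16
# correspond to Doorbell Queue Timer values of 5, 10, 15, 25, 35, 45, 60
# and 80 microseconds respectively.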

# enable TP_OUT_CONFIG.IPIDSPLITMODE
reg[0x7d04] = 0x00010000/0x00010000

reg[0x7dc0] = 0x0e2f8849                # TP_SHIFT_CNT

# Tick granularities in kbps
tsch_ticks = 100000, 10000, 1000, 10

# TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
# filter control: compact, fcoemask
# server sram   : srvrsram
# filter tuples : fragmentation, mpshittype, macmatch, ethertype,
#                 protocol, tos, vlan, vnic_id, port, fcoe
# valid filterModes are described in the Terminator 5 Data Book
# vnicMode = pf_vf                      # default. Other values are outer_vlan, encapsulation
filterMode = fragmentation, mpshittype, protocol, vlan, port, fcoe

# filter tuples enforced in LE active region (equal to or a subset of filterMode)
filterMask = port, protocol
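#
# Illustrative examples of the subset rule above: with this filterMode, a
# filterMask of "vlan, port" or just "protocol" would also be legal (each is
# a subset of filterMode), while "ethertype, port" would not be, since
# ethertype is not among the filterMode tuples.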

# Percentage of dynamic memory (in either the EDRAM or external MEM)
# to use for TP RX payload

# TP RX payload page size
tp_pmrx_pagesize = 16K

# TP number of RX channels
tp_nrxch = 0                            # 0 (auto) = 1

# Percentage of dynamic memory (in either the EDRAM or external MEM)
# to use for TP TX payload

# TP TX payload page size
tp_pmtx_pagesize = 64K

# TP number of TX channels
tp_ntxch = 0                            # 0 (auto) = number of ports

# TP OFLD MTUs
tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
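#
# The MTU table holds 16 entries in ascending order; the 16 values above
# fill it completely. TP uses the largest entry that fits a connection's
# path MTU, e.g. the 1500 entry on standard Ethernet or the 9000 entry on
# a jumbo-frame link.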

# enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
reg[0x7d04] = 0x00010008/0x00010008

reg[0x7d08] = 0x00000800/0x00000800     # set IssFromCplEnable

reg[0x7d48] = 0x00000000/0x00000400     # clear EnableFLMError

reg[0x7d4c] = 0x00010000/0x00010000     # set DisableNewPshFlag

reg[0x7d60] = 0x06000000/0x07000000     # set InitCWND to 6

reg[0x7d6c] = 0x28000000/0x28000000     # set EnableTnlCngHdr

# set RxMacCheck (Note: needed only for hash filter)

reg[0x19c04] = 0x00000000/0x02040000    # LE IPv4 compression disabled,
                                        # EXTN_HASH_IPV4 disabled

reg[0x19c74] = 0x00000004/0x0000000f    # TCAM_ACTV_HIT = 4

reg[0x19c78] = 0x08000000/0x0e000000    # HASH_ACTV_HIT = 4

reg[0x19c28] = 0x00800000/0x01f00000    # LE Hash bucket size 8

mc_mode_brc[0] = 0                      # mc0 - 1: enable BRC, 0: enable RBC, 2: enable BRBC
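#
# A second memory controller, where present, would be configured the same
# way (hypothetical line, mirroring the mc0 setting above):
#
# mc_mode_brc[1] = 0                    # mc1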

# Some "definitions" to make the rest of this a bit more readable. We support
# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
# per function per port ...
#
# NMSIX = 1088                  # available MSI-X Vectors
# NVI = 256                     # available Virtual Interfaces
# NMPSTCAM = 336                # MPS TCAM entries
#
# NCPUS = 16                    # CPUs we want to support scalably
# NFUNCS = 3                    # functions per port (NIC, FCoE, iSCSI)
#
# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
# PF" which many OS Drivers will use to manage most or all functions.
#
# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
# will be specified as the "Ingress Queue Asynchronous Destination Index."
# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
# than or equal to the number of Ingress Queues ...
#
# NVI_NIC = 4                   # NIC access to NPORTS
# NFLIQ_NIC = 32                # NIC Ingress Queues with Free Lists
# NETHCTRL_NIC = 32             # NIC Ethernet Control/TX Queues
# NEQ_NIC = 64                  # NIC Egress Queues (FL, ETHCTRL/TX)
# NMPSTCAM_NIC = 16             # NIC MPS TCAM Entries (NPORTS*4)
# NMSIX_NIC = 32                # NIC MSI-X Interrupt Vectors (FLIQ)
#
# NVI_OFLD = 0                  # Offload uses NIC function to access ports
# NFLIQ_OFLD = 16               # Offload Ingress Queues with Free Lists
# NETHCTRL_OFLD = 0             # Offload Ethernet Control/TX Queues
# NEQ_OFLD = 16                 # Offload Egress Queues (FL)
# NMPSTCAM_OFLD = 0             # Offload MPS TCAM Entries (uses NIC's)
# NMSIX_OFLD = 16               # Offload MSI-X Interrupt Vectors (FLIQ)
#
# NVI_RDMA = 0                  # RDMA uses NIC function to access ports
# NFLIQ_RDMA = 4                # RDMA Ingress Queues with Free Lists
# NETHCTRL_RDMA = 0             # RDMA Ethernet Control/TX Queues
# NEQ_RDMA = 4                  # RDMA Egress Queues (FL)
# NMPSTCAM_RDMA = 0             # RDMA MPS TCAM Entries (uses NIC's)
# NMSIX_RDMA = 4                # RDMA MSI-X Interrupt Vectors (FLIQ)
#
# NEQ_WD = 128                  # Wire Direct TX Queues and FLs
# NETHCTRL_WD = 64              # Wire Direct TX Queues
# NFLIQ_WD = 64                 # Wire Direct Ingress Queues with Free Lists
#
# NVI_ISCSI = 4                 # ISCSI access to NPORTS
# NFLIQ_ISCSI = 4               # ISCSI Ingress Queues with Free Lists
# NETHCTRL_ISCSI = 0            # ISCSI Ethernet Control/TX Queues
# NEQ_ISCSI = 4                 # ISCSI Egress Queues (FL)
# NMPSTCAM_ISCSI = 4            # ISCSI MPS TCAM Entries (NPORTS)
# NMSIX_ISCSI = 4               # ISCSI MSI-X Interrupt Vectors (FLIQ)
#
# NVI_FCOE = 4                  # FCOE access to NPORTS
# NFLIQ_FCOE = 34               # FCOE Ingress Queues with Free Lists
# NETHCTRL_FCOE = 32            # FCOE Ethernet Control/TX Queues
# NEQ_FCOE = 66                 # FCOE Egress Queues (FL)
# NMPSTCAM_FCOE = 32            # FCOE MPS TCAM Entries (NPORTS)
# NMSIX_FCOE = 34               # FCOE MSI-X Interrupt Vectors (FLIQ)
#
# Two extra Ingress Queues per function for Firmware Events and Forwarded
# Interrupts, and two extra interrupts per function for Firmware Events (or a
# Forwarded Interrupt Queue) and General Interrupts.
#
# NFLIQ_EXTRA = 6               # "extra" Ingress Queues 2*NFUNCS (Firmware and
#                               #   Forwarded Interrupts)
# NMSIX_EXTRA = 6               # extra interrupts 2*NFUNCS (Firmware and
#                               #   General Interrupts)
#
# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
# their interrupts forwarded to another set of Forwarded Interrupt Queues.
#
# NVI_HYPERV = 16               # VMs we want to support
# NVIIQ_HYPERV = 2              # Virtual Ingress Queues with Free Lists per VM
# NFLIQ_HYPERV = 40             # VIQs + NCPUS Forwarded Interrupt Queues
# NEQ_HYPERV = 32               # VIQs Free Lists
# NMPSTCAM_HYPERV = 16          # MPS TCAM Entries (NVI_HYPERV)
# NMSIX_HYPERV = 8              # NCPUS Forwarded Interrupt Queues
#
# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
#
# NFLIQ_UNIFIED = 106
# NETHCTRL_UNIFIED = 32
# NMPSTCAM_UNIFIED = 40
#
# The sum of all the MSI-X resources above is 104 MSI-X Vectors but we'll round
# that up to 128 to make sure the Unified PF doesn't run out of resources.
#
# NMSIX_UNIFIED = 128
#
# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
# which is 34 but they're probably safe with 32.
#
# NMSIX_STORAGE = 32
#
# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
# associated with it. Thus, the MSI-X Vector allocations we give to the
# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
# provision many more Virtual Functions than we could if the UnifiedPF were
# one of the per-port Physical Functions ...
#
# All of the below PCI-E parameters are actually stored in various *_init.txt
# files. We include them below essentially as comments.
#
# For PF0-3 we assign 8 vectors each for NIC Ingress Queues of the associated
# ports.
#
# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
#
# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
# storage applications across all four possible ports.
#
# Additionally, since the UnifiedPF isn't one of the per-port Physical
# Functions, we give the UnifiedPF and the PF0-3 Physical Functions
# different PCI Device IDs which will allow Unified and Per-Port Drivers
# to directly select the type of Physical Function to which they wish to be
# attached.
#
# Note that the actual values used for the PCI-E Intellectual Property will be
# 1 less than those below since that's the way it "counts" things. For
# readability, we use the number we actually mean ...
#
# PF0_INT = 8                   # NCPUS
# PF1_INT = 8                   # NCPUS
# PF2_INT = 8                   # NCPUS
# PF3_INT = 8                   # NCPUS
# PF0_3_INT = 32                # PF0_INT + PF1_INT + PF2_INT + PF3_INT
#
# PF4_INT = 128                 # NMSIX_UNIFIED
# PF5_INT = 32                  # NMSIX_STORAGE
# PF6_INT = 32                  # NMSIX_STORAGE
# PF7_INT = 0                   # Nothing Assigned
# PF4_7_INT = 192               # PF4_INT + PF5_INT + PF6_INT + PF7_INT
#
# PF0_7_INT = 224               # PF0_3_INT + PF4_7_INT
#
# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
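#
# That is: 16 VFs/PF * 4 PFs = 64 Virtual Functions in total, versus the
# 17 * 4 = 68 that the MPS TCAM budget would otherwise allow.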

# Some OS Drivers manage all application functions for all ports via PF4.
# Thus we need to provide a large number of resources here. For Egress
# Queues we need to account for both TX Queues as well as Free List Queues
# (because the host is responsible for producing Free List Buffers for the
# hardware to consume).
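#
# For example, in the allocation below: neq = 640 covers the nethctrl = 320
# TX/control Egress Queues plus the 320 Free Lists behind niqflint = 320
# Ingress Queues (320 + 320 = 640).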
[function "4"]
wx_caps = all                           # write/execute permissions for all commands
r_caps = all                            # read permissions for all commands
nvi = 8                                 # NVI_UNIFIED
niqflint = 320                          # NFLIQ_UNIFIED + NFLIQ_WD
nethctrl = 320                          # NETHCTRL_UNIFIED + NETHCTRL_WD
neq = 640                               # NEQ_UNIFIED + NEQ_WD
nexactf = 40                            # NMPSTCAM_UNIFIED
cmask = all                             # access to all channels
pmask = all                             # access to all ports ...
nclip = 384                             # number of clip region entries
nfilter = 496                           # number of filter region entries
nhash = 524288                          # number of hash region entries
nhpfilter = 64                          # number of high priority filter region entries
protocol = nic_hashfilter

# The following function, 1023, is not an actual PCIE function but is used to
# configure and reserve firmware internal resources that come from the global
# resource pool.

[function "1023"]
wx_caps = all                           # write/execute permissions for all commands
r_caps = all                            # read permissions for all commands
nvi = 4                                 # NVI_UNIFIED
cmask = all                             # access to all channels
pmask = all                             # access to all ports ...
nexactf = 8                             # NPORTS + DCBX + ...
nfilter = 16                            # number of filter region entries

# For Virtual functions, we only allow NIC functionality and we only allow
# access to one port (1 << PF). Note that because of limitations in the
# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
# and GTS registers, the number of Ingress and Egress Queues must be a power
# of 2.
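#
# For example (hypothetical per-VF values; the actual per-VF stanza bodies
# are set elsewhere), a VF granted 4 Ingress Queues and 4 Egress Queues
# satisfies the power-of-2 rule above, while a grant of 6 of either would not.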

[function "0/*"]                        # NVF

[function "1/*"]                        # NVF

[function "2/*"]                        # NVF

[function "3/*"]                        # NVF

# MPS features a 196608-byte ingress buffer that is used for ingress buffering
# for packets from the wire as well as the loopback path of the L2 switch. The
# following params control how the buffer memory is distributed and the L2 flow
# control settings:
#
# bg_mem:   %-age of mem to use for port/buffer group
# lpbk_mem: %-age of port/bg mem to use for loopback
# hwm:      high watermark; bytes available when starting to send pause
#           frames (in units of 0.1 MTU)
# lwm:      low watermark; bytes remaining when sending 'unpause' frame
#           (in units of 0.1 MTU)
# dwm:      minimum delta between high and low watermark (in units of 100
#           Bytes)
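#
# A worked example of the watermark units above (hypothetical values): with a
# 1500-byte MTU, 0.1 MTU = 150 bytes, so hwm = 10 would start sending pause
# frames when 10 * 150 = 1500 bytes of buffer remain available, and lwm = 5
# would send the 'unpause' frame with 5 * 150 = 750 bytes remaining.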

dcb = 0                                 # DCB disabled (no PPP / DCBX offload)

checksum = 0x1c3a42cf

# Total resources used by above allocations:
#   Virtual Interfaces: 104
#   Ingress Queues w/ Free Lists and Interrupts: 526
#   MPS TCAM Entries: 336
#   Virtual Functions: 64