1 #ifndef __BDK_CSRS_LMC_H__
2 #define __BDK_CSRS_LMC_H__
3 /* This file is auto-generated. Do not edit */
4 
5 /***********************license start***************
6  * Copyright (c) 2003-2017  Cavium Inc. ([email protected]). All rights
7  * reserved.
8  *
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions are
12  * met:
13  *
14  *   * Redistributions of source code must retain the above copyright
15  *     notice, this list of conditions and the following disclaimer.
16  *
17  *   * Redistributions in binary form must reproduce the above
18  *     copyright notice, this list of conditions and the following
19  *     disclaimer in the documentation and/or other materials provided
20  *     with the distribution.
21 
22  *   * Neither the name of Cavium Inc. nor the names of
23  *     its contributors may be used to endorse or promote products
24  *     derived from this software without specific prior written
25  *     permission.
26 
27  * This Software, including technical data, may be subject to U.S. export  control
28  * laws, including the U.S. Export Administration Act and its  associated
29  * regulations, and may be subject to export or import  regulations in other
30  * countries.
31 
32  * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
33  * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
34  * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
35  * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
36  * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
37  * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
38  * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
39  * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
40  * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
41  * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
42  ***********************license end**************************************/
43 
44 /* FIXME(dhendrix) added to make compiler happy. However this introduces a
45  * circular dependency and the typedef'd bdk_lmcx_modereg_params2_t makes
46  * forward declaration impossible. */
47 //#include <libdram/libdram-config.h>
48 #include <bdk-minimal.h>
49 #include <libbdk-arch/bdk-model.h>
50 
51 /**
52  * @file
53  *
54  * Configuration and status register (CSR) address and type definitions for
55  * Cavium LMC.
56  *
57  * This file is auto generated. Do not edit.
58  *
59  */
60 
61 /**
62  * Enumeration lmc_bar_e
63  *
64  * LMC Base Address Register Enumeration
65  * Enumerates the base address registers.
66  */
67 #define BDK_LMC_BAR_E_LMCX_PF_BAR0(a) (0x87e088000000ll + 0x1000000ll * (a))
68 #define BDK_LMC_BAR_E_LMCX_PF_BAR0_SIZE 0x800000ull
69 #define BDK_LMC_BAR_E_LMCX_PF_BAR4(a) (0x87e088f00000ll + 0x1000000ll * (a))
70 #define BDK_LMC_BAR_E_LMCX_PF_BAR4_SIZE 0x100000ull
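
/*
 * Editorial example (not auto-generated): the BAR macros above stride by
 * 0x1000000 per LMC instance, so for LMC(1):
 *
 *   uint64_t bar0 = BDK_LMC_BAR_E_LMCX_PF_BAR0(1);   // 0x87e089000000
 *   uint64_t size = BDK_LMC_BAR_E_LMCX_PF_BAR0_SIZE; // 0x800000 (8 MB)
 */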
71 
72 /**
73  * Enumeration lmc_int_vec_e
74  *
75  * LMC MSI-X Vector Enumeration
76  * Enumerates the MSI-X interrupt vectors.
77  */
78 #define BDK_LMC_INT_VEC_E_INTS (0)
79 
80 /**
81  * Enumeration lmc_psb_acc_e
82  *
83  * LMC Power Serial Bus Accumulator Enumeration
84  * Enumerates the PSB accumulators for LMC slaves, which correspond to index {b} of
85  * PSBS_SYS()_ACCUM().
86  */
87 #define BDK_LMC_PSB_ACC_E_DCLK_EN (0)
88 #define BDK_LMC_PSB_ACC_E_RSVD3 (3)
89 #define BDK_LMC_PSB_ACC_E_RX_ACTIVE (2)
90 #define BDK_LMC_PSB_ACC_E_TX_ACTIVE (1)
91 
92 /**
93  * Enumeration lmc_psb_event_e
94  *
95  * LMC Power Serial Bus Event Enumeration
96  * Enumerates the event numbers for LMC slaves, which correspond to index {b} of
97  * PSBS_SYS()_EVENT()_CFG.
98  */
99 #define BDK_LMC_PSB_EVENT_E_DCLK_EN (0)
100 #define BDK_LMC_PSB_EVENT_E_RX_ACTIVE (2)
101 #define BDK_LMC_PSB_EVENT_E_TX_ACTIVE (1)
102 
103 /**
104  * Enumeration lmc_seq_sel_e
105  *
106  * LMC Sequence Select Enumeration
107  * Enumerates the different values of LMC()_SEQ_CTL[SEQ_SEL].
108  */
109 #define BDK_LMC_SEQ_SEL_E_INIT (0)
110 #define BDK_LMC_SEQ_SEL_E_MPR_RW (9)
111 #define BDK_LMC_SEQ_SEL_E_MRW (8)
112 #define BDK_LMC_SEQ_SEL_E_OFFSET_TRAINING (0xb)
113 #define BDK_LMC_SEQ_SEL_E_PPR (0xf)
114 #define BDK_LMC_SEQ_SEL_E_RCD_INIT (7)
115 #define BDK_LMC_SEQ_SEL_E_READ_LEVEL (1)
116 #define BDK_LMC_SEQ_SEL_E_RW_TRAINING (0xe)
117 #define BDK_LMC_SEQ_SEL_E_SREF_ENTRY (2)
118 #define BDK_LMC_SEQ_SEL_E_SREF_EXIT (3)
119 #define BDK_LMC_SEQ_SEL_E_VREF_INT (0xa)
120 #define BDK_LMC_SEQ_SEL_E_WRITE_LEVEL (6)
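
/*
 * Editorial example (not auto-generated): a minimal sketch of starting one of
 * the sequences enumerated above. It assumes the LMC()_SEQ_CTL register (and
 * its [SEQ_SEL] and [INIT_START] fields) defined later in this file, the
 * BDK_CSR_MODIFY() helper from bdk-csr.h, and a caller-supplied node; the real
 * bring-up flow adds waits and error checks around this.
 *
 *   BDK_CSR_MODIFY(c, node, BDK_LMCX_SEQ_CTL(0),
 *       c.s.seq_sel = BDK_LMC_SEQ_SEL_E_SREF_ENTRY;   // enter self-refresh
 *       c.s.init_start = 1);                          // kick off the sequence
 */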
121 
122 /**
123  * Register (RSL) lmc#_adr_scramble
124  *
125  * LMC Address Scramble Register
126  * These registers set the aliasing that uses the lowest, legal chip select(s).
127  */
128 union bdk_lmcx_adr_scramble
129 {
130     uint64_t u;
131     struct bdk_lmcx_adr_scramble_s
132     {
133 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
134         uint64_t key                   : 64; /**< [ 63:  0](R/W/H) Scramble key for addresses. Clear this field to zero to disable. To enable
135                                                                  address scrambling, this key should be set to a value generated from a
136                                                                  cryptographically-secure random number generator such as RNM_RANDOM. */
137 #else /* Word 0 - Little Endian */
138         uint64_t key                   : 64; /**< [ 63:  0](R/W/H) Scramble key for addresses. Clear this field to zero to disable. To enable
139                                                                  address scrambling, this key should be set to a value generated from a
140                                                                  cryptographically-secure random number generator such as RNM_RANDOM. */
141 #endif /* Word 0 - End */
142     } s;
143     /* struct bdk_lmcx_adr_scramble_s cn; */
144 };
145 typedef union bdk_lmcx_adr_scramble bdk_lmcx_adr_scramble_t;
146 
147 static inline uint64_t BDK_LMCX_ADR_SCRAMBLE(unsigned long a) __attribute__ ((pure, always_inline));
148 static inline uint64_t BDK_LMCX_ADR_SCRAMBLE(unsigned long a)
149 {
150     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
151         return 0x87e088000328ll + 0x1000000ll * ((a) & 0x3);
152     __bdk_csr_fatal("LMCX_ADR_SCRAMBLE", 1, a, 0, 0, 0);
153 }
154 
155 #define typedef_BDK_LMCX_ADR_SCRAMBLE(a) bdk_lmcx_adr_scramble_t
156 #define bustype_BDK_LMCX_ADR_SCRAMBLE(a) BDK_CSR_TYPE_RSL
157 #define basename_BDK_LMCX_ADR_SCRAMBLE(a) "LMCX_ADR_SCRAMBLE"
158 #define device_bar_BDK_LMCX_ADR_SCRAMBLE(a) 0x0 /* PF_BAR0 */
159 #define busnum_BDK_LMCX_ADR_SCRAMBLE(a) (a)
160 #define arguments_BDK_LMCX_ADR_SCRAMBLE(a) (a),-1,-1,-1
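
/*
 * Editorial example (not auto-generated): a minimal sketch of programming the
 * scramble key, using the union and address function above plus the BDK's
 * BDK_CSR_WRITE() accessor from bdk-csr.h (an assumption of this sketch). The
 * 64-bit value "key" is supplied by the caller from a cryptographically secure
 * source, as the field description recommends.
 *
 *   bdk_lmcx_adr_scramble_t scr;
 *   scr.u = 0;
 *   scr.s.key = key;             // zero would leave scrambling disabled
 *   BDK_CSR_WRITE(node, BDK_LMCX_ADR_SCRAMBLE(0), scr.u);
 */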
161 
162 /**
163  * Register (RSL) lmc#_bank_conflict1
164  *
165  * LMC Bank Conflict1 Counter Register
166  */
167 union bdk_lmcx_bank_conflict1
168 {
169     uint64_t u;
170     struct bdk_lmcx_bank_conflict1_s
171     {
172 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
173         uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments at every DCLK
174                                                                   cycle when LMC could not issue R/W operations to the DRAM due to
175                                                                  bank conflict. This increments when all 8 in-flight buffers are not
176                                                                  utilized. */
177 #else /* Word 0 - Little Endian */
178         uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments at every DCLK
179                                                                   cycle when LMC could not issue R/W operations to the DRAM due to
180                                                                  bank conflict. This increments when all 8 in-flight buffers are not
181                                                                  utilized. */
182 #endif /* Word 0 - End */
183     } s;
184     struct bdk_lmcx_bank_conflict1_cn9
185     {
186 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
187         uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments at every dclk
188                                                                   cycle when LMC could not issue R/W operations to the DRAM due to
189                                                                  bank conflict. This increments when all 8 in-flight buffers are not
190                                                                  utilized. */
191 #else /* Word 0 - Little Endian */
192         uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments at every dclk
193                                                                   cycle when LMC could not issue R/W operations to the DRAM due to
194                                                                  bank conflict. This increments when all 8 in-flight buffers are not
195                                                                  utilized. */
196 #endif /* Word 0 - End */
197     } cn9;
198     /* struct bdk_lmcx_bank_conflict1_s cn81xx; */
199     /* struct bdk_lmcx_bank_conflict1_s cn88xx; */
200     /* struct bdk_lmcx_bank_conflict1_cn9 cn83xx; */
201 };
202 typedef union bdk_lmcx_bank_conflict1 bdk_lmcx_bank_conflict1_t;
203 
204 static inline uint64_t BDK_LMCX_BANK_CONFLICT1(unsigned long a) __attribute__ ((pure, always_inline));
205 static inline uint64_t BDK_LMCX_BANK_CONFLICT1(unsigned long a)
206 {
207     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
208         return 0x87e088000360ll + 0x1000000ll * ((a) & 0x0);
209     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
210         return 0x87e088000360ll + 0x1000000ll * ((a) & 0x1);
211     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=3))
212         return 0x87e088000360ll + 0x1000000ll * ((a) & 0x3);
213     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
214         return 0x87e088000360ll + 0x1000000ll * ((a) & 0x3);
215     __bdk_csr_fatal("LMCX_BANK_CONFLICT1", 1, a, 0, 0, 0);
216 }
217 
218 #define typedef_BDK_LMCX_BANK_CONFLICT1(a) bdk_lmcx_bank_conflict1_t
219 #define bustype_BDK_LMCX_BANK_CONFLICT1(a) BDK_CSR_TYPE_RSL
220 #define basename_BDK_LMCX_BANK_CONFLICT1(a) "LMCX_BANK_CONFLICT1"
221 #define device_bar_BDK_LMCX_BANK_CONFLICT1(a) 0x0 /* PF_BAR0 */
222 #define busnum_BDK_LMCX_BANK_CONFLICT1(a) (a)
223 #define arguments_BDK_LMCX_BANK_CONFLICT1(a) (a),-1,-1,-1
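
/*
 * Editorial example (not auto-generated): sampling the conflict counter before
 * and after a workload to estimate how many DCLK cycles were lost to bank
 * conflicts. Assumes the BDK's BDK_CSR_READ() accessor from bdk-csr.h and a
 * caller-supplied node; run_workload() is a hypothetical placeholder.
 *
 *   bdk_lmcx_bank_conflict1_t before, after;
 *   before.u = BDK_CSR_READ(node, BDK_LMCX_BANK_CONFLICT1(0));
 *   run_workload();
 *   after.u = BDK_CSR_READ(node, BDK_LMCX_BANK_CONFLICT1(0));
 *   uint64_t stalled_dclks = after.s.cnt - before.s.cnt;
 */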
224 
225 /**
226  * Register (RSL) lmc#_bank_conflict2
227  *
228  * LMC Bank Conflict2 Counter Register
229  */
230 union bdk_lmcx_bank_conflict2
231 {
232     uint64_t u;
233     struct bdk_lmcx_bank_conflict2_s
234     {
235 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
236         uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments at every DCLK
237                                                                   cycle when LMC could not issue R/W operations to the DRAM due to
238                                                                  bank conflict. This increments only when there are less than four in-flight
239                                                                  buffers occupied. */
240 #else /* Word 0 - Little Endian */
241         uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments at every DCLK
242                                                                   cycle when LMC could not issue R/W operations to the DRAM due to
243                                                                  bank conflict. This increments only when there are less than four in-flight
244                                                                  buffers occupied. */
245 #endif /* Word 0 - End */
246     } s;
247     /* struct bdk_lmcx_bank_conflict2_s cn9; */
248     /* struct bdk_lmcx_bank_conflict2_s cn81xx; */
249     /* struct bdk_lmcx_bank_conflict2_s cn88xx; */
250     struct bdk_lmcx_bank_conflict2_cn83xx
251     {
252 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
253         uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments at every dclk
254                                                                   cycle when LMC could not issue R/W operations to the DRAM due to
255                                                                  bank conflict. This increments only when there are less than four in-flight
256                                                                  buffers occupied. */
257 #else /* Word 0 - Little Endian */
258         uint64_t cnt                   : 64; /**< [ 63:  0](RO/H) Bank conflict counter. A 64-bit counter that increments at every dclk
259                                                                   cycle when LMC could not issue R/W operations to the DRAM due to
260                                                                  bank conflict. This increments only when there are less than four in-flight
261                                                                  buffers occupied. */
262 #endif /* Word 0 - End */
263     } cn83xx;
264 };
265 typedef union bdk_lmcx_bank_conflict2 bdk_lmcx_bank_conflict2_t;
266 
267 static inline uint64_t BDK_LMCX_BANK_CONFLICT2(unsigned long a) __attribute__ ((pure, always_inline));
268 static inline uint64_t BDK_LMCX_BANK_CONFLICT2(unsigned long a)
269 {
270     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
271         return 0x87e088000368ll + 0x1000000ll * ((a) & 0x0);
272     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
273         return 0x87e088000368ll + 0x1000000ll * ((a) & 0x1);
274     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=3))
275         return 0x87e088000368ll + 0x1000000ll * ((a) & 0x3);
276     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
277         return 0x87e088000368ll + 0x1000000ll * ((a) & 0x3);
278     __bdk_csr_fatal("LMCX_BANK_CONFLICT2", 1, a, 0, 0, 0);
279 }
280 
281 #define typedef_BDK_LMCX_BANK_CONFLICT2(a) bdk_lmcx_bank_conflict2_t
282 #define bustype_BDK_LMCX_BANK_CONFLICT2(a) BDK_CSR_TYPE_RSL
283 #define basename_BDK_LMCX_BANK_CONFLICT2(a) "LMCX_BANK_CONFLICT2"
284 #define device_bar_BDK_LMCX_BANK_CONFLICT2(a) 0x0 /* PF_BAR0 */
285 #define busnum_BDK_LMCX_BANK_CONFLICT2(a) (a)
286 #define arguments_BDK_LMCX_BANK_CONFLICT2(a) (a),-1,-1,-1
287 
288 /**
289  * Register (RSL) lmc#_bist_ctl
290  *
291  * LMC BIST Control Registers
292  * This register has fields to control BIST operation.
293  */
294 union bdk_lmcx_bist_ctl
295 {
296     uint64_t u;
297     struct bdk_lmcx_bist_ctl_s
298     {
299 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
300         uint64_t reserved_5_63         : 59;
301         uint64_t macram_bist_status    : 1;  /**< [  4:  4](RO/H) Maximum Activate Counts RAM BIST status.
302                                                                  1 means fail. */
303         uint64_t dlcram_bist_status    : 1;  /**< [  3:  3](RO/H) DLC RAM BIST status; one means fail. */
304         uint64_t dlcram_bist_done      : 1;  /**< [  2:  2](RO/H) DLC and MAC RAM BIST complete indication;
305                                                                  One means both RAMs have completed. */
306         uint64_t start_bist            : 1;  /**< [  1:  1](R/W) Start BIST on DLC and MAC memory. */
307         uint64_t clear_bist            : 1;  /**< [  0:  0](R/W) Start clear BIST on DLC and MAC memory. */
308 #else /* Word 0 - Little Endian */
309         uint64_t clear_bist            : 1;  /**< [  0:  0](R/W) Start clear BIST on DLC and MAC memory. */
310         uint64_t start_bist            : 1;  /**< [  1:  1](R/W) Start BIST on DLC and MAC memory. */
311         uint64_t dlcram_bist_done      : 1;  /**< [  2:  2](RO/H) DLC and MAC RAM BIST complete indication;
312                                                                  One means both RAMs have completed. */
313         uint64_t dlcram_bist_status    : 1;  /**< [  3:  3](RO/H) DLC RAM BIST status; one means fail. */
314         uint64_t macram_bist_status    : 1;  /**< [  4:  4](RO/H) Maximum Activate Counts RAM BIST status.
315                                                                  1 means fail. */
316         uint64_t reserved_5_63         : 59;
317 #endif /* Word 0 - End */
318     } s;
319     /* struct bdk_lmcx_bist_ctl_s cn; */
320 };
321 typedef union bdk_lmcx_bist_ctl bdk_lmcx_bist_ctl_t;
322 
323 static inline uint64_t BDK_LMCX_BIST_CTL(unsigned long a) __attribute__ ((pure, always_inline));
324 static inline uint64_t BDK_LMCX_BIST_CTL(unsigned long a)
325 {
326     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
327         return 0x87e088000100ll + 0x1000000ll * ((a) & 0x0);
328     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
329         return 0x87e088000100ll + 0x1000000ll * ((a) & 0x1);
330     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
331         return 0x87e088000100ll + 0x1000000ll * ((a) & 0x3);
332     __bdk_csr_fatal("LMCX_BIST_CTL", 1, a, 0, 0, 0);
333 }
334 
335 #define typedef_BDK_LMCX_BIST_CTL(a) bdk_lmcx_bist_ctl_t
336 #define bustype_BDK_LMCX_BIST_CTL(a) BDK_CSR_TYPE_RSL
337 #define basename_BDK_LMCX_BIST_CTL(a) "LMCX_BIST_CTL"
338 #define device_bar_BDK_LMCX_BIST_CTL(a) 0x0 /* PF_BAR0 */
339 #define busnum_BDK_LMCX_BIST_CTL(a) (a)
340 #define arguments_BDK_LMCX_BIST_CTL(a) (a),-1,-1,-1
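
/*
 * Editorial example (not auto-generated): a minimal sketch of running the
 * DLC/MAC RAM BIST using the fields above. Assumes the BDK's BDK_CSR_READ()
 * and BDK_CSR_WRITE() accessors from bdk-csr.h and a caller-supplied node;
 * a timeout on the polling loop is omitted for brevity.
 *
 *   bdk_lmcx_bist_ctl_t bist;
 *   bist.u = BDK_CSR_READ(node, BDK_LMCX_BIST_CTL(0));
 *   bist.s.start_bist = 1;
 *   BDK_CSR_WRITE(node, BDK_LMCX_BIST_CTL(0), bist.u);
 *   do {
 *       bist.u = BDK_CSR_READ(node, BDK_LMCX_BIST_CTL(0));
 *   } while (!bist.s.dlcram_bist_done);      // covers both DLC and MAC RAMs
 *   int failed = bist.s.dlcram_bist_status || bist.s.macram_bist_status;
 */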
341 
342 /**
343  * Register (RSL) lmc#_char_ctl
344  *
345  * INTERNAL: LMC Characterization Control Register
346  *
347  * This register provides an assortment of various control fields needed to characterize the DDR4
348  * interface.
349  */
350 union bdk_lmcx_char_ctl
351 {
352     uint64_t u;
353     struct bdk_lmcx_char_ctl_s
354     {
355 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
356         uint64_t reserved_54_63        : 10;
357         uint64_t dq_char_byte_check    : 1;  /**< [ 53: 53](R/W) When set, LMC performs loopback pattern check on a byte. The selection of the byte is
358                                                                  controlled by LMC()_CHAR_CTL[DQ_CHAR_BYTE_SEL]. */
359         uint64_t dq_char_check_lock    : 1;  /**< [ 52: 52](RO/H) Indicates if a lock has been achieved. Is set to one only if a lock is achieved
360                                                                  during the LFSR priming period after [DQ_CHAR_CHECK_ENABLE] is set to one, and is
361                                                                  forced back to zero when [DQ_CHAR_CHECK_ENABLE] is set to zero. */
362         uint64_t dq_char_check_enable  : 1;  /**< [ 51: 51](R/W) Enable DQ pattern check. The transition from disabled to enabled clears
363                                                                  LMC()_CHAR_DQ_ERR_COUNT. */
364         uint64_t dq_char_bit_sel       : 3;  /**< [ 50: 48](R/W) Select a bit within the byte for DQ characterization pattern check. */
365         uint64_t dq_char_byte_sel      : 4;  /**< [ 47: 44](R/W) Select a byte of data for DQ characterization pattern check. */
366         uint64_t dr                    : 1;  /**< [ 43: 43](R/W) Pattern at data rate (not clock rate). */
367         uint64_t skew_on               : 1;  /**< [ 42: 42](R/W) Skew adjacent bits. */
368         uint64_t en                    : 1;  /**< [ 41: 41](R/W) Enable characterization. */
369         uint64_t sel                   : 1;  /**< [ 40: 40](R/W) Pattern select: 0 = PRBS, 1 = programmable pattern. */
370         uint64_t prog                  : 8;  /**< [ 39: 32](R/W) Programmable pattern. */
371         uint64_t prbs                  : 32; /**< [ 31:  0](R/W) PRBS polynomial. */
372 #else /* Word 0 - Little Endian */
373         uint64_t prbs                  : 32; /**< [ 31:  0](R/W) PRBS polynomial. */
374         uint64_t prog                  : 8;  /**< [ 39: 32](R/W) Programmable pattern. */
375         uint64_t sel                   : 1;  /**< [ 40: 40](R/W) Pattern select: 0 = PRBS, 1 = programmable pattern. */
376         uint64_t en                    : 1;  /**< [ 41: 41](R/W) Enable characterization. */
377         uint64_t skew_on               : 1;  /**< [ 42: 42](R/W) Skew adjacent bits. */
378         uint64_t dr                    : 1;  /**< [ 43: 43](R/W) Pattern at data rate (not clock rate). */
379         uint64_t dq_char_byte_sel      : 4;  /**< [ 47: 44](R/W) Select a byte of data for DQ characterization pattern check. */
380         uint64_t dq_char_bit_sel       : 3;  /**< [ 50: 48](R/W) Select a bit within the byte for DQ characterization pattern check. */
381         uint64_t dq_char_check_enable  : 1;  /**< [ 51: 51](R/W) Enable DQ pattern check. The transition from disabled to enabled clears
382                                                                  LMC()_CHAR_DQ_ERR_COUNT. */
383         uint64_t dq_char_check_lock    : 1;  /**< [ 52: 52](RO/H) Indicates if a lock has been achieved. Is set to one only if a lock is achieved
384                                                                  during the LFSR priming period after [DQ_CHAR_CHECK_ENABLE] is set to one, and is
385                                                                  forced back to zero when [DQ_CHAR_CHECK_ENABLE] is set to zero. */
386         uint64_t dq_char_byte_check    : 1;  /**< [ 53: 53](R/W) When set, LMC performs loopback pattern check on a byte. The selection of the byte is
387                                                                  controlled by LMC()_CHAR_CTL[DQ_CHAR_BYTE_SEL]. */
388         uint64_t reserved_54_63        : 10;
389 #endif /* Word 0 - End */
390     } s;
391     struct bdk_lmcx_char_ctl_cn88xxp1
392     {
393 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
394         uint64_t reserved_54_63        : 10;
395         uint64_t dq_char_byte_check    : 1;  /**< [ 53: 53](RO) Reserved. */
396         uint64_t dq_char_check_lock    : 1;  /**< [ 52: 52](RO/H) Indicates if a lock has been achieved. Is set to one only if a lock is achieved
397                                                                  during the LFSR priming period after [DQ_CHAR_CHECK_ENABLE] is set to one, and is
398                                                                  forced back to zero when [DQ_CHAR_CHECK_ENABLE] is set to zero. */
399         uint64_t dq_char_check_enable  : 1;  /**< [ 51: 51](R/W) Enable DQ pattern check. The transition from disabled to enabled clears
400                                                                  LMC()_CHAR_DQ_ERR_COUNT. */
401         uint64_t dq_char_bit_sel       : 3;  /**< [ 50: 48](R/W) Select a bit within the byte for DQ characterization pattern check. */
402         uint64_t dq_char_byte_sel      : 4;  /**< [ 47: 44](R/W) Select a byte of data for DQ characterization pattern check. */
403         uint64_t dr                    : 1;  /**< [ 43: 43](R/W) Pattern at data rate (not clock rate). */
404         uint64_t skew_on               : 1;  /**< [ 42: 42](R/W) Skew adjacent bits. */
405         uint64_t en                    : 1;  /**< [ 41: 41](R/W) Enable characterization. */
406         uint64_t sel                   : 1;  /**< [ 40: 40](R/W) Pattern select: 0 = PRBS, 1 = programmable pattern. */
407         uint64_t prog                  : 8;  /**< [ 39: 32](R/W) Programmable pattern. */
408         uint64_t prbs                  : 32; /**< [ 31:  0](R/W) PRBS polynomial. */
409 #else /* Word 0 - Little Endian */
410         uint64_t prbs                  : 32; /**< [ 31:  0](R/W) PRBS polynomial. */
411         uint64_t prog                  : 8;  /**< [ 39: 32](R/W) Programmable pattern. */
412         uint64_t sel                   : 1;  /**< [ 40: 40](R/W) Pattern select: 0 = PRBS, 1 = programmable pattern. */
413         uint64_t en                    : 1;  /**< [ 41: 41](R/W) Enable characterization. */
414         uint64_t skew_on               : 1;  /**< [ 42: 42](R/W) Skew adjacent bits. */
415         uint64_t dr                    : 1;  /**< [ 43: 43](R/W) Pattern at data rate (not clock rate). */
416         uint64_t dq_char_byte_sel      : 4;  /**< [ 47: 44](R/W) Select a byte of data for DQ characterization pattern check. */
417         uint64_t dq_char_bit_sel       : 3;  /**< [ 50: 48](R/W) Select a bit within the byte for DQ characterization pattern check. */
418         uint64_t dq_char_check_enable  : 1;  /**< [ 51: 51](R/W) Enable DQ pattern check. The transition from disabled to enabled clears
419                                                                  LMC()_CHAR_DQ_ERR_COUNT. */
420         uint64_t dq_char_check_lock    : 1;  /**< [ 52: 52](RO/H) Indicates if a lock has been achieved. Is set to one only if a lock is achieved
421                                                                  during the LFSR priming period after [DQ_CHAR_CHECK_ENABLE] is set to one, and is
422                                                                  forced back to zero when [DQ_CHAR_CHECK_ENABLE] is set to zero. */
423         uint64_t dq_char_byte_check    : 1;  /**< [ 53: 53](RO) Reserved. */
424         uint64_t reserved_54_63        : 10;
425 #endif /* Word 0 - End */
426     } cn88xxp1;
427     /* struct bdk_lmcx_char_ctl_s cn9; */
428     /* struct bdk_lmcx_char_ctl_s cn81xx; */
429     /* struct bdk_lmcx_char_ctl_s cn83xx; */
430     /* struct bdk_lmcx_char_ctl_s cn88xxp2; */
431 };
432 typedef union bdk_lmcx_char_ctl bdk_lmcx_char_ctl_t;
433 
434 static inline uint64_t BDK_LMCX_CHAR_CTL(unsigned long a) __attribute__ ((pure, always_inline));
435 static inline uint64_t BDK_LMCX_CHAR_CTL(unsigned long a)
436 {
437     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
438         return 0x87e088000220ll + 0x1000000ll * ((a) & 0x0);
439     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
440         return 0x87e088000220ll + 0x1000000ll * ((a) & 0x1);
441     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
442         return 0x87e088000220ll + 0x1000000ll * ((a) & 0x3);
443     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
444         return 0x87e088000220ll + 0x1000000ll * ((a) & 0x3);
445     __bdk_csr_fatal("LMCX_CHAR_CTL", 1, a, 0, 0, 0);
446 }
447 
448 #define typedef_BDK_LMCX_CHAR_CTL(a) bdk_lmcx_char_ctl_t
449 #define bustype_BDK_LMCX_CHAR_CTL(a) BDK_CSR_TYPE_RSL
450 #define basename_BDK_LMCX_CHAR_CTL(a) "LMCX_CHAR_CTL"
451 #define device_bar_BDK_LMCX_CHAR_CTL(a) 0x0 /* PF_BAR0 */
452 #define busnum_BDK_LMCX_CHAR_CTL(a) (a)
453 #define arguments_BDK_LMCX_CHAR_CTL(a) (a),-1,-1,-1
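
/*
 * Editorial example (not auto-generated): a minimal sketch of pointing the DQ
 * loopback pattern checker at a single data bit. Assumes the BDK_CSR_MODIFY()
 * helper from bdk-csr.h and a caller-supplied node; the surrounding pattern
 * setup (PRBS vs. programmable pattern, [EN], etc.) is test specific and not
 * shown. Per the field description, the 0-to-1 transition of
 * [DQ_CHAR_CHECK_ENABLE] clears LMC()_CHAR_DQ_ERR_COUNT.
 *
 *   BDK_CSR_MODIFY(c, node, BDK_LMCX_CHAR_CTL(0),
 *       c.s.dq_char_byte_sel = 3;        // byte lane 3
 *       c.s.dq_char_bit_sel = 5;         // bit 5 within that byte
 *       c.s.dq_char_check_enable = 1);
 */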
454 
455 /**
456  * Register (RSL) lmc#_char_dq_err_count
457  *
458  * INTERNAL: LMC DDR Characterization Error Count Register
459  *
460  * This register contains the DQ error count accumulated by the DDR characterization pattern check.
461  */
462 union bdk_lmcx_char_dq_err_count
463 {
464     uint64_t u;
465     struct bdk_lmcx_char_dq_err_count_s
466     {
467 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
468         uint64_t reserved_40_63        : 24;
469         uint64_t dq_err_count          : 40; /**< [ 39:  0](RO/H) DQ error count. */
470 #else /* Word 0 - Little Endian */
471         uint64_t dq_err_count          : 40; /**< [ 39:  0](RO/H) DQ error count. */
472         uint64_t reserved_40_63        : 24;
473 #endif /* Word 0 - End */
474     } s;
475     /* struct bdk_lmcx_char_dq_err_count_s cn; */
476 };
477 typedef union bdk_lmcx_char_dq_err_count bdk_lmcx_char_dq_err_count_t;
478 
479 static inline uint64_t BDK_LMCX_CHAR_DQ_ERR_COUNT(unsigned long a) __attribute__ ((pure, always_inline));
480 static inline uint64_t BDK_LMCX_CHAR_DQ_ERR_COUNT(unsigned long a)
481 {
482     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
483         return 0x87e088000040ll + 0x1000000ll * ((a) & 0x0);
484     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
485         return 0x87e088000040ll + 0x1000000ll * ((a) & 0x1);
486     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
487         return 0x87e088000040ll + 0x1000000ll * ((a) & 0x3);
488     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
489         return 0x87e088000040ll + 0x1000000ll * ((a) & 0x3);
490     __bdk_csr_fatal("LMCX_CHAR_DQ_ERR_COUNT", 1, a, 0, 0, 0);
491 }
492 
493 #define typedef_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) bdk_lmcx_char_dq_err_count_t
494 #define bustype_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) BDK_CSR_TYPE_RSL
495 #define basename_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) "LMCX_CHAR_DQ_ERR_COUNT"
496 #define device_bar_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) 0x0 /* PF_BAR0 */
497 #define busnum_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) (a)
498 #define arguments_BDK_LMCX_CHAR_DQ_ERR_COUNT(a) (a),-1,-1,-1
499 
500 /**
501  * Register (RSL) lmc#_char_mask0
502  *
503  * LMC Characterization Mask Register 0
504  * This register provides an assortment of various control fields needed to characterize the
505  * DDR4 interface.
506  * It is also used to corrupt the write data bits when ECC Corrupt logic generator is enabled.
507  */
508 union bdk_lmcx_char_mask0
509 {
510     uint64_t u;
511     struct bdk_lmcx_char_mask0_s
512     {
513 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
514         uint64_t mask                  : 64; /**< [ 63:  0](R/W) Mask for DQ0\<63:0\>.
515                                                                  Before enabling ECC corrupt generation logic by setting
516                                                                  LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], set any of the MASK bits to one to flip the
517                                                                  corresponding bits of the lower 64-bit dataword during a write data transfer. */
518 #else /* Word 0 - Little Endian */
519         uint64_t mask                  : 64; /**< [ 63:  0](R/W) Mask for DQ0\<63:0\>.
520                                                                  Before enabling ECC corrupt generation logic by setting
521                                                                  LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], set any of the MASK bits to one to flip the
522                                                                  corresponding bits of the lower 64-bit dataword during a write data transfer. */
523 #endif /* Word 0 - End */
524     } s;
525     /* struct bdk_lmcx_char_mask0_s cn; */
526 };
527 typedef union bdk_lmcx_char_mask0 bdk_lmcx_char_mask0_t;
528 
529 static inline uint64_t BDK_LMCX_CHAR_MASK0(unsigned long a) __attribute__ ((pure, always_inline));
530 static inline uint64_t BDK_LMCX_CHAR_MASK0(unsigned long a)
531 {
532     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
533         return 0x87e088000228ll + 0x1000000ll * ((a) & 0x0);
534     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
535         return 0x87e088000228ll + 0x1000000ll * ((a) & 0x1);
536     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
537         return 0x87e088000228ll + 0x1000000ll * ((a) & 0x3);
538     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
539         return 0x87e088000228ll + 0x1000000ll * ((a) & 0x3);
540     __bdk_csr_fatal("LMCX_CHAR_MASK0", 1, a, 0, 0, 0);
541 }
542 
543 #define typedef_BDK_LMCX_CHAR_MASK0(a) bdk_lmcx_char_mask0_t
544 #define bustype_BDK_LMCX_CHAR_MASK0(a) BDK_CSR_TYPE_RSL
545 #define basename_BDK_LMCX_CHAR_MASK0(a) "LMCX_CHAR_MASK0"
546 #define device_bar_BDK_LMCX_CHAR_MASK0(a) 0x0 /* PF_BAR0 */
547 #define busnum_BDK_LMCX_CHAR_MASK0(a) (a)
548 #define arguments_BDK_LMCX_CHAR_MASK0(a) (a),-1,-1,-1
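
/*
 * Editorial example (not auto-generated): a minimal sketch of arranging for
 * one bit of the lower 64-bit write data word to be flipped for ECC testing.
 * Assumes the BDK's BDK_CSR_WRITE() accessor from bdk-csr.h and a
 * caller-supplied node. As the [MASK] description says,
 * LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA] must be set afterwards to arm the
 * corruption.
 *
 *   bdk_lmcx_char_mask0_t m;
 *   m.u = 1ull << 17;                     // flip DQ0<17> on write data
 *   BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK0(0), m.u);
 */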
549 
550 /**
551  * Register (RSL) lmc#_char_mask1
552  *
553  * INTERNAL: LMC Characterization Mask Register 1
554  *
555  * This register provides an assortment of various control fields needed to characterize the DDR4
556  * interface.
557  */
558 union bdk_lmcx_char_mask1
559 {
560     uint64_t u;
561     struct bdk_lmcx_char_mask1_s
562     {
563 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
564         uint64_t reserved_8_63         : 56;
565         uint64_t mask                  : 8;  /**< [  7:  0](R/W) Mask for DQ0\<71:64\>. */
566 #else /* Word 0 - Little Endian */
567         uint64_t mask                  : 8;  /**< [  7:  0](R/W) Mask for DQ0\<71:64\>. */
568         uint64_t reserved_8_63         : 56;
569 #endif /* Word 0 - End */
570     } s;
571     /* struct bdk_lmcx_char_mask1_s cn; */
572 };
573 typedef union bdk_lmcx_char_mask1 bdk_lmcx_char_mask1_t;
574 
575 static inline uint64_t BDK_LMCX_CHAR_MASK1(unsigned long a) __attribute__ ((pure, always_inline));
576 static inline uint64_t BDK_LMCX_CHAR_MASK1(unsigned long a)
577 {
578     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
579         return 0x87e088000230ll + 0x1000000ll * ((a) & 0x0);
580     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
581         return 0x87e088000230ll + 0x1000000ll * ((a) & 0x1);
582     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
583         return 0x87e088000230ll + 0x1000000ll * ((a) & 0x3);
584     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
585         return 0x87e088000230ll + 0x1000000ll * ((a) & 0x3);
586     __bdk_csr_fatal("LMCX_CHAR_MASK1", 1, a, 0, 0, 0);
587 }
588 
589 #define typedef_BDK_LMCX_CHAR_MASK1(a) bdk_lmcx_char_mask1_t
590 #define bustype_BDK_LMCX_CHAR_MASK1(a) BDK_CSR_TYPE_RSL
591 #define basename_BDK_LMCX_CHAR_MASK1(a) "LMCX_CHAR_MASK1"
592 #define device_bar_BDK_LMCX_CHAR_MASK1(a) 0x0 /* PF_BAR0 */
593 #define busnum_BDK_LMCX_CHAR_MASK1(a) (a)
594 #define arguments_BDK_LMCX_CHAR_MASK1(a) (a),-1,-1,-1
595 
596 /**
597  * Register (RSL) lmc#_char_mask2
598  *
599  * LMC Characterization Mask Register 2
600  * This register provides an assortment of various control fields needed to characterize the
601  * DDR4 interface.
602  * It is also used to corrupt the write data bits when ECC corrupt logic generator is enabled.
603  */
604 union bdk_lmcx_char_mask2
605 {
606     uint64_t u;
607     struct bdk_lmcx_char_mask2_s
608     {
609 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
610         uint64_t mask                  : 64; /**< [ 63:  0](R/W) Mask for DQ1\<63:0\>.
611                                                                  Before enabling ECC Corrupt generation logic by setting
612                                                                  LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], set any of the MASK bits to one to flip the
613                                                                  corresponding bits of the upper 64-bit dataword during a write data transfer. */
614 #else /* Word 0 - Little Endian */
615         uint64_t mask                  : 64; /**< [ 63:  0](R/W) Mask for DQ1\<63:0\>.
616                                                                  LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], set any of the MASK bits to one to flip the
617                                                                  LMC()_ECC_PARITY_TEST[ECC_CORRUPT_ENA], set any the MASK bits to one to flip the
618                                                                  corresponding bits of the upper 64-bit dataword during a write data transfer. */
619 #endif /* Word 0 - End */
620     } s;
621     /* struct bdk_lmcx_char_mask2_s cn; */
622 };
623 typedef union bdk_lmcx_char_mask2 bdk_lmcx_char_mask2_t;
624 
625 static inline uint64_t BDK_LMCX_CHAR_MASK2(unsigned long a) __attribute__ ((pure, always_inline));
626 static inline uint64_t BDK_LMCX_CHAR_MASK2(unsigned long a)
627 {
628     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
629         return 0x87e088000238ll + 0x1000000ll * ((a) & 0x0);
630     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
631         return 0x87e088000238ll + 0x1000000ll * ((a) & 0x1);
632     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
633         return 0x87e088000238ll + 0x1000000ll * ((a) & 0x3);
634     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
635         return 0x87e088000238ll + 0x1000000ll * ((a) & 0x3);
636     __bdk_csr_fatal("LMCX_CHAR_MASK2", 1, a, 0, 0, 0);
637 }
638 
639 #define typedef_BDK_LMCX_CHAR_MASK2(a) bdk_lmcx_char_mask2_t
640 #define bustype_BDK_LMCX_CHAR_MASK2(a) BDK_CSR_TYPE_RSL
641 #define basename_BDK_LMCX_CHAR_MASK2(a) "LMCX_CHAR_MASK2"
642 #define device_bar_BDK_LMCX_CHAR_MASK2(a) 0x0 /* PF_BAR0 */
643 #define busnum_BDK_LMCX_CHAR_MASK2(a) (a)
644 #define arguments_BDK_LMCX_CHAR_MASK2(a) (a),-1,-1,-1
645 
646 /**
647  * Register (RSL) lmc#_char_mask3
648  *
649  * INTERNAL: LMC Characterization Mask Register 3
650  *
651  * This register provides an assortment of various control fields needed to characterize the DDR4
652  * interface.
653  */
654 union bdk_lmcx_char_mask3
655 {
656     uint64_t u;
657     struct bdk_lmcx_char_mask3_s
658     {
659 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
660         uint64_t reserved_17_63        : 47;
661         uint64_t dac_on_mask           : 9;  /**< [ 16:  8](R/W) This mask is applied to the DAC ON signals that go to the PHY, so that each byte lane can
662                                                                  selectively turn off or on the signals once the master signals are enabled. Using the
663                                                                  symbol D for DAC ON, the mask looks like this:
664                                                                  \<pre\>
665                                                                    DDDDDDDDD
666                                                                    876543210
667                                                                  \</pre\> */
668         uint64_t mask                  : 8;  /**< [  7:  0](R/W) Mask for DQ1\<71:64\>. */
669 #else /* Word 0 - Little Endian */
670         uint64_t mask                  : 8;  /**< [  7:  0](R/W) Mask for DQ1\<71:64\>. */
671         uint64_t dac_on_mask           : 9;  /**< [ 16:  8](R/W) This mask is applied to the DAC ON signals that go to the PHY, so that each byte lane can
672                                                                  selectively turn off or on the signals once the master signals are enabled. Using the
673                                                                  symbol D for DAC ON, the mask looks like this:
674                                                                  \<pre\>
675                                                                    DDDDDDDDD
676                                                                    876543210
677                                                                  \</pre\> */
678         uint64_t reserved_17_63        : 47;
679 #endif /* Word 0 - End */
680     } s;
681     struct bdk_lmcx_char_mask3_cn8
682     {
683 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
684         uint64_t reserved_8_63         : 56;
685         uint64_t mask                  : 8;  /**< [  7:  0](R/W) Mask for DQ1\<71:64\>. */
686 #else /* Word 0 - Little Endian */
687         uint64_t mask                  : 8;  /**< [  7:  0](R/W) Mask for DQ1\<71:64\>. */
688         uint64_t reserved_8_63         : 56;
689 #endif /* Word 0 - End */
690     } cn8;
691     /* struct bdk_lmcx_char_mask3_s cn9; */
692 };
693 typedef union bdk_lmcx_char_mask3 bdk_lmcx_char_mask3_t;
694 
695 static inline uint64_t BDK_LMCX_CHAR_MASK3(unsigned long a) __attribute__ ((pure, always_inline));
696 static inline uint64_t BDK_LMCX_CHAR_MASK3(unsigned long a)
697 {
698     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
699         return 0x87e088000240ll + 0x1000000ll * ((a) & 0x0);
700     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
701         return 0x87e088000240ll + 0x1000000ll * ((a) & 0x1);
702     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
703         return 0x87e088000240ll + 0x1000000ll * ((a) & 0x3);
704     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
705         return 0x87e088000240ll + 0x1000000ll * ((a) & 0x3);
706     __bdk_csr_fatal("LMCX_CHAR_MASK3", 1, a, 0, 0, 0);
707 }
708 
709 #define typedef_BDK_LMCX_CHAR_MASK3(a) bdk_lmcx_char_mask3_t
710 #define bustype_BDK_LMCX_CHAR_MASK3(a) BDK_CSR_TYPE_RSL
711 #define basename_BDK_LMCX_CHAR_MASK3(a) "LMCX_CHAR_MASK3"
712 #define device_bar_BDK_LMCX_CHAR_MASK3(a) 0x0 /* PF_BAR0 */
713 #define busnum_BDK_LMCX_CHAR_MASK3(a) (a)
714 #define arguments_BDK_LMCX_CHAR_MASK3(a) (a),-1,-1,-1
715 
716 /**
717  * Register (RSL) lmc#_char_mask4
718  *
719  * INTERNAL: LMC Characterization Mask Register 4
720  *
721  * This register provides an assortment of various control fields needed to characterize the DDR4 interface.
722  */
723 union bdk_lmcx_char_mask4
724 {
725     uint64_t u;
726     struct bdk_lmcx_char_mask4_s
727     {
728 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
729         uint64_t ref_pin_on_mask       : 9;  /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
730                                                                  can selectively turn off or on the signals once the master signals are enabled. Using the
731                                                                  symbol R, the mask looks like this:
732                                                                  \<pre\>
733                                                                  RRRRRRRRR
734                                                                  876543210
735                                                                  \</pre\> */
736         uint64_t dac_on_mask           : 9;  /**< [ 54: 46](R/W) This mask is applied to the DAC_ON signals that go to the PHY, so that each byte lane can
737                                                                  selectively turn off or on the signals once the master signals are enabled. Using the
738                                                                  symbol D for DAC_ON, the mask looks like this:
739                                                                  DDDDDDDDD
740                                                                  876543210 */
741         uint64_t reserved_36_45        : 10;
742         uint64_t par_mask              : 1;  /**< [ 35: 35](R/W) Mask for DDR_PAR. */
743         uint64_t act_n_mask            : 1;  /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
744         uint64_t a17_mask              : 1;  /**< [ 33: 33](R/W) Mask for DDR_A17. */
745         uint64_t reset_n_mask          : 1;  /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
746         uint64_t a_mask                : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
747         uint64_t ba_mask               : 3;  /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
748         uint64_t we_n_mask             : 1;  /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
749         uint64_t cas_n_mask            : 1;  /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
750         uint64_t ras_n_mask            : 1;  /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
751         uint64_t odt1_mask             : 2;  /**< [  9:  8](R/W) Mask for DDR_ODT1. */
752         uint64_t odt0_mask             : 2;  /**< [  7:  6](R/W) Mask for DDR_ODT0. */
753         uint64_t cs1_n_mask            : 2;  /**< [  5:  4](R/W) Mask for DDR_CS1_L. */
754         uint64_t cs0_n_mask            : 2;  /**< [  3:  2](R/W) Mask for DDR_CS0_L. */
755         uint64_t cke_mask              : 2;  /**< [  1:  0](R/W) Mask for DDR_CKE*. */
756 #else /* Word 0 - Little Endian */
757         uint64_t cke_mask              : 2;  /**< [  1:  0](R/W) Mask for DDR_CKE*. */
758         uint64_t cs0_n_mask            : 2;  /**< [  3:  2](R/W) Mask for DDR_CS0_L. */
759         uint64_t cs1_n_mask            : 2;  /**< [  5:  4](R/W) Mask for DDR_CS1_L. */
760         uint64_t odt0_mask             : 2;  /**< [  7:  6](R/W) Mask for DDR_ODT0. */
761         uint64_t odt1_mask             : 2;  /**< [  9:  8](R/W) Mask for DDR_ODT1. */
762         uint64_t ras_n_mask            : 1;  /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
763         uint64_t cas_n_mask            : 1;  /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
764         uint64_t we_n_mask             : 1;  /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
765         uint64_t ba_mask               : 3;  /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
766         uint64_t a_mask                : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
767         uint64_t reset_n_mask          : 1;  /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
768         uint64_t a17_mask              : 1;  /**< [ 33: 33](R/W) Mask for DDR_A17. */
769         uint64_t act_n_mask            : 1;  /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
770         uint64_t par_mask              : 1;  /**< [ 35: 35](R/W) Mask for DDR_PAR. */
771         uint64_t reserved_36_45        : 10;
772         uint64_t dac_on_mask           : 9;  /**< [ 54: 46](R/W) This mask is applied to the DAC_ON signals that go to the PHY, so that each byte lane can
773                                                                  selectively turn off or on the signals once the master signals are enabled. Using the
774                                                                  symbol D for DAC_ON, the mask looks like this:
775                                                                  DDDDDDDDD
776                                                                  876543210 */
777         uint64_t ref_pin_on_mask       : 9;  /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
778                                                                  can selectively turn off or on the signals once the master signals are enabled. Using the
779                                                                  symbol R, the mask looks like this:
780                                                                  \<pre\>
781                                                                  RRRRRRRRR
782                                                                  876543210
783                                                                  \</pre\> */
784 #endif /* Word 0 - End */
785     } s;
786     struct bdk_lmcx_char_mask4_cn8
787     {
788 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
789         uint64_t ref_pin_on_mask       : 9;  /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
790                                                                  can selectively turn off or on the signals once the master signals are enabled. Using the
791                                                                  symbol R, the mask looks like this:
792                                                                  \<pre\>
793                                                                  RRRRRRRRR
794                                                                  876543210
795                                                                  \</pre\> */
796         uint64_t dac_on_mask           : 9;  /**< [ 54: 46](R/W) This mask is applied to the DAC_ON signals that go to the PHY, so that each byte lane can
797                                                                  selectively turn off or on the signals once the master signals are enabled. Using the
798                                                                  symbol D for DAC_ON, the mask looks like this:
799                                                                  DDDDDDDDD
800                                                                  876543210 */
801         uint64_t reserved_45           : 1;
802         uint64_t dbi_mask              : 9;  /**< [ 44: 36](R/W) Mask for DBI/DQS\<1\>. */
803         uint64_t par_mask              : 1;  /**< [ 35: 35](R/W) Mask for DDR_PAR. */
804         uint64_t act_n_mask            : 1;  /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
805         uint64_t a17_mask              : 1;  /**< [ 33: 33](R/W) Mask for DDR_A17. */
806         uint64_t reset_n_mask          : 1;  /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
807         uint64_t a_mask                : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
808         uint64_t ba_mask               : 3;  /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
809         uint64_t we_n_mask             : 1;  /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
810         uint64_t cas_n_mask            : 1;  /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
811         uint64_t ras_n_mask            : 1;  /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
812         uint64_t odt1_mask             : 2;  /**< [  9:  8](R/W) Mask for DDR_ODT1. */
813         uint64_t odt0_mask             : 2;  /**< [  7:  6](R/W) Mask for DDR_ODT0. */
814         uint64_t cs1_n_mask            : 2;  /**< [  5:  4](R/W) Mask for DDR_CS1_L. */
815         uint64_t cs0_n_mask            : 2;  /**< [  3:  2](R/W) Mask for DDR_CS0_L. */
816         uint64_t cke_mask              : 2;  /**< [  1:  0](R/W) Mask for DDR_CKE*. */
817 #else /* Word 0 - Little Endian */
818         uint64_t cke_mask              : 2;  /**< [  1:  0](R/W) Mask for DDR_CKE*. */
819         uint64_t cs0_n_mask            : 2;  /**< [  3:  2](R/W) Mask for DDR_CS0_L. */
820         uint64_t cs1_n_mask            : 2;  /**< [  5:  4](R/W) Mask for DDR_CS1_L. */
821         uint64_t odt0_mask             : 2;  /**< [  7:  6](R/W) Mask for DDR_ODT0. */
822         uint64_t odt1_mask             : 2;  /**< [  9:  8](R/W) Mask for DDR_ODT1. */
823         uint64_t ras_n_mask            : 1;  /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
824         uint64_t cas_n_mask            : 1;  /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
825         uint64_t we_n_mask             : 1;  /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
826         uint64_t ba_mask               : 3;  /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
827         uint64_t a_mask                : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
828         uint64_t reset_n_mask          : 1;  /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
829         uint64_t a17_mask              : 1;  /**< [ 33: 33](R/W) Mask for DDR_A17. */
830         uint64_t act_n_mask            : 1;  /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
831         uint64_t par_mask              : 1;  /**< [ 35: 35](R/W) Mask for DDR_PAR. */
832         uint64_t dbi_mask              : 9;  /**< [ 44: 36](R/W) Mask for DBI/DQS\<1\>. */
833         uint64_t reserved_45           : 1;
834         uint64_t dac_on_mask           : 9;  /**< [ 54: 46](R/W) This mask is applied to the DAC_ON signals that go to the PHY, so that each byte lane can
835                                                                  selectively turn off or on the signals once the master signals are enabled. Using the
836                                                                  symbol D for DAC_ON, the mask looks like this:
837                                                                  DDDDDDDDD
838                                                                  876543210 */
839         uint64_t ref_pin_on_mask       : 9;  /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
840                                                                  can selectively turn off or on the signals once the master signals are enabled. Using the
841                                                                  symbol R, the mask looks like this:
842                                                                  \<pre\>
843                                                                  RRRRRRRRR
844                                                                  876543210
845                                                                  \</pre\> */
846 #endif /* Word 0 - End */
847     } cn8;
848     struct bdk_lmcx_char_mask4_cn9
849     {
850 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
851         uint64_t ref_pin_on_mask       : 9;  /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
852                                                                  can selectively turn off or on the signals once the master signals are enabled. Using the
853                                                                  symbol R, the mask looks like this:
854                                                                  \<pre\>
855                                                                  RRRRRRRRR
856                                                                  876543210
857                                                                  \</pre\> */
858         uint64_t reserved_47_54        : 8;
859         uint64_t dbi_mask              : 9;  /**< [ 46: 38](R/W) Mask for DBI/DQS\<1\>. */
860         uint64_t c2_mask               : 1;  /**< [ 37: 37](R/W) Mask for CID C2. */
861         uint64_t c1_mask               : 1;  /**< [ 36: 36](R/W) Mask for CID C1. */
862         uint64_t par_mask              : 1;  /**< [ 35: 35](R/W) Mask for DDR_PAR. */
863         uint64_t act_n_mask            : 1;  /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
864         uint64_t a17_mask              : 1;  /**< [ 33: 33](R/W) Mask for DDR_A17. */
865         uint64_t reset_n_mask          : 1;  /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
866         uint64_t a_mask                : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
867         uint64_t ba_mask               : 3;  /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
868         uint64_t we_n_mask             : 1;  /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
869         uint64_t cas_n_mask            : 1;  /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
870         uint64_t ras_n_mask            : 1;  /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
871         uint64_t odt1_mask             : 2;  /**< [  9:  8](R/W) Mask for DDR_ODT1. */
872         uint64_t odt0_mask             : 2;  /**< [  7:  6](R/W) Mask for DDR_ODT0. */
873         uint64_t cs1_n_mask            : 2;  /**< [  5:  4](R/W) Mask for DDR_CS1_L. */
874         uint64_t cs0_n_mask            : 2;  /**< [  3:  2](R/W) Mask for DDR_CS0_L. */
875         uint64_t cke_mask              : 2;  /**< [  1:  0](R/W) Mask for DDR_CKE*. */
876 #else /* Word 0 - Little Endian */
877         uint64_t cke_mask              : 2;  /**< [  1:  0](R/W) Mask for DDR_CKE*. */
878         uint64_t cs0_n_mask            : 2;  /**< [  3:  2](R/W) Mask for DDR_CS0_L. */
879         uint64_t cs1_n_mask            : 2;  /**< [  5:  4](R/W) Mask for DDR_CS1_L. */
880         uint64_t odt0_mask             : 2;  /**< [  7:  6](R/W) Mask for DDR_ODT0. */
881         uint64_t odt1_mask             : 2;  /**< [  9:  8](R/W) Mask for DDR_ODT1. */
882         uint64_t ras_n_mask            : 1;  /**< [ 10: 10](R/W) Mask for DDR_RAS_L. */
883         uint64_t cas_n_mask            : 1;  /**< [ 11: 11](R/W) Mask for DDR_CAS_L. */
884         uint64_t we_n_mask             : 1;  /**< [ 12: 12](R/W) Mask for DDR_WE_L. */
885         uint64_t ba_mask               : 3;  /**< [ 15: 13](R/W) Mask for DDR_BA\<2:0\>. */
886         uint64_t a_mask                : 16; /**< [ 31: 16](R/W) Mask for DDR_A\<15:0\>. */
887         uint64_t reset_n_mask          : 1;  /**< [ 32: 32](R/W) Mask for DDR_RESET_L. */
888         uint64_t a17_mask              : 1;  /**< [ 33: 33](R/W) Mask for DDR_A17. */
889         uint64_t act_n_mask            : 1;  /**< [ 34: 34](R/W) Mask for DDR_ACT_L. */
890         uint64_t par_mask              : 1;  /**< [ 35: 35](R/W) Mask for DDR_PAR. */
891         uint64_t c1_mask               : 1;  /**< [ 36: 36](R/W) Mask for CID C1. */
892         uint64_t c2_mask               : 1;  /**< [ 37: 37](R/W) Mask for CID C2. */
893         uint64_t dbi_mask              : 9;  /**< [ 46: 38](R/W) Mask for DBI/DQS\<1\>. */
894         uint64_t reserved_47_54        : 8;
895         uint64_t ref_pin_on_mask       : 9;  /**< [ 63: 55](R/W) This mask is applied to the ref_pin_on signals that go to the PHY, so that each byte lane
896                                                                  can selectively turn off or on the signals once the master signals are enabled. Using the
897                                                                  symbol R, the mask looks like this:
898                                                                  \<pre\>
899                                                                  RRRRRRRRR
900                                                                  876543210
901                                                                  \</pre\> */
902 #endif /* Word 0 - End */
903     } cn9;
904 };
905 typedef union bdk_lmcx_char_mask4 bdk_lmcx_char_mask4_t;
906 
907 static inline uint64_t BDK_LMCX_CHAR_MASK4(unsigned long a) __attribute__ ((pure, always_inline));
908 static inline uint64_t BDK_LMCX_CHAR_MASK4(unsigned long a)
909 {
910     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
911         return 0x87e088000318ll + 0x1000000ll * ((a) & 0x0);
912     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
913         return 0x87e088000318ll + 0x1000000ll * ((a) & 0x1);
914     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
915         return 0x87e088000318ll + 0x1000000ll * ((a) & 0x3);
916     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
917         return 0x87e088000318ll + 0x1000000ll * ((a) & 0x3);
918     __bdk_csr_fatal("LMCX_CHAR_MASK4", 1, a, 0, 0, 0);
919 }
920 
921 #define typedef_BDK_LMCX_CHAR_MASK4(a) bdk_lmcx_char_mask4_t
922 #define bustype_BDK_LMCX_CHAR_MASK4(a) BDK_CSR_TYPE_RSL
923 #define basename_BDK_LMCX_CHAR_MASK4(a) "LMCX_CHAR_MASK4"
924 #define device_bar_BDK_LMCX_CHAR_MASK4(a) 0x0 /* PF_BAR0 */
925 #define busnum_BDK_LMCX_CHAR_MASK4(a) (a)
926 #define arguments_BDK_LMCX_CHAR_MASK4(a) (a),-1,-1,-1
927 
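/* A minimal usage sketch, assuming a platform 64-bit CSR read has already
 * fetched the raw value from the physical address returned by
 * BDK_LMCX_CHAR_MASK4(lmc); the example_* helper name is hypothetical and
 * not a BDK API.  It only shows how the bdk_lmcx_char_mask4_t union above
 * is meant to be used to pick out individual mask fields. */
static inline uint64_t example_char_mask4_cke(uint64_t raw)
{
    bdk_lmcx_char_mask4_t m;
    m.u = raw;              /* raw 64-bit LMC(x)_CHAR_MASK4 value */
    return m.cn9.cke_mask;  /* [1:0] mask applied to DDR_CKE* (CN9XXX field view) */
}
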
928 /**
929  * Register (RSL) lmc#_comp_ctl2
930  *
931  * LMC Compensation Control Register
932  */
933 union bdk_lmcx_comp_ctl2
934 {
935     uint64_t u;
936     struct bdk_lmcx_comp_ctl2_s
937     {
938 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
939         uint64_t reserved_51_63        : 13;
940         uint64_t rclk_char_mode        : 1;  /**< [ 50: 50](R/W) Reserved.
941                                                                  Internal:
942                                                                  Select core clock characterization mode. */
943         uint64_t ddr__ptune            : 5;  /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
944                                                                  compensation impedance on P-pullup. */
945         uint64_t ddr__ntune            : 5;  /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
946                                                                  compensation impedance on N-pulldown. */
947         uint64_t ptune_offset          : 4;  /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
948                                                                  indicating addition and one indicating subtraction. */
949         uint64_t ntune_offset          : 4;  /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
950                                                                  indicating addition and one indicating subtraction. */
951         uint64_t m180                  : 1;  /**< [ 31: 31](R/W) Reserved; must be zero.
952                                                                  Internal:
953                                                                  Cap impedance at 180 ohm, instead of 240 ohm. */
954         uint64_t byp                   : 1;  /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
955                                                                  [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
956         uint64_t ptune                 : 5;  /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
957         uint64_t ntune                 : 5;  /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
958         uint64_t rodt_ctl              : 4;  /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
959                                                                    0x0 = No ODT.
960                                                                    0x1 = 20 ohm.
961                                                                    0x2 = 30 ohm.
962                                                                    0x3 = 40 ohm.
963                                                                    0x4 = 60 ohm.
964                                                                    0x5 = 120 ohm.
965                                                                    _ else = Reserved.
966 
967                                                                  In DDR4 mode:
968                                                                    0x0 = No ODT.
969                                                                    0x1 = 40 ohm.
970                                                                    0x2 = 60 ohm.
971                                                                    0x3 = 80 ohm.
972                                                                    0x4 = 120 ohm.
973                                                                    0x5 = 240 ohm.
974                                                                    0x6 = 34 ohm.
975                                                                    0x7 = 48 ohm.
976                                                                    _ else = Reserved. */
977         uint64_t control_ctl           : 4;  /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
978 
979                                                                  In DDR3 mode:
980                                                                    0x1 = 24 ohm.
981                                                                    0x2 = 26.67 ohm.
982                                                                    0x3 = 30 ohm.
983                                                                    0x4 = 34.3 ohm.
984                                                                    0x5 = 40 ohm.
985                                                                    0x6 = 48 ohm.
986                                                                    0x7 = 60 ohm.
987                                                                    _ else = Reserved.
988 
989                                                                  In DDR4 mode:
990                                                                    0x0 = Reserved.
991                                                                    0x1 = Reserved.
992                                                                    0x2 = 26 ohm.
993                                                                    0x3 = 30 ohm.
994                                                                    0x4 = 34 ohm.
995                                                                    0x5 = 40 ohm.
996                                                                    0x6 = 48 ohm.
997                                                                    _ else = Reserved. */
998         uint64_t cmd_ctl               : 4;  /**< [ 11:  8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
999                                                                  DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
1000 
1001                                                                  In DDR3 mode:
1002                                                                    0x1 = 24 ohm.
1003                                                                    0x2 = 26.67 ohm.
1004                                                                    0x3 = 30 ohm.
1005                                                                    0x4 = 34.3 ohm.
1006                                                                    0x5 = 40 ohm.
1007                                                                    0x6 = 48 ohm.
1008                                                                    0x7 = 60 ohm.
1009                                                                    _ else = Reserved.
1010 
1011                                                                  In DDR4 mode:
1012                                                                    0x0 = Reserved.
1013                                                                    0x1 = Reserved.
1014                                                                    0x2 = 26 ohm.
1015                                                                    0x3 = 30 ohm.
1016                                                                    0x4 = 34 ohm.
1017                                                                    0x5 = 40 ohm.
1018                                                                    0x6 = 48 ohm.
1019                                                                    _ else = Reserved. */
1020         uint64_t ck_ctl                : 4;  /**< [  7:  4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
1021 
1022                                                                  In DDR3 mode:
1023                                                                    0x1 = 24 ohm.
1024                                                                    0x2 = 26.67 ohm.
1025                                                                    0x3 = 30 ohm.
1026                                                                    0x4 = 34.3 ohm.
1027                                                                    0x5 = 40 ohm.
1028                                                                    0x6 = 48 ohm.
1029                                                                    0x7 = 60 ohm.
1030                                                                    _ else = Reserved.
1031 
1032                                                                  In DDR4 mode:
1033                                                                    0x0 = Reserved.
1034                                                                    0x1 = Reserved.
1035                                                                    0x2 = 26 ohm.
1036                                                                    0x3 = 30 ohm.
1037                                                                    0x4 = 34 ohm.
1038                                                                    0x5 = 40 ohm.
1039                                                                    0x6 = 48 ohm.
1040                                                                    _ else = Reserved. */
1041         uint64_t dqx_ctl               : 4;  /**< [  3:  0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
1042                                                                    0x1 = 24 ohm.
1043                                                                    0x2 = 26.67 ohm.
1044                                                                    0x3 = 30 ohm.
1045                                                                    0x4 = 34.3 ohm.
1046                                                                    0x5 = 40 ohm.
1047                                                                    0x6 = 48 ohm.
1048                                                                    0x7 = 60 ohm.
1049                                                                    _ else = Reserved. */
1050 #else /* Word 0 - Little Endian */
1051         uint64_t dqx_ctl               : 4;  /**< [  3:  0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
1052                                                                    0x1 = 24 ohm.
1053                                                                    0x2 = 26.67 ohm.
1054                                                                    0x3 = 30 ohm.
1055                                                                    0x4 = 34.3 ohm.
1056                                                                    0x5 = 40 ohm.
1057                                                                    0x6 = 48 ohm.
1058                                                                    0x7 = 60 ohm.
1059                                                                    _ else = Reserved. */
1060         uint64_t ck_ctl                : 4;  /**< [  7:  4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
1061 
1062                                                                  In DDR3 mode:
1063                                                                    0x1 = 24 ohm.
1064                                                                    0x2 = 26.67 ohm.
1065                                                                    0x3 = 30 ohm.
1066                                                                    0x4 = 34.3 ohm.
1067                                                                    0x5 = 40 ohm.
1068                                                                    0x6 = 48 ohm.
1069                                                                    0x7 = 60 ohm.
1070                                                                    _ else = Reserved.
1071 
1072                                                                  In DDR4 mode:
1073                                                                    0x0 = Reserved.
1074                                                                    0x1 = Reserved.
1075                                                                    0x2 = 26 ohm.
1076                                                                    0x3 = 30 ohm.
1077                                                                    0x4 = 34 ohm.
1078                                                                    0x5 = 40 ohm.
1079                                                                    0x6 = 48 ohm.
1080                                                                    _ else = Reserved. */
1081         uint64_t cmd_ctl               : 4;  /**< [ 11:  8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
1082                                                                  DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
1083 
1084                                                                  In DDR3 mode:
1085                                                                    0x1 = 24 ohm.
1086                                                                    0x2 = 26.67 ohm.
1087                                                                    0x3 = 30 ohm.
1088                                                                    0x4 = 34.3 ohm.
1089                                                                    0x5 = 40 ohm.
1090                                                                    0x6 = 48 ohm.
1091                                                                    0x7 = 60 ohm.
1092                                                                    _ else = Reserved.
1093 
1094                                                                  In DDR4 mode:
1095                                                                    0x0 = Reserved.
1096                                                                    0x1 = Reserved.
1097                                                                    0x2 = 26 ohm.
1098                                                                    0x3 = 30 ohm.
1099                                                                    0x4 = 34 ohm.
1100                                                                    0x5 = 40 ohm.
1101                                                                    0x6 = 48 ohm.
1102                                                                    _ else = Reserved. */
1103         uint64_t control_ctl           : 4;  /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
1104 
1105                                                                  In DDR3 mode:
1106                                                                    0x1 = 24 ohm.
1107                                                                    0x2 = 26.67 ohm.
1108                                                                    0x3 = 30 ohm.
1109                                                                    0x4 = 34.3 ohm.
1110                                                                    0x5 = 40 ohm.
1111                                                                    0x6 = 48 ohm.
1112                                                                    0x7 = 60 ohm.
1113                                                                    _ else = Reserved.
1114 
1115                                                                  In DDR4 mode:
1116                                                                    0x0 = Reserved.
1117                                                                    0x1 = Reserved.
1118                                                                    0x2 = 26 ohm.
1119                                                                    0x3 = 30 ohm.
1120                                                                    0x4 = 34 ohm.
1121                                                                    0x5 = 40 ohm.
1122                                                                    0x6 = 48 ohm.
1123                                                                    _ else = Reserved. */
1124         uint64_t rodt_ctl              : 4;  /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
1125                                                                    0x0 = No ODT.
1126                                                                    0x1 = 20 ohm.
1127                                                                    0x2 = 30 ohm.
1128                                                                    0x3 = 40 ohm.
1129                                                                    0x4 = 60 ohm.
1130                                                                    0x5 = 120 ohm.
1131                                                                    _ else = Reserved.
1132 
1133                                                                  In DDR4 mode:
1134                                                                    0x0 = No ODT.
1135                                                                    0x1 = 40 ohm.
1136                                                                    0x2 = 60 ohm.
1137                                                                    0x3 = 80 ohm.
1138                                                                    0x4 = 120 ohm.
1139                                                                    0x5 = 240 ohm.
1140                                                                    0x6 = 34 ohm.
1141                                                                    0x7 = 48 ohm.
1142                                                                    _ else = Reserved. */
1143         uint64_t ntune                 : 5;  /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
1144         uint64_t ptune                 : 5;  /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
1145         uint64_t byp                   : 1;  /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
1146                                                                  [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
1147         uint64_t m180                  : 1;  /**< [ 31: 31](R/W) Reserved; must be zero.
1148                                                                  Internal:
1149                                                                  Cap impedance at 180 ohm, instead of 240 ohm. */
1150         uint64_t ntune_offset          : 4;  /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
1151                                                                  indicating addition and one indicating subtraction. */
1152         uint64_t ptune_offset          : 4;  /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
1153                                                                  indicating addition and one indicating subtraction. */
1154         uint64_t ddr__ntune            : 5;  /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
1155                                                                  compensation impedance on N-pulldown. */
1156         uint64_t ddr__ptune            : 5;  /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
1157                                                                  compensation impedance on P-pullup. */
1158         uint64_t rclk_char_mode        : 1;  /**< [ 50: 50](R/W) Reserved.
1159                                                                  Internal:
1160                                                                  Select core clock characterization mode. */
1161         uint64_t reserved_51_63        : 13;
1162 #endif /* Word 0 - End */
1163     } s;
1164     struct bdk_lmcx_comp_ctl2_cn9
1165     {
1166 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
1167         uint64_t reserved_51_63        : 13;
1168         uint64_t rclk_char_mode        : 1;  /**< [ 50: 50](R/W) Reserved.
1169                                                                  Internal:
1170                                                                  Select core clock characterization mode. */
1171         uint64_t ddr__ptune            : 5;  /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
1172                                                                  compensation impedance on P-pullup. */
1173         uint64_t ddr__ntune            : 5;  /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
1174                                                                  compensation impedance on N-pulldown. */
1175         uint64_t ptune_offset          : 4;  /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
1176                                                                  indicating addition and one indicating subtraction. */
1177         uint64_t ntune_offset          : 4;  /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
1178                                                                  indicating addition and one indicating subtraction. */
1179         uint64_t m180                  : 1;  /**< [ 31: 31](R/W) Reserved; must be zero.
1180                                                                  Internal:
1181                                                                  Cap impedance at 180 ohm, instead of 240 ohm. */
1182         uint64_t byp                   : 1;  /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
1183                                                                  [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
1184         uint64_t ptune                 : 5;  /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
1185         uint64_t ntune                 : 5;  /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
1186         uint64_t rodt_ctl              : 4;  /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
1187                                                                    0x0 = No ODT.
1188                                                                    0x1 = 34 ohm.
1189                                                                    0x2 = 40 ohm.
1190                                                                    0x3 = 50 ohm.
1191                                                                    0x4 = 67 ohm.
1192                                                                    0x5 = 100 ohm.
1193                                                                    0x6 = 200 ohm.
1194                                                                    _ else = Reserved. */
1195         uint64_t control_ctl           : 4;  /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
1196 
1197                                                                    0x0 = Reserved.
1198                                                                    0x1 = 21 ohm.
1199                                                                    0x2 = 24 ohm.
1200                                                                    0x3 = 27 ohm.
1201                                                                    0x4 = 30 ohm.
1202                                                                    0x5 = 36 ohm.
1203                                                                    0x6 = 44 ohm.
1204                                                                    _ else = Reserved. */
1205         uint64_t cmd_ctl               : 4;  /**< [ 11:  8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
1206                                                                  DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
1207 
1208                                                                    0x0 = Reserved.
1209                                                                    0x1 = 21 ohm.
1210                                                                    0x2 = 24 ohm.
1211                                                                    0x3 = 27 ohm.
1212                                                                    0x4 = 30 ohm.
1213                                                                    0x5 = 36 ohm.
1214                                                                    0x6 = 44 ohm.
1215                                                                    _ else = Reserved. */
1216         uint64_t ck_ctl                : 4;  /**< [  7:  4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
1217 
1218                                                                    0x0 = Reserved.
1219                                                                    0x1 = 21 ohm.
1220                                                                    0x2 = 24 ohm.
1221                                                                    0x3 = 27 ohm.
1222                                                                    0x4 = 30 ohm.
1223                                                                    0x5 = 36 ohm.
1224                                                                    0x6 = 44 ohm.
1225                                                                    _ else = Reserved. */
1226         uint64_t dqx_ctl               : 4;  /**< [  3:  0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
1227                                                                    0x1 = 20 ohm.
1228                                                                    0x2 = 22 ohm.
1229                                                                    0x3 = 25 ohm.
1230                                                                    0x4 = 29 ohm.
1231                                                                    0x5 = 34 ohm.
1232                                                                    0x6 = 40 ohm.
1233                                                                    0x7 = 50 ohm.
1234                                                                    _ else = Reserved. */
1235 #else /* Word 0 - Little Endian */
1236         uint64_t dqx_ctl               : 4;  /**< [  3:  0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
1237                                                                    0x1 = 20 ohm.
1238                                                                    0x2 = 22 ohm.
1239                                                                    0x3 = 25 ohm.
1240                                                                    0x4 = 29 ohm.
1241                                                                    0x5 = 34 ohm.
1242                                                                    0x6 = 40 ohm.
1243                                                                    0x7 = 50 ohm.
1244                                                                    _ else = Reserved. */
1245         uint64_t ck_ctl                : 4;  /**< [  7:  4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
1246 
1247                                                                    0x0 = Reserved.
1248                                                                    0x1 = 21 ohm.
1249                                                                    0x2 = 24 ohm.
1250                                                                    0x3 = 27 ohm.
1251                                                                    0x4 = 30 ohm.
1252                                                                    0x5 = 36 ohm.
1253                                                                    0x6 = 44 ohm.
1254                                                                    _ else = Reserved. */
1255         uint64_t cmd_ctl               : 4;  /**< [ 11:  8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
1256                                                                  DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
1257 
1258                                                                    0x0 = Reserved.
1259                                                                    0x1 = 21 ohm.
1260                                                                    0x2 = 24 ohm.
1261                                                                    0x3 = 27 ohm.
1262                                                                    0x4 = 30 ohm.
1263                                                                    0x5 = 36 ohm.
1264                                                                    0x6 = 44 ohm.
1265                                                                    _ else = Reserved. */
1266         uint64_t control_ctl           : 4;  /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
1267 
1268                                                                    0x0 = Reserved.
1269                                                                    0x1 = 21 ohm.
1270                                                                    0x2 = 24 ohm.
1271                                                                    0x3 = 27 ohm.
1272                                                                    0x4 = 30 ohm.
1273                                                                    0x5 = 36 ohm.
1274                                                                    0x6 = 44 ohm.
1275                                                                    _ else = Reserved. */
1276         uint64_t rodt_ctl              : 4;  /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
1277                                                                    0x0 = No ODT.
1278                                                                    0x1 = 34 ohm.
1279                                                                    0x2 = 40 ohm.
1280                                                                    0x3 = 50 ohm.
1281                                                                    0x4 = 67 ohm.
1282                                                                    0x5 = 100 ohm.
1283                                                                    0x6 = 200 ohm.
1284                                                                    _ else = Reserved. */
1285         uint64_t ntune                 : 5;  /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
1286         uint64_t ptune                 : 5;  /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
1287         uint64_t byp                   : 1;  /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
1288                                                                  [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
1289         uint64_t m180                  : 1;  /**< [ 31: 31](R/W) Reserved; must be zero.
1290                                                                  Internal:
1291                                                                  Cap impedance at 180 ohm, instead of 240 ohm. */
1292         uint64_t ntune_offset          : 4;  /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
1293                                                                  indicating addition and one indicating subtraction. */
1294         uint64_t ptune_offset          : 4;  /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
1295                                                                  indicating addition and one indicating subtraction. */
1296         uint64_t ddr__ntune            : 5;  /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
1297                                                                  compensation impedance on N-pulldown. */
1298         uint64_t ddr__ptune            : 5;  /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
1299                                                                  compensation impedance on P-pullup. */
1300         uint64_t rclk_char_mode        : 1;  /**< [ 50: 50](R/W) Reserved.
1301                                                                  Internal:
1302                                                                  Select core clock characterization mode. */
1303         uint64_t reserved_51_63        : 13;
1304 #endif /* Word 0 - End */
1305     } cn9;
1306     /* struct bdk_lmcx_comp_ctl2_s cn81xx; */
1307     /* struct bdk_lmcx_comp_ctl2_s cn88xx; */
1308     struct bdk_lmcx_comp_ctl2_cn83xx
1309     {
1310 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
1311         uint64_t reserved_51_63        : 13;
1312         uint64_t rclk_char_mode        : 1;  /**< [ 50: 50](R/W) Reserved.
1313                                                                  Internal:
1314                                                                  Select RCLK characterization mode. */
1315         uint64_t ddr__ptune            : 5;  /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
1316                                                                  compensation impedance on P-pullup. */
1317         uint64_t ddr__ntune            : 5;  /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
1318                                                                  compensation impedance on N-pulldown. */
1319         uint64_t ptune_offset          : 4;  /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
1320                                                                  indicating addition and one indicating subtraction. */
1321         uint64_t ntune_offset          : 4;  /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
1322                                                                  indicating addition and one indicating subtraction. */
1323         uint64_t m180                  : 1;  /**< [ 31: 31](R/W) Reserved; must be zero.
1324                                                                  Internal:
1325                                                                  Cap impedance at 180 ohm, instead of 240 ohm. */
1326         uint64_t byp                   : 1;  /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
1327                                                                  [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
1328         uint64_t ptune                 : 5;  /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
1329         uint64_t ntune                 : 5;  /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
1330         uint64_t rodt_ctl              : 4;  /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
1331                                                                    0x0 = No ODT.
1332                                                                    0x1 = 20 ohm.
1333                                                                    0x2 = 30 ohm.
1334                                                                    0x3 = 40 ohm.
1335                                                                    0x4 = 60 ohm.
1336                                                                    0x5 = 120 ohm.
1337                                                                    _ else = Reserved.
1338 
1339                                                                  In DDR4 mode:
1340                                                                    0x0 = No ODT.
1341                                                                    0x1 = 40 ohm.
1342                                                                    0x2 = 60 ohm.
1343                                                                    0x3 = 80 ohm.
1344                                                                    0x4 = 120 ohm.
1345                                                                    0x5 = 240 ohm.
1346                                                                    0x6 = 34 ohm.
1347                                                                    0x7 = 48 ohm.
1348                                                                    _ else = Reserved. */
1349         uint64_t control_ctl           : 4;  /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
1350 
1351                                                                  In DDR3 mode:
1352                                                                    0x1 = 24 ohm.
1353                                                                    0x2 = 26.67 ohm.
1354                                                                    0x3 = 30 ohm.
1355                                                                    0x4 = 34.3 ohm.
1356                                                                    0x5 = 40 ohm.
1357                                                                    0x6 = 48 ohm.
1358                                                                    0x7 = 60 ohm.
1359                                                                    _ else = Reserved.
1360 
1361                                                                  In DDR4 mode:
1362                                                                    0x0 = Reserved.
1363                                                                    0x1 = Reserved.
1364                                                                    0x2 = 26 ohm.
1365                                                                    0x3 = 30 ohm.
1366                                                                    0x4 = 34 ohm.
1367                                                                    0x5 = 40 ohm.
1368                                                                    0x6 = 48 ohm.
1369                                                                    _ else = Reserved. */
1370         uint64_t cmd_ctl               : 4;  /**< [ 11:  8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
1371                                                                  DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
1372 
1373                                                                  In DDR3 mode:
1374                                                                    0x1 = 24 ohm.
1375                                                                    0x2 = 26.67 ohm.
1376                                                                    0x3 = 30 ohm.
1377                                                                    0x4 = 34.3 ohm.
1378                                                                    0x5 = 40 ohm.
1379                                                                    0x6 = 48 ohm.
1380                                                                    0x7 = 60 ohm.
1381                                                                    _ else = Reserved.
1382 
1383                                                                  In DDR4 mode:
1384                                                                    0x0 = Reserved.
1385                                                                    0x1 = Reserved.
1386                                                                    0x2 = 26 ohm.
1387                                                                    0x3 = 30 ohm.
1388                                                                    0x4 = 34 ohm.
1389                                                                    0x5 = 40 ohm.
1390                                                                    0x6 = 48 ohm.
1391                                                                    _ else = Reserved. */
1392         uint64_t ck_ctl                : 4;  /**< [  7:  4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
1393 
1394                                                                  In DDR3 mode:
1395                                                                    0x1 = 24 ohm.
1396                                                                    0x2 = 26.67 ohm.
1397                                                                    0x3 = 30 ohm.
1398                                                                    0x4 = 34.3 ohm.
1399                                                                    0x5 = 40 ohm.
1400                                                                    0x6 = 48 ohm.
1401                                                                    0x7 = 60 ohm.
1402                                                                    _ else = Reserved.
1403 
1404                                                                  In DDR4 mode:
1405                                                                    0x0 = Reserved.
1406                                                                    0x1 = Reserved.
1407                                                                    0x2 = 26 ohm.
1408                                                                    0x3 = 30 ohm.
1409                                                                    0x4 = 34 ohm.
1410                                                                    0x5 = 40 ohm.
1411                                                                    0x6 = 48 ohm.
1412                                                                    _ else = Reserved. */
1413         uint64_t dqx_ctl               : 4;  /**< [  3:  0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
1414                                                                    0x1 = 24 ohm.
1415                                                                    0x2 = 26.67 ohm.
1416                                                                    0x3 = 30 ohm.
1417                                                                    0x4 = 34.3 ohm.
1418                                                                    0x5 = 40 ohm.
1419                                                                    0x6 = 48 ohm.
1420                                                                    0x7 = 60 ohm.
1421                                                                    _ else = Reserved. */
1422 #else /* Word 0 - Little Endian */
1423         uint64_t dqx_ctl               : 4;  /**< [  3:  0](R/W) Drive strength control for DDR_DQ* /DDR_CB* /DDR_DQS_*_P/N drivers.
1424                                                                    0x1 = 24 ohm.
1425                                                                    0x2 = 26.67 ohm.
1426                                                                    0x3 = 30 ohm.
1427                                                                    0x4 = 34.3 ohm.
1428                                                                    0x5 = 40 ohm.
1429                                                                    0x6 = 48 ohm.
1430                                                                    0x7 = 60 ohm.
1431                                                                    _ else = Reserved. */
1432         uint64_t ck_ctl                : 4;  /**< [  7:  4](R/W) Drive strength control for DDR_CK_*_P/N drivers.
1433 
1434                                                                  In DDR3 mode:
1435                                                                    0x1 = 24 ohm.
1436                                                                    0x2 = 26.67 ohm.
1437                                                                    0x3 = 30 ohm.
1438                                                                    0x4 = 34.3 ohm.
1439                                                                    0x5 = 40 ohm.
1440                                                                    0x6 = 48 ohm.
1441                                                                    0x7 = 60 ohm.
1442                                                                    _ else = Reserved.
1443 
1444                                                                  In DDR4 mode:
1445                                                                    0x0 = Reserved.
1446                                                                    0x1 = Reserved.
1447                                                                    0x2 = 26 ohm.
1448                                                                    0x3 = 30 ohm.
1449                                                                    0x4 = 34 ohm.
1450                                                                    0x5 = 40 ohm.
1451                                                                    0x6 = 48 ohm.
1452                                                                    _ else = Reserved. */
1453         uint64_t cmd_ctl               : 4;  /**< [ 11:  8](R/W) Drive strength control for DDR_RAS_L_A\<16\>/DDR_CAS_L_A\<15\>/DDR_WE_L_A\<14\>/DDR_A\<13:0\>/
1454                                                                  DDR_A\<15\>_BG1/DDR_A\<14\>_BG0/DDR_BA* /DDR_BA2_TEN/DDR_PAR/DDR_RESET_L drivers.
1455 
1456                                                                  In DDR3 mode:
1457                                                                    0x1 = 24 ohm.
1458                                                                    0x2 = 26.67 ohm.
1459                                                                    0x3 = 30 ohm.
1460                                                                    0x4 = 34.3 ohm.
1461                                                                    0x5 = 40 ohm.
1462                                                                    0x6 = 48 ohm.
1463                                                                    0x7 = 60 ohm.
1464                                                                    _ else = Reserved.
1465 
1466                                                                  In DDR4 mode:
1467                                                                    0x0 = Reserved.
1468                                                                    0x1 = Reserved.
1469                                                                    0x2 = 26 ohm.
1470                                                                    0x3 = 30 ohm.
1471                                                                    0x4 = 34 ohm.
1472                                                                    0x5 = 40 ohm.
1473                                                                    0x6 = 48 ohm.
1474                                                                    _ else = Reserved. */
1475         uint64_t control_ctl           : 4;  /**< [ 15: 12](R/W) Drive strength control for DDR_DIMMx_CS*_L/DDR_DIMMx_ODT_* /DDR_DIMMx_CKE* drivers.
1476 
1477                                                                  In DDR3 mode:
1478                                                                    0x1 = 24 ohm.
1479                                                                    0x2 = 26.67 ohm.
1480                                                                    0x3 = 30 ohm.
1481                                                                    0x4 = 34.3 ohm.
1482                                                                    0x5 = 40 ohm.
1483                                                                    0x6 = 48 ohm.
1484                                                                    0x7 = 60 ohm.
1485                                                                    _ else = Reserved.
1486 
1487                                                                  In DDR4 mode:
1488                                                                    0x0 = Reserved.
1489                                                                    0x1 = Reserved.
1490                                                                    0x2 = 26 ohm.
1491                                                                    0x3 = 30 ohm.
1492                                                                    0x4 = 34 ohm.
1493                                                                    0x5 = 40 ohm.
1494                                                                    0x6 = 48 ohm.
1495                                                                    _ else = Reserved. */
1496         uint64_t rodt_ctl              : 4;  /**< [ 19: 16](R/W) RODT NCTL impedance control bits. This field controls ODT values during a memory read.
1497                                                                    0x0 = No ODT.
1498                                                                    0x1 = 20 ohm.
1499                                                                    0x2 = 30 ohm.
1500                                                                    0x3 = 40 ohm.
1501                                                                    0x4 = 60 ohm.
1502                                                                    0x5 = 120 ohm.
1503                                                                    _ else = Reserved.
1504 
1505                                                                  In DDR4 mode:
1506                                                                    0x0 = No ODT.
1507                                                                    0x1 = 40 ohm.
1508                                                                    0x2 = 60 ohm.
1509                                                                    0x3 = 80 ohm.
1510                                                                    0x4 = 120 ohm.
1511                                                                    0x5 = 240 ohm.
1512                                                                    0x6 = 34 ohm.
1513                                                                    0x7 = 48 ohm.
1514                                                                    _ else = Reserved. */
1515         uint64_t ntune                 : 5;  /**< [ 24: 20](R/W) NCTL impedance control in bypass mode. */
1516         uint64_t ptune                 : 5;  /**< [ 29: 25](R/W) PCTL impedance control in bypass mode. */
1517         uint64_t byp                   : 1;  /**< [ 30: 30](R/W) Bypass mode. When set, [PTUNE],[NTUNE] are the compensation setting. When clear,
1518                                                                  [DDR__PTUNE],[DDR__NTUNE] are the compensation setting. */
1519         uint64_t m180                  : 1;  /**< [ 31: 31](R/W) Reserved; must be zero.
1520                                                                  Internal:
1521                                                                  Cap impedance at 180 ohm, instead of 240 ohm. */
1522         uint64_t ntune_offset          : 4;  /**< [ 35: 32](R/W) Ntune offset value. This is a signed value where the MSB is a sign bit, with zero
1523                                                                  indicating addition and one indicating subtraction. */
1524         uint64_t ptune_offset          : 4;  /**< [ 39: 36](R/W) Ptune offset value. This is a signed value where the MSB is a sign bit, with zero
1525                                                                  indicating addition and one indicating subtraction. */
1526         uint64_t ddr__ntune            : 5;  /**< [ 44: 40](RO/H) DDR NCTL from compensation circuit. The encoded value provides debug information for the
1527                                                                  compensation impedance on N-pulldown. */
1528         uint64_t ddr__ptune            : 5;  /**< [ 49: 45](RO/H) DDR PCTL from compensation circuit. The encoded value provides debug information for the
1529                                                                  compensation impedance on P-pullup. */
1530         uint64_t rclk_char_mode        : 1;  /**< [ 50: 50](R/W) Reserved.
1531                                                                  Internal:
1532                                                                  Select RCLK characterization mode. */
1533         uint64_t reserved_51_63        : 13;
1534 #endif /* Word 0 - End */
1535     } cn83xx;
1536 };
1537 typedef union bdk_lmcx_comp_ctl2 bdk_lmcx_comp_ctl2_t;
1538 
1539 static inline uint64_t BDK_LMCX_COMP_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
1540 static inline uint64_t BDK_LMCX_COMP_CTL2(unsigned long a)
1541 {
1542     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
1543         return 0x87e0880001b8ll + 0x1000000ll * ((a) & 0x0);
1544     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
1545         return 0x87e0880001b8ll + 0x1000000ll * ((a) & 0x1);
1546     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
1547         return 0x87e0880001b8ll + 0x1000000ll * ((a) & 0x3);
1548     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
1549         return 0x87e0880001b8ll + 0x1000000ll * ((a) & 0x3);
1550     __bdk_csr_fatal("LMCX_COMP_CTL2", 1, a, 0, 0, 0);
1551 }
1552 
1553 #define typedef_BDK_LMCX_COMP_CTL2(a) bdk_lmcx_comp_ctl2_t
1554 #define bustype_BDK_LMCX_COMP_CTL2(a) BDK_CSR_TYPE_RSL
1555 #define basename_BDK_LMCX_COMP_CTL2(a) "LMCX_COMP_CTL2"
1556 #define device_bar_BDK_LMCX_COMP_CTL2(a) 0x0 /* PF_BAR0 */
1557 #define busnum_BDK_LMCX_COMP_CTL2(a) (a)
1558 #define arguments_BDK_LMCX_COMP_CTL2(a) (a),-1,-1,-1
1559 
1560 /**
1561  * Register (RSL) lmc#_config
1562  *
1563  * LMC Memory Configuration Register
1564  * This register controls certain parameters required for memory configuration. Note the
1565  * following:
1566  * * Priority order for hardware write operations to
1567  * LMC()_CONFIG/LMC()_NXM_FADR/LMC()_ECC_SYND: DED error \> SEC error.
1568  * * The self-refresh entry sequence(s) power the DLL up/down (depending on
1569  * LMC()_MODEREG_PARAMS0[DLL]) when LMC()_CONFIG[SREF_WITH_DLL] is set.
1570  * * Prior to the self-refresh exit sequence, LMC()_MODEREG_PARAMS0 should be reprogrammed
1571  * (if needed) to the appropriate values.
1572  *
1573  * See LMC initialization sequence for the LMC bringup sequence.
1574  */
1575 union bdk_lmcx_config
1576 {
1577     uint64_t u;
1578     struct bdk_lmcx_config_s
1579     {
1580 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
1581         uint64_t lrdimm_ena            : 1;  /**< [ 63: 63](R/W) Reserved.
1582                                                                  Internal:
1583                                                                  Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
1584         uint64_t bg2_enable            : 1;  /**< [ 62: 62](R/W) BG1 enable bit. Only has an effect when LMC()_CONFIG[MODEDDR4] = 1.
1585                                                                  Set to one when using DDR4 x4 or x8 parts.
1586                                                                  Clear to zero when using DDR4 x16 parts. */
1587         uint64_t mode_x4dev            : 1;  /**< [ 61: 61](R/W) DDR x4 device mode. */
1588         uint64_t mode32b               : 1;  /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
1589         uint64_t scrz                  : 1;  /**< [ 59: 59](R/W1S/H) Hide LMC()_SCRAMBLE_CFG0 and LMC()_SCRAMBLE_CFG1 when set. */
1590         uint64_t early_unload_d1_r1    : 1;  /**< [ 58: 58](R/W) Reserved, MBZ.
1591                                                                  Internal:
1592                                                                  When set, unload the PHY silo one cycle early for Rank 3 reads.
1593                                                                  The recommended EARLY_UNLOAD_D1_R1 value can be calculated after the final
1594                                                                  LMC()_RLEVEL_RANK3[BYTE*] values are selected (as part of read leveling initialization).
1595                                                                  Then, determine the largest read leveling setting for rank 3 (i.e. calculate
1596                                                                  maxset=MAX(LMC()_RLEVEL_RANK3[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R1 when the
1597                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset\<1:0\>
1598                                                                  !=3)). */
1599         uint64_t early_unload_d1_r0    : 1;  /**< [ 57: 57](R/W) Reserved, MBZ.
1600                                                                  Internal:
1601                                                                  When set, unload the PHY silo one cycle early for Rank 2 reads.
1602                                                                  The recommended EARLY_UNLOAD_D1_R0 value can be calculated after the final
1603                                                                  LMC()_RLEVEL_RANK2[BYTE*] values are selected (as part of read leveling initialization).
1604                                                                  Then, determine the largest read leveling setting for rank 2 (i.e. calculate
1605                                                                  maxset=MAX(LMC()_RLEVEL_RANK2[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R0 when the
1606                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset\<1:0\>
1607                                                                  !=3)). */
1608         uint64_t early_unload_d0_r1    : 1;  /**< [ 56: 56](R/W) Reserved, MBZ.
1609                                                                  Internal:
1610                                                                  When set, unload the PHY silo one cycle early for Rank 1 reads.
1611                                                                  The recommended EARLY_UNLOAD_D0_R1 value can be calculated after the final
1612                                                                  LMC()_RLEVEL_RANK1[BYTE*] values are selected (as part of read leveling initialization).
1613                                                                  Then, determine the largest read leveling setting for rank 1 (i.e. calculate
1614                                                                  maxset=MAX(LMC()_RLEVEL_RANK1[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R1 when the
1615                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset\<1:0\>
1616                                                                  !=3)). */
1617         uint64_t early_unload_d0_r0    : 1;  /**< [ 55: 55](R/W) Reserved, MBZ.
1618                                                                  Internal:
1619                                                                  When set, unload the PHY silo one cycle early for Rank 0 reads.
1620                                                                  The recommended EARLY_UNLOAD_D0_R0 value can be calculated after the final
1621                                                                  LMC()_RLEVEL_RANK0[BYTE*] values are selected (as part of read leveling initialization).
1622                                                                  Then, determine the largest read leveling setting for rank 0 (i.e. calculate
1623                                                                  maxset=MAX(LMC()_RLEVEL_RANK0[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R0 when the
1624                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset\<1:0\>
1625                                                                  !=3)). */
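        /* Sketch of the EARLY_UNLOAD_D*_R* recommendation described above,
         * assuming maxset already holds MAX(LMC()_RLEVEL_RANKn[BYTEi]) for
         * the rank in question (illustrative only):
         *
         *     early_unload = ((maxset & 0x3) != 0x3);
         */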
1626         uint64_t init_status           : 4;  /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
1627                                                                  initialized.
1628                                                                  Software must set necessary [RANKMASK] bits before executing the initialization sequence
1629                                                                  using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
1630                                                                  the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
1631                                                                  exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
1632                                                                  precharge
1633                                                                  power-down entry/exit, and self-refresh entry SEQ_SELs. */
1634         uint64_t mirrmask              : 4;  /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
1635                                                                  [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
1636                                                                  0 \<= n \<= 3.
1637                                                                  In DDR3, a mirrored read/write operation has the following differences:
1638                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
1639                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
1640                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
1641                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
1642 
1643                                                                  When RANK_ENA = 0, MIRRMASK\<1\> MBZ.
1644 
1645                                                                  In DDR4, a mirrored read/write operation has the following differences:
1646                                                                  * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
1647                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
1648                                                                  * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
1649                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
1650                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
1651                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
1652 
1653                                                                  For CN70XX, MIRRMASK\<3:2\> MBZ.
1654                                                                  * When RANK_ENA = 0, MIRRMASK\<1\> MBZ." */
1655         uint64_t rankmask              : 4;  /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
1656                                                                  i, set [RANKMASK]\<i\>:
1657 
1658                                                                  \<pre\>
1659                                                                                [RANK_ENA]=1   [RANK_ENA]=0
1660                                                                  RANKMASK\<0\> = DIMM0_CS0      DIMM0_CS0
1661                                                                  RANKMASK\<1\> = DIMM0_CS1      MBZ
1662                                                                  RANKMASK\<2\> = Reserved       Reserved
1663                                                                  RANKMASK\<3\> = Reserved       Reserved
1664                                                                  \</pre\>
1665 
1666                                                                  For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
1667                                                                  have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
1668                                                                  power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
1669                                                                  [RANKMASK]\<3\> MBZ. */
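        /* Example (illustrative): with [RANK_ENA]=1, read leveling DIMM0_CS1
         * (rank 1) alone would use [RANKMASK]=0x2 for that leveling sequence,
         * since only one bit may be set per leveling pass. */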
1670         uint64_t rank_ena              : 1;  /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
1671                                                                  * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
1672                                                                  and
1673                                                                  ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
1674                                                                  * Write zero for SINGLE ranked DIMMs." */
1675         uint64_t sref_with_dll         : 1;  /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
1676                                                                  MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
1677                                                                  MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
1678                                                                  refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
1679                                                                  instruction sequences do not write any mode registers in the DDR3/4 parts. */
1680         uint64_t early_dqx             : 1;  /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
1681                                                                  lines have a larger delay than the CK line. */
1682         uint64_t ref_zqcs_int          : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
1683                                                                  control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
1684                                                                  nonzero value.
1685                                                                  ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
1686                                                                  triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
1687                                                                  \<39:18\>
1688                                                                  are equal to 0x0.
1689 
1690                                                                  The ZQCS timer only decrements when the refresh timer is zero.
1691 
1692                                                                  Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
1693 
1694                                                                  A value of zero in bits \<24:18\> will effectively turn off refresh.
1695 
1696                                                                  Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
1697                                                                  effective period programmed in bits \<24:18\>. Note that this value should always be greater
1698                                                                  than 32, to account for resistor calibration delays.
1699 
1700                                                                  000_00000000_0000000: Reserved
1701 
1702                                                                  Max refresh interval = 127 * 512 = 65024 CK cycles.
1703 
1704                                                                  Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
1705 
1706                                                                  If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
1707                                                                  operations per second.
1708                                                                  LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
1709                                                                  send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
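        /* Worked example of the programming formulas above (the CK period and
         * tREFI values are illustrative, not chip requirements): with a
         * 1.25 ns CK period and tREFI = 7.8 us,
         *   <24:18> = RND-DN(7800 ns / 1.25 ns / 512) = RND-DN(12.19) = 12,
         * giving an effective refresh period of 12 * 512 * 1.25 ns = 7.68 us;
         * <36:25> then follows as RND-DN(ZQCS_Period / 7.68 us) - 1. */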
1710         uint64_t reset                 : 1;  /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
1711                                                                  To cause the reset, software writes this to a one, then rewrites it to a zero. */
1712         uint64_t ecc_adr               : 1;  /**< [ 16: 16](R/W) Include memory reference address in the ECC calculation.
1713                                                                  0 = disabled, 1 = enabled. */
1714         uint64_t forcewrite            : 4;  /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
1715                                                                  cycles. 0 = disabled. */
1716         uint64_t idlepower             : 3;  /**< [ 11:  9](R/W) Enter precharge power-down mode after the memory controller has been idle for
1717                                                                  2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
1718 
1719                                                                  This field should only be programmed after initialization.
1720                                                                  LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
1721                                                                  precharge power-down. */
1722         uint64_t pbank_lsb             : 4;  /**< [  8:  5](R/W) DIMM address bit select. Referring back to the explanation for [ROW_LSB], [PBANK_LSB] would be:
1723                                                                  [ROW_LSB] bit + num_rowbits + num_rankbits
1724 
1725                                                                  Values for [PBANK_LSB] are as follows:
1726                                                                  0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
1727                                                                  0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
1728                                                                  0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
1729                                                                  0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
1730                                                                  0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
1731                                                                  0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
1732                                                                  0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
1733                                                                  0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
1734                                                                  0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
1735                                                                  0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
1736                                                                  0xA: DIMM = 0;           if [RANK_ENA]=1, rank = mem_adr\<37\>.
1737                                                                  0xB-0xF: Reserved.
1738 
1739                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
1740                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
1741                                                                  16. So, row = mem_adr\<29:16\>.
1742 
1743                                                                  With [RANK_ENA] = 0, [PBANK_LSB] = 2.
1744                                                                  With [RANK_ENA] = 1, [PBANK_LSB] = 3.
1745 
1746                                                                  Internal:
1747                                                                  When interfacing with 8H 3DS, set this 0xA regardless of [RANK_ENA] value. */
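        /* Derivation of the Samsung K4B1G0846C-F7 example above (illustrative):
         * the table maps DIMM = mem_adr<28 + [PBANK_LSB]>. With [ROW_LSB]
         * selecting address bit 16 and 14 row bits, the row occupies
         * mem_adr<29:16>, so the DIMM boundary is bit 30 when [RANK_ENA]=0
         * ([PBANK_LSB]=0x2) or bit 31 with one extra rank bit when
         * [RANK_ENA]=1 ([PBANK_LSB]=0x3). */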
1748         uint64_t row_lsb               : 3;  /**< [  4:  2](R/W) Row address bit select.
1749                                                                  0x0 = Address bit 14 is LSB.
1750                                                                  0x1 = Address bit 15 is LSB.
1751                                                                  0x2 = Address bit 16 is LSB.
1752                                                                  0x3 = Address bit 17 is LSB.
1753                                                                  0x4 = Address bit 18 is LSB.
1754                                                                  0x5 = Address bit 19 is LSB.
1755                                                                  0x6 = Address bit 20 is LSB.
1756                                                                  0x7 = Reserved.
1757 
1758                                                                  Encoding used to determine which memory address bit position represents the low order DDR
1759                                                                  ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
1760                                                                  (bnk,row,col,rank and DIMM) and that is a function of the following:
1761                                                                  * Datapath width (64).
1762                                                                  * Number of banks (8).
1763                                                                  * Number of column bits of the memory part--specified indirectly by this register.
1764                                                                  * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
1765                                                                  * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
1766                                                                  * Number of DIMMs in the system--specified by [PBANK_LSB].
1767 
1768                                                                  Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
1769                                                                  mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
1770                                                                  0x1 (64b).
1771 
1772                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
1773                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
1774                                                                  16. So, row = mem_adr\<29:16\>.
1775 
1776                                                                  Refer to the cache-block read transaction example. */
1777         uint64_t ecc_ena               : 1;  /**< [  1:  1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
1778                                                                  DIMMs with ECC; zero, otherwise.
1779 
1780                                                                  * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
1781                                                                  generated for the 64 bits of data being written to memory. On subsequent read
1782                                                                  operations, this code is used to check for single-bit errors (which are auto-corrected) and
1783                                                                  double-bit errors (which are reported).
1784 
1785                                                                  * When not turned on, DQ\<71:64\> are driven to zero. Refer to the SEC_ERR, DED_ERR,
1786                                                                  LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostic information when there is
1787                                                                  an error. */
1788         uint64_t reserved_0            : 1;
1789 #else /* Word 0 - Little Endian */
1790         uint64_t reserved_0            : 1;
1791         uint64_t ecc_ena               : 1;  /**< [  1:  1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
1792                                                                  DIMMs with ECC; zero, otherwise.
1793 
1794                                                                  * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
1795                                                                  generated for the 64 bits of data being written to memory. On subsequent read
1796                                                                  operations, this code is used to check for single-bit errors (which are auto-corrected) and
1797                                                                  double-bit errors (which are reported).
1798 
1799                                                                  * When not turned on, DQ\<71:64\> are driven to zero. Refer to the SEC_ERR, DED_ERR,
1800                                                                  LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostic information when there is
1801                                                                  an error. */
1802         uint64_t row_lsb               : 3;  /**< [  4:  2](R/W) Row address bit select.
1803                                                                  0x0 = Address bit 14 is LSB.
1804                                                                  0x1 = Address bit 15 is LSB.
1805                                                                  0x2 = Address bit 16 is LSB.
1806                                                                  0x3 = Address bit 17 is LSB.
1807                                                                  0x4 = Address bit 18 is LSB.
1808                                                                  0x5 = Address bit 19 is LSB.
1809                                                                  0x6 = Address bit 20 is LSB.
1810                                                                  0x7 = Reserved.
1811 
1812                                                                  Encoding used to determine which memory address bit position represents the low order DDR
1813                                                                  ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
1814                                                                  (bnk,row,col,rank and DIMM) and that is a function of the following:
1815                                                                  * Datapath width (64).
1816                                                                  * Number of banks (8).
1817                                                                  * Number of column bits of the memory part--specified indirectly by this register.
1818                                                                  * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
1819                                                                  * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
1820                                                                  * Number of DIMMs in the system--specified by [PBANK_LSB].
1821 
1822                                                                  Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
1823                                                                  mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
1824                                                                  0x1 (64b).
1825 
1826                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
1827                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
1828                                                                  16. So, row = mem_adr\<29:16\>.
1829 
1830                                                                  Refer to the cache-block read transaction example. */
1831         uint64_t pbank_lsb             : 4;  /**< [  8:  5](R/W) DIMM address bit select. Referring back to the explanation for [ROW_LSB], [PBANK_LSB] would be:
1832                                                                  [ROW_LSB] bit + num_rowbits + num_rankbits
1833 
1834                                                                  Values for [PBANK_LSB] are as follows:
1835                                                                  0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
1836                                                                  0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
1837                                                                  0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
1838                                                                  0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
1839                                                                  0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
1840                                                                  0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
1841                                                                  0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
1842                                                                  0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
1843                                                                  0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
1844                                                                  0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
1845                                                                  0xA: DIMM = 0;           if [RANK_ENA]=1, rank = mem_adr\<37\>.
1846                                                                  0xB-0xF: Reserved.
1847 
1848                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
1849                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
1850                                                                  16. So, row = mem_adr\<29:16\>.
1851 
1852                                                                  With [RANK_ENA] = 0, [PBANK_LSB] = 2.
1853                                                                  With [RANK_ENA] = 1, [PBANK_LSB] = 3.
1854 
1855                                                                  Internal:
1856                                                                  When interfacing with 8H 3DS, set this 0xA regardless of [RANK_ENA] value. */
1857         uint64_t idlepower             : 3;  /**< [ 11:  9](R/W) Enter precharge power-down mode after the memory controller has been idle for
1858                                                                  2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
1859 
1860                                                                  This field should only be programmed after initialization.
1861                                                                  LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
1862                                                                  precharge power-down. */
1863         uint64_t forcewrite            : 4;  /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
1864                                                                  cycles. 0 = disabled. */
1865         uint64_t ecc_adr               : 1;  /**< [ 16: 16](R/W) Include memory reference address in the ECC calculation.
1866                                                                  0 = disabled, 1 = enabled. */
1867         uint64_t reset                 : 1;  /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
1868                                                                  To cause the reset, software writes this to a one, then rewrites it to a zero. */
1869         uint64_t ref_zqcs_int          : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
1870                                                                  control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
1871                                                                  nonzero value.
1872                                                                  ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
1873                                                                  triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
1874                                                                  \<39:18\>
1875                                                                  are equal to 0x0.
1876 
1877                                                                  The ZQCS timer only decrements when the refresh timer is zero.
1878 
1879                                                                  Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
1880 
1881                                                                  A value of zero in bits \<24:18\> will effectively turn off refresh.
1882 
1883                                                                  Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
1884                                                                  effective period programmed in bits \<24:18\>. Note that this value should always be greater
1885                                                                  than 32, to account for resistor calibration delays.
1886 
1887                                                                  000_00000000_0000000: Reserved
1888 
1889                                                                  Max refresh interval = 127 * 512 = 65024 CK cycles.
1890 
1891                                                                  Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
1892 
1893                                                                  If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
1894                                                                  operations per second.
1895                                                                  LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
1896                                                                  send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
1897         uint64_t early_dqx             : 1;  /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
1898                                                                  lines have a larger delay than the CK line. */
1899         uint64_t sref_with_dll         : 1;  /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
1900                                                                  MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
1901                                                                  MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
1902                                                                  refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
1903                                                                  instruction sequences do not write any mode registers in the DDR3/4 parts. */
1904         uint64_t rank_ena              : 1;  /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
1905                                                                  * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
1906                                                                  and
1907                                                                  ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
1908                                                                  * Write zero for SINGLE ranked DIMMs." */
1909         uint64_t rankmask              : 4;  /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
1910                                                                  i, set [RANKMASK]\<i\>:
1911 
1912                                                                  \<pre\>
1913                                                                                [RANK_ENA]=1   [RANK_ENA]=0
1914                                                                  RANKMASK\<0\> = DIMM0_CS0      DIMM0_CS0
1915                                                                  RANKMASK\<1\> = DIMM0_CS1      MBZ
1916                                                                  RANKMASK\<2\> = Reserved       Reserved
1917                                                                  RANKMASK\<3\> = Reserved       Reserved
1918                                                                  \</pre\>
1919 
1920                                                                  For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
1921                                                                  have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
1922                                                                  power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
1923                                                                  [RANKMASK]\<3\> MBZ. */
1924         uint64_t mirrmask              : 4;  /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
1925                                                                  [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
1926                                                                  0 \<= n \<= 3.
1927                                                                  In DDR3, a mirrored read/write operation has the following differences:
1928                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
1929                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
1930                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
1931                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
1932 
1933                                                                  When RANK_ENA = 0, MIRRMASK\<1\> MBZ.
1934 
1935                                                                  In DDR4, a mirrored read/write operation has the following differences:
1936                                                                  * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
1937                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
1938                                                                  * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
1939                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
1940                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
1941                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
1942 
1943                                                                  For CN70XX, MIRRMASK\<3:2\> MBZ.
1944                                                                  * When RANK_ENA = 0, MIRRMASK\<1\> MBZ." */
1945         uint64_t init_status           : 4;  /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
1946                                                                  initialized.
1947                                                                  Software must set necessary [RANKMASK] bits before executing the initialization sequence
1948                                                                  using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
1949                                                                  the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
1950                                                                  exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
1951                                                                  precharge
1952                                                                  power-down entry/exit, and self-refresh entry SEQ_SELs. */
1953         uint64_t early_unload_d0_r0    : 1;  /**< [ 55: 55](R/W) Reserved, MBZ.
1954                                                                  Internal:
1955                                                                  When set, unload the PHY silo one cycle early for Rank 0 reads.
1956                                                                  The recommended EARLY_UNLOAD_D0_R0 value can be calculated after the final
1957                                                                  LMC()_RLEVEL_RANK0[BYTE*] values are selected (as part of read leveling initialization).
1958                                                                  Then, determine the largest read leveling setting for rank 0 (i.e. calculate
1959                                                                  maxset=MAX(LMC()_RLEVEL_RANK0[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R0 when the
1960                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset\<1:0\>
1961                                                                  !=3)). */
1962         uint64_t early_unload_d0_r1    : 1;  /**< [ 56: 56](R/W) Reserved, MBZ.
1963                                                                  Internal:
1964                                                                  When set, unload the PHY silo one cycle early for Rank 1 reads.
1965                                                                  The recommended EARLY_UNLOAD_D0_R1 value can be calculated after the final
1966                                                                  LMC()_RLEVEL_RANK1[BYTE*] values are selected (as part of read leveling initialization).
1967                                                                  Then, determine the largest read leveling setting for rank 1 (i.e. calculate
1968                                                                  maxset=MAX(LMC()_RLEVEL_RANK1[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R1 when the
1969                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset\<1:0\>
1970                                                                  !=3)). */
1971         uint64_t early_unload_d1_r0    : 1;  /**< [ 57: 57](R/W) Reserved, MBZ.
1972                                                                  Internal:
1973                                                                  When set, unload the PHY silo one cycle early for Rank 2 reads.
1974                                                                  The recommended EARLY_UNLOAD_D1_R0 value can be calculated after the final
1975                                                                  LMC()_RLEVEL_RANK2[BYTE*] values are selected (as part of read leveling initialization).
1976                                                                  Then, determine the largest read leveling setting for rank 2 (i.e. calculate
1977                                                                  maxset=MAX(LMC()_RLEVEL_RANK2[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R0 when the
1978                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset\<1:0\>
1979                                                                  !=3)). */
1980         uint64_t early_unload_d1_r1    : 1;  /**< [ 58: 58](R/W) Reserved, MBZ.
1981                                                                  Internal:
1982                                                                  When set, unload the PHY silo one cycle early for Rank 3 reads.
1983                                                                  The recommended EARLY_UNLOAD_D1_R1 value can be calculated after the final
1984                                                                  LMC()_RLEVEL_RANK3[BYTE*] values are selected (as part of read leveling initialization).
1985                                                                  Then, determine the largest read leveling setting for rank 3 (i.e. calculate
1986                                                                  maxset=MAX(LMC()_RLEVEL_RANK3[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R1 when the
1987                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset\<1:0\>
1988                                                                  !=3)). */
1989         uint64_t scrz                  : 1;  /**< [ 59: 59](R/W1S/H) Hide LMC()_SCRAMBLE_CFG0 and LMC()_SCRAMBLE_CFG1 when set. */
1990         uint64_t mode32b               : 1;  /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
1991         uint64_t mode_x4dev            : 1;  /**< [ 61: 61](R/W) DDR x4 device mode. */
1992         uint64_t bg2_enable            : 1;  /**< [ 62: 62](R/W) BG1 enable bit. Only has an effect when LMC()_CONFIG[MODEDDR4] = 1.
1993                                                                  Set to one when using DDR4 x4 or x8 parts.
1994                                                                  Clear to zero when using DDR4 x16 parts. */
1995         uint64_t lrdimm_ena            : 1;  /**< [ 63: 63](R/W) Reserved.
1996                                                                  Internal:
1997                                                                  Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
1998 #endif /* Word 0 - End */
1999     } s;
2000     struct bdk_lmcx_config_cn9
2001     {
2002 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
2003         uint64_t lrdimm_ena            : 1;  /**< [ 63: 63](R/W) Reserved.
2004                                                                  Internal:
2005                                                                  Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
2006         uint64_t bg2_enable            : 1;  /**< [ 62: 62](R/W) BG1 enable bit.
2007                                                                  Set to one when using DDR4 x4 or x8 parts.
2008                                                                  Clear to zero when using DDR4 x16 parts. */
2009         uint64_t mode_x4dev            : 1;  /**< [ 61: 61](R/W) DDR x4 device mode. */
2010         uint64_t mode32b               : 1;  /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
2011         uint64_t reserved_59           : 1;
2012         uint64_t early_unload_d1_r1    : 1;  /**< [ 58: 58](R/W) Reserved, MBZ.
2013                                                                  Internal:
2014                                                                  When set, unload the PHY silo one cycle early for Rank 3 reads.
2015                                                                  The recommended [EARLY_UNLOAD_D1_R1] value can be calculated after the final
2016                                                                  LMC()_RLEVEL_RANK(3)[BYTE*] values are selected (as part of read leveling initialization).
2017                                                                  Then, determine the largest read leveling setting for rank 3 (i.e. calculate
2018                                                                  maxset=MAX(LMC()_RLEVEL_RANK(3)[BYTEi]) across all i), then set [EARLY_UNLOAD_D1_R1] when the
2019                                                                  low three bits of this largest setting is smaller than 4 (i.e.
2020                                                                  [EARLY_UNLOAD_D1_R1] = (maxset\<2:0\> \< 4)). */
2021         uint64_t early_unload_d1_r0    : 1;  /**< [ 57: 57](R/W) Reserved, MBZ.
2022                                                                  Internal:
2023                                                                  When set, unload the PHY silo one cycle early for Rank 2 reads.
2024                                                                  The recommended [EARLY_UNLOAD_D1_R0] value can be calculated after the final
2025                                                                  LMC()_RLEVEL_RANK(2)[BYTE*] values are selected (as part of read leveling initialization).
2026                                                                  Then, determine the largest read leveling setting for rank 2 (i.e. calculate
2027                                                                  maxset=MAX(LMC()_RLEVEL_RANK(2)[BYTEi]) across all i), then set [EARLY_UNLOAD_D1_R0] when the
2028                                                                  low three bits of this largest setting is smaller than 4 (i.e.
2029                                                                  [EARLY_UNLOAD_D1_R0] = (maxset\<2:0\> \< 4)). */
2030         uint64_t early_unload_d0_r1    : 1;  /**< [ 56: 56](R/W) Reserved, MBZ.
2031                                                                  Internal:
2032                                                                  When set, unload the PHY silo one cycle early for Rank 1 reads.
2033                                                                  The recommended [EARLY_UNLOAD_D0_R1] value can be calculated after the final
2034                                                                  LMC()_RLEVEL_RANK(1)[BYTE*] values are selected (as part of read leveling initialization).
2035                                                                  Then, determine the largest read leveling setting for rank 1 (i.e. calculate
2036                                                                  maxset=MAX(LMC()_RLEVEL_RANK(1)[BYTEi]) across all i), then set [EARLY_UNLOAD_D0_R1] when the
2037                                                                  low three bits of this largest setting is smaller than 4 (i.e.
2038                                                                  [EARLY_UNLOAD_D0_R1] = (maxset\<2:0\> \< 4)). */
2039         uint64_t early_unload_d0_r0    : 1;  /**< [ 55: 55](R/W) Reserved, MBZ.
2040                                                                  Internal:
2041                                                                  When set, unload the PHY silo one cycle early for Rank 0 reads.
2042                                                                  The recommended [EARLY_UNLOAD_D0_R0] value can be calculated after the final
2043                                                                  LMC()_RLEVEL_RANK(0)[BYTE*] values are selected (as part of read leveling initialization).
2044                                                                  Then, determine the largest read leveling setting for rank 0 (i.e. calculate
2045                                                                  maxset=MAX(LMC()_RLEVEL_RANK(0)[BYTEi]) across all i), then set [EARLY_UNLOAD_D0_R0] when the
2046                                                                  low three bits of this largest setting is smaller than 4 (i.e.
2047                                                                  [EARLY_UNLOAD_D0_R0] = (maxset\<2:0\> \< 4)). */
2048         uint64_t init_status           : 4;  /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
2049                                                                  initialized.
2050                                                                  Software must set necessary [RANKMASK] bits before executing the initialization sequence
2051                                                                  using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
2052                                                                  the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
2053                                                                  exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
2054                                                                  precharge
2055                                                                  power-down entry/exit, and self-refresh entry SEQ_SELs. */
2056         uint64_t mirrmask              : 4;  /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
2057                                                                  [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
2058                                                                  0 \<= n \<= 3.
2059                                                                  A mirrored read/write operation has the following differences:
2060                                                                  * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
2061                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
2062                                                                  * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
2063                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
2064                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
2065                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>." */
2066         uint64_t rankmask              : 4;  /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
2067                                                                  i, set [RANKMASK]\<i\>:
2068 
2069                                                                  \<pre\>
2070                                                                                [RANK_ENA]=1   [RANK_ENA]=0
2071                                                                  RANKMASK\<0\> = DIMM0_CS0      DIMM0_CS0
2072                                                                  RANKMASK\<1\> = DIMM0_CS1      MBZ
2073                                                                  RANKMASK\<2\> = DIMM1_CS0      DIMM1_CS0
2074                                                                  RANKMASK\<3\> = DIMM1_CS1      MBZ
2075                                                                  \</pre\>
2076 
2077                                                                  For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
2078                                                                  have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
2079                                                                  power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
2080                                                                  [RANKMASK]\<3\> MBZ. */
2081         uint64_t rank_ena              : 1;  /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
2082                                                                  * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
2083                                                                  and
2084                                                                  ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
2085                                                                  * Write zero for SINGLE ranked DIMMs." */
2086         uint64_t sref_with_dll         : 1;  /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
2087                                                                  MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
2088                                                                  MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
2089                                                                  refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
2090                                                                  instruction sequences do not write any mode registers in the DDR4 parts. */
2091         uint64_t early_dqx             : 1;  /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
2092                                                                  lines have a larger delay than the CK line. */
2093         uint64_t ref_zqcs_int          : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
2094                                                                  control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
2095                                                                  nonzero value.
2096                                                                  ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
2097                                                                  triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
2098                                                                  \<39:18\>
2099                                                                  are equal to 0x0.
2100 
2101                                                                  The ZQCS timer only decrements when the refresh timer is zero.
2102 
2103                                                                  Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
2104 
2105                                                                  A value of zero in bits \<24:18\> will effectively turn off refresh.
2106 
2107                                                                  Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
2108                                                                  effective period programmed in bits \<24:18\>. Note that this value should always be greater
2109                                                                  than 32, to account for resistor calibration delays.
2110 
2111                                                                  000_00000000_0000000: Reserved
2112 
2113                                                                  Max refresh interval = 127 * 512 = 65024 CK cycles.
2114 
2115                                                                  Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
2116 
2117                                                                  If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
2118                                                                  operations per second.
2119                                                                  LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
2120                                                                  send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
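        /* Editorial sketch (not part of the auto-generated description): one way the
         * two [REF_ZQCS_INT] subfields described above could be derived from DRAM
         * timing parameters. The helper and its argument names (trefi_ps, tzqcs_ps,
         * tck_ps) are hypothetical, not BDK APIs; the real DRAM init code may compute
         * these values differently.
         *
         *   static inline uint64_t lmc_ref_zqcs_int(uint64_t trefi_ps,  // tREFI in picoseconds
         *                                           uint64_t tzqcs_ps,  // ZQCS period in picoseconds
         *                                           uint64_t tck_ps)    // CK period in picoseconds
         *   {
         *       // Low 7 bits of the field (register <24:18>): RND-DN(TREFI / clkPeriod / 512).
         *       uint64_t ref  = trefi_ps / tck_ps / 512;             // assumes ref != 0
         *       // Upper bits of the field: RND-DN(ZQCS_Period / Refresh_Period) - 1,
         *       // where Refresh_Period is ref * 512 CK cycles.
         *       uint64_t zqcs = tzqcs_ps / (ref * 512 * tck_ps) - 1;
         *       return ref | (zqcs << 7);   // value to place in [REF_ZQCS_INT]
         *   }
         */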
2121         uint64_t reset                 : 1;  /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
2122                                                                  To cause the reset, software writes this to a one, then rewrites it to a zero. */
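        /* Editorial sketch (not auto-generated text): the [RESET] one-shot described
         * above is produced by two back-to-back writes of LMC(n)_CONFIG. The
         * lmc_config_read()/lmc_config_write() accessors below are hypothetical
         * placeholders for whatever 64-bit CSR access routine the caller uses.
         *
         *   uint64_t cfg = lmc_config_read(node, lmc);
         *   lmc_config_write(node, lmc, cfg | (1ull << 17));    // write [RESET] = 1 ...
         *   lmc_config_write(node, lmc, cfg & ~(1ull << 17));   // ... then rewrite it to 0
         */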
2123         uint64_t reserved_16           : 1;
2124         uint64_t forcewrite            : 4;  /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
2125                                                                  cycles. 0 = disabled. */
2126         uint64_t idlepower             : 3;  /**< [ 11:  9](R/W) Enter precharge power-down mode after the memory controller has been idle for
2127                                                                  2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
2128 
2129                                                                  This field should only be programmed after initialization.
2130                                                                  LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
2131                                                                  precharge power-down. */
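        /* Editorial example: with [IDLEPOWER] = 5, the controller enters precharge
         * power-down after 2^(2+5) = 128 idle CK cycles; [IDLEPOWER] = 0 disables
         * the feature entirely. */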
2132         uint64_t pbank_lsb             : 4;  /**< [  8:  5](R/W) DIMM address bit select. Reverting to the explanation for [ROW_LSB], [PBANK_LSB] would be:
2133                                                                  [ROW_LSB] bit + num_rowbits + num_rankbits
2134 
2135                                                                  Values for [PBANK_LSB] are as follows:
2136                                                                  0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
2137                                                                  0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
2138                                                                  0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
2139                                                                  0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
2140                                                                  0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
2141                                                                  0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
2142                                                                  0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
2143                                                                  0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
2144                                                                  0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
2145                                                                  0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
2146                                                                  0xA: DIMM = mem_adr\<38\>; if [RANK_ENA]=1, rank = mem_adr\<37\>.
2147                                                                  0xB: DIMM = mem_adr\<39\>; if [RANK_ENA]=1, rank = mem_adr\<38\>.
2148                                                                  0xC: DIMM = mem_adr\<40\>; if [RANK_ENA]=1, rank = mem_adr\<39\>.
2149                                                                  0xD: DIMM = mem_adr\<41\>; if [RANK_ENA]=1, rank = mem_adr\<40\>.
2150                                                                  0xE: DIMM = 0;           if [RANK_ENA]=1, rank = mem_adr\<41\>.
2151                                                                  0xF: Reserved.
2152 
2153                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
2154                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
2155                                                                  16. So, row = mem_adr\<29:16\>.
2156 
2157                                                                  With [RANK_ENA] = 0, [PBANK_LSB] = 2.
2158                                                                  With [RANK_ENA] = 1, [PBANK_LSB] = 3.
2159 
2160                                                                  Internal:
2161                                                                  When interfacing with 8H 3DS, set this to 0xA regardless of the [RANK_ENA] value. */
2162         uint64_t row_lsb               : 3;  /**< [  4:  2](R/W) Row address bit select.
2163                                                                  0x0 = Address bit 14 is LSB.
2164                                                                  0x1 = Address bit 15 is LSB.
2165                                                                  0x2 = Address bit 16 is LSB.
2166                                                                  0x3 = Address bit 17 is LSB.
2167                                                                  0x4 = Address bit 18 is LSB.
2168                                                                  0x5 = Address bit 19 is LSB.
2169                                                                  0x6 = Address bit 20 is LSB.
2170                                                                  0x7 = Reserved.
2171 
2172                                                                  Encoding used to determine which memory address bit position represents the low order DDR
2173                                                                  ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
2174                                                                  (bnk,row,col,rank and DIMM) and that is a function of the following:
2175                                                                  * Datapath width (64).
2176                                                                  * Number of banks (8).
2177                                                                  * Number of column bits of the memory part--specified indirectly by this register.
2178                                                                  * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
2179                                                                  * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
2180                                                                  * Number of DIMMs in the system by the register below ([PBANK_LSB]).
2181 
2182                                                                  Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
2183                                                                  mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
2184                                                                  0x1 (64b).
2185 
2186                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1GB (16M * 8 bit * 8 bank)
2187                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
2188                                                                  16. So, row = mem_adr\<29:16\>.
2189 
2190                                                                  Refer to the cache-block read transaction example. */
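        /* Editorial worked example (not auto-generated text), using the Samsung
         * K4B1G0846C-F7 geometry quoted above and the address layout described in
         * [ROW_LSB]/[PBANK_LSB]:
         *
         *   // 3 bits of byte offset (64-bit bus) + 10 column bits + 3 bank bits
         *   //   => the row LSB is mem_adr<16>, i.e. [ROW_LSB] encoding 0x2.
         *   // 14 row bits => row = mem_adr<29:16>.
         *   // [RANK_ENA] = 1: rank = mem_adr<30>, DIMM = mem_adr<31> => [PBANK_LSB] = 0x3.
         *   // [RANK_ENA] = 0: DIMM = mem_adr<30>                     => [PBANK_LSB] = 0x2.
         */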
2191         uint64_t ecc_ena               : 1;  /**< [  1:  1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
2192                                                                  DIMMs with ECC; zero, otherwise.
2193 
2194                                                                  * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
2195                                                                  generated for the 64 bits of data being written to memory. On later read
2196                                                                  operations, this code is used to check for single-bit errors (which are auto-corrected) and
2197                                                                  double-bit errors (which are reported).
2198
2199                                                                  * When not turned on, DQ\<71:64\> are driven to zero. Please refer to SEC_ERR, DED_ERR,
2200                                                                  LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostic information when there is
2201                                                                  an error. */
2202         uint64_t reserved_0            : 1;
2203 #else /* Word 0 - Little Endian */
2204         uint64_t reserved_0            : 1;
2205         uint64_t ecc_ena               : 1;  /**< [  1:  1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
2206                                                                  DIMMs with ECC; zero, otherwise.
2207 
2208                                                                  * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
2209                                                                  generated for the 64 bits of data being written to memory. On later read
2210                                                                  operations, this code is used to check for single-bit errors (which are auto-corrected) and
2211                                                                  double-bit errors (which are reported).
2212
2213                                                                  * When not turned on, DQ\<71:64\> are driven to zero. Please refer to SEC_ERR, DED_ERR,
2214                                                                  LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostic information when there is
2215                                                                  an error. */
2216         uint64_t row_lsb               : 3;  /**< [  4:  2](R/W) Row address bit select.
2217                                                                  0x0 = Address bit 14 is LSB.
2218                                                                  0x1 = Address bit 15 is LSB.
2219                                                                  0x2 = Address bit 16 is LSB.
2220                                                                  0x3 = Address bit 17 is LSB.
2221                                                                  0x4 = Address bit 18 is LSB.
2222                                                                  0x5 = Address bit 19 is LSB.
2223                                                                  0x6 = Address bit 20 is LSB.
2224                                                                  0x7 = Reserved.
2225 
2226                                                                  Encoding used to determine which memory address bit position represents the low order DDR
2227                                                                  ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
2228                                                                  (bnk,row,col,rank and DIMM) and that is a function of the following:
2229                                                                  * Datapath width (64).
2230                                                                  * Number of banks (8).
2231                                                                  * Number of column bits of the memory part--specified indirectly by this register.
2232                                                                  * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
2233                                                                  * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
2234                                                                  * Number of DIMMs in the system by the register below ([PBANK_LSB]).
2235 
2236                                                                  Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
2237                                                                  mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
2238                                                                  0x1 (64b).
2239 
2240                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1GB (16M * 8 bit * 8 bank)
2241                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
2242                                                                  16. So, row = mem_adr\<29:16\>.
2243 
2244                                                                  Refer to the cache-block read transaction example. */
2245         uint64_t pbank_lsb             : 4;  /**< [  8:  5](R/W) DIMM address bit select. Reverting to the explanation for [ROW_LSB], [PBANK_LSB] would be:
2246                                                                  [ROW_LSB] bit + num_rowbits + num_rankbits
2247 
2248                                                                  Values for [PBANK_LSB] are as follows:
2249                                                                  0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
2250                                                                  0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
2251                                                                  0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
2252                                                                  0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
2253                                                                  0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
2254                                                                  0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
2255                                                                  0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
2256                                                                  0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
2257                                                                  0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
2258                                                                  0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
2259                                                                  0xA: DIMM = mem_adr\<38\>; if [RANK_ENA]=1, rank = mem_adr\<37\>.
2260                                                                  0xB: DIMM = mem_adr\<39\>; if [RANK_ENA]=1, rank = mem_adr\<38\>.
2261                                                                  0xC: DIMM = mem_adr\<40\>; if [RANK_ENA]=1, rank = mem_adr\<39\>.
2262                                                                  0xD: DIMM = mem_adr\<41\>; if [RANK_ENA]=1, rank = mem_adr\<40\>.
2263                                                                  0xE: DIMM = 0;           if [RANK_ENA]=1, rank = mem_adr\<41\>.
2264                                                                  0xF: Reserved.
2265 
2266                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
2267                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
2268                                                                  16. So, row = mem_adr\<29:16\>.
2269 
2270                                                                  With [RANK_ENA] = 0, [PBANK_LSB] = 2.
2271                                                                  With [RANK_ENA] = 1, [PBANK_LSB] = 3.
2272 
2273                                                                  Internal:
2274                                                                  When interfacing with 8H 3DS, set this to 0xA regardless of the [RANK_ENA] value. */
2275         uint64_t idlepower             : 3;  /**< [ 11:  9](R/W) Enter precharge power-down mode after the memory controller has been idle for
2276                                                                  2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
2277 
2278                                                                  This field should only be programmed after initialization.
2279                                                                  LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
2280                                                                  precharge power-down. */
2281         uint64_t forcewrite            : 4;  /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
2282                                                                  cycles. 0 = disabled. */
2283         uint64_t reserved_16           : 1;
2284         uint64_t reset                 : 1;  /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
2285                                                                  To cause the reset, software writes this to a one, then rewrites it to a zero. */
2286         uint64_t ref_zqcs_int          : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
2287                                                                  control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
2288                                                                  nonzero value.
2289                                                                  ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
2290                                                                  triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
2291                                                                  \<39:18\>
2292                                                                  are equal to 0x0.
2293 
2294                                                                  The ZQCS timer only decrements when the refresh timer is zero.
2295 
2296                                                                  Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
2297 
2298                                                                  A value of zero in bits \<24:18\> will effectively turn off refresh.
2299 
2300                                                                  Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
2301                                                                  effective period programmed in bits \<24:18\>. Note that this value should always be greater
2302                                                                  than 32, to account for resistor calibration delays.
2303 
2304                                                                  000_00000000_0000000: Reserved
2305 
2306                                                                  Max refresh interval = 127 * 512 = 65024 CK cycles.
2307 
2308                                                                  Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
2309 
2310                                                                  If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
2311                                                                  operations per second.
2312                                                                  LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
2313                                                                  send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
2314         uint64_t early_dqx             : 1;  /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
2315                                                                  lines have a larger delay than the CK line. */
2316         uint64_t sref_with_dll         : 1;  /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
2317                                                                  MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
2318                                                                  MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
2319                                                                  refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
2320                                                                  instruction sequences do not write any mode registers in the DDR4 parts. */
2321         uint64_t rank_ena              : 1;  /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
2322                                                                  * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
2323                                                                  and
2324                                                                  ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
2325                                                                  * Write zero for SINGLE ranked DIMMs." */
2326         uint64_t rankmask              : 4;  /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
2327                                                                  i, set [RANKMASK]\<i\>:
2328 
2329                                                                  \<pre\>
2330                                                                                [RANK_ENA]=1   [RANK_ENA]=0
2331                                                                  RANKMASK\<0\> = DIMM0_CS0      DIMM0_CS0
2332                                                                  RANKMASK\<1\> = DIMM0_CS1      MBZ
2333                                                                  RANKMASK\<2\> = DIMM1_CS0      DIMM1_CS0
2334                                                                  RANKMASK\<3\> = DIMM1_CS1      MBZ
2335                                                                  \</pre\>
2336 
2337                                                                  For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
2338                                                                  have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
2339                                                                  power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
2340                                                                  [RANKMASK]\<3\> MBZ. */
2341         uint64_t mirrmask              : 4;  /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
2342                                                                  [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
2343                                                                  0 \<= n \<= 3.
2344                                                                  A mirrored read/write operation has the following differences:
2345                                                                  * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
2346                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
2347                                                                  * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
2348                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
2349                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
2350                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>." */
2351         uint64_t init_status           : 4;  /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
2352                                                                  initialized.
2353                                                                  Software must set necessary [RANKMASK] bits before executing the initialization sequence
2354                                                                  using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
2355                                                                  the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
2356                                                                  exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
2357                                                                  precharge
2358                                                                  power-down entry/exit, and self-refresh entry SEQ_SELs. */
2359         uint64_t early_unload_d0_r0    : 1;  /**< [ 55: 55](R/W) Reserved, MBZ.
2360                                                                  Internal:
2361                                                                  When set, unload the PHY silo one cycle early for Rank 0 reads.
2362                                                                  The recommended [EARLY_UNLOAD_D0_R0] value can be calculated after the final
2363                                                                  LMC()_RLEVEL_RANK(0)[BYTE*] values are selected (as part of read leveling initialization).
2364                                                                  Then, determine the largest read leveling setting for rank 0 (i.e. calculate
2365                                                                  maxset=MAX(LMC()_RLEVEL_RANK(0)[BYTEi]) across all i), then set [EARLY_UNLOAD_D0_R0] when the
2366                                                                  low three bits of this largest setting are smaller than 4 (i.e.
2367                                                                  [EARLY_UNLOAD_D0_R0] = (maxset\<2:0\> \< 4)). */
2368         uint64_t early_unload_d0_r1    : 1;  /**< [ 56: 56](R/W) Reserved, MBZ.
2369                                                                  Internal:
2370                                                                  When set, unload the PHY silo one cycle early for Rank 1 reads.
2371                                                                  The recommended [EARLY_UNLOAD_D0_R1] value can be calculated after the final
2372                                                                  LMC()_RLEVEL_RANK(1)[BYTE*] values are selected (as part of read leveling initialization).
2373                                                                  Then, determine the largest read leveling setting for rank 1 (i.e. calculate
2374                                                                  maxset=MAX(LMC()_RLEVEL_RANK(1)[BYTEi]) across all i), then set [EARLY_UNLOAD_D0_R1] when the
2375                                                                  low three bits of this largest setting are smaller than 4 (i.e.
2376                                                                  [EARLY_UNLOAD_D0_R1] = (maxset\<2:0\> \< 4)). */
2377         uint64_t early_unload_d1_r0    : 1;  /**< [ 57: 57](R/W) Reserved, MBZ.
2378                                                                  Internal:
2379                                                                  When set, unload the PHY silo one cycle early for Rank 2 reads.
2380                                                                  The recommended [EARLY_UNLOAD_D1_R0] value can be calculated after the final
2381                                                                  LMC()_RLEVEL_RANK(2)[BYTE*] values are selected (as part of read leveling initialization).
2382                                                                  Then, determine the largest read leveling setting for rank 2 (i.e. calculate
2383                                                                  maxset=MAX(LMC()_RLEVEL_RANK(2)[BYTEi]) across all i), then set [EARLY_UNLOAD_D1_R0] when the
2384                                                                  low three bits of this largest setting are smaller than 4 (i.e.
2385                                                                  [EARLY_UNLOAD_D1_R0] = (maxset\<2:0\> \< 4)). */
2386         uint64_t early_unload_d1_r1    : 1;  /**< [ 58: 58](R/W) Reserved, MBZ.
2387                                                                  Internal:
2388                                                                  When set, unload the PHY silo one cycle early for Rank 3 reads.
2389                                                                  The recommended [EARLY_UNLOAD_D1_R1] value can be calculated after the final
2390                                                                  LMC()_RLEVEL_RANK(3)[BYTE*] values are selected (as part of read leveling initialization).
2391                                                                  Then, determine the largest read leveling setting for rank 3 (i.e. calculate
2392                                                                  maxset=MAX(LMC()_RLEVEL_RANK(3)[BYTEi]) across all i), then set [EARLY_UNLOAD_D1_R1] when the
2393                                                                  low three bits of this largest setting are smaller than 4 (i.e.
2394                                                                  [EARLY_UNLOAD_D1_R1] = (maxset\<2:0\> \< 4)). */
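        /* Editorial sketch (not auto-generated text): the recommendation in the four
         * EARLY_UNLOAD_* fields above reduces to the predicate below. rlevel_byte[]
         * stands in for the per-byte LMC()_RLEVEL_RANK(r)[BYTE*] settings of the rank
         * in question; the helper and its array/size arguments are illustrative
         * assumptions, not BDK APIs.
         *
         *   static inline int lmc_early_unload_cn9(const uint8_t rlevel_byte[], int nbytes)
         *   {
         *       uint8_t maxset = 0;
         *       for (int i = 0; i < nbytes; i++)      // maxset = MAX(BYTEi) across all i
         *           if (rlevel_byte[i] > maxset)
         *               maxset = rlevel_byte[i];
         *       return (maxset & 0x7) < 4;            // set when the low three bits are < 4
         *   }
         */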
2395         uint64_t reserved_59           : 1;
2396         uint64_t mode32b               : 1;  /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
2397         uint64_t mode_x4dev            : 1;  /**< [ 61: 61](R/W) DDR x4 device mode. */
2398         uint64_t bg2_enable            : 1;  /**< [ 62: 62](R/W) BG1 enable bit.
2399                                                                  Set to one when using DDR4 x4 or x8 parts.
2400                                                                  Clear to zero when using DDR4 x16 parts. */
2401         uint64_t lrdimm_ena            : 1;  /**< [ 63: 63](R/W) Reserved.
2402                                                                  Internal:
2403                                                                  Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
2404 #endif /* Word 0 - End */
2405     } cn9;
2406     /* struct bdk_lmcx_config_s cn81xx; */
2407     struct bdk_lmcx_config_cn88xx
2408     {
2409 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
2410         uint64_t lrdimm_ena            : 1;  /**< [ 63: 63](R/W) Reserved.
2411                                                                  Internal:
2412                                                                  Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
2413         uint64_t bg2_enable            : 1;  /**< [ 62: 62](R/W) BG1 enable bit. Only has an effect when LMC()_CONFIG[MODEDDR4] = 1.
2414                                                                  Set to one when using DDR4 x4 or x8 parts.
2415                                                                  Clear to zero when using DDR4 x16 parts. */
2416         uint64_t mode_x4dev            : 1;  /**< [ 61: 61](R/W) DDR x4 device mode. */
2417         uint64_t mode32b               : 1;  /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
2418         uint64_t scrz                  : 1;  /**< [ 59: 59](R/W1S/H) Hide LMC()_SCRAMBLE_CFG0 and LMC()_SCRAMBLE_CFG1 when set. */
2419         uint64_t early_unload_d1_r1    : 1;  /**< [ 58: 58](R/W) Reserved, MBZ.
2420                                                                  Internal:
2421                                                                  When set, unload the PHY silo one cycle early for Rank 3 reads.
2422                                                                  The recommended EARLY_UNLOAD_D1_R1 value can be calculated after the final
2423                                                                  LMC()_RLEVEL_RANK3[BYTE*] values are selected (as part of read leveling initialization).
2424                                                                  Then, determine the largest read leveling setting for rank 3 (i.e. calculate
2425                                                                  maxset=MAX(LMC()_RLEVEL_RANK3[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R1 when the
2426                                                                  low two bits of this largest setting are not 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset\<1:0\>
2427                                                                  !=3)). */
2428         uint64_t early_unload_d1_r0    : 1;  /**< [ 57: 57](R/W) Reserved, MBZ.
2429                                                                  Internal:
2430                                                                  When set, unload the PHY silo one cycle early for Rank 2 reads.
2431                                                                  The recommended EARLY_UNLOAD_D1_R0 value can be calculated after the final
2432                                                                  LMC()_RLEVEL_RANK2[BYTE*] values are selected (as part of read leveling initialization).
2433                                                                  Then, determine the largest read leveling setting for rank 2 (i.e. calculate
2434                                                                  maxset=MAX(LMC()_RLEVEL_RANK2[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R0 when the
2435                                                                  low two bits of this largest setting are not 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset\<1:0\>
2436                                                                  !=3)). */
2437         uint64_t early_unload_d0_r1    : 1;  /**< [ 56: 56](R/W) Reserved, MBZ.
2438                                                                  Internal:
2439                                                                  When set, unload the PHY silo one cycle early for Rank 1 reads.
2440                                                                  The recommended EARLY_UNLOAD_D0_R1 value can be calculated after the final
2441                                                                  LMC()_RLEVEL_RANK1[BYTE*] values are selected (as part of read leveling initialization).
2442                                                                  Then, determine the largest read leveling setting for rank 1 (i.e. calculate
2443                                                                  maxset=MAX(LMC()_RLEVEL_RANK1[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R1 when the
2444                                                                  low two bits of this largest setting are not 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset\<1:0\>
2445                                                                  !=3)). */
2446         uint64_t early_unload_d0_r0    : 1;  /**< [ 55: 55](R/W) Reserved, MBZ.
2447                                                                  Internal:
2448                                                                  When set, unload the PHY silo one cycle early for Rank 0 reads.
2449                                                                  The recommended EARLY_UNLOAD_D0_R0 value can be calculated after the final
2450                                                                  LMC()_RLEVEL_RANK0[BYTE*] values are selected (as part of read leveling initialization).
2451                                                                  Then, determine the largest read leveling setting for rank 0 (i.e. calculate
2452                                                                  maxset=MAX(LMC()_RLEVEL_RANK0[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R0 when the
2453                                                                  low two bits of this largest setting are not 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset\<1:0\>
2454                                                                  !=3)). */
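        /* Editorial note (not auto-generated text): on CN88XX the analogous
         * recommendation uses the low two bits of the same maxset value, i.e.
         *
         *   int early_unload = ((maxset & 0x3) != 3);   // set when maxset<1:0> != 3
         *
         * with maxset computed per rank exactly as in the CN9 sketch above. */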
2455         uint64_t init_status           : 4;  /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
2456                                                                  initialized.
2457                                                                  Software must set necessary [RANKMASK] bits before executing the initialization sequence
2458                                                                  using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
2459                                                                  the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
2460                                                                  exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
2461                                                                  precharge
2462                                                                  power-down entry/exit, and self-refresh entry SEQ_SELs. */
2463         uint64_t mirrmask              : 4;  /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
2464                                                                  [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
2465                                                                  0 \<= n \<= 3.
2466                                                                  In DDR3, a mirrored read/write operation has the following differences:
2467                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
2468                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
2469                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
2470                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
2471 
2472                                                                  When RANK_ENA = 0, MIRRMASK\<1\> MBZ.
2473 
2474                                                                  In DDR4, a mirrored read/write operation has the following differences:
2475                                                                  * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
2476                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
2477                                                                  * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
2478                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
2479                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
2480                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
2481 
2482                                                                  For CN70XX, MIRRMASK\<3:2\> MBZ.
2483                                                                  * When RANK_ENA = 0, MIRRMASK\<1\> MBZ." */
2484         uint64_t rankmask              : 4;  /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
2485                                                                  i, set [RANKMASK]\<i\>:
2486 
2487                                                                  \<pre\>
2488                                                                                [RANK_ENA]=1   [RANK_ENA]=0
2489                                                                  RANKMASK\<0\> = DIMM0_CS0      DIMM0_CS0
2490                                                                  RANKMASK\<1\> = DIMM0_CS1      MBZ
2491                                                                  RANKMASK\<2\> = DIMM1_CS0      DIMM1_CS0
2492                                                                  RANKMASK\<3\> = DIMM1_CS1      MBZ
2493                                                                  \</pre\>
2494 
2495                                                                  For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
2496                                                                  have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
2497                                                                  power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
2498                                                                  [RANKMASK]\<3\> MBZ. */
2499         uint64_t rank_ena              : 1;  /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
2500                                                                  * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
2501                                                                  and
2502                                                                  ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
2503                                                                  * Write zero for SINGLE ranked DIMMs." */
2504         uint64_t sref_with_dll         : 1;  /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
2505                                                                  MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
2506                                                                  MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
2507                                                                  refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
2508                                                                  instruction sequences do not write any mode registers in the DDR3/4 parts. */
2509         uint64_t early_dqx             : 1;  /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
2510                                                                  lines have a larger delay than the CK line. */
2511         uint64_t ref_zqcs_int          : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
2512                                                                  control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
2513                                                                  nonzero value.
2514                                                                  ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
2515                                                                  triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
2516                                                                  \<39:18\>
2517                                                                  are equal to 0x0.
2518 
2519                                                                  The ZQCS timer only decrements when the refresh timer is zero.
2520 
2521                                                                  Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
2522 
2523                                                                  A value of zero in bits \<24:18\> will effectively turn off refresh.
2524 
2525                                                                  Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
2526                                                                  effective period programmed in bits \<24:18\>. Note that this value should always be greater
2527                                                                  than 32, to account for resistor calibration delays.
2528 
2529                                                                  000_00000000_0000000: Reserved
2530 
2531                                                                  Max refresh interval = 127 * 512 = 65024 CK cycles.
2532 
2533                                                                  Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
2534 
2535                                                                  If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
2536                                                                  operations per second.
2537                                                                  LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
2538                                                                  send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
2539         uint64_t reset                 : 1;  /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
2540                                                                  To cause the reset, software writes this to a one, then rewrites it to a zero. */
2541         uint64_t ecc_adr               : 1;  /**< [ 16: 16](R/W) Include memory reference address in the ECC calculation.
2542                                                                  0 = disabled, 1 = enabled. */
2543         uint64_t forcewrite            : 4;  /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
2544                                                                  cycles. 0 = disabled. */
2545         uint64_t idlepower             : 3;  /**< [ 11:  9](R/W) Enter precharge power-down mode after the memory controller has been idle for
2546                                                                  2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
2547 
2548                                                                  This field should only be programmed after initialization.
2549                                                                  LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
2550                                                                  precharge power-down. */
2551         uint64_t pbank_lsb             : 4;  /**< [  8:  5](R/W) DIMM address bit select. Reverting to the explanation for [ROW_LSB], [PBANK_LSB] would be:
2552                                                                  [ROW_LSB] bit + num_rowbits + num_rankbits
2553 
2554                                                                  Values for [PBANK_LSB] are as follows:
2555                                                                  0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
2556                                                                  0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
2557                                                                  0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
2558                                                                  0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
2559                                                                  0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
2560                                                                  0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
2561                                                                  0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
2562                                                                  0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
2563                                                                  0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
2564                                                                  0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
2565                                                                  0xA: DIMM = 0;           if [RANK_ENA]=1, rank = mem_adr\<37\>.
2566                                                                  0xB-0xF: Reserved.
2567 
2568                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
2569                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
2570                                                                  16. So, row = mem_adr\<29:16\>.
2571 
2572                                                                  With [RANK_ENA] = 0, [PBANK_LSB] = 2.
2573                                                                  With [RANK_ENA] = 1, [PBANK_LSB] = 3.
2574 
2575                                                                  Internal:
2576                                                                  When interfacing with 8H 3DS, set this to 0xA regardless of the [RANK_ENA] value. */
2577         uint64_t row_lsb               : 3;  /**< [  4:  2](R/W) Row address bit select.
2578                                                                  0x0 = Address bit 14 is LSB.
2579                                                                  0x1 = Address bit 15 is LSB.
2580                                                                  0x2 = Address bit 16 is LSB.
2581                                                                  0x3 = Address bit 17 is LSB.
2582                                                                  0x4 = Address bit 18 is LSB.
2583                                                                  0x5 = Address bit 19 is LSB.
2584                                                                  0x6 = Address bit 20 is LSB.
2585                                                                  0x7 = Reserved.
2586 
2587                                                                  Encoding used to determine which memory address bit position represents the low order DDR
2588                                                                  ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
2589                                                                  (bnk,row,col,rank and DIMM) and that is a function of the following:
2590                                                                  * Datapath width (64).
2591                                                                  * Number of banks (8).
2592                                                                  * Number of column bits of the memory part--specified indirectly by this register.
2593                                                                  * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
2594                                                                  * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
2595                                                                  * Number of DIMMs in the system by the register below ([PBANK_LSB]).
2596 
2597                                                                  Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
2598                                                                  mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
2599                                                                  0x1 (64b).
2600 
2601                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
2602                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
2603                                                                  16. So, row = mem_adr\<29:16\>.
2604 
2605                                                                  Refer to the cache-block read transaction example. */
2606         uint64_t ecc_ena               : 1;  /**< [  1:  1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
2607                                                                  DIMMs with ECC; zero, otherwise.
2608 
2609                                                                  * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
2610                                                                  generated for the 64 bits of data which will be written in the memory. Later on read
2611                                                                  operations, will be used to check for single-bit error (which will be auto-corrected) and
2612                                                                  double-bit error (which will be reported).
2613 
2614                                                                  * When not turned on, DQ\<71:64\> are driven to zero. Please refer to SEC_ERR, DED_ERR,
2615                                                                  LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostics information when there is
2616                                                                  an error. */
2617         uint64_t reserved_0            : 1;
2618 #else /* Word 0 - Little Endian */
2619         uint64_t reserved_0            : 1;
2620         uint64_t ecc_ena               : 1;  /**< [  1:  1](R/W) ECC enable. When set, enables the 8b ECC check/correct logic. Should be one when used with
2621                                                                  DIMMs with ECC; zero, otherwise.
2622 
2623                                                                  * When this mode is turned on, DQ\<71:64\> on write operations contains the ECC code
2624                                                                  generated for the 64 bits of data which will be written in the memory. Later on read
2625                                                                  operations, will be used to check for single-bit error (which will be auto-corrected) and
2626                                                                  double-bit error (which will be reported).
2627 
2628                                                                  * When not turned on, DQ\<71:64\> are driven to zero. Please refer to SEC_ERR, DED_ERR,
2629                                                                  LMC()_NXM_FADR, and LMC()_ECC_SYND registers for diagnostics information when there is
2630                                                                  an error. */
2631         uint64_t row_lsb               : 3;  /**< [  4:  2](R/W) Row address bit select.
2632                                                                  0x0 = Address bit 14 is LSB.
2633                                                                  0x1 = Address bit 15 is LSB.
2634                                                                  0x2 = Address bit 16 is LSB.
2635                                                                  0x3 = Address bit 17 is LSB.
2636                                                                  0x4 = Address bit 18 is LSB.
2637                                                                  0x5 = Address bit 19 is LSB.
2638                                                                  0x6 = Address bit 20 is LSB.
2639                                                                  0x7 = Reserved.
2640 
2641                                                                  Encoding used to determine which memory address bit position represents the low order DDR
2642                                                                  ROW address. The processor's memory address\<34:7\> needs to be translated to DRAM addresses
2643                                                                  (bnk,row,col,rank and DIMM) and that is a function of the following:
2644                                                                  * Datapath width (64).
2645                                                                  * Number of banks (8).
2646                                                                  * Number of column bits of the memory part--specified indirectly by this register.
2647                                                                  * Number of row bits of the memory part--specified indirectly by [PBANK_LSB].
2648                                                                  * Number of ranks in a DIMM--specified by LMC()_CONFIG[RANK_ENA].
2649                                                                  * Number of DIMMs in the system by the register below ([PBANK_LSB]).
2650 
2651                                                                  Column address starts from mem_addr[3] for 64b (8 bytes) DQ width. [ROW_LSB] is
2652                                                                  mem_adr[15] for 64b mode. Therefore, the [ROW_LSB] parameter should be set to
2653                                                                  0x1 (64b).
2654 
2655                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
2656                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
2657                                                                  16. So, row = mem_adr\<29:16\>.
2658 
2659                                                                  Refer to the cache-block read transaction example. */
2660         uint64_t pbank_lsb             : 4;  /**< [  8:  5](R/W) DIMM address bit select. Reverting to the explanation for [ROW_LSB], [PBANK_LSB] would be:
2661                                                                  [ROW_LSB] bit + num_rowbits + num_rankbits
2662 
2663                                                                  Values for [PBANK_LSB] are as follows:
2664                                                                  0x0: DIMM = mem_adr\<28\>; if [RANK_ENA]=1, rank = mem_adr\<27\>.
2665                                                                  0x1: DIMM = mem_adr\<29\>; if [RANK_ENA]=1, rank = mem_adr\<28\>.
2666                                                                  0x2: DIMM = mem_adr\<30\>; if [RANK_ENA]=1, rank = mem_adr\<29\>.
2667                                                                  0x3: DIMM = mem_adr\<31\>; if [RANK_ENA]=1, rank = mem_adr\<30\>.
2668                                                                  0x4: DIMM = mem_adr\<32\>; if [RANK_ENA]=1, rank = mem_adr\<31\>.
2669                                                                  0x5: DIMM = mem_adr\<33\>; if [RANK_ENA]=1, rank = mem_adr\<32\>.
2670                                                                  0x6: DIMM = mem_adr\<34\>; if [RANK_ENA]=1, rank = mem_adr\<33\>.
2671                                                                  0x7: DIMM = mem_adr\<35\>; if [RANK_ENA]=1, rank = mem_adr\<34\>.
2672                                                                  0x8: DIMM = mem_adr\<36\>; if [RANK_ENA]=1, rank = mem_adr\<35\>.
2673                                                                  0x9: DIMM = mem_adr\<37\>; if [RANK_ENA]=1, rank = mem_adr\<36\>.
2674                                                                  0xA: DIMM = 0;           if [RANK_ENA]=1, rank = mem_adr\<37\>.
2675                                                                  0xB-0xF: Reserved.
2676 
2677                                                                  For example, for a DIMM made of Samsung's K4B1G0846C-F7 1Gb (16M * 8 bit * 8 bank)
2678                                                                  parts, the column address width = 10, so with 10b of col, 3b of bus, 3b of bank, [ROW_LSB] =
2679                                                                  16. So, row = mem_adr\<29:16\>.
2680 
2681                                                                  With [RANK_ENA] = 0, [PBANK_LSB] = 2.
2682                                                                  With [RANK_ENA] = 1, [PBANK_LSB] = 3.
2683 
2684                                                                  Internal:
2685                                                                  When interfacing with 8H 3DS, set this to 0xA regardless of the [RANK_ENA] value. */
2686         uint64_t idlepower             : 3;  /**< [ 11:  9](R/W) Enter precharge power-down mode after the memory controller has been idle for
2687                                                                  2^(2+[IDLEPOWER]) CK cycles. 0 = disabled.
2688 
2689                                                                  This field should only be programmed after initialization.
2690                                                                  LMC()_MODEREG_PARAMS0[PPD] determines whether the DRAM DLL is disabled during the
2691                                                                  precharge power-down. */
2692         uint64_t forcewrite            : 4;  /**< [ 15: 12](R/W) Force the oldest outstanding write to complete after having waited for 2^[FORCEWRITE] CK
2693                                                                  cycles. 0 = disabled. */
2694         uint64_t ecc_adr               : 1;  /**< [ 16: 16](R/W) Include memory reference address in the ECC calculation.
2695                                                                  0 = disabled, 1 = enabled. */
2696         uint64_t reset                 : 1;  /**< [ 17: 17](R/W) Reset one-shot pulse for LMC()_OPS_CNT, LMC()_IFB_CNT, and LMC()_DCLK_CNT.
2697                                                                  To cause the reset, software writes this to a one, then rewrites it to a zero. */
2698         uint64_t ref_zqcs_int          : 22; /**< [ 39: 18](R/W) Refresh interval is represented in number of 512 CK cycle increments. To get more precise
2699                                                                  control of the refresh interval, LMC()_EXT_CONFIG[REF_INT_LSBS] can be set to a
2700                                                                  nonzero value.
2701                                                                  ZQCS interval is represented in a number of refresh intervals. A refresh sequence is
2702                                                                  triggered when bits \<24:18\> are equal to 0x0, and a ZQCS sequence is triggered when
2703                                                                  \<39:18\>
2704                                                                  are equal to 0x0.
2705 
2706                                                                  The ZQCS timer only decrements when the refresh timer is zero.
2707 
2708                                                                  Program \<24:18\> to RND-DN(TREFI/clkPeriod/512).
2709 
2710                                                                  A value of zero in bits \<24:18\> will effectively turn off refresh.
2711 
2712                                                                  Program \<36:25\> to (RND-DN(ZQCS_Period / Refresh_Period) - 1), where Refresh_Period is the
2713                                                                  effective period programmed in bits \<24:18\>. Note that this value should always be greater
2714                                                                  than 32, to account for resistor calibration delays.
2715 
2716                                                                  000_00000000_0000000: Reserved
2717 
2718                                                                  Max refresh interval = 127 * 512 = 65024 CK cycles.
2719 
2720                                                                  Max ZQCS interval = 32768 * 127 * 512 = 2130706432 CK cycles.
2721 
2722                                                                  If refresh interval is programmed to ~8 us, max ZQCS interval is ~262 ms, or ~4 ZQCS
2723                                                                  operations per second.
2724                                                                  LMC()_CONFIG[INIT_STATUS] determines which ranks receive the REF / ZQCS. LMC does not
2725                                                                  send any refreshes / ZQCS's when LMC()_CONFIG[INIT_STATUS]=0. */
2726         uint64_t early_dqx             : 1;  /**< [ 40: 40](R/W) Set this bit to send DQx signals one CK cycle earlier for the case when the shortest DQx
2727                                                                  lines have a larger delay than the CK line. */
2728         uint64_t sref_with_dll         : 1;  /**< [ 41: 41](R/W) Self-refresh entry/exit write mode registers. When set, self-refresh entry sequence writes
2729                                                                  MR2 and MR1 (in this order, in all ranks), and self-refresh exit sequence writes MR1, MR0,
2730                                                                  MR2, and MR3 (in this order, for all ranks). The write operations occur before self-
2731                                                                  refresh entry, and after self-refresh exit. When clear, self-refresh entry and exit
2732                                                                  instruction sequences do not write any mode registers in the DDR3/4 parts. */
2733         uint64_t rank_ena              : 1;  /**< [ 42: 42](R/W) "RANK enable (for use with dual-rank DIMMs).
2734                                                                  * For dual-rank DIMMs, the [RANK_ENA] bit will enable the drive of the DDR#_DIMM*_CS*_L
2735                                                                  and
2736                                                                  ODT_\<1:0\> pins differently based on the ([PBANK_LSB] - 1) address bit.
2737                                                                  * Write zero for SINGLE ranked DIMMs." */
2738         uint64_t rankmask              : 4;  /**< [ 46: 43](R/W) Mask to select rank to be leveled/initialized. To write level/read level/initialize rank
2739                                                                  i, set [RANKMASK]\<i\>:
2740 
2741                                                                  \<pre\>
2742                                                                                [RANK_ENA]=1   [RANK_ENA]=0
2743                                                                  RANKMASK\<0\> = DIMM0_CS0      DIMM0_CS0
2744                                                                  RANKMASK\<1\> = DIMM0_CS1      MBZ
2745                                                                  RANKMASK\<2\> = DIMM1_CS0      DIMM1_CS0
2746                                                                  RANKMASK\<3\> = DIMM1_CS1      MBZ
2747                                                                  \</pre\>
2748 
2749                                                                  For read/write leveling, each rank has to be leveled separately, so [RANKMASK] should only
2750                                                                  have one bit set. [RANKMASK] is not used during self-refresh entry/exit and precharge
2751                                                                  power down entry/exit instruction sequences. When [RANK_ENA] = 0, [RANKMASK]\<1\> and
2752                                                                  [RANKMASK]\<3\> MBZ. */
2753         uint64_t mirrmask              : 4;  /**< [ 50: 47](R/W) "Mask determining which ranks are address-mirrored.
2754                                                                  [MIRRMASK]\<n\> = 1 means rank n addresses are mirrored for
2755                                                                  0 \<= n \<= 3.
2756                                                                  In DDR3, a mirrored read/write operation has the following differences:
2757                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
2758                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
2759                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
2760                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
2761 
2762                                                                  When RANK_ENA = 0, MIRRMASK\<1\> MBZ.
2763 
2764                                                                  In DDR4, a mirrored read/write operation has the following differences:
2765                                                                  * DDR#_BG\<1\> is swapped with DDR#_BG\<0\>.
2766                                                                  * DDR#_BA\<1\> is swapped with DDR#_BA\<0\>.
2767                                                                  * DDR#_A\<13\> is swapped with DDR#_A\<11\>.
2768                                                                  * DDR#_A\<8\> is swapped with DDR#_A\<7\>.
2769                                                                  * DDR#_A\<6\> is swapped with DDR#_A\<5\>.
2770                                                                  * DDR#_A\<4\> is swapped with DDR#_A\<3\>.
2771 
2772                                                                  For CN70XX, MIRRMASK\<3:2\> MBZ.
2773                                                                  * When RANK_ENA = 0, MIRRMASK\<1\> MBZ." */
2774         uint64_t init_status           : 4;  /**< [ 54: 51](RO/H) Indicates status of initialization. [INIT_STATUS][n] = 1 implies rank n has been
2775                                                                  initialized.
2776                                                                  Software must set necessary [RANKMASK] bits before executing the initialization sequence
2777                                                                  using LMC()_SEQ_CTL. If the rank has been selected for init with the [RANKMASK] bits,
2778                                                                  the [INIT_STATUS] bits will be set after successful initialization and after self-refresh
2779                                                                  exit. [INIT_STATUS] determines the chip-selects that assert during refresh, ZQCS,
2780                                                                  precharge
2781                                                                  power-down entry/exit, and self-refresh entry SEQ_SELs. */
2782         uint64_t early_unload_d0_r0    : 1;  /**< [ 55: 55](R/W) Reserved, MBZ.
2783                                                                  Internal:
2784                                                                  When set, unload the PHY silo one cycle early for Rank 0 reads.
2785                                                                  The recommended EARLY_UNLOAD_D0_R0 value can be calculated after the final
2786                                                                  LMC()_RLEVEL_RANK0[BYTE*] values are selected (as part of read leveling initialization).
2787                                                                  Then, determine the largest read leveling setting for rank 0 (i.e. calculate
2788                                                                  maxset=MAX(LMC()_RLEVEL_RANK0[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R0 when the
2789                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R0 = (maxset\<1:0\>
2790                                                                  !=3)). */
2791         uint64_t early_unload_d0_r1    : 1;  /**< [ 56: 56](R/W) Reserved, MBZ.
2792                                                                  Internal:
2793                                                                  When set, unload the PHY silo one cycle early for Rank 1 reads.
2794                                                                  The recommended EARLY_UNLOAD_D0_R1 value can be calculated after the final
2795                                                                  LMC()_RLEVEL_RANK1[BYTE*] values are selected (as part of read leveling initialization).
2796                                                                  Then, determine the largest read leveling setting for rank one (i.e. calculate
2797                                                                  maxset=MAX(LMC()_RLEVEL_RANK1[BYTEi]) across all i), then set EARLY_UNLOAD_D0_R1 when the
2798                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D0_R1 = (maxset\<1:0\>
2799                                                                  !=3)). */
2800         uint64_t early_unload_d1_r0    : 1;  /**< [ 57: 57](R/W) Reserved, MBZ.
2801                                                                  Internal:
2802                                                                  When set, unload the PHY silo one cycle early for Rank 2 reads.
2803                                                                  The recommended EARLY_UNLOAD_D1_R0 value can be calculated after the final
2804                                                                  LMC()_RLEVEL_RANK2[BYTE*] values are selected (as part of read leveling initialization).
2805                                                                  Then, determine the largest read leveling setting for rank 2 (i.e. calculate
2806                                                                  maxset=MAX(LMC()_RLEVEL_RANK2[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R0 when the
2807                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R0 = (maxset\<1:0\>
2808                                                                  !=3)). */
2809         uint64_t early_unload_d1_r1    : 1;  /**< [ 58: 58](R/W) Reserved, MBZ.
2810                                                                  Internal:
2811                                                                  When set, unload the PHY silo one cycle early for Rank 3 reads.
2812                                                                  The recommended EARLY_UNLOAD_D1_R1 value can be calculated after the final
2813                                                                  LMC()_RLEVEL_RANK3[BYTE*] values are selected (as part of read leveling initialization).
2814                                                                  Then, determine the largest read leveling setting for rank 3 (i.e. calculate
2815                                                                  maxset=MAX(LMC()_RLEVEL_RANK3[BYTEi]) across all i), then set EARLY_UNLOAD_D1_R1 when the
2816                                                                  low two bits of this largest setting is not 3 (i.e. EARLY_UNLOAD_D1_R1 = (maxset\<1:0\>
2817                                                                  !=3)). */
2818         uint64_t scrz                  : 1;  /**< [ 59: 59](R/W1S/H) Hide LMC()_SCRAMBLE_CFG0 and LMC()_SCRAMBLE_CFG1 when set. */
2819         uint64_t mode32b               : 1;  /**< [ 60: 60](R/W) 32-bit datapath mode. When set, only 32 DQ pins are used. */
2820         uint64_t mode_x4dev            : 1;  /**< [ 61: 61](R/W) DDR x4 device mode. */
2821         uint64_t bg2_enable            : 1;  /**< [ 62: 62](R/W) BG1 enable bit. Only has an effect when LMC()_CONFIG[MODEDDR4] = 1.
2822                                                                  Set to one when using DDR4 x4 or x8 parts.
2823                                                                  Clear to zero when using DDR4 x16 parts. */
2824         uint64_t lrdimm_ena            : 1;  /**< [ 63: 63](R/W) Reserved.
2825                                                                  Internal:
2826                                                                  Load reduced DIMM enable. When set allows the use of JEDEC DDR4 LRDIMMs. */
2827 #endif /* Word 0 - End */
2828     } cn88xx;
2829     /* struct bdk_lmcx_config_cn88xx cn83xx; */
2830 };
2831 typedef union bdk_lmcx_config bdk_lmcx_config_t;
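
/* Editor's illustration -- not part of the auto-generated register listing.
 * A minimal sketch of how the [ROW_LSB] and [PBANK_LSB] encodings documented
 * above follow from the DRAM geometry, assuming a 64-bit datapath (3 byte-
 * offset bits) and 8 banks (3 bank bits). The helper names and parameters
 * are hypothetical and exist only for illustration. */
static inline unsigned example_lmc_row_lsb_encoding(unsigned col_bits)
{
    /* The low-order row bit sits above the byte-offset, bank and column
     * bits; encoding 0x0 corresponds to address bit 14. */
    unsigned row_lsb_bit = 3 + 3 + col_bits;
    return row_lsb_bit - 14;
}

static inline unsigned example_lmc_pbank_lsb_encoding(unsigned col_bits,
                                                      unsigned row_bits,
                                                      unsigned rank_ena)
{
    /* The DIMM-select bit sits above the row bits (plus one rank bit when
     * [RANK_ENA] is set); encoding 0x0 corresponds to address bit 28. */
    unsigned dimm_bit = 3 + 3 + col_bits + row_bits + (rank_ena ? 1 : 0);
    return dimm_bit - 28;
}
/* For the Samsung K4B1G0846C-F7 example above (10 column bits, 14 row bits):
 * example_lmc_row_lsb_encoding(10) == 2, i.e. the row LSB is mem_adr<16>,
 * and example_lmc_pbank_lsb_encoding(10, 14, 1) == 3, matching the text. */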
2832 
2833 static inline uint64_t BDK_LMCX_CONFIG(unsigned long a) __attribute__ ((pure, always_inline));
2834 static inline uint64_t BDK_LMCX_CONFIG(unsigned long a)
2835 {
2836     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
2837         return 0x87e088000188ll + 0x1000000ll * ((a) & 0x0);
2838     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
2839         return 0x87e088000188ll + 0x1000000ll * ((a) & 0x1);
2840     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
2841         return 0x87e088000188ll + 0x1000000ll * ((a) & 0x3);
2842     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
2843         return 0x87e088000188ll + 0x1000000ll * ((a) & 0x3);
2844     __bdk_csr_fatal("LMCX_CONFIG", 1, a, 0, 0, 0);
2845 }
2846 
2847 #define typedef_BDK_LMCX_CONFIG(a) bdk_lmcx_config_t
2848 #define bustype_BDK_LMCX_CONFIG(a) BDK_CSR_TYPE_RSL
2849 #define basename_BDK_LMCX_CONFIG(a) "LMCX_CONFIG"
2850 #define device_bar_BDK_LMCX_CONFIG(a) 0x0 /* PF_BAR0 */
2851 #define busnum_BDK_LMCX_CONFIG(a) (a)
2852 #define arguments_BDK_LMCX_CONFIG(a) (a),-1,-1,-1
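
/* Editor's illustration -- not part of the auto-generated register listing.
 * A hedged sketch of the REF_ZQCS_INT arithmetic described in the field
 * comment above, packed through the bdk_lmcx_config_t union (cn88xx view).
 * Reading and writing the raw CSR, for example at the address returned by
 * BDK_LMCX_CONFIG(0), is left to the surrounding BDK code; this helper and
 * its picosecond parameters are hypothetical. */
static inline uint64_t example_lmc_config_set_ref_zqcs(uint64_t raw_cfg,
                                                       uint64_t trefi_ps,
                                                       uint64_t tzqcs_period_ps,
                                                       uint64_t ck_period_ps)
{
    bdk_lmcx_config_t cfg;
    cfg.u = raw_cfg;

    /* Refresh divider, CSR bits <24:18>: RND-DN(tREFI / CK period / 512). */
    uint64_t ref = (trefi_ps / ck_period_ps) / 512;
    if (ref == 0)
        ref = 1;                 /* zero would effectively disable refresh */
    if (ref > 0x7f)
        ref = 0x7f;

    /* ZQCS divider, CSR bits <36:25>:
     * RND-DN(ZQCS period / effective refresh period) - 1. */
    uint64_t refresh_period_ps = ref * 512 * ck_period_ps;
    uint64_t zqcs = (tzqcs_period_ps / refresh_period_ps) - 1;
    if (zqcs > 0xfff)
        zqcs = 0xfff;            /* also catches underflow of the -1 above */

    cfg.cn88xx.ref_zqcs_int = (zqcs << 7) | ref;
    return cfg.u;
}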
2853 
2854 /**
2855  * Register (RSL) lmc#_control
2856  *
2857  * LMC Control Register
2858  */
2859 union bdk_lmcx_control
2860 {
2861     uint64_t u;
2862     struct bdk_lmcx_control_s
2863     {
2864 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
2865         uint64_t scramble_ena          : 1;  /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
2866         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
2867         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Reserved.
2868                                                                  Internal:
2869                                                                  Offset for DFA rate-matching. */
2870         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Reserved.
2871                                                                  Internal:
2872                                                                  Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
2873                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
2874                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
2875 
2876                                                                  0x0 = Reserved. */
2877         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
2878         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
2879         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Reserved.
2880                                                                  Internal:
2881                                                                  Coarse rate matching max bucket size. The coarse rate matching logic is used to control
2882                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
2883                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
2884                                                                  increments by
2885                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
2886                                                                  when [CRM_MAX] is reached.
2887 
2888                                                                  0x0 = Reserved. */
2889         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
2890                                                                  CK cycle. */
2891         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
2892                                                                  additional CK cycle. */
2893         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
2894                                                                  drivers is delayed an additional BPRCH CK cycles.
2895                                                                  0x0 = 0 CK cycles.
2896                                                                  0x1 = 1 CK cycles.
2897                                                                  0x2 = 2 CK cycles.
2898                                                                  0x3 = 3 CK cycles." */
2899         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
2900                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
2901         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
2902                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
2903         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
2904                                                                  traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
2905         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
2906                                                                  address bits. */
2907         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
2908                                                                  to interrupt. */
2909         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
2910                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
2911                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
2912         uint64_t reserved_10           : 1;
2913         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
2914         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
2915         uint64_t throttle_wr           : 1;  /**< [  7:  7](R/W) When set, use at most one IFB for write operations. */
2916         uint64_t throttle_rd           : 1;  /**< [  6:  6](R/W) When set, use at most one IFB for read operations. */
2917         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
2918                                                                  drivers is FPRCH2 CKs earlier.
2919                                                                  0x0 = 0 CK cycles.
2920                                                                  0x1 = 1 CK cycles.
2921                                                                  0x2 = 2 CK cycles.
2922                                                                  0x3 = Reserved." */
2923         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
2924                                                                  Internal:
2925                                                                  Enable the posted CAS feature of DDR3. This bit must be
2926                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
2927         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
2928                                                                  setup time pressure on the address and command bus which nominally have a very large
2929                                                                  fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
2930                                                                  for Two Dimm Unbuffered Systems for physical details. */
2931         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
2932                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
2933                                                                  a one, then write this field to a zero. */
2934         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
2935                                                                  address and control bits to be registered in the controller. */
2936 #else /* Word 0 - Little Endian */
2937         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
2938                                                                  address and control bits to be registered in the controller. */
2939         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
2940                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
2941                                                                  a one, then write this field to a zero. */
2942         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
2943                                                                  setup time pressure on the address and command bus which nominally have a very large
2944                                                                  fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
2945                                                                  for Two Dimm Unbuffered Systems for physical details. */
2946         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
2947                                                                  Internal:
2948                                                                  Enable the posted CAS feature of DDR3. This bit must be
2949                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
2950         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
2951                                                                  drivers is FPRCH2 CKs earlier.
2952                                                                  0x0 = 0 CK cycles.
2953                                                                  0x1 = 1 CK cycles.
2954                                                                  0x2 = 2 CK cycles.
2955                                                                  0x3 = Reserved." */
2956         uint64_t throttle_rd           : 1;  /**< [  6:  6](R/W) When set, use at most one IFB for read operations. */
2957         uint64_t throttle_wr           : 1;  /**< [  7:  7](R/W) When set, use at most one IFB for write operations. */
2958         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
2959         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
2960         uint64_t reserved_10           : 1;
2961         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
2962                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
2963                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
2964         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
2965                                                                  to interrupt. */
2966         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
2967                                                                  address bits. */
2968         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
2969                                                                  traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
2970         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
2971                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
2972         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
2973                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
2974         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
2975                                                                  drivers is delayed an additional BPRCH CK cycles.
2976                                                                  0x0 = 0 CK cycles.
2977                                                                  0x1 = 1 CK cycles.
2978                                                                  0x2 = 2 CK cycles.
2979                                                                  0x3 = 3 CK cycles." */
2980         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
2981                                                                  additional CK cycle. */
2982         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
2983                                                                  CK cycle. */
2984         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Reserved.
2985                                                                  Internal:
2986                                                                  Coarse rate matching max bucket size. The coarse rate matching logic is used to control
2987                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
2988                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
2989                                                                  increments by
2990                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
2991                                                                  when [CRM_MAX] is reached.
2992 
2993                                                                  0x0 = Reserved. */
2994         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
2995         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
2996         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Reserved.
2997                                                                  Internal:
2998                                                                  Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
2999                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
3000                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
3001 
3002                                                                  0x0 = Reserved. */
3003         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Reserved.
3004                                                                  Internal:
3005                                                                  Offset for DFA rate-matching. */
3006         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
3007         uint64_t scramble_ena          : 1;  /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
3008 #endif /* Word 0 - End */
3009     } s;
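    /* Editor's illustration -- not part of the auto-generated register
     * listing. The [BWCNT] description above calls for a one-shot
     * write-one-then-zero sequence to clear the bus-utilization counters.
     * A minimal sketch, assuming hypothetical CSR read/write helpers and the
     * bdk_lmcx_control_t typedef that follows this union:
     *
     *   bdk_lmcx_control_t ctl;
     *   ctl.u = read_lmc_control_csr(0);   // hypothetical read of LMC(0)_CONTROL
     *   ctl.s.bwcnt = 1;
     *   write_lmc_control_csr(0, ctl.u);   // write one ...
     *   ctl.s.bwcnt = 0;
     *   write_lmc_control_csr(0, ctl.u);   // ... then rewrite zero to finish the pulse
     */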
3010     struct bdk_lmcx_control_cn88xxp1
3011     {
3012 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
3013         uint64_t scramble_ena          : 1;  /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
3014         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
3015         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Offset for DFA rate-matching. */
3016         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
3017                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
3018                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
3019 
3020                                                                  0x0 = Reserved. */
3021         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
3022         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
3023         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Coarse rate matching max bucket size. The coarse rate matching logic is used to control
3024                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
3025                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
3026                                                                  increments by
3027                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
3028                                                                  when [CRM_MAX] is reached.
3029 
3030                                                                  0x0 = Reserved. */
3031         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
3032                                                                  CK cycle. */
3033         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
3034                                                                  additional CK cycle. */
3035         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3036                                                                  drivers is delayed an additional BPRCH CK cycles.
3037                                                                  0x0 = 0 CK cycles.
3038                                                                  0x1 = 1 CK cycles.
3039                                                                  0x2 = 2 CK cycles.
3040                                                                  0x3 = 3 CK cycles." */
3041         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
3042                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3043         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
3044                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3045         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
3046                                                                  traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
3047         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) XOR the bank bits.
3048                                                                  0: bank\<2:0\> = address\<9:7\>.
3049                                                                  1: bank\<2:0\> = address\<9:7\> ^ address\<14:12\>. */
3050         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
3051                                                                  to interrupt. */
3052         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
3053                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
3054                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
3055         uint64_t elev_prio_dis         : 1;  /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are sent in regardless of
3056                                                                  priority information from L2C. */
3057         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
3058         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
3059         uint64_t throttle_wr           : 1;  /**< [  7:  7](R/W) When set, use at most one IFB for write operations. */
3060         uint64_t throttle_rd           : 1;  /**< [  6:  6](R/W) When set, use at most one IFB for read operations. */
3061         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3062                                                                  drivers is FPRCH2 CKs earlier.
3063                                                                  0x0 = 0 CK cycles.
3064                                                                  0x1 = 1 CK cycles.
3065                                                                  0x2 = 2 CK cycles.
3066                                                                  0x3 = Reserved." */
3067         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
3068                                                                  Internal:
3069                                                                  Enable the posted CAS feature of DDR3. This bit must be
3070                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
3071         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
3072                                                                  setup time pressure on the address and command bus which nominally have a very large
3073                                                                  fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
3074                                                                  for Two Dimm Unbuffered Systems for physical details. */
3075         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
3076                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
3077                                                                  a one, then write this field to a zero. */
3078         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
3079                                                                  address and control bits to be registered in the controller. */
3080 #else /* Word 0 - Little Endian */
3081         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
3082                                                                  address and control bits to be registered in the controller. */
3083         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
3084                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
3085                                                                  a one, then write this field to a zero. */
3086         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
3087                                                                  setup time pressure on the address and command bus which nominally have a very large
3088                                                                  fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
3089                                                                  for Two Dimm Unbuffered Systems for physical details. */
3090         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
3091                                                                  Internal:
3092                                                                  Enable the posted CAS feature of DDR3. This bit must be
3093                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
3094         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3095                                                                  drivers is FPRCH2 CKs earlier.
3096                                                                  0x0 = 0 CK cycles.
3097                                                                  0x1 = 1 CK cycles.
3098                                                                  0x2 = 2 CK cycles.
3099                                                                  0x3 = Reserved." */
3100         uint64_t throttle_rd           : 1;  /**< [  6:  6](R/W) When set, use at most one IFB for read operations. */
3101         uint64_t throttle_wr           : 1;  /**< [  7:  7](R/W) When set, use at most one IFB for write operations. */
3102         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
3103         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
3104         uint64_t elev_prio_dis         : 1;  /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are sent in regardless of
3105                                                                  priority information from L2C. */
3106         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
3107                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
3108                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
3109         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
3110                                                                  to interrupt. */
3111         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) XOR the bank bits.
3112                                                                  0: bank\<2:0\> = address\<9:7\>.
3113                                                                  1: bank\<2:0\> = address\<9:7\> ^ address\<14:12\>. */
3114         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
3115                                                                  traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
3116         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
3117                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3118         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
3119                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3120         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3121                                                                  drivers is delayed an additional BPRCH CK cycles.
3122                                                                  0x0 = 0 CK cycles.
3123                                                                  0x1 = 1 CK cycles.
3124                                                                  0x2 = 2 CK cycles.
3125                                                                  0x3 = 3 CK cycles." */
3126         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
3127                                                                  additional CK cycle. */
3128         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
3129                                                                  CK cycle. */
3130         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Coarse rate matching max bucket size. The coarse rate matching logic is used to control
3131                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
3132                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
3133                                                                  increments by
3134                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
3135                                                                  when [CRM_MAX] is reached.
3136 
3137                                                                  0x0 = Reserved. */
3138         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
3139         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
3140         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
3141                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
3142                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
3143 
3144                                                                  0x0 = Reserved. */
3145         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Offset for DFA rate-matching. */
3146         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
3147         uint64_t scramble_ena          : 1;  /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
3148 #endif /* Word 0 - End */
3149     } cn88xxp1;
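    /* Editor's note (illustrative sketch, not auto-generated text): per the
     * [XOR_BANK] description in the CN88XX pass 1.x layout above, the DRAM
     * bank bits would be derived from a hypothetical L2C-LMC address "addr"
     * roughly as
     *     bank = (addr >> 7) & 0x7;                    // XOR_BANK = 0
     *     bank = ((addr >> 7) ^ (addr >> 12)) & 0x7;   // XOR_BANK = 1
     * i.e. bank<2:0> = address<9:7>, optionally XORed with address<14:12>. */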
3150     struct bdk_lmcx_control_cn9
3151     {
3152 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
3153         uint64_t reserved_63           : 1;
3154         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
3155         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Reserved.
3156                                                                  Internal:
3157                                                                  Offset for DFA rate-matching. */
3158         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Reserved.
3159                                                                  Internal:
3160                                                                  Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
3161                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
3162                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
3163 
3164                                                                  0x0 = Reserved. */
3165         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
3166         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
3167         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Reserved.
3168                                                                  Internal:
3169                                                                  Coarse rate matching max bucket size. The coarse rate matching logic is used to control
3170                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
3171                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
3172                                                                  increments by
3173                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
3174                                                                  when [CRM_MAX] is reached.
3175 
3176                                                                  0x0 = Reserved. */
3177         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
3178                                                                  CK cycle. */
3179         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
3180                                                                  additional CK cycle. */
3181         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3182                                                                  drivers is delayed an additional BPRCH CK cycles.
3183                                                                  0x0 = 0 CK cycles.
3184                                                                  0x1 = 1 CK cycles.
3185                                                                  0x2 = 2 CK cycles.
3186                                                                  0x3 = 3 CK cycles." */
3187         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
3188                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3189         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
3190                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3191         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
3192                                                                  traffic. Note that this has no effect on the DDR4 PHY and pads clocks. */
3193         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
3194                                                                  address bits. */
3195         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
3196                                                                  to interrupt. */
3197         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
3198                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
3199                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
3200         uint64_t wrfl_prio_dis         : 1;  /**< [ 10: 10](R/W) Disable write flush priority logic. When set, LMC does not prioritize writes, regardless of
3201                                                                  whether there is a pending write flush command sent from TAD. */
3202         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
3203         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
3204         uint64_t reserved_6_7          : 2;
3205         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3206                                                                  drivers is FPRCH2 CKs earlier.
3207                                                                  0x0 = 0 CK cycles.
3208                                                                  0x1 = 1 CK cycles.
3209                                                                  0x2 = 2 CK cycles.
3210                                                                  0x3 = Reserved." */
3211         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
3212                                                                  Internal:
3213                                                                  Enable the posted CAS feature of DDR4. This bit must be
3214                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
3215         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
3216                                                                  setup time pressure on the address and command bus which nominally have a very large
3217                                                                  fanout.
3218                                                                  If software wants to enable this feature, it must be set prior to running any
3219                                                                  initialization code. */
3220         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
3221                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
3222                                                                  a one, then write this field to a zero. */
3223         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
3224                                                                  address and control bits to be registered in the controller. */
3225 #else /* Word 0 - Little Endian */
3226         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
3227                                                                  address and control bits to be registered in the controller. */
3228         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
3229                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
3230                                                                  a one, then write this field to a zero. */
3231         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
3232                                                                  setup time pressure on the address and command bus which nominally have a very large
3233                                                                  fanout.
3234                                                                  If software wants to enable this feature, it must be set prior to running any
3235                                                                  initialization code. */
3236         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
3237                                                                  Internal:
3238                                                                  Enable the posted CAS feature of DDR4. This bit must be
3239                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
3240         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3241                                                                  drivers is FPRCH2 CKs earlier.
3242                                                                  0x0 = 0 CK cycles.
3243                                                                  0x1 = 1 CK cycles.
3244                                                                  0x2 = 2 CK cycles.
3245                                                                  0x3 = Reserved." */
3246         uint64_t reserved_6_7          : 2;
3247         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
3248         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
3249         uint64_t wrfl_prio_dis         : 1;  /**< [ 10: 10](R/W) Disable write flush priority logic. When set, LMC does not prioritize writes, regardless of
3250                                                                  whether there is a pending write flush command sent from TAD. */
3251         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
3252                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
3253                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
3254         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
3255                                                                  to interrupt. */
3256         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
3257                                                                  address bits. */
3258         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
3259                                                                  traffic. Note that this has no effect on the DDR4 PHY and pads clocks. */
3260         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
3261                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3262         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
3263                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3264         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3265                                                                  drivers is delayed an additional BPRCH CK cycles.
3266                                                                  0x0 = 0 CK cycles.
3267                                                                  0x1 = 1 CK cycles.
3268                                                                  0x2 = 2 CK cycles.
3269                                                                  0x3 = 3 CK cycles." */
3270         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
3271                                                                  additional CK cycle. */
3272         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
3273                                                                  CK cycle. */
3274         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Reserved.
3275                                                                  Internal:
3276                                                                  Coarse rate matching max bucket size. The coarse rate matching logic is used to control
3277                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
3278                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
3279                                                                  increments by
3280                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
3281                                                                  when [CRM_MAX] is reached.
3282 
3283                                                                  0x0 = Reserved. */
3284         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
3285         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
3286         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Reserved.
3287                                                                  Internal:
3288                                                                  Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
3289                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
3290                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
3291 
3292                                                                  0x0 = Reserved. */
3293         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Reserved.
3294                                                                  Internal:
3295                                                                  Offset for DFA rate-matching. */
3296         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
3297         uint64_t reserved_63           : 1;
3298 #endif /* Word 0 - End */
3299     } cn9;
3300     struct bdk_lmcx_control_cn81xx
3301     {
3302 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
3303         uint64_t scramble_ena          : 1;  /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
3304         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
3305         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Reserved.
3306                                                                  Internal:
3307                                                                  Offset for DFA rate-matching. */
3308         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Reserved.
3309                                                                  Internal:
3310                                                                  Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
3311                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
3312                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
3313 
3314                                                                  0x0 = Reserved. */
3315         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
3316         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
3317         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Reserved.
3318                                                                  Internal:
3319                                                                  Coarse rate matching max bucket size. The coarse rate matching logic is used to control
3320                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
3321                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
3322                                                                  increments by
3323                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
3324                                                                  when [CRM_MAX] is reached.
3325 
3326                                                                  0x0 = Reserved. */
3327         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
3328                                                                  CK cycle. */
3329         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
3330                                                                  additional CK cycle. */
3331         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3332                                                                  drivers is delayed an additional BPRCH CK cycles.
3333                                                                  0x0 = 0 CK cycles.
3334                                                                  0x1 = 1 CK cycles.
3335                                                                  0x2 = 2 CK cycles.
3336                                                                  0x3 = 3 CK cycles." */
3337         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
3338                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3339         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
3340                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3341         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
3342                                                                  traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
3343         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
3344                                                                  address bits. */
3345         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
3346                                                                  to interrupt. */
3347         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
3348                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
3349                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
3350         uint64_t elev_prio_dis         : 1;  /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are issued regardless of
3351                                                                  priority information from L2C. */
3352         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
3353         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
3354         uint64_t throttle_wr           : 1;  /**< [  7:  7](R/W) When set, use at most one IFB for write operations. */
3355         uint64_t throttle_rd           : 1;  /**< [  6:  6](R/W) When set, use at most one IFB for read operations. */
3356         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3357                                                                  drivers is FPRCH2 CKs earlier.
3358                                                                  0x0 = 0 CK cycles.
3359                                                                  0x1 = 1 CK cycles.
3360                                                                  0x2 = 2 CK cycles.
3361                                                                  0x3 = Reserved." */
3362         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
3363                                                                  Internal:
3364                                                                  Enable the posted CAS feature of DDR3. This bit must be
3365                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
3366         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
3367                                                                  setup time pressure on the address and command bus which nominally have a very large
3368                                                                  fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
3369                                                                  for Two Dimm Unbuffered Systems for physical details. */
3370         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
3371                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
3372                                                                  a one, then write this field to a zero. */
3373         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
3374                                                                  address and control bits to be registered in the controller. */
3375 #else /* Word 0 - Little Endian */
3376         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
3377                                                                  address and control bits to be registered in the controller. */
3378         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
3379                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
3380                                                                  a one, then write this field to a zero. */
3381         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
3382                                                                  setup time pressure on the address and command bus which nominally have a very large
3383                                                                  fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
3384                                                                  for Two Dimm Unbuffered Systems for physical details. */
3385         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
3386                                                                  Internal:
3387                                                                  Enable the posted CAS feature of DDR3. This bit must be
3388                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
3389         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3390                                                                  drivers is FPRCH2 CKs earlier.
3391                                                                  0x0 = 0 CK cycles.
3392                                                                  0x1 = 1 CK cycles.
3393                                                                  0x2 = 2 CK cycles.
3394                                                                  0x3 = Reserved." */
3395         uint64_t throttle_rd           : 1;  /**< [  6:  6](R/W) When set, use at most one IFB for read operations. */
3396         uint64_t throttle_wr           : 1;  /**< [  7:  7](R/W) When set, use at most one IFB for write operations. */
3397         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
3398         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
3399         uint64_t elev_prio_dis         : 1;  /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are issued regardless of
3400                                                                  priority information from L2C. */
3401         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
3402                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
3403                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
3404         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
3405                                                                  to interrupt. */
3406         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
3407                                                                  address bits. */
3408         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
3409                                                                  traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
3410         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
3411                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3412         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
3413                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3414         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3415                                                                  drivers is delayed an additional BPRCH CK cycles.
3416                                                                  0x0 = 0 CK cycles.
3417                                                                  0x1 = 1 CK cycles.
3418                                                                  0x2 = 2 CK cycles.
3419                                                                  0x3 = 3 CK cycles." */
3420         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
3421                                                                  additional CK cycle. */
3422         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
3423                                                                  CK cycle. */
3424         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Reserved.
3425                                                                  Internal:
3426                                                                  Coarse rate matching max bucket size. The coarse rate matching logic is used to control
3427                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
3428                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
3429                                                                  increments by
3430                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
3431                                                                  when [CRM_MAX] is reached.
3432 
3433                                                                  0x0 = Reserved. */
3434         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
3435         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
3436         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Reserved.
3437                                                                  Internal:
3438                                                                  Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
3439                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
3440                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
3441 
3442                                                                  0x0 = Reserved. */
3443         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Reserved.
3444                                                                  Internal:
3445                                                                  Offset for DFA rate-matching. */
3446         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
3447         uint64_t scramble_ena          : 1;  /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
3448 #endif /* Word 0 - End */
3449     } cn81xx;
3450     /* struct bdk_lmcx_control_cn81xx cn83xx; */
3451     struct bdk_lmcx_control_cn88xxp2
3452     {
3453 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
3454         uint64_t scramble_ena          : 1;  /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
3455         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
3456         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Offset for DFA rate-matching. */
3457         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
3458                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
3459                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
3460 
3461                                                                  0x0 = Reserved. */
3462         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
3463         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
3464         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Coarse rate matching max bucket size. The coarse rate matching logic is used to control
3465                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
3466                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
3467                                                                  increments by
3468                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
3469                                                                  when [CRM_MAX] is reached.
3470 
3471                                                                  0x0 = Reserved. */
3472         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
3473                                                                  CK cycle. */
3474         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
3475                                                                  additional CK cycle. */
3476         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3477                                                                  drivers is delayed an additional BPRCH CK cycles.
3478                                                                  0x0 = 0 CK cycles.
3479                                                                  0x1 = 1 CK cycles.
3480                                                                  0x2 = 2 CK cycles.
3481                                                                  0x3 = 3 CK cycles." */
3482         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
3483                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3484         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
3485                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3486         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
3487                                                                  traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
3488         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
3489                                                                  address bits. */
3490         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
3491                                                                  to interrupt. */
3492         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
3493                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
3494                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
3495         uint64_t elev_prio_dis         : 1;  /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are issued regardless of
3496                                                                  priority information from L2C. */
3497         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
3498         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
3499         uint64_t throttle_wr           : 1;  /**< [  7:  7](R/W) When set, use at most one IFB for write operations. */
3500         uint64_t throttle_rd           : 1;  /**< [  6:  6](R/W) When set, use at most one IFB for read operations. */
3501         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3502                                                                  drivers is FPRCH2 CKs earlier.
3503                                                                  0x0 = 0 CK cycles.
3504                                                                  0x1 = 1 CK cycles.
3505                                                                  0x2 = 2 CK cycles.
3506                                                                  0x3 = Reserved." */
3507         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
3508                                                                  Internal:
3509                                                                  Enable the posted CAS feature of DDR3. This bit must be
3510                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
3511         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
3512                                                                  setup time pressure on the address and command bus which nominally have a very large
3513                                                                  fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
3514                                                                  for Two Dimm Unbuffered Systems for physical details. */
3515         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
3516                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
3517                                                                  a one, then write this field to a zero. */
3518         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
3519                                                                  address and control bits to be registered in the controller. */
3520 #else /* Word 0 - Little Endian */
3521         uint64_t rdimm_ena             : 1;  /**< [  0:  0](R/W) Registered DIMM enable. When set allows the use of JEDEC Registered DIMMs which require
3522                                                                  address and control bits to be registered in the controller. */
3523         uint64_t bwcnt                 : 1;  /**< [  1:  1](R/W) Bus utilization counter clear. Clears the LMC()_OPS_CNT, LMC()_IFB_CNT, and
3524                                                                  LMC()_DCLK_CNT registers. To clear the CSRs, software should first write this field to
3525                                                                  a one, then write this field to a zero. */
3526         uint64_t ddr2t                 : 1;  /**< [  2:  2](R/W) Turn on the DDR 2T mode. 2 CK-cycle window for CMD and address. This mode helps relieve
3527                                                                  setup time pressure on the address and command bus which nominally have a very large
3528                                                                  fanout. Please refer to Micron's tech note tn_47_01 titled DDR2-533 Memory Design Guide
3529                                                                  for Two Dimm Unbuffered Systems for physical details. */
3530         uint64_t pocas                 : 1;  /**< [  3:  3](R/W) Reserved; must be zero.
3531                                                                  Internal:
3532                                                                  Enable the posted CAS feature of DDR3. This bit must be
3533                                                                  set whenever LMC()_MODEREG_PARAMS0[AL]!=0. */
3534         uint64_t fprch2                : 2;  /**< [  5:  4](R/W) "Front porch enable. When set, the turn-off time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3535                                                                  drivers is FPRCH2 CKs earlier.
3536                                                                  0x0 = 0 CK cycles.
3537                                                                  0x1 = 1 CK cycles.
3538                                                                  0x2 = 2 CK cycles.
3539                                                                  0x3 = Reserved." */
3540         uint64_t throttle_rd           : 1;  /**< [  6:  6](R/W) When set, use at most one IFB for read operations. */
3541         uint64_t throttle_wr           : 1;  /**< [  7:  7](R/W) When set, use at most one IFB for write operations. */
3542         uint64_t inorder_rd            : 1;  /**< [  8:  8](R/W) Send read operations in order (regardless of priority). */
3543         uint64_t inorder_wr            : 1;  /**< [  9:  9](R/W) Send write operations in order (regardless of priority). */
3544         uint64_t elev_prio_dis         : 1;  /**< [ 10: 10](R/W) Disable elevate priority logic. When set, write operations are issued regardless of
3545                                                                  priority information from L2C. */
3546         uint64_t nxm_write_en          : 1;  /**< [ 11: 11](R/W) NXM write mode. When clear, LMC discards write operations to addresses that don't exist in
3547                                                                  the DRAM (as defined by LMC()_NXM configuration). When set, LMC completes write
3548                                                                  operations to addresses that don't exist in the DRAM at an aliased address. */
3549         uint64_t max_write_batch       : 4;  /**< [ 15: 12](R/W) Maximum number of consecutive write operations to service before forcing read operations
3550                                                                  to interrupt. */
3551         uint64_t xor_bank              : 1;  /**< [ 16: 16](R/W) Enable signal to XOR the bank bits. See LMC()_EXT_CONFIG2 on how LMC selects the L2C-LMC
3552                                                                  address bits. */
3553         uint64_t auto_dclkdis          : 1;  /**< [ 17: 17](R/W) When 1, LMC automatically shuts off its internal clock to conserve power when there is no
3554                                                                  traffic. Note that this has no effect on the DDR3/DDR4 PHY and pads clocks. */
3555         uint64_t int_zqcs_dis          : 1;  /**< [ 18: 18](R/W) Disable (internal) auto-ZQCS calibration. When clear, LMC runs internal ZQ calibration
3556                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3557         uint64_t ext_zqcs_dis          : 1;  /**< [ 19: 19](R/W) Disable (external) auto-ZQCS calibration. When clear, LMC runs external ZQ calibration
3558                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
3559         uint64_t bprch                 : 2;  /**< [ 21: 20](R/W) "Back porch enable. When set, the turn-on time for the default DDR#_DQ* /DDR#_DQS_*_P/N
3560                                                                  drivers is delayed an additional BPRCH CK cycles.
3561                                                                  0x0 = 0 CK cycles.
3562                                                                  0x1 = 1 CK cycles.
3563                                                                  0x2 = 2 CK cycles.
3564                                                                  0x3 = 3 CK cycles." */
3565         uint64_t wodt_bprch            : 1;  /**< [ 22: 22](R/W) When set, the turn-off time for the ODT pin during a write command is delayed an
3566                                                                  additional CK cycle. */
3567         uint64_t rodt_bprch            : 1;  /**< [ 23: 23](R/W) When set, the turn-off time for the ODT pin during a read command is delayed an additional
3568                                                                  CK cycle. */
3569         uint64_t crm_max               : 5;  /**< [ 28: 24](R/W) Coarse rate matching max bucket size. The coarse rate matching logic is used to control
3570                                                                  the bandwidth allocated to DFA reads. [CRM_MAX] is subdivided into two regions with DFA
3571                                                                  reads being preferred over LMC reads/writes when [CRM_CNT] \< [CRM_THR]. [CRM_CNT]
3572                                                                  increments by
3573                                                                  one when a DFA read is slotted and by 2 when a LMC read/write is slotted, and rolls over
3574                                                                  when [CRM_MAX] is reached.
3575 
3576                                                                  0x0 = Reserved. */
3577         uint64_t crm_thr               : 5;  /**< [ 33: 29](R/W) Coarse rate matching threshold. */
3578         uint64_t crm_cnt               : 5;  /**< [ 38: 34](RO/H) Coarse count. */
3579         uint64_t thrmax                : 4;  /**< [ 42: 39](R/W) Fine rate matching max bucket size. In conjunction with the coarse rate matching logic,
3580                                                                  the fine rate matching logic gives software the ability to prioritize DFA reads over L2C
3581                                                                  writes. Higher [PERSUB] values result in a lower DFA read bandwidth.
3582 
3583                                                                  0x0 = Reserved. */
3584         uint64_t persub                : 8;  /**< [ 50: 43](R/W) Offset for DFA rate-matching. */
3585         uint64_t thrcnt                : 12; /**< [ 62: 51](RO/H) Fine count. */
3586         uint64_t scramble_ena          : 1;  /**< [ 63: 63](R/W) When set, will enable the scramble/descramble logic. */
3587 #endif /* Word 0 - End */
3588     } cn88xxp2;
3589 };
3590 typedef union bdk_lmcx_control bdk_lmcx_control_t;
3591 
3592 static inline uint64_t BDK_LMCX_CONTROL(unsigned long a) __attribute__ ((pure, always_inline));
3593 static inline uint64_t BDK_LMCX_CONTROL(unsigned long a)
3594 {
3595     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
3596         return 0x87e088000190ll + 0x1000000ll * ((a) & 0x0);
3597     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
3598         return 0x87e088000190ll + 0x1000000ll * ((a) & 0x1);
3599     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
3600         return 0x87e088000190ll + 0x1000000ll * ((a) & 0x3);
3601     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
3602         return 0x87e088000190ll + 0x1000000ll * ((a) & 0x3);
3603     __bdk_csr_fatal("LMCX_CONTROL", 1, a, 0, 0, 0);
3604 }
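/* Editor's note (illustrative): the address computed above is the fixed base
 * 0x87e088000190 plus 0x1000000 per LMC instance; for example, on CN88XX
 * BDK_LMCX_CONTROL(2) = 0x87e088000190 + 2 * 0x1000000 = 0x87e08a000190. */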
3605 
3606 #define typedef_BDK_LMCX_CONTROL(a) bdk_lmcx_control_t
3607 #define bustype_BDK_LMCX_CONTROL(a) BDK_CSR_TYPE_RSL
3608 #define basename_BDK_LMCX_CONTROL(a) "LMCX_CONTROL"
3609 #define device_bar_BDK_LMCX_CONTROL(a) 0x0 /* PF_BAR0 */
3610 #define busnum_BDK_LMCX_CONTROL(a) (a)
3611 #define arguments_BDK_LMCX_CONTROL(a) (a),-1,-1,-1
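
/* Editor's illustrative sketch (not part of the auto-generated file): the
 * [BWCNT] description above states that the LMC()_OPS_CNT, LMC()_IFB_CNT and
 * LMC()_DCLK_CNT counters are cleared by writing BWCNT to one and then back
 * to zero.  Assuming hypothetical raw 64-bit MMIO accessors csr_read64() /
 * csr_write64() (this file only defines the CSR layout and addresses), and
 * using the CN9XXX field view (.cn9) plus the union's raw doubleword view
 * (.u), that sequence could look like the following. */
#if 0 /* example only -- not compiled */
static inline void example_lmc_clear_bw_counters(unsigned long lmc)
{
    uint64_t addr = BDK_LMCX_CONTROL(lmc);  /* physical CSR address for LMC(lmc) */
    bdk_lmcx_control_t ctl;

    ctl.u = csr_read64(addr);               /* read-modify-write LMC()_CONTROL */
    ctl.cn9.bwcnt = 1;                      /* first write the field to one ... */
    csr_write64(addr, ctl.u);
    ctl.cn9.bwcnt = 0;                      /* ... then back to zero to clear */
    csr_write64(addr, ctl.u);
}
#endif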
3612 
3613 /**
3614  * Register (RSL) lmc#_ctl
3615  *
3616  * LMC Control Register
3617  */
3618 union bdk_lmcx_ctl
3619 {
3620     uint64_t u;
3621     struct bdk_lmcx_ctl_s
3622     {
3623 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
3624         uint64_t reserved_14_63        : 50;
3625         uint64_t wr_cmd_delay_sel      : 2;  /**< [ 13: 12](R/W) Selects the write command delay (in core clk cycles) for incoming write transactions.
3626                                                                  Depending on the DRAM clock speed, programming this field can be required to ensure proper
3627                                                                  loading of the write data into LMC's buffer. It is recommended to set this field as
3628                                                                  follows:
3629                                                                  _ DDR4-1600 - DDR4-2133 = set to 0 (no delay).
3630                                                                  _ DDR4-2400 = set to 1 (delay by 1 cycle).
3631                                                                  _ DDR4-2666 = set to 2 (delay by 2 cycles).
3632                                                                  _ DDR4-3200 = set to 3 (delay by 3 cycles).
3633 
3634                                                                  Internal:
3635                                                                  CYA bits to cover the case when rclk is at its slowest speed (300MHz), while dclk is
3636                                                                  greater than 1.2GHz. In general, the condition ((24+CWL) * dclk_period (ns) \> 33.33ns)
3637                                                                  must be met. */
3638         uint64_t reserved_9_11         : 3;
3639         uint64_t predictive_start      : 1;  /**< [  8:  8](WO) A 0-\>1 transition initiates the predictive fill logic on the LMC response data.
3640                                                                  For optimal performance, set this field to one along with the correct value of [RDF_CNT]
3641                                                                  after core clock stabilizes to a new frequency.
3642                                                                  This field is a one-shot and clears itself each time it is set. */
3643         uint64_t rdf_cnt               : 8;  /**< [  7:  0](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
3644                                                                  For optimal performance set to
3645                                                                  RNDUP[((10 * DDR-clock period)/core-clock period) - 1].
3646                                                                  Set to zero to disable predictive mode. */
3647 #else /* Word 0 - Little Endian */
3648         uint64_t rdf_cnt               : 8;  /**< [  7:  0](R/W) Defines the sample point of the LMC response data in the DDR-clock/core-clock crossing.
3649                                                                  For optimal performance set to
3650                                                                  RNDUP[((10 * DDR-clock period)/core-clock period) - 1].
3651                                                                  Set to zero to disable predictive mode. */
3652         uint64_t predictive_start      : 1;  /**< [  8:  8](WO) A 0-\>1 transition initiates the predictive fill logic on the LMC response data.
3653                                                                  For optimal performance, set this field to one along with the correct value of [RDF_CNT]
3654                                                                  after core clock stabilizes to a new frequency.
3655                                                                  This field is a one-shot and clears itself each time it is set. */
3656         uint64_t reserved_9_11         : 3;
3657         uint64_t wr_cmd_delay_sel      : 2;  /**< [ 13: 12](R/W) Selects the write command delay (in core clk cycles) for incoming write transactions.
3658                                                                  Depending on the dram clock speed, programming this field can be required to ensure proper
3659                                                                  loading of the write data into LMC's buffer. It is recommended to set this field as
3660                                                                  follows:
3661                                                                  _ DDR4-1600 - DDR4-2133 = set to 0 (no delay).
3662                                                                  _ DDR4-2400 = set to 1 (delay by 1 cycle).
3663                                                                  _ DDR4-2666 = set to 2 (delay by 2 cycles).
3664                                                                  _ DDR4-3200 = set to 3 (delay by 3 cycles).
3665 
3666                                                                  Internal:
3667                                                                  CYA bits to cover the case when rclk is at its slowest speed (300MHz), while dclk is
3668                                                                  greater than 1.2GHz. In general, the condition ((24+CWL) * dclk_period (ns) \> 33.33ns)
3669                                                                  must be met. */
3670         uint64_t reserved_14_63        : 50;
3671 #endif /* Word 0 - End */
3672     } s;
3673     /* struct bdk_lmcx_ctl_s cn; */
3674 };
3675 typedef union bdk_lmcx_ctl bdk_lmcx_ctl_t;
3676 
3677 static inline uint64_t BDK_LMCX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
3678 static inline uint64_t BDK_LMCX_CTL(unsigned long a)
3679 {
3680     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
3681         return 0x87e0880001c0ll + 0x1000000ll * ((a) & 0x3);
3682     __bdk_csr_fatal("LMCX_CTL", 1, a, 0, 0, 0);
3683 }
3684 
3685 #define typedef_BDK_LMCX_CTL(a) bdk_lmcx_ctl_t
3686 #define bustype_BDK_LMCX_CTL(a) BDK_CSR_TYPE_RSL
3687 #define basename_BDK_LMCX_CTL(a) "LMCX_CTL"
3688 #define device_bar_BDK_LMCX_CTL(a) 0x0 /* PF_BAR0 */
3689 #define busnum_BDK_LMCX_CTL(a) (a)
3690 #define arguments_BDK_LMCX_CTL(a) (a),-1,-1,-1
3691 
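/*
 * Editor's illustrative sketch (not part of the auto-generated definitions):
 * deriving LMC()_CTL[RDF_CNT] from the formula given above,
 * RNDUP[((10 * DDR-clock period)/core-clock period) - 1], and picking
 * LMC()_CTL[WR_CMD_DELAY_SEL] from the DDR4 speed table. Frequencies are in
 * Hz; the function names and the DDR4 speed argument are assumptions made
 * only for illustration.
 */
static inline unsigned example_lmc_rdf_cnt(uint64_t core_hz, uint64_t dclk_hz)
{
    /* ceil(10*Tdclk/Tcore - 1) rewritten for integer math; valid when
     * 10*core_hz >= dclk_hz, which holds for the supported clock ranges. */
    unsigned v = (unsigned)((10 * core_hz - 1) / dclk_hz);
    return v > 0xff ? 0xff : v; /* RDF_CNT is an 8-bit field */
}

static inline unsigned example_lmc_wr_cmd_delay_sel(unsigned ddr4_mts)
{
    if (ddr4_mts <= 2133)
        return 0; /* DDR4-1600..2133: no delay */
    if (ddr4_mts <= 2400)
        return 1; /* DDR4-2400: delay by one cycle */
    if (ddr4_mts <= 2666)
        return 2; /* DDR4-2666: delay by two cycles */
    return 3;     /* DDR4-3200: delay by three cycles */
}

static inline uint64_t example_build_lmc_ctl(uint64_t core_hz, uint64_t dclk_hz,
                                             unsigned ddr4_mts)
{
    bdk_lmcx_ctl_t c;

    c.u = 0;
    c.s.rdf_cnt = example_lmc_rdf_cnt(core_hz, dclk_hz);
    c.s.wr_cmd_delay_sel = example_lmc_wr_cmd_delay_sel(ddr4_mts);
    /* PREDICTIVE_START is a one-shot; set it once the new RDF_CNT is loaded
     * and the core clock is stable at its new frequency. */
    c.s.predictive_start = 1;
    return c.u;
}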
3692 /**
3693  * Register (RSL) lmc#_dbtrain_ctl
3694  *
3695  * LMC Data Buffer Training Control Register
3696  * Reserved.
3697  * Internal:
3698  * This register contains control bits that are used during the Data Buffer
3699  * training sequence in DDR4 LRDIMM mode. When one of the data buffer training
3700  * sequences is initiated, it uses the contents of this register to control
3701  * its operation.
3702  */
3703 union bdk_lmcx_dbtrain_ctl
3704 {
3705     uint64_t u;
3706     struct bdk_lmcx_dbtrain_ctl_s
3707     {
3708 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
3709         uint64_t reserved_63           : 1;
3710         uint64_t lfsr_pattern_sel      : 1;  /**< [ 62: 62](RO) Reserved. */
3711         uint64_t cmd_count_ext         : 2;  /**< [ 61: 60](RO) Reserved. */
3712         uint64_t db_output_impedance   : 3;  /**< [ 59: 57](R/W) Reserved.
3713                                                                  Internal:
3714                                                                  Host interface DQ/DQS output driver impedance control.
3715                                                                  This is the default value used during host interface write leveling in LRDIMM
3716                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
3717                                                                  0x0 = RZQ/6 (40 ohm).
3718                                                                  0x1 = RZQ/7 (34 ohm).
3719                                                                  0x2 = RZQ/5 (48 ohm).
3720                                                                  0x3-0x7 = Reserved. */
3721         uint64_t db_sel                : 1;  /**< [ 56: 56](R/W) Reserved.
3722                                                                  Internal:
3723                                                                  Used when running host interface write leveling.
3724                                                                  0 = selects DIMM0's data buffer.
3725                                                                  1 = selects DIMM1's data buffer. */
3726         uint64_t tccd_sel              : 1;  /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
3727                                                                  back-to-back read commands. Otherwise it will space out back-to-back
3728                                                                  reads with a default value of 4 cycles.
3729 
3730                                                                  While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
3731                                                                  Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
3732                                                                  this bit to be set. */
3733         uint64_t rw_train              : 1;  /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
3734                                                                  memory array using the burst pattern that is set in
3735                                                                  LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
3736                                                                  LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
3737 
3738                                                                  This burst pattern gets shifted by one byte at every cycle.
3739                                                                  The sequence will then do the reads to the same location and compare
3740                                                                  the data coming back with this pattern.
3741                                                                  The bit-wise comparison result gets stored in
3742                                                                  LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
3743         uint64_t read_dq_count         : 7;  /**< [ 53: 47](R/W) Reserved.
3744                                                                  Internal:
3745                                                                  The number of cycles until a pulse is issued to sample the DQ into the
3746                                                                  MPR register. These bits control the timing of when to sample the data
3747                                                                  buffer training result. */
3748         uint64_t read_cmd_count        : 5;  /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
3749                                                                  Internal:
3750                                                                  This can be set to zero in which case the sequence does not send any
3751                                                                  Read commands to accommodate for the DWL training mode. */
3752         uint64_t write_ena             : 1;  /**< [ 41: 41](R/W) Reserved.
3753                                                                  Internal:
3754                                                                  Enables the write operation. This is mainly used to accomplish the MWD
3755                                                                  training sequence of the data buffer.
3756                                                                  LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
3757         uint64_t activate              : 1;  /**< [ 40: 40](R/W) Reserved.
3758                                                                  Internal:
3759                                                                  Enables the activate command during the data buffer training sequence. */
3760         uint64_t prank                 : 2;  /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
3761         uint64_t lrank                 : 3;  /**< [ 37: 35](R/W) Reserved.
3762                                                                  Internal:
3763                                                                  Logical rank bits for read/write/activate operation during the data buffer
3764                                                                  training. */
3765         uint64_t row_a                 : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
3766         uint64_t bg                    : 2;  /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
3767         uint64_t ba                    : 2;  /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
3768         uint64_t column_a              : 13; /**< [ 12:  0](R/W) Column address for the read/write operation. */
3769 #else /* Word 0 - Little Endian */
3770         uint64_t column_a              : 13; /**< [ 12:  0](R/W) Column address for the read/write operation. */
3771         uint64_t ba                    : 2;  /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
3772         uint64_t bg                    : 2;  /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
3773         uint64_t row_a                 : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
3774         uint64_t lrank                 : 3;  /**< [ 37: 35](R/W) Reserved.
3775                                                                  Internal:
3776                                                                  Logical rank bits for read/write/activate operation during the data buffer
3777                                                                  training. */
3778         uint64_t prank                 : 2;  /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
3779         uint64_t activate              : 1;  /**< [ 40: 40](R/W) Reserved.
3780                                                                  Internal:
3781                                                                  Enables the activate command during the data buffer training sequence. */
3782         uint64_t write_ena             : 1;  /**< [ 41: 41](R/W) Reserved.
3783                                                                  Internal:
3784                                                                  Enables the write operation. This is mainly used to accomplish the MWD
3785                                                                  training sequence of the data buffer.
3786                                                                  LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
3787         uint64_t read_cmd_count        : 5;  /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
3788                                                                  Internal:
3789                                                                  This can be set to zero in which case the sequence does not send any
3790                                                                  Read commands to accommodate for the DWL training mode. */
3791         uint64_t read_dq_count         : 7;  /**< [ 53: 47](R/W) Reserved.
3792                                                                  Internal:
3793                                                                  The number of cycles until a pulse is issued to sample the DQ into the
3794                                                                  MPR register. These bits control the timing of when to sample the data
3795                                                                  buffer training result. */
3796         uint64_t rw_train              : 1;  /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
3797                                                                  memory array using the burst pattern that is set in
3798                                                                  LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
3799                                                                  LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
3800 
3801                                                                  This burst pattern gets shifted by one byte at every cycle.
3802                                                                  The sequence will then do the reads to the same location and compare
3803                                                                  the data coming back with this pattern.
3804                                                                  The bit-wise comparison result gets stored in
3805                                                                  LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
3806         uint64_t tccd_sel              : 1;  /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
3807                                                                  back-to-back read commands. Otherwise it will space out back-to-back
3808                                                                  reads with a default value of 4 cycles.
3809 
3810                                                                  While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
3811                                                                  Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
3812                                                                  this bit to be set. */
3813         uint64_t db_sel                : 1;  /**< [ 56: 56](R/W) Reserved.
3814                                                                  Internal:
3815                                                                  Used when running host interface write leveling.
3816                                                                  0 = selects DIMM0's data buffer.
3817                                                                  1 = selects DIMM1's data buffer. */
3818         uint64_t db_output_impedance   : 3;  /**< [ 59: 57](R/W) Reserved.
3819                                                                  Internal:
3820                                                                  Host interface DQ/DQS output driver impedance control.
3821                                                                  This is the default value used during host interface write leveling in LRDIMM
3822                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
3823                                                                  0x0 = RZQ/6 (40 ohm).
3824                                                                  0x1 = RZQ/7 (34 ohm).
3825                                                                  0x2 = RZQ/5 (48 ohm).
3826                                                                  0x3-0x7 = Reserved. */
3827         uint64_t cmd_count_ext         : 2;  /**< [ 61: 60](RO) Reserved. */
3828         uint64_t lfsr_pattern_sel      : 1;  /**< [ 62: 62](RO) Reserved. */
3829         uint64_t reserved_63           : 1;
3830 #endif /* Word 0 - End */
3831     } s;
3832     /* struct bdk_lmcx_dbtrain_ctl_s cn88xxp1; */
3833     struct bdk_lmcx_dbtrain_ctl_cn9
3834     {
3835 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
3836         uint64_t reserved_63           : 1;
3837         uint64_t lfsr_pattern_sel      : 1;  /**< [ 62: 62](R/W) If set high, the sequence uses 32-bit LFSR pattern when generating data sequence
3838                                                                  during the General R/W training (LMC()_DBTRAIN_CTL[RW_TRAIN] == 1).
3839 
3840                                                                  The LFSR polynomials are programmed by LMC()_CHAR_CTL[PRBS]. */
3841         uint64_t cmd_count_ext         : 2;  /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
3842                                                                  up to 128 read and write commands. */
3843         uint64_t db_output_impedance   : 3;  /**< [ 59: 57](R/W) Reserved.
3844                                                                  Internal:
3845                                                                  Host interface DQ/DQS output driver impedance control.
3846                                                                  This is the default value used during host interface write leveling in LRDIMM
3847                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
3848                                                                  0x0 = RZQ/6 (40 ohm).
3849                                                                  0x1 = RZQ/7 (34 ohm).
3850                                                                  0x2 = RZQ/5 (48 ohm).
3851                                                                  0x3-0x7 = Reserved. */
3852         uint64_t db_sel                : 1;  /**< [ 56: 56](R/W) Reserved.
3853                                                                  Internal:
3854                                                                  Used when running host interface write leveling.
3855                                                                  0 = selects DIMM0's data buffer.
3856                                                                  1 = selects DIMM1's data buffer. */
3857         uint64_t tccd_sel              : 1;  /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
3858                                                                  back-to-back read commands. Otherwise it will space out back-to-back
3859                                                                  reads with a default value of 4 cycles.
3860 
3861                                                                  While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
3862                                                                  Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
3863                                                                  this bit to be set. */
3864         uint64_t rw_train              : 1;  /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
3865                                                                  memory array using the burst pattern that is set in
3866                                                                  LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
3867                                                                  LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
3868 
3869                                                                  This burst pattern gets shifted by one byte at every cycle.
3870                                                                  The sequence will then do the reads to the same location and compare
3871                                                                  the data coming back with this pattern.
3872                                                                  The bit-wise comparison result gets stored in
3873                                                                  LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
3874         uint64_t read_dq_count         : 7;  /**< [ 53: 47](R/W) Reserved.
3875                                                                  Internal:
3876                                                                  The number of cycles until a pulse is issued to sample the DQ into the
3877                                                                  MPR register. These bits control the timing of when to sample the data
3878                                                                  buffer training result. */
3879         uint64_t read_cmd_count        : 5;  /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
3880                                                                  Internal:
3881                                                                  This can be set to zero in which case the sequence does not send any
3882                                                                  Read commands to accommodate for the DWL training mode. */
3883         uint64_t write_ena             : 1;  /**< [ 41: 41](R/W) Reserved.
3884                                                                  Internal:
3885                                                                  Enables the write operation. This is mainly used to accomplish the MWD
3886                                                                  training sequence of the data buffer.
3887                                                                  LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
3888         uint64_t activate              : 1;  /**< [ 40: 40](R/W) Reserved.
3889                                                                  Internal:
3890                                                                  Enables the activate command during the data buffer training sequence. */
3891         uint64_t prank                 : 2;  /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
3892         uint64_t lrank                 : 3;  /**< [ 37: 35](R/W) Reserved.
3893                                                                  Internal:
3894                                                                  Logical rank bits for read/write/activate operation during the data buffer
3895                                                                  training. */
3896         uint64_t row_a                 : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
3897         uint64_t bg                    : 2;  /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
3898         uint64_t ba                    : 2;  /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
3899         uint64_t column_a              : 13; /**< [ 12:  0](R/W) Column address for the read/write operation. */
3900 #else /* Word 0 - Little Endian */
3901         uint64_t column_a              : 13; /**< [ 12:  0](R/W) Column address for the read/write operation. */
3902         uint64_t ba                    : 2;  /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
3903         uint64_t bg                    : 2;  /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
3904         uint64_t row_a                 : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
3905         uint64_t lrank                 : 3;  /**< [ 37: 35](R/W) Reserved.
3906                                                                  Internal:
3907                                                                  Logical rank bits for read/write/activate operation during the data buffer
3908                                                                  training. */
3909         uint64_t prank                 : 2;  /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
3910         uint64_t activate              : 1;  /**< [ 40: 40](R/W) Reserved.
3911                                                                  Internal:
3912                                                                  Enables the activate command during the data buffer training sequence. */
3913         uint64_t write_ena             : 1;  /**< [ 41: 41](R/W) Reserved.
3914                                                                  Internal:
3915                                                                  Enables the write operation. This is mainly used to accomplish the MWD
3916                                                                  training sequence of the data buffer.
3917                                                                  LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
3918         uint64_t read_cmd_count        : 5;  /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
3919                                                                  Internal:
3920                                                                  This can be set to zero in which case the sequence does not send any
3921                                                                  Read commands to accommodate for the DWL training mode. */
3922         uint64_t read_dq_count         : 7;  /**< [ 53: 47](R/W) Reserved.
3923                                                                  Internal:
3924                                                                  The number of cycles until a pulse is issued to sample the DQ into the
3925                                                                  MPR register. These bits control the timing of when to sample the data
3926                                                                  buffer training result. */
3927         uint64_t rw_train              : 1;  /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
3928                                                                  memory array using the burst pattern that is set in
3929                                                                  LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
3930                                                                  LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
3931 
3932                                                                  This burst pattern gets shifted by one byte at every cycle.
3933                                                                  The sequence will then do the reads to the same location and compare
3934                                                                  the data coming back with this pattern.
3935                                                                  The bit-wise comparison result gets stored in
3936                                                                  LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
3937         uint64_t tccd_sel              : 1;  /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
3938                                                                  back-to-back read commands. Otherwise it will space out back-to-back
3939                                                                  reads with a default value of 4 cycles.
3940 
3941                                                                  While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
3942                                                                  Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
3943                                                                  this bit to be set. */
3944         uint64_t db_sel                : 1;  /**< [ 56: 56](R/W) Reserved.
3945                                                                  Internal:
3946                                                                  Used when running host interface write leveling.
3947                                                                  0 = selects DIMM0's data buffer.
3948                                                                  1 = selects DIMM1's data buffer. */
3949         uint64_t db_output_impedance   : 3;  /**< [ 59: 57](R/W) Reserved.
3950                                                                  Internal:
3951                                                                  Host interface DQ/DQS output driver impedance control.
3952                                                                  This is the default value used during host interface write leveling in LRDIMM
3953                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
3954                                                                  0x0 = RZQ/6 (40 ohm).
3955                                                                  0x1 = RZQ/7 (34 ohm).
3956                                                                  0x2 = RZQ/5 (48 ohm).
3957                                                                  0x3-0x7 = Reserved. */
3958         uint64_t cmd_count_ext         : 2;  /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
3959                                                                  up to 128 read and write commands. */
3960         uint64_t lfsr_pattern_sel      : 1;  /**< [ 62: 62](R/W) If set high, the sequence uses 32-bit LFSR pattern when generating data sequence
3961                                                                  during the General R/W training (LMC()_DBTRAIN_CTL[RW_TRAIN] == 1).
3962 
3963                                                                  The LFSR polynomials are programmed by LMC()_CHAR_CTL[PRBS]. */
3964         uint64_t reserved_63           : 1;
3965 #endif /* Word 0 - End */
3966     } cn9;
3967     /* struct bdk_lmcx_dbtrain_ctl_cn9 cn81xx; */
3968     struct bdk_lmcx_dbtrain_ctl_cn83xx
3969     {
3970 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
3971         uint64_t reserved_63           : 1;
3972         uint64_t lfsr_pattern_sel      : 1;  /**< [ 62: 62](R/W) If set high, the sequence uses 32-bit LFSR pattern when generating data sequence
3973                                                                  during the General R/W training (LMC()_DBTRAIN_CTL[RW_TRAIN] == 1).
3974 
3975                                                                  The LFSR polynomials are programmed by LMC()_CHAR_CTL[PRBS]. */
3976         uint64_t cmd_count_ext         : 2;  /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
3977                                                                  up to 128 read and write commands. */
3978         uint64_t db_output_impedance   : 3;  /**< [ 59: 57](R/W) Reserved.
3979                                                                  Internal:
3980                                                                  Host interface DQ/DQS output driver impedance control.
3981                                                                  This is the default value used during host interface write leveling in LRDIMM
3982                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
3983                                                                  0x0 = RZQ/6 (40 ohm).
3984                                                                  0x1 = RZQ/7 (34 ohm).
3985                                                                  0x2 = RZQ/5 (48 ohm).
3986                                                                  0x3-0x7 = Reserved. */
3987         uint64_t db_sel                : 1;  /**< [ 56: 56](R/W) Reserved.
3988                                                                  Internal:
3989                                                                  Used when running host interface write leveling.
3990                                                                  0 = selects DIMM0's data buffer.
3991                                                                  1 = selects DIMM1's data buffer. */
3992         uint64_t tccd_sel              : 1;  /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
3993                                                                  back-to-back read commands. Otherwise it will space out back-to-back
3994                                                                  reads with a default value of 4 cycles.
3995 
3996                                                                  While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
3997                                                                  Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
3998                                                                  this bit to be set. */
3999         uint64_t rw_train              : 1;  /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
4000                                                                  memory array using the burst pattern that is set in
4001                                                                  LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
4002                                                                  LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
4003 
4004                                                                  This burst pattern gets shifted by one byte at every cycle.
4005                                                                  The sequence will then do the reads to the same location and compare
4006                                                                  the data coming back with this pattern.
4007                                                                  The bit-wise comparison result gets stored in
4008                                                                  LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
4009         uint64_t read_dq_count         : 7;  /**< [ 53: 47](R/W) Reserved.
4010                                                                  Internal:
4011                                                                  The number of cycles until a pulse is issued to sample the DQ into the
4012                                                                  MPR register. These bits control the timing of when to sample the data
4013                                                                  buffer training result. */
4014         uint64_t read_cmd_count        : 5;  /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
4015                                                                  Internal:
4016                                                                  This can be set to zero in which case the sequence does not send any
4017                                                                  Read commands to accommodate for the DWL training mode. */
4018         uint64_t write_ena             : 1;  /**< [ 41: 41](R/W) Reserved.
4019                                                                  Internal:
4020                                                                  Enables the write operation. This is mainly used to accomplish the MWD
4021                                                                  training sequence of the data buffer.
4022                                                                  LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
4023         uint64_t activate              : 1;  /**< [ 40: 40](R/W) Reserved.
4024                                                                  Internal:
4025                                                                  Enables the activate command during the data buffer training sequence. */
4026         uint64_t prank                 : 2;  /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
4027         uint64_t lrank                 : 3;  /**< [ 37: 35](R/W) Reserved.
4028                                                                  Internal:
4029                                                                  Logical rank bits for read/write/activate operation during the data buffer
4030                                                                  training. */
4031         uint64_t row_a                 : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
4032         uint64_t bg                    : 2;  /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
4033         uint64_t ba                    : 2;  /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
4034         uint64_t column_a              : 13; /**< [ 12:  0](R/W) Column address for the read/write operation. */
4035 #else /* Word 0 - Little Endian */
4036         uint64_t column_a              : 13; /**< [ 12:  0](R/W) Column address for the read/write operation. */
4037         uint64_t ba                    : 2;  /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
4038         uint64_t bg                    : 2;  /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
4039         uint64_t row_a                 : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
4040         uint64_t lrank                 : 3;  /**< [ 37: 35](R/W) Reserved.
4041                                                                  Internal:
4042                                                                  Logical rank bits for read/write/activate operation during the data buffer
4043                                                                  training. */
4044         uint64_t prank                 : 2;  /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
4045         uint64_t activate              : 1;  /**< [ 40: 40](R/W) Reserved.
4046                                                                  Internal:
4047                                                                  Enables the activate command during the data buffer training sequence. */
4048         uint64_t write_ena             : 1;  /**< [ 41: 41](R/W) Reserved.
4049                                                                  Internal:
4050                                                                  Enables the write operation. This is mainly used to accomplish the MWD
4051                                                                  training sequence of the data buffer.
4052                                                                  LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
4053         uint64_t read_cmd_count        : 5;  /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
4054                                                                  Internal:
4055                                                                  This can be set to zero in which case the sequence does not send any
4056                                                                  Read commands to accommodate for the DWL training mode. */
4057         uint64_t read_dq_count         : 7;  /**< [ 53: 47](R/W) Reserved.
4058                                                                  Internal:
4059                                                                  The number of cycles until a pulse is issued to sample the DQ into the
4060                                                                  MPR register. These bits control the timing of when to sample the data
4061                                                                  buffer training result. */
4062         uint64_t rw_train              : 1;  /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
4063                                                                  memory array using the burst pattern that is set in
4064                                                                  LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
4065                                                                  LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
4066 
4067                                                                  This burst pattern gets shifted by one byte at every cycle.
4068                                                                  The sequence will then do the reads to the same location and compare
4069                                                                  the data coming back with this pattern.
4070                                                                  The bit-wise comparison result gets stored in
4071                                                                  LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
4072         uint64_t tccd_sel              : 1;  /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
4073                                                                  back-to-back read commands. Otherwise it will space out back-to-back
4074                                                                  reads with a default value of 4 cycles.
4075 
4076                                                                  While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
4077                                                                  Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
4078                                                                  this bit to be set. */
4079         uint64_t db_sel                : 1;  /**< [ 56: 56](R/W) Reserved.
4080                                                                  Internal:
4081                                                                  Used when running host interface write leveling.
4082                                                                  0 = selects DIMM0's data buffer.
4083                                                                  1 = selects DIMM1's data buffer. */
4084         uint64_t db_output_impedance   : 3;  /**< [ 59: 57](R/W) Reserved.
4085                                                                  Internal:
4086                                                                  Host interface DQ/DQS output driver impedance control.
4087                                                                  This is the default value used during host interface write leveling in LRDIMM
4088                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
4089                                                                  0x0 = RZQ/6 (40 ohm).
4090                                                                  0x1 = RZQ/7 (34 ohm).
4091                                                                  0x2 = RZQ/5 (48 ohm).
4092                                                                  0x3-0x7 = Reserved. */
4093         uint64_t cmd_count_ext         : 2;  /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
4094                                                                  up to 128 read and write commands. */
4095         uint64_t lfsr_pattern_sel      : 1;  /**< [ 62: 62](R/W) If set high, the sequence uses 32-bit LFSR pattern when generating data sequence
4096                                                                  during the General R/W training (LMC()_DBTRAIN_CTL[RW_TRAIN] == 1).
4097 
4098                                                                  The LFSR polynomials are programmed by LMC()_CHAR_CTL[PRBS]. */
4099         uint64_t reserved_63           : 1;
4100 #endif /* Word 0 - End */
4101     } cn83xx;
4102     struct bdk_lmcx_dbtrain_ctl_cn88xxp2
4103     {
4104 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4105         uint64_t reserved_63           : 1;
4106         uint64_t lfsr_pattern_sel      : 1;  /**< [ 62: 62](RO) Reserved. */
4107         uint64_t cmd_count_ext         : 2;  /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
4108                                                                  up to 128 read and write commands. */
4109         uint64_t db_output_impedance   : 3;  /**< [ 59: 57](R/W) Reserved.
4110                                                                  Internal:
4111                                                                  Host interface DQ/DQS output driver impedance control.
4112                                                                  This is the default value used during host interface write leveling in LRDIMM
4113                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
4114                                                                  0x0 = RZQ/6 (40 ohm).
4115                                                                  0x1 = RZQ/7 (34 ohm).
4116                                                                  0x2 = RZQ/5 (48 ohm).
4117                                                                  0x3-0x7 = Reserved. */
4118         uint64_t db_sel                : 1;  /**< [ 56: 56](R/W) Reserved.
4119                                                                  Internal:
4120                                                                  Used when running host interface write leveling.
4121                                                                  0 = selects DIMM0's data buffer.
4122                                                                  1 = selects DIMM1's data buffer. */
4123         uint64_t tccd_sel              : 1;  /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
4124                                                                  back-to-back read commands. Otherwise it will space out back-to-back
4125                                                                  reads with a default value of 4 cycles.
4126 
4127                                                                  While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
4128                                                                  Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
4129                                                                  this bit to be set. */
4130         uint64_t rw_train              : 1;  /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
4131                                                                  memory array using the burst pattern that is set in
4132                                                                  LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
4133                                                                  LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
4134 
4135                                                                  This burst pattern gets shifted by one byte at every cycle.
4136                                                                  The sequence will then do the reads to the same location and compare
4137                                                                  the data coming back with this pattern.
4138                                                                  The bit-wise comparison result gets stored in
4139                                                                  LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
4140         uint64_t read_dq_count         : 7;  /**< [ 53: 47](R/W) Reserved.
4141                                                                  Internal:
4142                                                                  The number of cycles until a pulse is issued to sample the DQ into the
4143                                                                  MPR register. These bits control the timing of when to sample the data
4144                                                                  buffer training result. */
4145         uint64_t read_cmd_count        : 5;  /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
4146                                                                  Internal:
4147                                                                  This can be set to zero in which case the sequence does not send any
4148                                                                  Read commands to accommodate for the DWL training mode. */
4149         uint64_t write_ena             : 1;  /**< [ 41: 41](R/W) Reserved.
4150                                                                  Internal:
4151                                                                  Enables the write operation. This is mainly used to accomplish the MWD
4152                                                                  training sequence of the data buffer.
4153                                                                  LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
4154         uint64_t activate              : 1;  /**< [ 40: 40](R/W) Reserved.
4155                                                                  Internal:
4156                                                                  Enables the activate command during the data buffer training sequence. */
4157         uint64_t prank                 : 2;  /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
4158         uint64_t lrank                 : 3;  /**< [ 37: 35](R/W) Reserved.
4159                                                                  Internal:
4160                                                                  Logical rank bits for read/write/activate operation during the data buffer
4161                                                                  training. */
4162         uint64_t row_a                 : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
4163         uint64_t bg                    : 2;  /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
4164         uint64_t ba                    : 2;  /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
4165         uint64_t column_a              : 13; /**< [ 12:  0](R/W) Column address for the read/write operation. */
4166 #else /* Word 0 - Little Endian */
4167         uint64_t column_a              : 13; /**< [ 12:  0](R/W) Column address for the read/write operation. */
4168         uint64_t ba                    : 2;  /**< [ 14: 13](R/W) The bank address that the R/W commands are directed to. */
4169         uint64_t bg                    : 2;  /**< [ 16: 15](R/W) The bank group that the R/W commands are directed to. */
4170         uint64_t row_a                 : 18; /**< [ 34: 17](R/W) The row address for the activate command. */
4171         uint64_t lrank                 : 3;  /**< [ 37: 35](R/W) Reserved.
4172                                                                  Internal:
4173                                                                  Logical rank bits for read/write/activate operation during the data buffer
4174                                                                  training. */
4175         uint64_t prank                 : 2;  /**< [ 39: 38](R/W) Physical rank bits for read/write/activate operation. */
4176         uint64_t activate              : 1;  /**< [ 40: 40](R/W) Reserved.
4177                                                                  Internal:
4178                                                                  Enables the activate command during the data buffer training sequence. */
4179         uint64_t write_ena             : 1;  /**< [ 41: 41](R/W) Reserved.
4180                                                                  Internal:
4181                                                                  Enables the write operation. This is mainly used to accomplish the MWD
4182                                                                  training sequence of the data buffer.
4183                                                                  LMC()_DBTRAIN_CTL[ACTIVATE] must be set to one for this to take effect. */
4184         uint64_t read_cmd_count        : 5;  /**< [ 46: 42](R/W) The number of read and write commands to be sent during R/W training.
4185                                                                  Internal:
4186                                                                  This can be set to zero in which case the sequence does not send any
4187                                                                  Read commands to accommodate for the DWL training mode. */
4188         uint64_t read_dq_count         : 7;  /**< [ 53: 47](R/W) Reserved.
4189                                                                  Internal:
4190                                                                  The number of cycles until a pulse is issued to sample the DQ into the
4191                                                                  MPR register. These bits control the timing of when to sample the data
4192                                                                  buffer training result. */
4193         uint64_t rw_train              : 1;  /**< [ 54: 54](R/W) When set, the DBTRAIN sequence will perform a write to the DRAM
4194                                                                  memory array using the burst pattern that is set in
4195                                                                  LMC()_GENERAL_PURPOSE0[DATA]\<61:0\>, LMC()_GENERAL_PURPOSE1[DATA]\<61:0\> and
4196                                                                  LMC()_GENERAL_PURPOSE2[DATA]\<15:0\>.
4197 
4198                                                                  This burst pattern gets shifted by one byte at every cycle.
4199                                                                  The sequence will then do the reads to the same location and compare
4200                                                                  the data coming back with this pattern.
4201                                                                  The bit-wise comparison result gets stored in
4202                                                                  LMC()_MPR_DATA0[MPR_DATA]\<63:0\> and LMC()_MPR_DATA1[MPR_DATA]\<7:0\>. */
4203         uint64_t tccd_sel              : 1;  /**< [ 55: 55](R/W) When set, the sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to space out
4204                                                                  back-to-back read commands. Otherwise it will space out back-to-back
4205                                                                  reads with a default value of 4 cycles.
4206 
4207                                                                  While in DRAM MPR mode, reads from page 0 may use tCCD_S or tCCD_L.
4208                                                                  Reads from pages 1, 2 or 3 however must use tCCD_L, thereby requiring
4209                                                                  this bit to be set. */
4210         uint64_t db_sel                : 1;  /**< [ 56: 56](R/W) Reserved.
4211                                                                  Internal:
4212                                                                  Used when running host interface write leveling.
4213                                                                  0 = selects DIMM0's data buffer.
4214                                                                  1 = selects DIMM1's data buffer. */
4215         uint64_t db_output_impedance   : 3;  /**< [ 59: 57](R/W) Reserved.
4216                                                                  Internal:
4217                                                                  Host interface DQ/DQS output driver impedance control.
4218                                                                  This is the default value used during host interface write leveling in an LRDIMM
4219                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
4220                                                                  0x0 = RZQ/6 (40 ohm).
4221                                                                  0x1 = RZQ/7 (34 ohm).
4222                                                                  0x2 = RZQ/5 (48 ohm).
4223                                                                  0x3-0x7 = Reserved. */
4224         uint64_t cmd_count_ext         : 2;  /**< [ 61: 60](R/W) Extension bits to the field LMC()_DBTRAIN_CTL[READ_CMD_COUNT]. This enables
4225                                                                  up to 128 read and write commands. */
4226         uint64_t lfsr_pattern_sel      : 1;  /**< [ 62: 62](RO) Reserved. */
4227         uint64_t reserved_63           : 1;
4228 #endif /* Word 0 - End */
4229     } cn88xxp2;
4230 };
4231 typedef union bdk_lmcx_dbtrain_ctl bdk_lmcx_dbtrain_ctl_t;
4232 
4233 static inline uint64_t BDK_LMCX_DBTRAIN_CTL(unsigned long a) __attribute__ ((pure, always_inline));
4234 static inline uint64_t BDK_LMCX_DBTRAIN_CTL(unsigned long a)
4235 {
4236     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
4237         return 0x87e0880003f8ll + 0x1000000ll * ((a) & 0x0);
4238     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
4239         return 0x87e0880003f8ll + 0x1000000ll * ((a) & 0x1);
4240     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
4241         return 0x87e0880003f8ll + 0x1000000ll * ((a) & 0x3);
4242     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
4243         return 0x87e0880003f8ll + 0x1000000ll * ((a) & 0x3);
4244     __bdk_csr_fatal("LMCX_DBTRAIN_CTL", 1, a, 0, 0, 0);
4245 }
4246 
4247 #define typedef_BDK_LMCX_DBTRAIN_CTL(a) bdk_lmcx_dbtrain_ctl_t
4248 #define bustype_BDK_LMCX_DBTRAIN_CTL(a) BDK_CSR_TYPE_RSL
4249 #define basename_BDK_LMCX_DBTRAIN_CTL(a) "LMCX_DBTRAIN_CTL"
4250 #define device_bar_BDK_LMCX_DBTRAIN_CTL(a) 0x0 /* PF_BAR0 */
4251 #define busnum_BDK_LMCX_DBTRAIN_CTL(a) (a)
4252 #define arguments_BDK_LMCX_DBTRAIN_CTL(a) (a),-1,-1,-1
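
/*
 * Example (illustrative sketch only, not part of the generated definitions):
 * programming LMC0's data-buffer training control for a basic read/write
 * training pass, based on the field descriptions above. The generic BDK CSR
 * helpers (BDK_CSR_MODIFY(), bdk_numa_local()) and the common ".s" field view
 * are assumed to be available in the build; they are not provided by this
 * header.
 *
 *   bdk_node_t node = bdk_numa_local();    // target node (assumed helper)
 *
 *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DBTRAIN_CTL(0),
 *       c.s.ba             = 0;            // bank address to target
 *       c.s.bg             = 0;            // bank group to target
 *       c.s.row_a          = 0x10;         // row used by the activate command
 *       c.s.prank          = 0;            // physical rank under training
 *       c.s.read_cmd_count = 0x1f;         // read/write commands to send
 *       c.s.rw_train       = 1);           // write pattern, read back, compare
 *
 * With [RW_TRAIN] set, the burst pattern comes from
 * LMC()_GENERAL_PURPOSE0..2[DATA] and the bit-wise comparison results land in
 * LMC()_MPR_DATA0[MPR_DATA] and LMC()_MPR_DATA1[MPR_DATA], as noted above.
 */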
4253 
4254 /**
4255  * Register (RSL) lmc#_dclk_cnt
4256  *
4257  * LMC System-Memory-Clock Counter Register
4258  */
4259 union bdk_lmcx_dclk_cnt
4260 {
4261     uint64_t u;
4262     struct bdk_lmcx_dclk_cnt_s
4263     {
4264 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4265         uint64_t dclkcnt               : 64; /**< [ 63:  0](RO/H) Performance counter. A 64-bit counter that increments every CK cycle. */
4266 #else /* Word 0 - Little Endian */
4267         uint64_t dclkcnt               : 64; /**< [ 63:  0](RO/H) Performance counter. A 64-bit counter that increments every CK cycle. */
4268 #endif /* Word 0 - End */
4269     } s;
4270     /* struct bdk_lmcx_dclk_cnt_s cn; */
4271 };
4272 typedef union bdk_lmcx_dclk_cnt bdk_lmcx_dclk_cnt_t;
4273 
4274 static inline uint64_t BDK_LMCX_DCLK_CNT(unsigned long a) __attribute__ ((pure, always_inline));
4275 static inline uint64_t BDK_LMCX_DCLK_CNT(unsigned long a)
4276 {
4277     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
4278         return 0x87e0880001e0ll + 0x1000000ll * ((a) & 0x0);
4279     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
4280         return 0x87e0880001e0ll + 0x1000000ll * ((a) & 0x1);
4281     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
4282         return 0x87e0880001e0ll + 0x1000000ll * ((a) & 0x3);
4283     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
4284         return 0x87e0880001e0ll + 0x1000000ll * ((a) & 0x3);
4285     __bdk_csr_fatal("LMCX_DCLK_CNT", 1, a, 0, 0, 0);
4286 }
4287 
4288 #define typedef_BDK_LMCX_DCLK_CNT(a) bdk_lmcx_dclk_cnt_t
4289 #define bustype_BDK_LMCX_DCLK_CNT(a) BDK_CSR_TYPE_RSL
4290 #define basename_BDK_LMCX_DCLK_CNT(a) "LMCX_DCLK_CNT"
4291 #define device_bar_BDK_LMCX_DCLK_CNT(a) 0x0 /* PF_BAR0 */
4292 #define busnum_BDK_LMCX_DCLK_CNT(a) (a)
4293 #define arguments_BDK_LMCX_DCLK_CNT(a) (a),-1,-1,-1
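
/*
 * Example (illustrative sketch only): estimating the LMC CK frequency by
 * sampling this free-running counter over a known interval. The generic BDK
 * helpers BDK_CSR_READ() and bdk_wait_usec() are assumed to be available in
 * the build; they are not provided by this header.
 *
 *   uint64_t before = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(0));
 *   bdk_wait_usec(1000);                        // sample over ~1 ms
 *   uint64_t after  = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(0));
 *   uint64_t ck_hz  = (after - before) * 1000;  // CK cycles per second
 */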
4294 
4295 /**
4296  * Register (RSL) lmc#_ddr4_dimm_ctl
4297  *
4298  * LMC DIMM Control Register
4299  * Bits 0-21 of this register are used only when LMC()_CONTROL[RDIMM_ENA] = 1.
4300  *
4301  * During an RCW initialization sequence, bits 0-21 control LMC's write
4302  * operations to the extended DDR4 control words in the JEDEC standard
4303  * registering clock driver on an RDIMM.
4304  *
4305  * Internal:
4306  * Bits 22-27 are used only when LMC()_CONFIG[LRDIMM_ENA] = 1 AND
4307  * LMC()_MR_MPR_CTL[MR_WR_PBA_ENABLE] = 1.
4308  *
4309  * During PBA mode of an MRW sequence, bits 22-27 control the Buffer Configuration
4310  * Control Word F0BC1x settings during the BCW write.
4311  */
4312 union bdk_lmcx_ddr4_dimm_ctl
4313 {
4314     uint64_t u;
4315     struct bdk_lmcx_ddr4_dimm_ctl_s
4316     {
4317 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4318         uint64_t reserved_28_63        : 36;
4319         uint64_t rank_timing_enable    : 1;  /**< [ 27: 27](R/W) Reserved.
4320                                                                  Internal:
4321                                                                  Package Rank Timing Alignment Enable bit for the DDR4 LRDIMM Buffer Configuration Control
4322                                                                  Word F0BC1x DA[7]. Used during PBA BCW Write through the MRW sequence. */
4323         uint64_t bodt_trans_mode       : 1;  /**< [ 26: 26](R/W) Reserved.
4324                                                                  Internal:
4325                                                                  BODT input handling in Transparent Mode for the DDR4 LRDIMM Buffer Configuration Control
4326                                                                  Word F0BC1x. Used during PBA BCW Write through the MRW sequence. */
4327         uint64_t trans_mode_ena        : 1;  /**< [ 25: 25](R/W) Reserved.
4328                                                                  Internal:
4329                                                                  Transparent Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
4330                                                                  F0BC1x DA[5]. Used during PBA BCW Write through the MRW sequence. */
4331         uint64_t read_preamble_mode    : 1;  /**< [ 24: 24](R/W) Reserved.
4332                                                                  Internal:
4333                                                                  Read Preamble Training Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
4334                                                                  F0BC1x DA[4]. Used during PBA BCW Write through the MRW sequence. */
4335         uint64_t buff_config_da3       : 1;  /**< [ 23: 23](R/W) Reserved.
4336                                                                  Internal:
4337                                                                  Reserved setting value in F0BC1x DA3. Used during PBA BCW Write through the MRW sequence. */
4338         uint64_t mpr_over_ena          : 1;  /**< [ 22: 22](R/W) Reserved.
4339                                                                  Internal:
4340                                                                  MPR Override Mode Enable bit for the DDR4 LRDIMM Buffer Configuration Control Word
4341                                                                  F0BC1x DA[1]. Used during PBA BCW Write through the MRW sequence. */
4342         uint64_t ddr4_dimm1_wmask      : 11; /**< [ 21: 11](R/W) Reserved.
4343                                                                  Internal:
4344                                                                  DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
4345         uint64_t ddr4_dimm0_wmask      : 11; /**< [ 10:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
4346 #else /* Word 0 - Little Endian */
4347         uint64_t ddr4_dimm0_wmask      : 11; /**< [ 10:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
4348         uint64_t ddr4_dimm1_wmask      : 11; /**< [ 21: 11](R/W) Reserved.
4349                                                                  Internal:
4350                                                                  DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
4351         uint64_t mpr_over_ena          : 1;  /**< [ 22: 22](R/W) Reserved.
4352                                                                  Internal:
4353                                                                  MPR Override Mode Enable bit for the DDR4 LRDIMM Buffer Configuration Control Word
4354                                                                  F0BC1x DA[1]. Used during PBA BCW Write through the MRW sequence. */
4355         uint64_t buff_config_da3       : 1;  /**< [ 23: 23](R/W) Reserved.
4356                                                                  Internal:
4357                                                                  Reserved setting value in F0BC1x DA3. Used during PBA BCW Write through the MRW sequence. */
4358         uint64_t read_preamble_mode    : 1;  /**< [ 24: 24](R/W) Reserved.
4359                                                                  Internal:
4360                                                                  Read Preamble Training Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
4361                                                                  F0BC1x DA[4]. Used during PBA BCW Write through the MRW sequence. */
4362         uint64_t trans_mode_ena        : 1;  /**< [ 25: 25](R/W) Reserved.
4363                                                                  Internal:
4364                                                                  Transparent Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
4365                                                                  F0BC1x DA[5]. Used during PBA BCW Write through the MRW sequence. */
4366         uint64_t bodt_trans_mode       : 1;  /**< [ 26: 26](R/W) Reserved.
4367                                                                  Internal:
4368                                                                  BODT input handling in Transparent Mode for the DDR4 LRDIMM Buffer Configuration Control
4369                                                                  Word F0BC1x. Used during PBA BCW Write through the MRW sequence. */
4370         uint64_t rank_timing_enable    : 1;  /**< [ 27: 27](R/W) Reserved.
4371                                                                  Internal:
4372                                                                  Package Rank Timing Alignment Enable bit for the DDR4 LRDIMM Buffer Configuration Control
4373                                                                  Word F0BC1x DA[7]. Used during PBA BCW Write through the MRW sequence. */
4374         uint64_t reserved_28_63        : 36;
4375 #endif /* Word 0 - End */
4376     } s;
4377     struct bdk_lmcx_ddr4_dimm_ctl_cn9
4378     {
4379 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4380         uint64_t reserved_28_63        : 36;
4381         uint64_t rank_timing_enable    : 1;  /**< [ 27: 27](R/W) Reserved.
4382                                                                  Internal:
4383                                                                  Package Rank Timing Alignment Enable bit for the DDR4 LRDIMM Buffer Configuration Control
4384                                                                  Word F0BC1x DA[7]. Used during PBA BCW Write through the MRW sequence. */
4385         uint64_t bodt_trans_mode       : 1;  /**< [ 26: 26](R/W) Reserved.
4386                                                                  Internal:
4387                                                                  BODT input handling in Transparent Mode for the DDR4 LRDIMM Buffer Configuration Control
4388                                                                  Word F0BC1x. Used during PBA BCW Write through the MRW sequence. */
4389         uint64_t trans_mode_ena        : 1;  /**< [ 25: 25](R/W) Reserved.
4390                                                                  Internal:
4391                                                                  Transparent Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
4392                                                                  F0BC1x DA[5]. Used during PBA BCW Write through the MRW sequence. */
4393         uint64_t read_preamble_mode    : 1;  /**< [ 24: 24](R/W) Reserved.
4394                                                                  Internal:
4395                                                                  Read Preamble Training Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
4396                                                                  F0BC1x DA[4]. Used during PBA BCW Write through the MRW sequence. */
4397         uint64_t buff_config_da3       : 1;  /**< [ 23: 23](R/W) Reserved.
4398                                                                  Internal:
4399                                                                  Reserved setting value in F0BC1x DA3. Used during PBA BCW Write through the MRW sequence. */
4400         uint64_t mpr_over_ena          : 1;  /**< [ 22: 22](R/W) Reserved.
4401                                                                  Internal:
4402                                                                  MPR Override Mode Enable bit for the DDR4 LRDIMM Buffer Configuration Control Word
4403                                                                  F0BC1x DA[1]. Used during PBA BCW Write through the MRW sequence. */
4404         uint64_t ddr4_dimm1_wmask      : 11; /**< [ 21: 11](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
4405         uint64_t ddr4_dimm0_wmask      : 11; /**< [ 10:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
4406 #else /* Word 0 - Little Endian */
4407         uint64_t ddr4_dimm0_wmask      : 11; /**< [ 10:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
4408         uint64_t ddr4_dimm1_wmask      : 11; /**< [ 21: 11](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
4409         uint64_t mpr_over_ena          : 1;  /**< [ 22: 22](R/W) Reserved.
4410                                                                  Internal:
4411                                                                  MPR Override Mode Enable bit for the DDR4 LRDIMM Buffer Configuration Control Word
4412                                                                  F0BC1x DA[1]. Used during PBA BCW Write through the MRW sequence. */
4413         uint64_t buff_config_da3       : 1;  /**< [ 23: 23](R/W) Reserved.
4414                                                                  Internal:
4415                                                                  Reserved setting value in F0BC1x DA3. Used during PBA BCW Write through the MRW sequence. */
4416         uint64_t read_preamble_mode    : 1;  /**< [ 24: 24](R/W) Reserved.
4417                                                                  Internal:
4418                                                                  Read Preamble Training Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
4419                                                                  F0BC1x DA[4]. Used during PBA BCW Write through the MRW sequence. */
4420         uint64_t trans_mode_ena        : 1;  /**< [ 25: 25](R/W) Reserved.
4421                                                                  Internal:
4422                                                                  Transparent Mode Enable bit for DDR4 LRDIMM Buffer Configuration Control Word
4423                                                                  F0BC1x DA[5]. Used during PBA BCW Write through the MRW sequence. */
4424         uint64_t bodt_trans_mode       : 1;  /**< [ 26: 26](R/W) Reserved.
4425                                                                  Internal:
4426                                                                  BODT input handling in Transparent Mode for the DDR4 LRDIMM Buffer Configuration Control
4427                                                                  Word F0BC1x. Used during PBA BCW Write through the MRW sequence. */
4428         uint64_t rank_timing_enable    : 1;  /**< [ 27: 27](R/W) Reserved.
4429                                                                  Internal:
4430                                                                  Package Rank Timing Alignment Enable bit for the DDR4 LRDIMM Buffer Configuration Control
4431                                                                  Word F0BC1x DA[7]. Used during PBA BCW Write through the MRW sequence. */
4432         uint64_t reserved_28_63        : 36;
4433 #endif /* Word 0 - End */
4434     } cn9;
4435     /* struct bdk_lmcx_ddr4_dimm_ctl_s cn81xx; */
4436     /* struct bdk_lmcx_ddr4_dimm_ctl_cn9 cn88xx; */
4437     /* struct bdk_lmcx_ddr4_dimm_ctl_cn9 cn83xx; */
4438 };
4439 typedef union bdk_lmcx_ddr4_dimm_ctl bdk_lmcx_ddr4_dimm_ctl_t;
4440 
4441 static inline uint64_t BDK_LMCX_DDR4_DIMM_CTL(unsigned long a) __attribute__ ((pure, always_inline));
4442 static inline uint64_t BDK_LMCX_DDR4_DIMM_CTL(unsigned long a)
4443 {
4444     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
4445         return 0x87e0880003f0ll + 0x1000000ll * ((a) & 0x0);
4446     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
4447         return 0x87e0880003f0ll + 0x1000000ll * ((a) & 0x1);
4448     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
4449         return 0x87e0880003f0ll + 0x1000000ll * ((a) & 0x3);
4450     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
4451         return 0x87e0880003f0ll + 0x1000000ll * ((a) & 0x3);
4452     __bdk_csr_fatal("LMCX_DDR4_DIMM_CTL", 1, a, 0, 0, 0);
4453 }
4454 
4455 #define typedef_BDK_LMCX_DDR4_DIMM_CTL(a) bdk_lmcx_ddr4_dimm_ctl_t
4456 #define bustype_BDK_LMCX_DDR4_DIMM_CTL(a) BDK_CSR_TYPE_RSL
4457 #define basename_BDK_LMCX_DDR4_DIMM_CTL(a) "LMCX_DDR4_DIMM_CTL"
4458 #define device_bar_BDK_LMCX_DDR4_DIMM_CTL(a) 0x0 /* PF_BAR0 */
4459 #define busnum_BDK_LMCX_DDR4_DIMM_CTL(a) (a)
4460 #define arguments_BDK_LMCX_DDR4_DIMM_CTL(a) (a),-1,-1,-1
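
/*
 * Example (illustrative sketch only): selecting which extended DDR4 control
 * words are written to DIMM0 during the RCW initialization sequence. Bit n of
 * [DDR4_DIMM0_WMASK] enables the write of the corresponding DIMM0.RCn word,
 * per the description above. The generic BDK_CSR_MODIFY() helper is assumed
 * to be available in the build; it is not provided by this header.
 *
 *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DDR4_DIMM_CTL(0),
 *       c.s.ddr4_dimm0_wmask = 0x7ff;   // write all eleven DIMM0 control words
 *       c.s.ddr4_dimm1_wmask = 0x000);  // leave DIMM1's control words alone
 */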
4461 
4462 /**
4463  * Register (RSL) lmc#_ddr_pll_ctl
4464  *
4465  * LMC DDR PLL Control Register
4466  * This register controls the DDR_CK frequency. For details, refer to CK speed programming. See
4467  * the LMC initialization sequence for the initialization steps.
4468  * DDR PLL bringup sequence (an illustrative sketch of these steps follows this register's definitions below):
4469  *
4470  * 1. Write CLKF, POSTDIV.
4471  *
4472  * 2. Wait 1 ref clock cycle (10ns).
4473  *
4474  * 3. Write 0 to PD, 1 to UPDATE.
4475  *
4476  * 4. Wait 500 ref clock cycles (5us).
4477  *
4478  * 5. Write 0 to PLL_RESET.
4479  *
4480  * 6. Wait 2000 ref clock cycles (20us).
4481  *
4482  * 7. Write 0x2 to PLL_SEL, 0 to PS_RESET. LMCs not bringing up the PLL
4483  *    need to write 0x2 to PLL_SEL to receive the phase-shifted PLL output.
4484  *
4485  * 8. Wait 2 ref clock cycles (20ns).
4486  *
4487  * 9. Write 1 to PHY_DCOK, wait 20us before bringing up the DDR interface.
4488  */
4489 union bdk_lmcx_ddr_pll_ctl
4490 {
4491     uint64_t u;
4492     struct bdk_lmcx_ddr_pll_ctl_s
4493     {
4494 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4495         uint64_t reserved_45_63        : 19;
4496         uint64_t dclk_alt_refclk_sel   : 1;  /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
4497         uint64_t reserved_18_43        : 26;
4498         uint64_t pll_bypass            : 1;  /**< [ 17: 17](R/W) Bypass PLL output with bypass_clk_n/p. */
4499         uint64_t postdiv               : 2;  /**< [ 16: 15](R/W) DDR postscalar divide ratio. Determines the LMC CK speed. See also [CLKF].
4500                                                                  0x0 =  2.
4501                                                                  0x1 =  4.
4502                                                                  0x2 =  8.
4503                                                                  0x3 = 16. */
4504         uint64_t pll_sel               : 2;  /**< [ 14: 13](R/W) PLL output select.
4505                                                                  0x0 = Off.
4506                                                                  0x1 = Runt.
4507                                                                  0x2 = PLL. */
4508         uint64_t update                : 1;  /**< [ 12: 12](WO) PLL programming update. This is a one-shot operation; it automatically returns
4509                                                                  to 0 after a write to 1. */
4510         uint64_t pd                    : 1;  /**< [ 11: 11](R/W) Powerdown PLL. */
4511         uint64_t ps_reset              : 1;  /**< [ 10: 10](R/W) Post scalar reset. */
4512         uint64_t pll_reset             : 1;  /**< [  9:  9](R/W) PLL reset. */
4513         uint64_t reserved_0_8          : 9;
4514 #else /* Word 0 - Little Endian */
4515         uint64_t reserved_0_8          : 9;
4516         uint64_t pll_reset             : 1;  /**< [  9:  9](R/W) PLL reset. */
4517         uint64_t ps_reset              : 1;  /**< [ 10: 10](R/W) Post scalar reset. */
4518         uint64_t pd                    : 1;  /**< [ 11: 11](R/W) Powerdown PLL. */
4519         uint64_t update                : 1;  /**< [ 12: 12](WO) PLL programming update. This is a one-shot operation; it automatically returns
4520                                                                  to 0 after a write to 1. */
4521         uint64_t pll_sel               : 2;  /**< [ 14: 13](R/W) PLL output select.
4522                                                                  0x0 = Off.
4523                                                                  0x1 = Runt.
4524                                                                  0x2 = PLL. */
4525         uint64_t postdiv               : 2;  /**< [ 16: 15](R/W) DDR postscalar divide ratio. Determines the LMC CK speed. See also [CLKF].
4526                                                                  0x0 =  2.
4527                                                                  0x1 =  4.
4528                                                                  0x2 =  8.
4529                                                                  0x3 = 16. */
4530         uint64_t pll_bypass            : 1;  /**< [ 17: 17](R/W) Bypass PLL output with bypass_clk_n/p. */
4531         uint64_t reserved_18_43        : 26;
4532         uint64_t dclk_alt_refclk_sel   : 1;  /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
4533         uint64_t reserved_45_63        : 19;
4534 #endif /* Word 0 - End */
4535     } s;
4536     struct bdk_lmcx_ddr_pll_ctl_cn9
4537     {
4538 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4539         uint64_t reserved_42_63        : 22;
4540         uint64_t phy_dcok              : 1;  /**< [ 41: 41](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
4541         uint64_t ddr4_mode             : 1;  /**< [ 40: 40](R/W) Reserved.
4542                                                                  Internal:
4543                                                                  FIXME REMOVE
4544                                                                  DDR4 mode select: 1 = DDR4, 0 = Reserved. */
4545         uint64_t pll_phase_sel         : 1;  /**< [ 39: 39](R/W) Phase select.
4546                                                                  0 = Select PLL Output clock phase 0, 120, and 240.
4547                                                                  1 = Select PLL Output clock phase 60, 180, and 300.
4548 
4549                                                                  Even LMCs should use 0; odd LMCs should use 1. */
4550         uint64_t rep                   : 1;  /**< [ 38: 38](R/W) Regamp internal setting. */
4551         uint64_t pll_ref_oct           : 1;  /**< [ 37: 37](R/W) Termination.
4552                                                                  0 = Disable 50 Ohm on chip termination.
4553                                                                  1 = Enable 50 Ohm on chip termination. */
4554         uint64_t pll_ref_hcsl          : 1;  /**< [ 36: 36](R/W) Reference termination.
4555                                                                  0 = disable HCSL reference clock termination.
4556                                                                  1 = enable HCSL reference clock termination when [PLL_REF_OCT] is 1. */
4557         uint64_t pll_ref_bypass        : 1;  /**< [ 35: 35](R/W) Bypass reference clock with bypass_clk_n/p. */
4558         uint64_t pll_diffamp           : 4;  /**< [ 34: 31](R/W) Diffamp bias current setting. */
4559         uint64_t cpamp                 : 1;  /**< [ 30: 30](R/W) Charge pump internal opamp setting. */
4560         uint64_t pll_cps               : 4;  /**< [ 29: 26](R/W) Charge pump current setting for Cs. */
4561         uint64_t pll_cpb               : 4;  /**< [ 25: 22](R/W) Charge pump current setting for Cb. */
4562         uint64_t bg_div16_en           : 1;  /**< [ 21: 21](R/W) Bandgap clock frequency.
4563                                                                  0 = Reference clock divided by 4.
4564                                                                  1 = Reference clock divided by 16. */
4565         uint64_t bg_clk_en             : 1;  /**< [ 20: 20](R/W) Bandgap chopping enable. */
4566         uint64_t prediv                : 2;  /**< [ 19: 18](R/W) Reference clock divider.
4567                                                                  0x0 = reference clock divides down by 1.
4568                                                                  0x1 = reference clock divides down by 1.
4569                                                                  0x2 = reference clock divides down by 2.
4570                                                                  0x3 = reference clock divides down by 3. */
4571         uint64_t pll_bypass            : 1;  /**< [ 17: 17](R/W) Bypass PLL output with bypass_clk_n/p. */
4572         uint64_t postdiv               : 2;  /**< [ 16: 15](R/W) DDR postscalar divide ratio. Determines the LMC CK speed. See also [CLKF].
4573                                                                  0x0 =  2.
4574                                                                  0x1 =  4.
4575                                                                  0x2 =  8.
4576                                                                  0x3 = 16. */
4577         uint64_t pll_sel               : 2;  /**< [ 14: 13](R/W) PLL output select.
4578                                                                  0x0 = Off.
4579                                                                  0x1 = Runt.
4580                                                                  0x2 = PLL. */
4581         uint64_t update                : 1;  /**< [ 12: 12](WO) PLL programming update. This is a one-shot operation; it automatically returns
4582                                                                  to 0 after a write to 1. */
4583         uint64_t pd                    : 1;  /**< [ 11: 11](R/W) Powerdown PLL. */
4584         uint64_t ps_reset              : 1;  /**< [ 10: 10](R/W) Post scalar reset. */
4585         uint64_t pll_reset             : 1;  /**< [  9:  9](R/W) PLL reset. */
4586         uint64_t clkf                  : 9;  /**< [  8:  0](R/W) Multiply reference by [CLKF]. 96 \<= [CLKF] \<= 172. LMC PLL frequency = 33.33 * [CLKF].
4587                                                                  min = 3.2 GHz, max = 5.7 GHz.
4588 
4589                                                                  Typical settings:
4590                                                                    800 MHz: CLKF = 0x60  (96), POSTDIV = 0x1 (4).
4591                                                                    933 MHz: CLKF = 0x70 (112), POSTDIV = 0x1 (4).
4592                                                                    1067 MHz: CLKF = 0x80 (128), POSTDIV = 0x1 (4).
4593                                                                    1200 MHz: CLKF = 0x90 (144), POSTDIV = 0x1 (4).
4594                                                                    1333 MHz: CLKF = 0xA0 (160), POSTDIV = 0x1 (4). */
4595 #else /* Word 0 - Little Endian */
4596         uint64_t clkf                  : 9;  /**< [  8:  0](R/W) Multiply reference by [CLKF]. 96 \<= [CLKF] \<= 172. LMC PLL frequency = 33.33 * [CLKF].
4597                                                                  min = 3.2 GHz, max = 5.7 GHz.
4598 
4599                                                                  Typical settings:
4600                                                                    800 MHz: CLKF = 0x60  (96), POSTDIV = 0x1 (4).
4601                                                                    933 MHz: CLKF = 0x70 (112), POSTDIV = 0x1 (4).
4602                                                                    1067 MHz: CLKF = 0x80 (128), POSTDIV = 0x1 (4).
4603                                                                    1200 MHz: CLKF = 0x90 (144), POSTDIV = 0x1 (4).
4604                                                                    1333 MHz: CLKF = 0xA0 (160), POSTDIV = 0x1 (4). */
4605         uint64_t pll_reset             : 1;  /**< [  9:  9](R/W) PLL reset. */
4606         uint64_t ps_reset              : 1;  /**< [ 10: 10](R/W) Post scalar reset. */
4607         uint64_t pd                    : 1;  /**< [ 11: 11](R/W) Powerdown PLL. */
4608         uint64_t update                : 1;  /**< [ 12: 12](WO) PLL programming update. This is a one-shot operation; it automatically returns
4609                                                                  to 0 after a write to 1. */
4610         uint64_t pll_sel               : 2;  /**< [ 14: 13](R/W) PLL output select.
4611                                                                  0x0 = Off.
4612                                                                  0x1 = Runt.
4613                                                                  0x2 = PLL. */
4614         uint64_t postdiv               : 2;  /**< [ 16: 15](R/W) DDR postscalar divide ratio. Determines the LMC CK speed. See also [CLKF].
4615                                                                  0x0 =  2.
4616                                                                  0x1 =  4.
4617                                                                  0x2 =  8.
4618                                                                  0x3 = 16. */
4619         uint64_t pll_bypass            : 1;  /**< [ 17: 17](R/W) Bypass PLL output with bypass_clk_n/p. */
4620         uint64_t prediv                : 2;  /**< [ 19: 18](R/W) Reference clock divider.
4621                                                                  0x0 = reference clock divides down by 1.
4622                                                                  0x1 = reference clock divides down by 1.
4623                                                                  0x2 = reference clock divides down by 2.
4624                                                                  0x3 = reference clock divides down by 3. */
4625         uint64_t bg_clk_en             : 1;  /**< [ 20: 20](R/W) Bandgap chopping enable. */
4626         uint64_t bg_div16_en           : 1;  /**< [ 21: 21](R/W) Bandgap clock frequency.
4627                                                                  0 = Reference clock divided by 4.
4628                                                                  1 = Reference clock divided by 16. */
4629         uint64_t pll_cpb               : 4;  /**< [ 25: 22](R/W) Charge pump current setting for Cb. */
4630         uint64_t pll_cps               : 4;  /**< [ 29: 26](R/W) Charge pump current setting for Cs. */
4631         uint64_t cpamp                 : 1;  /**< [ 30: 30](R/W) Charge pump internal opamp setting. */
4632         uint64_t pll_diffamp           : 4;  /**< [ 34: 31](R/W) Diffamp bias current setting. */
4633         uint64_t pll_ref_bypass        : 1;  /**< [ 35: 35](R/W) Bypass reference clock with bypass_clk_n/p. */
4634         uint64_t pll_ref_hcsl          : 1;  /**< [ 36: 36](R/W) Reference termination.
4635                                                                  0 = disable HCSL reference clock termination.
4636                                                                  1 = enable HCSL reference clock termination when [PLL_REF_OCT] is 1. */
4637         uint64_t pll_ref_oct           : 1;  /**< [ 37: 37](R/W) Termination.
4638                                                                  0 = Disable 50 Ohm on chip termination.
4639                                                                  1 = Enable 50 Ohm on chip termination. */
4640         uint64_t rep                   : 1;  /**< [ 38: 38](R/W) Regamp internal setting. */
4641         uint64_t pll_phase_sel         : 1;  /**< [ 39: 39](R/W) Phase select.
4642                                                                  0 = Select PLL Output clock phase 0, 120, and 240.
4643                                                                  1 = Select PLL Output clock phase 60, 180, and 300.
4644 
4645                                                                  Even LMCs should use 0; odd LMCs should use 1. */
4646         uint64_t ddr4_mode             : 1;  /**< [ 40: 40](R/W) Reserved.
4647                                                                  Internal:
4648                                                                  FIXME REMOVE
4649                                                                  DDR4 mode select: 1 = DDR4, 0 = Reserved. */
4650         uint64_t phy_dcok              : 1;  /**< [ 41: 41](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
4651         uint64_t reserved_42_63        : 22;
4652 #endif /* Word 0 - End */
4653     } cn9;
4654     struct bdk_lmcx_ddr_pll_ctl_cn81xx
4655     {
4656 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4657         uint64_t reserved_45_63        : 19;
4658         uint64_t dclk_alt_refclk_sel   : 1;  /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
4659         uint64_t bwadj                 : 12; /**< [ 43: 32](R/W) Bandwidth control for DCLK PLLs. */
4660         uint64_t dclk_invert           : 1;  /**< [ 31: 31](R/W) Invert DCLK that feeds LMC/DDR at the south side of the chip. */
4661         uint64_t phy_dcok              : 1;  /**< [ 30: 30](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
4662         uint64_t ddr4_mode             : 1;  /**< [ 29: 29](R/W) DDR4 mode select: 1 = DDR4, 0 = DDR3. */
4663         uint64_t pll_fbslip            : 1;  /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
4664         uint64_t pll_lock              : 1;  /**< [ 27: 27](RO/H) PLL LOCK indication. */
4665         uint64_t pll_rfslip            : 1;  /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
4666         uint64_t clkr                  : 2;  /**< [ 25: 24](R/W) PLL post-divider control. */
4667         uint64_t jtg_test_mode         : 1;  /**< [ 23: 23](R/W) Reserved; must be zero.
4668                                                                  Internal:
4669                                                                  JTAG test mode. Clock alignment between DCLK & REFCLK as
4670                                                                  well as FCLK & REFCLK can only be performed after the ddr_pll_divider_reset is deasserted.
4671                                                                  Software needs to wait at least 10 reference clock cycles after deasserting
4672                                                                  pll_divider_reset
4673                                                                  before asserting LMC()_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can take up
4674                                                                  to 160 microseconds) DCLK and FCLK can exhibit some high-frequency pulses. Therefore, all
4675                                                                  bring up activities in that clock domain need to be delayed (when the chip operates in
4676                                                                  jtg_test_mode) by about 160 microseconds to ensure that lock is achieved. */
4677         uint64_t ddr_div_reset         : 1;  /**< [ 22: 22](R/W) DDR postscalar divider reset. */
4678         uint64_t ddr_ps_en             : 4;  /**< [ 21: 18](R/W) DDR postscalar divide ratio. Determines the LMC CK speed.
4679                                                                  0x0 = divide LMC PLL by 1.
4680                                                                  0x1 = divide LMC PLL by 2.
4681                                                                  0x2 = divide LMC PLL by 3.
4682                                                                  0x3 = divide LMC PLL by 4.
4683                                                                  0x4 = divide LMC PLL by 5.
4684                                                                  0x5 = divide LMC PLL by 6.
4685                                                                  0x6 = divide LMC PLL by 7.
4686                                                                  0x7 = divide LMC PLL by 8.
4687                                                                  0x8 = divide LMC PLL by 10.
4688                                                                  0x9 = divide LMC PLL by 12.
4689                                                                  0xA = Reserved.
4690                                                                  0xB = Reserved.
4691                                                                  0xC = Reserved.
4692                                                                  0xD = Reserved.
4693                                                                  0xE = Reserved.
4694                                                                  0xF = Reserved.
4695 
4696                                                                  [DDR_PS_EN] is not used when [DDR_DIV_RESET] = 1. */
4697         uint64_t reserved_9_17         : 9;
4698         uint64_t clkf_ext              : 1;  /**< [  8:  8](R/W) A 1-bit extension to the [CLKF] register to support DDR4-2666. */
4699         uint64_t reset_n               : 1;  /**< [  7:  7](R/W) PLL reset */
4700         uint64_t clkf                  : 7;  /**< [  6:  0](R/W) Multiply reference by [CLKF]. 31 \<= [CLKF] \<= 99. LMC PLL frequency = 50 * [CLKF]. min =
4701                                                                  1.6
4702                                                                  GHz, max = 5 GHz. */
4703 #else /* Word 0 - Little Endian */
4704         uint64_t clkf                  : 7;  /**< [  6:  0](R/W) Multiply reference by [CLKF]. 31 \<= [CLKF] \<= 99. LMC PLL frequency = 50 * [CLKF]. min =
4705                                                                  1.6
4706                                                                  GHz, max = 5 GHz. */
4707         uint64_t reset_n               : 1;  /**< [  7:  7](R/W) PLL reset */
4708         uint64_t clkf_ext              : 1;  /**< [  8:  8](R/W) A 1-bit extension to the [CLKF] register to support DDR4-2666. */
4709         uint64_t reserved_9_17         : 9;
4710         uint64_t ddr_ps_en             : 4;  /**< [ 21: 18](R/W) DDR postscalar divide ratio. Determines the LMC CK speed.
4711                                                                  0x0 = divide LMC PLL by 1.
4712                                                                  0x1 = divide LMC PLL by 2.
4713                                                                  0x2 = divide LMC PLL by 3.
4714                                                                  0x3 = divide LMC PLL by 4.
4715                                                                  0x4 = divide LMC PLL by 5.
4716                                                                  0x5 = divide LMC PLL by 6.
4717                                                                  0x6 = divide LMC PLL by 7.
4718                                                                  0x7 = divide LMC PLL by 8.
4719                                                                  0x8 = divide LMC PLL by 10.
4720                                                                  0x9 = divide LMC PLL by 12.
4721                                                                  0xA = Reserved.
4722                                                                  0xB = Reserved.
4723                                                                  0xC = Reserved.
4724                                                                  0xD = Reserved.
4725                                                                  0xE = Reserved.
4726                                                                  0xF = Reserved.
4727 
4728                                                                  [DDR_PS_EN] is not used when [DDR_DIV_RESET] = 1. */
4729         uint64_t ddr_div_reset         : 1;  /**< [ 22: 22](R/W) DDR postscalar divider reset. */
4730         uint64_t jtg_test_mode         : 1;  /**< [ 23: 23](R/W) Reserved; must be zero.
4731                                                                  Internal:
4732                                                                  JTAG test mode. Clock alignment between DCLK & REFCLK as
4733                                                                  well as FCLK & REFCLK can only be performed after the ddr_pll_divider_reset is deasserted.
4734                                                                  Software needs to wait at least 10 reference clock cycles after deasserting
4735                                                                  pll_divider_reset
4736                                                                  before asserting LMC()_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can take up
4737                                                                  to 160 microseconds) DCLK and FCLK can exhibit some high-frequency pulses. Therefore, all
4738                                                                  bring up activities in that clock domain need to be delayed (when the chip operates in
4739                                                                  jtg_test_mode) by about 160 microseconds to ensure that lock is achieved. */
4740         uint64_t clkr                  : 2;  /**< [ 25: 24](R/W) PLL post-divider control. */
4741         uint64_t pll_rfslip            : 1;  /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
4742         uint64_t pll_lock              : 1;  /**< [ 27: 27](RO/H) PLL LOCK indication. */
4743         uint64_t pll_fbslip            : 1;  /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
4744         uint64_t ddr4_mode             : 1;  /**< [ 29: 29](R/W) DDR4 mode select: 1 = DDR4, 0 = DDR3. */
4745         uint64_t phy_dcok              : 1;  /**< [ 30: 30](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
4746         uint64_t dclk_invert           : 1;  /**< [ 31: 31](R/W) Invert DCLK that feeds LMC/DDR at the south side of the chip. */
4747         uint64_t bwadj                 : 12; /**< [ 43: 32](R/W) Bandwidth control for DCLK PLLs. */
4748         uint64_t dclk_alt_refclk_sel   : 1;  /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
4749         uint64_t reserved_45_63        : 19;
4750 #endif /* Word 0 - End */
4751     } cn81xx;
4752     /* struct bdk_lmcx_ddr_pll_ctl_cn81xx cn88xx; */
4753     struct bdk_lmcx_ddr_pll_ctl_cn83xx
4754     {
4755 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4756         uint64_t reserved_45_63        : 19;
4757         uint64_t dclk_alt_refclk_sel   : 1;  /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
4758         uint64_t bwadj                 : 12; /**< [ 43: 32](R/W) Bandwidth control for DCLK PLLs. */
4759         uint64_t dclk_invert           : 1;  /**< [ 31: 31](R/W) Invert dclk that feeds LMC/DDR at the south side of the chip. */
4760         uint64_t phy_dcok              : 1;  /**< [ 30: 30](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
4761         uint64_t ddr4_mode             : 1;  /**< [ 29: 29](R/W) DDR4 mode select: 1 = DDR4, 0 = DDR3. */
4762         uint64_t pll_fbslip            : 1;  /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
4763         uint64_t pll_lock              : 1;  /**< [ 27: 27](RO/H) PLL LOCK indication. */
4764         uint64_t pll_rfslip            : 1;  /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
4765         uint64_t clkr                  : 2;  /**< [ 25: 24](R/W) PLL post-divider control. */
4766         uint64_t jtg_test_mode         : 1;  /**< [ 23: 23](R/W) Reserved; must be zero.
4767                                                                  Internal:
4768                                                                  JTAG test mode. Clock alignment between DCLK & REFCLK as
4769                                                                  well as FCLK & REFCLK can only be performed after the ddr_pll_divider_reset is deasserted.
4770                                                                  Software needs to wait at least 10 reference clock cycles after deasserting
4771                                                                  pll_divider_reset
4772                                                                  before asserting LMC()_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can take up
4773                                                                  to 160 microseconds) DCLK and FCLK can exhibit some high-frequency pulses. Therefore, all
4774                                                                  bring up activities in that clock domain need to be delayed (when the chip operates in
4775                                                                  jtg_test_mode) by about 160 microseconds to ensure that lock is achieved. */
4776         uint64_t ddr_div_reset         : 1;  /**< [ 22: 22](R/W) DDR postscalar divider reset. */
4777         uint64_t ddr_ps_en             : 4;  /**< [ 21: 18](R/W) DDR postscalar divide ratio. Determines the LMC CK speed.
4778                                                                  0x0 = divide LMC PLL by 1.
4779                                                                  0x1 = divide LMC PLL by 2.
4780                                                                  0x2 = divide LMC PLL by 3.
4781                                                                  0x3 = divide LMC PLL by 4.
4782                                                                  0x4 = divide LMC PLL by 5.
4783                                                                  0x5 = divide LMC PLL by 6.
4784                                                                  0x6 = divide LMC PLL by 7.
4785                                                                  0x7 = divide LMC PLL by 8.
4786                                                                  0x8 = divide LMC PLL by 10.
4787                                                                  0x9 = divide LMC PLL by 12.
4788                                                                  0xA = Reserved.
4789                                                                  0xB = Reserved.
4790                                                                  0xC = Reserved.
4791                                                                  0xD = Reserved.
4792                                                                  0xE = Reserved.
4793                                                                  0xF = Reserved.
4794 
4795                                                                  [DDR_PS_EN] is not used when [DDR_DIV_RESET] = 1. */
4796         uint64_t reserved_9_17         : 9;
4797         uint64_t clkf_ext              : 1;  /**< [  8:  8](R/W) A 1-bit extension to the [CLKF] register to support DDR4-2666. */
4798         uint64_t reset_n               : 1;  /**< [  7:  7](R/W) PLL reset */
4799         uint64_t clkf                  : 7;  /**< [  6:  0](R/W) Multiply reference by [CLKF]. 31 \<= [CLKF] \<= 99. LMC PLL frequency = 50 * [CLKF]. min =
4800                                                                  1.6
4801                                                                  GHz, max = 5 GHz. */
4802 #else /* Word 0 - Little Endian */
4803         uint64_t clkf                  : 7;  /**< [  6:  0](R/W) Multiply reference by [CLKF]. 31 \<= [CLKF] \<= 99. LMC PLL frequency = 50 * [CLKF]. min =
4804                                                                  1.6
4805                                                                  GHz, max = 5 GHz. */
4806         uint64_t reset_n               : 1;  /**< [  7:  7](R/W) PLL reset */
4807         uint64_t clkf_ext              : 1;  /**< [  8:  8](R/W) A 1-bit extension to the [CLKF] register to support DDR4-2666. */
4808         uint64_t reserved_9_17         : 9;
4809         uint64_t ddr_ps_en             : 4;  /**< [ 21: 18](R/W) DDR postscalar divide ratio. Determines the LMC CK speed.
4810                                                                  0x0 = divide LMC PLL by 1.
4811                                                                  0x1 = divide LMC PLL by 2.
4812                                                                  0x2 = divide LMC PLL by 3.
4813                                                                  0x3 = divide LMC PLL by 4.
4814                                                                  0x4 = divide LMC PLL by 5.
4815                                                                  0x5 = divide LMC PLL by 6.
4816                                                                  0x6 = divide LMC PLL by 7.
4817                                                                  0x7 = divide LMC PLL by 8.
4818                                                                  0x8 = divide LMC PLL by 10.
4819                                                                  0x9 = divide LMC PLL by 12.
4820                                                                  0xA = Reserved.
4821                                                                  0xB = Reserved.
4822                                                                  0xC = Reserved.
4823                                                                  0xD = Reserved.
4824                                                                  0xE = Reserved.
4825                                                                  0xF = Reserved.
4826 
4827                                                                  [DDR_PS_EN] is not used when [DDR_DIV_RESET] = 1. */
4828         uint64_t ddr_div_reset         : 1;  /**< [ 22: 22](R/W) DDR postscalar divider reset. */
4829         uint64_t jtg_test_mode         : 1;  /**< [ 23: 23](R/W) Reserved; must be zero.
4830                                                                  Internal:
4831                                                                  JTAG test mode. Clock alignment between DCLK & REFCLK as
4832                                                                  well as FCLK & REFCLK can only be performed after the ddr_pll_divider_reset is deasserted.
4833                                                                  Software needs to wait at least 10 reference clock cycles after deasserting
4834                                                                  pll_divider_reset
4835                                                                  before asserting LMC()_DDR_PLL_CTL[JTG_TEST_MODE]. During alignment (which can take up
4836                                                                  to 160 microseconds) DCLK and FCLK can exhibit some high-frequency pulses. Therefore, all
4837                                                                  bring up activities in that clock domain need to be delayed (when the chip operates in
4838                                                                  jtg_test_mode) by about 160 microseconds to ensure that lock is achieved. */
4839         uint64_t clkr                  : 2;  /**< [ 25: 24](R/W) PLL post-divider control. */
4840         uint64_t pll_rfslip            : 1;  /**< [ 26: 26](RO/H) PLL RFSLIP indication. */
4841         uint64_t pll_lock              : 1;  /**< [ 27: 27](RO/H) PLL LOCK indication. */
4842         uint64_t pll_fbslip            : 1;  /**< [ 28: 28](RO/H) PLL FBSLIP indication. */
4843         uint64_t ddr4_mode             : 1;  /**< [ 29: 29](R/W) DDR4 mode select: 1 = DDR4, 0 = DDR3. */
4844         uint64_t phy_dcok              : 1;  /**< [ 30: 30](R/W) Set to power up PHY logic after setting LMC()_DDR_PLL_CTL[DDR4_MODE]. */
4845         uint64_t dclk_invert           : 1;  /**< [ 31: 31](R/W) Invert dclk that feeds LMC/DDR at the south side of the chip. */
4846         uint64_t bwadj                 : 12; /**< [ 43: 32](R/W) Bandwidth control for DCLK PLLs. */
4847         uint64_t dclk_alt_refclk_sel   : 1;  /**< [ 44: 44](R/W) Select alternate reference clock for DCLK PLL. */
4848         uint64_t reserved_45_63        : 19;
4849 #endif /* Word 0 - End */
4850     } cn83xx;
4851 };
4852 typedef union bdk_lmcx_ddr_pll_ctl bdk_lmcx_ddr_pll_ctl_t;
4853 
4854 static inline uint64_t BDK_LMCX_DDR_PLL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
4855 static inline uint64_t BDK_LMCX_DDR_PLL_CTL(unsigned long a)
4856 {
4857     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
4858         return 0x87e088000258ll + 0x1000000ll * ((a) & 0x0);
4859     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
4860         return 0x87e088000258ll + 0x1000000ll * ((a) & 0x1);
4861     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
4862         return 0x87e088000258ll + 0x1000000ll * ((a) & 0x3);
4863     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
4864         return 0x87e088000258ll + 0x1000000ll * ((a) & 0x3);
4865     __bdk_csr_fatal("LMCX_DDR_PLL_CTL", 1, a, 0, 0, 0);
4866 }
4867 
4868 #define typedef_BDK_LMCX_DDR_PLL_CTL(a) bdk_lmcx_ddr_pll_ctl_t
4869 #define bustype_BDK_LMCX_DDR_PLL_CTL(a) BDK_CSR_TYPE_RSL
4870 #define basename_BDK_LMCX_DDR_PLL_CTL(a) "LMCX_DDR_PLL_CTL"
4871 #define device_bar_BDK_LMCX_DDR_PLL_CTL(a) 0x0 /* PF_BAR0 */
4872 #define busnum_BDK_LMCX_DDR_PLL_CTL(a) (a)
4873 #define arguments_BDK_LMCX_DDR_PLL_CTL(a) (a),-1,-1,-1
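
/*
 * Illustrative usage sketch -- NOT part of the auto-generated definitions.
 * It shows how the bdk_lmcx_ddr_pll_ctl_t union and the BDK_LMCX_DDR_PLL_CTL()
 * address helper above are typically combined.  The example_* name and the
 * csr_read64 callback are hypothetical stand-ins; production BDK code reads
 * CSRs through its own accessors (e.g. those declared in bdk-csr.h).  The
 * PLL_LOCK position (bit 27) follows the CN83XX field layout documented above.
 */
static inline int example_lmc_ddr_pll_locked(unsigned long lmc,
                                             uint64_t (*csr_read64)(uint64_t addr))
{
    bdk_lmcx_ddr_pll_ctl_t ctl;
    ctl.u = csr_read64(BDK_LMCX_DDR_PLL_CTL(lmc)); /* raw 64-bit CSR image */
    return (int)((ctl.u >> 27) & 1);               /* [PLL_LOCK] (RO/H)    */
}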
4874 
4875 /**
4876  * Register (RSL) lmc#_dimm#_ddr4_params0
4877  *
4878  * LMC DIMM Parameters Registers 0
4879  * This register contains values to be programmed into the extra DDR4 control words in the
4880  * corresponding (registered) DIMM. These are control words RC1x through RC8x.
4881  */
4882 union bdk_lmcx_dimmx_ddr4_params0
4883 {
4884     uint64_t u;
4885     struct bdk_lmcx_dimmx_ddr4_params0_s
4886     {
4887 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4888         uint64_t rc8x                  : 8;  /**< [ 63: 56](R/W) RC8x. */
4889         uint64_t rc7x                  : 8;  /**< [ 55: 48](R/W) RC7x. */
4890         uint64_t rc6x                  : 8;  /**< [ 47: 40](R/W) RC6x. */
4891         uint64_t rc5x                  : 8;  /**< [ 39: 32](R/W) RC5x. */
4892         uint64_t rc4x                  : 8;  /**< [ 31: 24](R/W) RC4x. */
4893         uint64_t rc3x                  : 8;  /**< [ 23: 16](R/W) RC3x. */
4894         uint64_t rc2x                  : 8;  /**< [ 15:  8](R/W) RC2x. */
4895         uint64_t rc1x                  : 8;  /**< [  7:  0](R/W) RC1x. */
4896 #else /* Word 0 - Little Endian */
4897         uint64_t rc1x                  : 8;  /**< [  7:  0](R/W) RC1x. */
4898         uint64_t rc2x                  : 8;  /**< [ 15:  8](R/W) RC2x. */
4899         uint64_t rc3x                  : 8;  /**< [ 23: 16](R/W) RC3x. */
4900         uint64_t rc4x                  : 8;  /**< [ 31: 24](R/W) RC4x. */
4901         uint64_t rc5x                  : 8;  /**< [ 39: 32](R/W) RC5x. */
4902         uint64_t rc6x                  : 8;  /**< [ 47: 40](R/W) RC6x. */
4903         uint64_t rc7x                  : 8;  /**< [ 55: 48](R/W) RC7x. */
4904         uint64_t rc8x                  : 8;  /**< [ 63: 56](R/W) RC8x. */
4905 #endif /* Word 0 - End */
4906     } s;
4907     /* struct bdk_lmcx_dimmx_ddr4_params0_s cn; */
4908 };
4909 typedef union bdk_lmcx_dimmx_ddr4_params0 bdk_lmcx_dimmx_ddr4_params0_t;
4910 
4911 static inline uint64_t BDK_LMCX_DIMMX_DDR4_PARAMS0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
4912 static inline uint64_t BDK_LMCX_DIMMX_DDR4_PARAMS0(unsigned long a, unsigned long b)
4913 {
4914     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
4915         return 0x87e0880000d0ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
4916     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
4917         return 0x87e0880000d0ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
4918     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
4919         return 0x87e0880000d0ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
4920     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
4921         return 0x87e0880000d0ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
4922     __bdk_csr_fatal("LMCX_DIMMX_DDR4_PARAMS0", 2, a, b, 0, 0);
4923 }
4924 
4925 #define typedef_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) bdk_lmcx_dimmx_ddr4_params0_t
4926 #define bustype_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) BDK_CSR_TYPE_RSL
4927 #define basename_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) "LMCX_DIMMX_DDR4_PARAMS0"
4928 #define device_bar_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) 0x0 /* PF_BAR0 */
4929 #define busnum_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) (a)
4930 #define arguments_BDK_LMCX_DIMMX_DDR4_PARAMS0(a,b) (a),(b),-1,-1
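
/*
 * Illustrative sketch -- a hypothetical helper, not auto-generated content.
 * It packs the eight DDR4 control-word bytes RC1x..RC8x into the register
 * image expected at BDK_LMCX_DIMMX_DDR4_PARAMS0(lmc, dimm); rc[0] maps to
 * RC1x and rc[7] to RC8x, matching the field layout above.  Writing the
 * value is left to the platform's 64-bit CSR write helper.  The same pattern
 * applies to LMC()_DIMM()_DDR4_PARAMS1 (RC9x..RCBx) below.
 */
static inline uint64_t example_pack_ddr4_params0(const uint8_t rc[8])
{
    bdk_lmcx_dimmx_ddr4_params0_t p;
    p.u = 0;
    p.s.rc1x = rc[0];
    p.s.rc2x = rc[1];
    p.s.rc3x = rc[2];
    p.s.rc4x = rc[3];
    p.s.rc5x = rc[4];
    p.s.rc6x = rc[5];
    p.s.rc7x = rc[6];
    p.s.rc8x = rc[7];
    return p.u;
}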
4931 
4932 /**
4933  * Register (RSL) lmc#_dimm#_ddr4_params1
4934  *
4935  * LMC DIMM Parameters Registers 1
4936  * This register contains values to be programmed into the extra DDR4 control words in the
4937  * corresponding (registered) DIMM. These are control words RC9x through RCBx.
4938  */
4939 union bdk_lmcx_dimmx_ddr4_params1
4940 {
4941     uint64_t u;
4942     struct bdk_lmcx_dimmx_ddr4_params1_s
4943     {
4944 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4945         uint64_t reserved_24_63        : 40;
4946         uint64_t rcbx                  : 8;  /**< [ 23: 16](R/W) RCBx. */
4947         uint64_t rcax                  : 8;  /**< [ 15:  8](R/W) RCAx. */
4948         uint64_t rc9x                  : 8;  /**< [  7:  0](R/W) RC9x. */
4949 #else /* Word 0 - Little Endian */
4950         uint64_t rc9x                  : 8;  /**< [  7:  0](R/W) RC9x. */
4951         uint64_t rcax                  : 8;  /**< [ 15:  8](R/W) RCAx. */
4952         uint64_t rcbx                  : 8;  /**< [ 23: 16](R/W) RCBx. */
4953         uint64_t reserved_24_63        : 40;
4954 #endif /* Word 0 - End */
4955     } s;
4956     /* struct bdk_lmcx_dimmx_ddr4_params1_s cn; */
4957 };
4958 typedef union bdk_lmcx_dimmx_ddr4_params1 bdk_lmcx_dimmx_ddr4_params1_t;
4959 
4960 static inline uint64_t BDK_LMCX_DIMMX_DDR4_PARAMS1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
4961 static inline uint64_t BDK_LMCX_DIMMX_DDR4_PARAMS1(unsigned long a, unsigned long b)
4962 {
4963     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
4964         return 0x87e088000140ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
4965     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
4966         return 0x87e088000140ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
4967     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
4968         return 0x87e088000140ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
4969     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
4970         return 0x87e088000140ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
4971     __bdk_csr_fatal("LMCX_DIMMX_DDR4_PARAMS1", 2, a, b, 0, 0);
4972 }
4973 
4974 #define typedef_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) bdk_lmcx_dimmx_ddr4_params1_t
4975 #define bustype_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) BDK_CSR_TYPE_RSL
4976 #define basename_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) "LMCX_DIMMX_DDR4_PARAMS1"
4977 #define device_bar_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) 0x0 /* PF_BAR0 */
4978 #define busnum_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) (a)
4979 #define arguments_BDK_LMCX_DIMMX_DDR4_PARAMS1(a,b) (a),(b),-1,-1
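
/*
 * Companion to the PARAMS0 sketch above (hypothetical helper): pack the
 * RC9x/RCAx/RCBx control words for BDK_LMCX_DIMMX_DDR4_PARAMS1(lmc, dimm).
 */
static inline uint64_t example_pack_ddr4_params1(uint8_t rc9x, uint8_t rcax, uint8_t rcbx)
{
    bdk_lmcx_dimmx_ddr4_params1_t p;
    p.u = 0;         /* bits [63:24] are reserved and stay zero */
    p.s.rc9x = rc9x;
    p.s.rcax = rcax;
    p.s.rcbx = rcbx;
    return p.u;
}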
4980 
4981 /**
4982  * Register (RSL) lmc#_dimm#_params
4983  *
4984  * LMC DIMM Parameters Register
4985  * This register contains values to be programmed into each control word in the corresponding
4986  * (registered) DIMM. The control words allow optimization of the device properties for different
4987  * raw card designs. Note that LMC only uses this CSR when LMC()_CONTROL[RDIMM_ENA]=1. During
4988  * a power-up/init sequence, LMC writes these fields into the control words in the JEDEC standard
4989  * DDR4 registering clock driver when the corresponding LMC()_DIMM_CTL[DIMM*_WMASK] bits are set.
4990  */
4991 union bdk_lmcx_dimmx_params
4992 {
4993     uint64_t u;
4994     struct bdk_lmcx_dimmx_params_s
4995     {
4996 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
4997         uint64_t rc15                  : 4;  /**< [ 63: 60](R/W) RC15, Reserved. */
4998         uint64_t rc14                  : 4;  /**< [ 59: 56](R/W) RC14, Reserved. */
4999         uint64_t rc13                  : 4;  /**< [ 55: 52](R/W) RC13, Reserved. */
5000         uint64_t rc12                  : 4;  /**< [ 51: 48](R/W) RC12, Reserved. */
5001         uint64_t rc11                  : 4;  /**< [ 47: 44](R/W) RC11, Encoding for RDIMM operating VDD. */
5002         uint64_t rc10                  : 4;  /**< [ 43: 40](R/W) RC10, Encoding for RDIMM operating speed. */
5003         uint64_t rc9                   : 4;  /**< [ 39: 36](R/W) RC9, Power savings settings control word. */
5004         uint64_t rc8                   : 4;  /**< [ 35: 32](R/W) RC8, Additional IBT settings control word. */
5005         uint64_t rc7                   : 4;  /**< [ 31: 28](R/W) RC7, Reserved. */
5006         uint64_t rc6                   : 4;  /**< [ 27: 24](R/W) RC6, Reserved. */
5007         uint64_t rc5                   : 4;  /**< [ 23: 20](R/W) RC5, CK driver characteristics control word. */
5008         uint64_t rc4                   : 4;  /**< [ 19: 16](R/W) RC4, Control signals driver characteristics control word. */
5009         uint64_t rc3                   : 4;  /**< [ 15: 12](R/W) RC3, CA signals driver characteristics control word. */
5010         uint64_t rc2                   : 4;  /**< [ 11:  8](R/W) RC2, Timing control word. */
5011         uint64_t rc1                   : 4;  /**< [  7:  4](R/W) RC1, Clock driver enable control word. */
5012         uint64_t rc0                   : 4;  /**< [  3:  0](R/W) RC0, Global features control word. */
5013 #else /* Word 0 - Little Endian */
5014         uint64_t rc0                   : 4;  /**< [  3:  0](R/W) RC0, Global features control word. */
5015         uint64_t rc1                   : 4;  /**< [  7:  4](R/W) RC1, Clock driver enable control word. */
5016         uint64_t rc2                   : 4;  /**< [ 11:  8](R/W) RC2, Timing control word. */
5017         uint64_t rc3                   : 4;  /**< [ 15: 12](R/W) RC3, CA signals driver characteristics control word. */
5018         uint64_t rc4                   : 4;  /**< [ 19: 16](R/W) RC4, Control signals driver characteristics control word. */
5019         uint64_t rc5                   : 4;  /**< [ 23: 20](R/W) RC5, CK driver characteristics control word. */
5020         uint64_t rc6                   : 4;  /**< [ 27: 24](R/W) RC6, Reserved. */
5021         uint64_t rc7                   : 4;  /**< [ 31: 28](R/W) RC7, Reserved. */
5022         uint64_t rc8                   : 4;  /**< [ 35: 32](R/W) RC8, Additional IBT settings control word. */
5023         uint64_t rc9                   : 4;  /**< [ 39: 36](R/W) RC9, Power savings settings control word. */
5024         uint64_t rc10                  : 4;  /**< [ 43: 40](R/W) RC10, Encoding for RDIMM operating speed. */
5025         uint64_t rc11                  : 4;  /**< [ 47: 44](R/W) RC11, Encoding for RDIMM operating VDD. */
5026         uint64_t rc12                  : 4;  /**< [ 51: 48](R/W) RC12, Reserved. */
5027         uint64_t rc13                  : 4;  /**< [ 55: 52](R/W) RC13, Reserved. */
5028         uint64_t rc14                  : 4;  /**< [ 59: 56](R/W) RC14, Reserved. */
5029         uint64_t rc15                  : 4;  /**< [ 63: 60](R/W) RC15, Reserved. */
5030 #endif /* Word 0 - End */
5031     } s;
5032     /* struct bdk_lmcx_dimmx_params_s cn; */
5033 };
5034 typedef union bdk_lmcx_dimmx_params bdk_lmcx_dimmx_params_t;
5035 
5036 static inline uint64_t BDK_LMCX_DIMMX_PARAMS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
5037 static inline uint64_t BDK_LMCX_DIMMX_PARAMS(unsigned long a, unsigned long b)
5038 {
5039     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
5040         return 0x87e088000270ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
5041     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
5042         return 0x87e088000270ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
5043     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
5044         return 0x87e088000270ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
5045     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
5046         return 0x87e088000270ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
5047     __bdk_csr_fatal("LMCX_DIMMX_PARAMS", 2, a, b, 0, 0);
5048 }
5049 
5050 #define typedef_BDK_LMCX_DIMMX_PARAMS(a,b) bdk_lmcx_dimmx_params_t
5051 #define bustype_BDK_LMCX_DIMMX_PARAMS(a,b) BDK_CSR_TYPE_RSL
5052 #define basename_BDK_LMCX_DIMMX_PARAMS(a,b) "LMCX_DIMMX_PARAMS"
5053 #define device_bar_BDK_LMCX_DIMMX_PARAMS(a,b) 0x0 /* PF_BAR0 */
5054 #define busnum_BDK_LMCX_DIMMX_PARAMS(a,b) (a)
5055 #define arguments_BDK_LMCX_DIMMX_PARAMS(a,b) (a),(b),-1,-1
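
/*
 * Illustrative sketch (hypothetical helper): update one 4-bit RCn control
 * word (n = 0..15) in an LMC()_DIMM()_PARAMS image.  Per the layout above,
 * RCn occupies bits [4n+3:4n]; LMC only sends the words whose
 * LMC()_DIMM_CTL[DIMM*_WMASK] bits are set, and only when
 * LMC()_CONTROL[RDIMM_ENA] = 1.
 */
static inline uint64_t example_set_dimm_rcn(uint64_t params, unsigned int n, uint8_t val)
{
    const unsigned int shift = 4u * (n & 0xf);     /* RCn lives at [4n+3:4n] */
    params &= ~(0xfull << shift);                  /* clear the old nibble   */
    params |= (uint64_t)(val & 0xf) << shift;      /* insert the new value   */
    return params;
}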
5056 
5057 /**
5058  * Register (RSL) lmc#_dimm_ctl
5059  *
5060  * LMC DIMM Control Register
5061  * Note that this CSR is only used when LMC()_CONTROL[RDIMM_ENA] = 1 or
5062  * LMC()_CONFIG[LRDIMM_ENA] = 1. During a power-up/init sequence, this CSR controls
5063  * LMC's write operations to the control words in the JEDEC standard DDR4 registering
5064  * clock driver.
5065  */
5066 union bdk_lmcx_dimm_ctl
5067 {
5068     uint64_t u;
5069     struct bdk_lmcx_dimm_ctl_s
5070     {
5071 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5072         uint64_t reserved_45_63        : 19;
5073         uint64_t tcws                  : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after a RDIMM control word access during a
5074                                                                  power-up/init SEQUENCE. TCWS is in multiples of 8 CK cycles.
5075                                                                  Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
5076                                                                  (ns), and TCYC(ns) is the DDR clock (CK) period, not the data-rate period.
5077                                                                  TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
5078                                                                  RC10.DA4, RC11.DA3, and RC11.DA4)
5079                                                                  TYP = 0x8, otherwise
5080                                                                  0x0 = Reserved. */
5081         uint64_t dimm1_wmask           : 16; /**< [ 31: 16](R/W) Reserved.
5082                                                                  Internal:
5083                                                                  DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
5084         uint64_t dimm0_wmask           : 16; /**< [ 15:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
5085 #else /* Word 0 - Little Endian */
5086         uint64_t dimm0_wmask           : 16; /**< [ 15:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
5087         uint64_t dimm1_wmask           : 16; /**< [ 31: 16](R/W) Reserved.
5088                                                                  Internal:
5089                                                                  DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
5090         uint64_t tcws                  : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after a RDIMM control word access during a
5091                                                                  power-up/init SEQUENCE. TCWS is in multiples of 8 CK cycles.
5092                                                                  Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
5093                                                                  (ns), and TCYC(ns) is the DDR clock (CK) period, not the data-rate period.
5094                                                                  TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
5095                                                                  RC10.DA4, RC11.DA3, and RC11.DA4)
5096                                                                  TYP = 0x8, otherwise
5097                                                                  0x0 = Reserved. */
5098         uint64_t reserved_45_63        : 19;
5099 #endif /* Word 0 - End */
5100     } s;
5101     struct bdk_lmcx_dimm_ctl_cn9
5102     {
5103 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5104         uint64_t reserved_46_63        : 18;
5105         uint64_t cke_assert            : 1;  /**< [ 45: 45](R/W) CKE assertion.
5106                                                                  0 = LMC does not change the current state of the CKE pin during
5107                                                                  RCD_INIT. Note that clearing this field to 0 before running RCD_INIT is
5108                                                                  necessary when initiating control gear-down mode on the RCD.
5109                                                                  1 = LMC will drive CKE output HIGH at the beginning of RCD_INIT sequence. */
5110         uint64_t tcws                  : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after a RDIMM control word access during a
5111                                                                  power-up/init SEQUENCE. TCWS is in multiples of 8 CK cycles.
5112                                                                  Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
5113                                                                  (ns), and TCYC(ns) is the DDR clock (CK) period, not the data-rate period.
5114                                                                  TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
5115                                                                  RC10.DA4, RC11.DA3, and RC11.DA4)
5116                                                                  TYP = 0x8, otherwise
5117                                                                  0x0 = Reserved. */
5118         uint64_t dimm1_wmask           : 16; /**< [ 31: 16](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
5119         uint64_t dimm0_wmask           : 16; /**< [ 15:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
5120 #else /* Word 0 - Little Endian */
5121         uint64_t dimm0_wmask           : 16; /**< [ 15:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
5122         uint64_t dimm1_wmask           : 16; /**< [ 31: 16](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
5123         uint64_t tcws                  : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after a RDIMM control word access during a
5124                                                                  power-up/init SEQUENCE. TCWS is in multiples of 8 CK cycles.
5125                                                                  Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
5126                                                                  (ns), and TCYC(ns) is the DDR clock (CK) period, not the data-rate period.
5127                                                                  TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
5128                                                                  RC10.DA4, RC11.DA3, and RC11.DA4)
5129                                                                  TYP = 0x8, otherwise
5130                                                                  0x0 = Reserved. */
5131         uint64_t cke_assert            : 1;  /**< [ 45: 45](R/W) CKE assertion.
5132                                                                  0 = LMC does not change the current state of the CKE pin during
5133                                                                  RCD_INIT. Note that clearing this field to 0 before running RCD_INIT is
5134                                                                  necessary when initiating control gear-down mode on the RCD.
5135                                                                  1 = LMC will drive CKE output HIGH at the beginning of RCD_INIT sequence. */
5136         uint64_t reserved_46_63        : 18;
5137 #endif /* Word 0 - End */
5138     } cn9;
5139     struct bdk_lmcx_dimm_ctl_cn81xx
5140     {
5141 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5142         uint64_t reserved_46_63        : 18;
5143         uint64_t parity                : 1;  /**< [ 45: 45](R/W) "Parity. The Par_In input of a registered DIMM should be tied off. LMC adjusts the value
5144                                                                  of the DDR_WE_L (DWE#) pin during DDR3 register part control word writes to ensure the
5145                                                                  parity is observed correctly by the receiving DDR3 SSTE32882 or DDR4 DDR4RCD01 register
5146                                                                  part. When Par_In is grounded, PARITY should be cleared to 0." */
5147         uint64_t tcws                  : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after a RDIMM control word access during a
5148                                                                  power-up/init SEQUENCE. TCWS is in multiples of 8 CK cycles.
5149                                                                  Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
5150                                                                  (ns), and TCYC(ns) is the DDR clock (CK) period, not the data-rate period.
5151                                                                  TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
5152                                                                  RC10.DA4, RC11.DA3, and RC11.DA4)
5153                                                                  TYP = 0x8, otherwise
5154                                                                  0x0 = Reserved. */
5155         uint64_t dimm1_wmask           : 16; /**< [ 31: 16](R/W) Reserved.
5156                                                                  Internal:
5157                                                                  DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
5158         uint64_t dimm0_wmask           : 16; /**< [ 15:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
5159 #else /* Word 0 - Little Endian */
5160         uint64_t dimm0_wmask           : 16; /**< [ 15:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
5161         uint64_t dimm1_wmask           : 16; /**< [ 31: 16](R/W) Reserved.
5162                                                                  Internal:
5163                                                                  DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
5164         uint64_t tcws                  : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after a RDIMM control word access during a
5165                                                                  power-up/init SEQUENCE. TCWS is in multiples of 8 CK cycles.
5166                                                                  Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
5167                                                                  (ns), and TCYC(ns) is the DDR clock (CK) period, not the data-rate period.
5168                                                                  TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
5169                                                                  RC10.DA4, RC11.DA3, and RC11.DA4)
5170                                                                  TYP = 0x8, otherwise
5171                                                                  0x0 = Reserved. */
5172         uint64_t parity                : 1;  /**< [ 45: 45](R/W) "Parity. The Par_In input of a registered DIMM should be tied off. LMC adjusts the value
5173                                                                  of the DDR_WE_L (DWE#) pin during DDR3 register part control word writes to ensure the
5174                                                                  parity is observed correctly by the receiving DDR3 SSTE32882 or DDR4 DDR4RCD01 register
5175                                                                  part. When Par_In is grounded, PARITY should be cleared to 0." */
5176         uint64_t reserved_46_63        : 18;
5177 #endif /* Word 0 - End */
5178     } cn81xx;
5179     struct bdk_lmcx_dimm_ctl_cn88xx
5180     {
5181 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5182         uint64_t reserved_46_63        : 18;
5183         uint64_t parity                : 1;  /**< [ 45: 45](R/W) "Parity. The Par_In input of a registered DIMM should be tied off. LMC adjusts the value
5184                                                                  of the DDR_WE_L (DWE#) pin during DDR3 register part control word writes to ensure the
5185                                                                  parity is observed correctly by the receiving DDR3 SSTE32882 or DDR4 DDR4RCD01 register
5186                                                                  part. When Par_In is grounded, PARITY should be cleared to 0." */
5187         uint64_t tcws                  : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after a RDIMM control word access during a
5188                                                                  power-up/init SEQUENCE. TCWS is in multiples of 8 CK cycles.
5189                                                                  Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
5190                                                                  (ns), and TCYC(ns) is the DDR clock (CK) period, not the data-rate period.
5191                                                                  TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
5192                                                                  RC10.DA4, RC11.DA3, and RC11.DA4)
5193                                                                  TYP = 0x8, otherwise
5194                                                                  0x0 = Reserved. */
5195         uint64_t dimm1_wmask           : 16; /**< [ 31: 16](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
5196         uint64_t dimm0_wmask           : 16; /**< [ 15:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
5197 #else /* Word 0 - Little Endian */
5198         uint64_t dimm0_wmask           : 16; /**< [ 15:  0](R/W) DIMM0 write mask. If (DIMM0_WMASK[n] = 1), write DIMM0.RCn. */
5199         uint64_t dimm1_wmask           : 16; /**< [ 31: 16](R/W) DIMM1 write mask. If (DIMM1_WMASK[n] = 1), write DIMM1.RCn. */
5200         uint64_t tcws                  : 13; /**< [ 44: 32](R/W) LMC waits for this time period before and after a RDIMM control word access during a
5201                                                                  power-up/init SEQUENCE. TCWS is in multiples of 8 CK cycles.
5202                                                                  Set TCWS (CSR field) = RNDUP[TCWS(ns)/(8 * TCYC(ns))], where TCWS is the desired time
5203                                                                  (ns), and TCYC(ns) is the DDR clock (CK) period, not the data-rate period.
5204                                                                  TYP = 0x4E0 (equivalent to 15 us) when changing clock timing (RC2.DBA1, RC6.DA4, RC10.DA3,
5205                                                                  RC10.DA4, RC11.DA3, and RC11.DA4)
5206                                                                  TYP = 0x8, otherwise
5207                                                                  0x0 = Reserved. */
5208         uint64_t parity                : 1;  /**< [ 45: 45](R/W) "Parity. The Par_In input of a registered DIMM should be tied off. LMC adjusts the value
5209                                                                  of the DDR_WE_L (DWE#) pin during DDR3 register part control word writes to ensure the
5210                                                                  parity is observed correctly by the receiving DDR3 SSTE32882 or DDR4 DDR4RCD01 register
5211                                                                  part. When Par_In is grounded, PARITY should be cleared to 0." */
5212         uint64_t reserved_46_63        : 18;
5213 #endif /* Word 0 - End */
5214     } cn88xx;
5215     /* struct bdk_lmcx_dimm_ctl_cn88xx cn83xx; */
5216 };
5217 typedef union bdk_lmcx_dimm_ctl bdk_lmcx_dimm_ctl_t;
5218 
5219 static inline uint64_t BDK_LMCX_DIMM_CTL(unsigned long a) __attribute__ ((pure, always_inline));
5220 static inline uint64_t BDK_LMCX_DIMM_CTL(unsigned long a)
5221 {
5222     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
5223         return 0x87e088000310ll + 0x1000000ll * ((a) & 0x0);
5224     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
5225         return 0x87e088000310ll + 0x1000000ll * ((a) & 0x1);
5226     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
5227         return 0x87e088000310ll + 0x1000000ll * ((a) & 0x3);
5228     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
5229         return 0x87e088000310ll + 0x1000000ll * ((a) & 0x3);
5230     __bdk_csr_fatal("LMCX_DIMM_CTL", 1, a, 0, 0, 0);
5231 }
5232 
5233 #define typedef_BDK_LMCX_DIMM_CTL(a) bdk_lmcx_dimm_ctl_t
5234 #define bustype_BDK_LMCX_DIMM_CTL(a) BDK_CSR_TYPE_RSL
5235 #define basename_BDK_LMCX_DIMM_CTL(a) "LMCX_DIMM_CTL"
5236 #define device_bar_BDK_LMCX_DIMM_CTL(a) 0x0 /* PF_BAR0 */
5237 #define busnum_BDK_LMCX_DIMM_CTL(a) (a)
5238 #define arguments_BDK_LMCX_DIMM_CTL(a) (a),-1,-1,-1
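
/*
 * Illustrative sketch (hypothetical helper): derive the 13-bit [TCWS] value
 * from a desired wait time, per the formula in the field description:
 * TCWS = RNDUP[TCWS(ns) / (8 * TCYC(ns))], where TCYC is the CK period.
 * Picoseconds are used so the rounding stays in integer math.  For example,
 * a ~15 us wait at a 1.5 ns CK period gives 1250 (0x4E2), in the same range
 * as the documented typical value of 0x4E0.
 */
static inline uint64_t example_dimm_ctl_tcws(uint64_t wait_ps, uint64_t ck_period_ps)
{
    uint64_t denom = 8 * ck_period_ps;             /* TCWS counts in units of 8 CK */
    return (wait_ps + denom - 1) / denom;          /* round up                     */
}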
5239 
5240 /**
5241  * Register (RSL) lmc#_dll_ctl2
5242  *
5243  * LMC DLL Control/System-Memory-Clock-Reset Register
5244  * See the LMC initialization sequence for details.
5245  * Internal:
5246  * DLL Bringup sequence:
5247  *
5248  * 1. If not done already, set LMC()_DLL_CTL2 = 0, except when LMC()_DLL_CTL2[DRESET] = 1.
5249  *
5250  * 2. Write one to LMC()_DLL_CTL2[DLL_BRINGUP].
5251  *
5252  * 3. Wait for 10 CK cycles, then write one to LMC()_DLL_CTL2[QUAD_DLL_ENA]. It may not be
5253  * feasible
5254  * to count 10 CK cycles, but the idea is to configure the delay line into DLL mode by asserting
5255  * LMC()_DLL_CTL2[DLL_BRINGUP] earlier than [QUAD_DLL_ENA], even if it is one cycle early.
5256  * LMC()_DLL_CTL2[QUAD_DLL_ENA] must not change after this point without restarting the LMC
5257  * and/or
5258  * DRESET initialization sequence.
5259  *
5260  * 4. Read L2D_BST0 and wait for the result. (L2D_BST0 is subject to change depending on how it
5261  * is called in o63. It is still OK to skip step 4, since step 5 allows enough time).
5262  *
5263  * 5. Wait 10 us.
5264  *
5265  * 6. Write zero to LMC()_DLL_CTL2[DLL_BRINGUP]. LMC()_DLL_CTL2[DLL_BRINGUP] must not change
5266  * after this point without restarting the LMC and/or DRESET initialization
5267  * sequence.
5268  *
5269  * 7. Read L2D_BST0 and wait for the result. (Same as step 4, but the idea here is to wait some
5270  * time before going to step 8; even one cycle is fine).
5271  *
5272  * 8. Write zero to LMC()_DLL_CTL2[DRESET]. LMC()_DLL_CTL2[DRESET] must not change after this
5273  * point
5274  * without restarting the LMC and/or DRESET initialization sequence.
5275  *
5276  * 9. Wait for the LMC()_DLL_CTL2[DRESET_DLY] interval to ensure the clocks have turned on and reset is deasserted. (See the illustrative sketch after this register's accessor macros.)
5277  */
5278 union bdk_lmcx_dll_ctl2
5279 {
5280     uint64_t u;
5281     struct bdk_lmcx_dll_ctl2_s
5282     {
5283 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5284         uint64_t reserved_24_63        : 40;
5285         uint64_t dreset_cclk_dis       : 1;  /**< [ 23: 23](R/W) Force conditional dclk on during reset. */
5286         uint64_t dreset_dly            : 6;  /**< [ 22: 17](R/W) Wait time for DRESET propagation in dclk cycles. When DRESET is cleared
5287                                                                  this value is counted down to allow clocks to turn on and capture reset state.
5288                                                                  Once the counter expires, reset is deasserted. Setting this field to 0 will
5289                                                                  default to 50 dclk cycles. */
5290         uint64_t intf_en               : 1;  /**< [ 16: 16](R/W) Interface enable. */
5291         uint64_t dll_bringup           : 1;  /**< [ 15: 15](R/W) DLL bring up. */
5292         uint64_t dreset                : 1;  /**< [ 14: 14](R/W) System-memory-clock domain reset. The reset signal that is used by the system-memory-clock
5293                                                                  domain is
5294                                                                  (DRESET -OR- core-clock reset). */
5295         uint64_t quad_dll_ena          : 1;  /**< [ 13: 13](R/W) DLL enable. */
5296         uint64_t byp_sel               : 4;  /**< [ 12:  9](R/W) Reserved; must be zero.
5297                                                                  Internal:
5298                                                                  Bypass select.
5299                                                                  0x0 = no byte.
5300                                                                  0x1 = byte 0.
5301                                                                  ...
5302                                                                  0x9 = byte 8.
5303                                                                  0xA = all bytes.
5304                                                                  0xB-0xF = Reserved. */
5305         uint64_t byp_setting           : 9;  /**< [  8:  0](R/W) Reserved; must be zero.
5306                                                                  Internal:
5307                                                                  Bypass setting.
5308                                                                  DDR3-1600: 0x22.
5309                                                                  DDR3-1333: 0x32.
5310                                                                  DDR3-1066: 0x4B.
5311                                                                  DDR3-800: 0x75.
5312                                                                  DDR3-667: 0x96.
5313                                                                  DDR3-600: 0xAC. */
5314 #else /* Word 0 - Little Endian */
5315         uint64_t byp_setting           : 9;  /**< [  8:  0](R/W) Reserved; must be zero.
5316                                                                  Internal:
5317                                                                  Bypass setting.
5318                                                                  DDR3-1600: 0x22.
5319                                                                  DDR3-1333: 0x32.
5320                                                                  DDR3-1066: 0x4B.
5321                                                                  DDR3-800: 0x75.
5322                                                                  DDR3-667: 0x96.
5323                                                                  DDR3-600: 0xAC. */
5324         uint64_t byp_sel               : 4;  /**< [ 12:  9](R/W) Reserved; must be zero.
5325                                                                  Internal:
5326                                                                  Bypass select.
5327                                                                  0x0 = no byte.
5328                                                                  0x1 = byte 0.
5329                                                                  ...
5330                                                                  0x9 = byte 8.
5331                                                                  0xA = all bytes.
5332                                                                  0xB-0xF = Reserved. */
5333         uint64_t quad_dll_ena          : 1;  /**< [ 13: 13](R/W) DLL enable. */
5334         uint64_t dreset                : 1;  /**< [ 14: 14](R/W) System-memory-clock domain reset. The reset signal that is used by the system-memory-clock
5335                                                                  domain is
5336                                                                  (DRESET -OR- core-clock reset). */
5337         uint64_t dll_bringup           : 1;  /**< [ 15: 15](R/W) DLL bring up. */
5338         uint64_t intf_en               : 1;  /**< [ 16: 16](R/W) Interface enable. */
5339         uint64_t dreset_dly            : 6;  /**< [ 22: 17](R/W) Wait time for DRESET propagation in dclk cycles. When DRESET is cleared
5340                                                                  this value is counted down to allow clocks to turn on and capture reset state.
5341                                                                  Once the counter expires, reset is deasserted. Setting this field to 0 will
5342                                                                  default to 50 dclk cycles. */
5343         uint64_t dreset_cclk_dis       : 1;  /**< [ 23: 23](R/W) Force conditional dclk on during reset. */
5344         uint64_t reserved_24_63        : 40;
5345 #endif /* Word 0 - End */
5346     } s;
5347     struct bdk_lmcx_dll_ctl2_cn8
5348     {
5349 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5350         uint64_t reserved_17_63        : 47;
5351         uint64_t intf_en               : 1;  /**< [ 16: 16](R/W) Interface enable. */
5352         uint64_t dll_bringup           : 1;  /**< [ 15: 15](R/W) DLL bring up. */
5353         uint64_t dreset                : 1;  /**< [ 14: 14](R/W) System-memory-clock domain reset. The reset signal that is used by the system-memory-clock
5354                                                                  domain is
5355                                                                  (DRESET -OR- core-clock reset). */
5356         uint64_t quad_dll_ena          : 1;  /**< [ 13: 13](R/W) DLL enable. */
5357         uint64_t byp_sel               : 4;  /**< [ 12:  9](R/W) Reserved; must be zero.
5358                                                                  Internal:
5359                                                                  Bypass select.
5360                                                                  0x0 = no byte.
5361                                                                  0x1 = byte 0.
5362                                                                  ...
5363                                                                  0x9 = byte 8.
5364                                                                  0xA = all bytes.
5365                                                                  0xB-0xF = Reserved. */
5366         uint64_t byp_setting           : 9;  /**< [  8:  0](R/W) Reserved; must be zero.
5367                                                                  Internal:
5368                                                                  Bypass setting.
5369                                                                  DDR3-1600: 0x22.
5370                                                                  DDR3-1333: 0x32.
5371                                                                  DDR3-1066: 0x4B.
5372                                                                  DDR3-800: 0x75.
5373                                                                  DDR3-667: 0x96.
5374                                                                  DDR3-600: 0xAC. */
5375 #else /* Word 0 - Little Endian */
5376         uint64_t byp_setting           : 9;  /**< [  8:  0](R/W) Reserved; must be zero.
5377                                                                  Internal:
5378                                                                  Bypass setting.
5379                                                                  DDR3-1600: 0x22.
5380                                                                  DDR3-1333: 0x32.
5381                                                                  DDR3-1066: 0x4B.
5382                                                                  DDR3-800: 0x75.
5383                                                                  DDR3-667: 0x96.
5384                                                                  DDR3-600: 0xAC. */
5385         uint64_t byp_sel               : 4;  /**< [ 12:  9](R/W) Reserved; must be zero.
5386                                                                  Internal:
5387                                                                  Bypass select.
5388                                                                  0x0 = no byte.
5389                                                                  0x1 = byte 0.
5390                                                                  ...
5391                                                                  0x9 = byte 8.
5392                                                                  0xA = all bytes.
5393                                                                  0xB-0xF = Reserved. */
5394         uint64_t quad_dll_ena          : 1;  /**< [ 13: 13](R/W) DLL enable. */
5395         uint64_t dreset                : 1;  /**< [ 14: 14](R/W) System-memory-clock domain reset. The reset signal that is used by the system-memory-clock
5396                                                                  domain is
5397                                                                  (DRESET -OR- core-clock reset). */
5398         uint64_t dll_bringup           : 1;  /**< [ 15: 15](R/W) DLL bring up. */
5399         uint64_t intf_en               : 1;  /**< [ 16: 16](R/W) Interface enable. */
5400         uint64_t reserved_17_63        : 47;
5401 #endif /* Word 0 - End */
5402     } cn8;
5403     struct bdk_lmcx_dll_ctl2_cn9
5404     {
5405 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5406         uint64_t reserved_24_63        : 40;
5407         uint64_t dreset_cclk_dis       : 1;  /**< [ 23: 23](R/W) Force conditional dclk on during reset. */
5408         uint64_t dreset_dly            : 6;  /**< [ 22: 17](R/W) Wait time for DRESET propagation in dclk cycles. When DRESET is cleared
5409                                                                  this value is counted down to allow clocks to turn on and capture reset state.
5410                                                                  Once the counter expires, reset is deasserted. Setting this field to 0 will
5411                                                                  default to 50 dclk cycles. */
5412         uint64_t intf_en               : 1;  /**< [ 16: 16](R/W) Interface enable. */
5413         uint64_t dll_bringup           : 1;  /**< [ 15: 15](R/W) DLL bring up. */
5414         uint64_t dreset                : 1;  /**< [ 14: 14](R/W) System memory clock domain reset. The reset signal that is used by the
5415                                                                  system memory clock domain is (DRESET -OR- core-clock reset). */
5416         uint64_t quad_dll_ena          : 1;  /**< [ 13: 13](R/W) DLL enable. */
5417         uint64_t byp_sel               : 4;  /**< [ 12:  9](R/W) Reserved; must be zero.
5418                                                                  Internal:
5419                                                                  Bypass select.
5420                                                                  0x0 = no byte.
5421                                                                  0x1 = byte 0.
5422                                                                  ...
5423                                                                  0x9 = byte 8.
5424                                                                  0xA = all bytes.
5425                                                                  0xB-0xF = Reserved. */
5426         uint64_t byp_setting           : 9;  /**< [  8:  0](R/W) Reserved; must be zero.
5427                                                                  Internal:
5428                                                                  Bypass setting.
5429                                                                  DDR3-1600: 0x22.
5430                                                                  DDR3-1333: 0x32.
5431                                                                  DDR3-1066: 0x4B.
5432                                                                  DDR3-800: 0x75.
5433                                                                  DDR3-667: 0x96.
5434                                                                  DDR3-600: 0xAC. */
5435 #else /* Word 0 - Little Endian */
5436         uint64_t byp_setting           : 9;  /**< [  8:  0](R/W) Reserved; must be zero.
5437                                                                  Internal:
5438                                                                  Bypass setting.
5439                                                                  DDR3-1600: 0x22.
5440                                                                  DDR3-1333: 0x32.
5441                                                                  DDR3-1066: 0x4B.
5442                                                                  DDR3-800: 0x75.
5443                                                                  DDR3-667: 0x96.
5444                                                                  DDR3-600: 0xAC. */
5445         uint64_t byp_sel               : 4;  /**< [ 12:  9](R/W) Reserved; must be zero.
5446                                                                  Internal:
5447                                                                  Bypass select.
5448                                                                  0x0 = no byte.
5449                                                                  0x1 = byte 0.
5450                                                                  ...
5451                                                                  0x9 = byte 8.
5452                                                                  0xA = all bytes.
5453                                                                  0xB-0xF = Reserved. */
5454         uint64_t quad_dll_ena          : 1;  /**< [ 13: 13](R/W) DLL enable. */
5455         uint64_t dreset                : 1;  /**< [ 14: 14](R/W) System memory clock domain reset. The reset signal that is used by the
5456                                                                  system memory clock domain is (DRESET -OR- core-clock reset). */
5457         uint64_t dll_bringup           : 1;  /**< [ 15: 15](R/W) DLL bring up. */
5458         uint64_t intf_en               : 1;  /**< [ 16: 16](R/W) Interface enable. */
5459         uint64_t dreset_dly            : 6;  /**< [ 22: 17](R/W) Wait time for DRESET propagation in dclk cycles. When DRESET is cleared
5460                                                                  this value is counted down to allow clocks to turn on and capture reset state.
5461                                                                  Once the counter expires, reset is deasserted. Setting this field to 0 will
5462                                                                  default to 50 dclk cycles. */
5463         uint64_t dreset_cclk_dis       : 1;  /**< [ 23: 23](R/W) Force conditional dclk on during reset. */
5464         uint64_t reserved_24_63        : 40;
5465 #endif /* Word 0 - End */
5466     } cn9;
5467 };
5468 typedef union bdk_lmcx_dll_ctl2 bdk_lmcx_dll_ctl2_t;
5469 
5470 static inline uint64_t BDK_LMCX_DLL_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
5471 static inline uint64_t BDK_LMCX_DLL_CTL2(unsigned long a)
5472 {
5473     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
5474         return 0x87e0880001c8ll + 0x1000000ll * ((a) & 0x0);
5475     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
5476         return 0x87e0880001c8ll + 0x1000000ll * ((a) & 0x1);
5477     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
5478         return 0x87e0880001c8ll + 0x1000000ll * ((a) & 0x3);
5479     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
5480         return 0x87e0880001c8ll + 0x1000000ll * ((a) & 0x3);
5481     __bdk_csr_fatal("LMCX_DLL_CTL2", 1, a, 0, 0, 0);
5482 }
5483 
5484 #define typedef_BDK_LMCX_DLL_CTL2(a) bdk_lmcx_dll_ctl2_t
5485 #define bustype_BDK_LMCX_DLL_CTL2(a) BDK_CSR_TYPE_RSL
5486 #define basename_BDK_LMCX_DLL_CTL2(a) "LMCX_DLL_CTL2"
5487 #define device_bar_BDK_LMCX_DLL_CTL2(a) 0x0 /* PF_BAR0 */
5488 #define busnum_BDK_LMCX_DLL_CTL2(a) (a)
5489 #define arguments_BDK_LMCX_DLL_CTL2(a) (a),-1,-1,-1
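
/*
 * Illustrative sketch of the internal DLL bring-up sequence documented above
 * (hypothetical helper, not the BDK's DRAM init code).  The csr_read64,
 * csr_write64 and wait_usec callbacks stand in for the platform's CSR
 * accessors and delay routine; the L2D_BST0 reads of steps 4 and 7 are
 * omitted, leaving only the explicit wait.
 */
static inline void example_lmc_dll_bringup(unsigned long lmc,
                                           uint64_t (*csr_read64)(uint64_t addr),
                                           void (*csr_write64)(uint64_t addr, uint64_t val),
                                           void (*wait_usec)(unsigned long usec))
{
    uint64_t addr = BDK_LMCX_DLL_CTL2(lmc);
    bdk_lmcx_dll_ctl2_t c;

    c.u = csr_read64(addr);              /* step 1 assumed done: CSR cleared, [DRESET] = 1 */

    c.s.dll_bringup = 1;                 /* step 2: assert [DLL_BRINGUP]                   */
    csr_write64(addr, c.u);

    c.s.quad_dll_ena = 1;                /* step 3: enable the DLL after [DLL_BRINGUP]     */
    csr_write64(addr, c.u);

    wait_usec(10);                       /* step 5: wait 10 us                             */

    c.s.dll_bringup = 0;                 /* step 6: deassert [DLL_BRINGUP]                 */
    csr_write64(addr, c.u);

    c.s.dreset = 0;                      /* step 8: release the system-memory-clock reset  */
    csr_write64(addr, c.u);
}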
5490 
5491 /**
5492  * Register (RSL) lmc#_dll_ctl3
5493  *
5494  * LMC DLL Control/System-Memory-Clock Reset Register
5495  */
5496 union bdk_lmcx_dll_ctl3
5497 {
5498     uint64_t u;
5499     struct bdk_lmcx_dll_ctl3_s
5500     {
5501 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5502         uint64_t reserved_62_63        : 2;
5503         uint64_t rd_deskew_mem_sel_dis : 1;  /**< [ 61: 61](R/W) By default, LMC always selects the per-rank deskew settings that are stored inside the
5504                                                                  PHY. Set this field to one to disable this feature so that the common deskew
5505                                                                  setting inside the PHY's state machine is selected instead. */
5506         uint64_t wr_deskew_mem_sel     : 1;  /**< [ 60: 60](R/W) Reserved.
5507                                                                  Internal:
5508                                                                  Only relevant when [WR_DESKEW_ENA] is set.
5509                                                                  0 = Selects the common deskew settings stored in each DQ bit. Writes to any package
5510                                                                  rank use this common setting to deskew the data bits.
5511                                                                  1 = Selects the stored per-package-rank deskew settings. Writes to a particular
5512                                                                  package rank use the corresponding stored setting for that rank. */
5513         uint64_t wr_deskew_mem_ld      : 1;  /**< [ 59: 59](WO) Reserved.
5514                                                                  Internal:
5515                                                                  When set, all DQ bit deskew settings in the DDR PHY are loaded into their corresponding
5516                                                                  rank deskew storage. The rank is chosen by the CSR LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a
5517                                                                  oneshot operation and clears itself each time it is set. Note this has to be done during
5518                                                                  the bringup state where there isn't yet any traffic to DRAM. */
5519         uint64_t reserved_50_58        : 9;
5520         uint64_t wr_deskew_ena         : 1;  /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
5521         uint64_t wr_deskew_ld          : 1;  /**< [ 48: 48](WO) When set, the bit deskew setting in LMC()_DLL_CTL3[OFFSET] gets loaded to
5522                                                                  the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
5523                                                                  for write bit deskew. This is a oneshot and clears itself each time
5524                                                                  it is set. */
5525         uint64_t bit_select            : 4;  /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 7 for write deskew setting assignment.
5526                                                                  0x8 = Selects dbi for write deskew setting assignment.
5527                                                                  0x9 = No-op.
5528                                                                  0xA = Reuse deskew setting on.
5529                                                                  0xB = Reuse deskew setting off.
5530                                                                  0xC = Vref bypass setting load.
5531                                                                  0xD = Vref bypass on.
5532                                                                  0xE = Vref bypass off.
5533                                                                  0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
5534                                                                  Also sets Vref bypass to off and deskew reuse setting to off. */
5535         uint64_t dclk90_fwd            : 1;  /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
5536                                                                  Local Initialization step for the LMC bring-up sequence.
5537 
5538                                                                  Internal:
5539                                                                  Generate a one cycle pulse to forward setting. This is a oneshot and clears
5540                                                                  itself each time it is set. */
5541         uint64_t ddr_90_dly_byp        : 1;  /**< [ 42: 42](R/W) Reserved; must be zero.
5542                                                                  Internal:
5543                                                                  Bypass DDR90_DLY in clock tree. */
5544         uint64_t dclk90_recal_dis      : 1;  /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
5545         uint64_t dclk90_byp_sel        : 1;  /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
5546         uint64_t dclk90_byp_setting    : 9;  /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
5547         uint64_t dll_fast              : 1;  /**< [ 30: 30](RO/H) Reserved; must be zero.
5548                                                                  Internal:
5549                                                                  DLL lock, 0=DLL locked. */
5550         uint64_t dll90_setting         : 9;  /**< [ 29: 21](RO/H) Reserved; must be zero.
5551                                                                  Internal:
5552                                                                  Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
5553         uint64_t fine_tune_mode        : 1;  /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
5554                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
5555         uint64_t dll_mode              : 1;  /**< [ 19: 19](R/W) Reserved; must be zero.
5556                                                                  Internal:
5557                                                                  DLL mode. */
5558         uint64_t dll90_byte_sel        : 4;  /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
5559                                                                  0x0 = byte 0.
5560                                                                  0x1 = byte 1.
5561                                                                  ...
5562                                                                  0x8: byte 8.
5563                                                                  0x9-0xF: reserved. */
5564         uint64_t offset_ena            : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
5565                                                                  Internal:
5566                                                                  Offset enable. 1=enable. */
5567         uint64_t load_offset           : 1;  /**< [ 13: 13](WO) Reserved; must be zero.
5568                                                                  Internal:
5569                                                                  Load offset. 0=disable, 1=generate a one cycle pulse to
5570                                                                  the PHY. This field is a oneshot and clears itself each time it is set. */
5571         uint64_t mode_sel              : 2;  /**< [ 12: 11](R/W) Reserved; must be zero.
5572                                                                  Internal:
5573                                                                  Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
5574                                                                  write and read. */
5575         uint64_t byte_sel              : 4;  /**< [ 10:  7](R/W) Reserved; must be zero.
5576                                                                  Internal:
5577                                                                  Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
5578                                                                  byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
5579         uint64_t reserved_0_6          : 7;
5580 #else /* Word 0 - Little Endian */
5581         uint64_t reserved_0_6          : 7;
5582         uint64_t byte_sel              : 4;  /**< [ 10:  7](R/W) Reserved; must be zero.
5583                                                                  Internal:
5584                                                                  Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
5585                                                                  byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
5586         uint64_t mode_sel              : 2;  /**< [ 12: 11](R/W) Reserved; must be zero.
5587                                                                  Internal:
5588                                                                  Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
5589                                                                  write and read. */
5590         uint64_t load_offset           : 1;  /**< [ 13: 13](WO) Reserved; must be zero.
5591                                                                  Internal:
5592                                                                  Load offset. 0=disable, 1=generate a one cycle pulse to
5593                                                                  the PHY. This field is a oneshot and clears itself each time it is set. */
5594         uint64_t offset_ena            : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
5595                                                                  Internal:
5596                                                                  Offset enable. 1=enable. */
5597         uint64_t dll90_byte_sel        : 4;  /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
5598                                                                  0x0 = byte 0.
5599                                                                  0x1 = byte 1.
5600                                                                  ...
5601                                                                  0x8 = byte 8.
5602                                                                  0x9-0xF = Reserved. */
5603         uint64_t dll_mode              : 1;  /**< [ 19: 19](R/W) Reserved; must be zero.
5604                                                                  Internal:
5605                                                                  DLL mode. */
5606         uint64_t fine_tune_mode        : 1;  /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
5607                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
5608         uint64_t dll90_setting         : 9;  /**< [ 29: 21](RO/H) Reserved; must be zero.
5609                                                                  Internal:
5610                                                                  Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
5611         uint64_t dll_fast              : 1;  /**< [ 30: 30](RO/H) Reserved; must be zero.
5612                                                                  Internal:
5613                                                                  DLL lock, 0=DLL locked. */
5614         uint64_t dclk90_byp_setting    : 9;  /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
5615         uint64_t dclk90_byp_sel        : 1;  /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
5616         uint64_t dclk90_recal_dis      : 1;  /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
5617         uint64_t ddr_90_dly_byp        : 1;  /**< [ 42: 42](R/W) Reserved; must be zero.
5618                                                                  Internal:
5619                                                                  Bypass DDR90_DLY in clock tree. */
5620         uint64_t dclk90_fwd            : 1;  /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
5621                                                                  Lock Initialization step for the LMC bring-up sequence.
5622 
5623                                                                  Internal:
5624                                                                  Generate a one cycle pulse to forward setting. This is a oneshot and clears
5625                                                                  itself each time it is set. */
5626         uint64_t bit_select            : 4;  /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
5627                                                                  0x8 = Selects dbi for write deskew setting assignment.
5628                                                                  0x9 = No-op.
5629                                                                  0xA = Reuse deskew setting on.
5630                                                                  0xB = Reuse deskew setting off.
5631                                                                  0xC = Vref bypass setting load.
5632                                                                  0xD = Vref bypass on.
5633                                                                  0xE = Vref bypass off.
5634                                                                  0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
5635                                                                  Also sets Vref bypass to off and deskew reuse setting to off. */
5636         uint64_t wr_deskew_ld          : 1;  /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
5637                                                                  the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
5638                                                                  for write bit deskew. This is a oneshot and clears itself each time
5639                                                                  it is set. */
5640         uint64_t wr_deskew_ena         : 1;  /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
5641         uint64_t reserved_50_58        : 9;
5642         uint64_t wr_deskew_mem_ld      : 1;  /**< [ 59: 59](WO) Reserved.
5643                                                                  Internal:
5644                                                                  When set, all DQ bit deskew settings in the DDR PHY are loaded into their corresponding
5645                                                                  rank deskew storage. The rank is chosen by the CSR LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a
5646                                                                  oneshot operation and clears itself each time it is set. Note this has to be done during
5647                                                                  the bringup state where there isn't yet any traffic to DRAM. */
5648         uint64_t wr_deskew_mem_sel     : 1;  /**< [ 60: 60](R/W) Reserved.
5649                                                                  Internal:
5650                                                                  Only relevant when [WR_DESKEW_ENA] is set.
5651                                                                  0 = Selects the common deskew settings stored in each DQ bit. All writes to any package
5652                                                                  rank use these common settings to deskew the data bits.
5653                                                                  1 = Selects the stored per-package rank deskew settings. Writes to a particular
5654                                                                  package rank use the corresponding stored setting for that rank. */
5655         uint64_t rd_deskew_mem_sel_dis : 1;  /**< [ 61: 61](R/W) By default, LMC always selects per-rank deskew settings that are stored inside
5656                                                                  the PHY. Set this field to one to disable this feature so that the common
5657                                                                  deskew setting inside the PHY's state machine is selected instead. */
5658         uint64_t reserved_62_63        : 2;
5659 #endif /* Word 0 - End */
5660     } s;
5661     struct bdk_lmcx_dll_ctl3_cn88xxp1
5662     {
5663 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5664         uint64_t reserved_50_63        : 14;
5665         uint64_t wr_deskew_ena         : 1;  /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
5666         uint64_t wr_deskew_ld          : 1;  /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
5667                                                                  the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
5668                                                                  for write bit deskew. This is a oneshot and clears itself each time
5669                                                                  it is set. */
5670         uint64_t bit_select            : 4;  /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
5671                                                                  0x8 = Selects dbi for write deskew setting assignment.
5672                                                                  0x9 = No-op.
5673                                                                  0xA = Reuse deskew setting on.
5674                                                                  0xB = Reuse deskew setting off.
5675                                                                  0xC-0xE = Reserved.
5676                                                                  0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
5677                                                                  Also sets Vref bypass to off and deskew reuse setting to off. */
5678         uint64_t dclk90_fwd            : 1;  /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
5679                                                                  Lock Initialization step for the LMC bring-up sequence.
5680 
5681                                                                  Internal:
5682                                                                  Generate a one cycle pulse to forward setting. This is a oneshot and clears
5683                                                                  itself each time it is set. */
5684         uint64_t ddr_90_dly_byp        : 1;  /**< [ 42: 42](R/W) Reserved; must be zero.
5685                                                                  Internal:
5686                                                                  Bypass DDR90_DLY in clock tree. */
5687         uint64_t dclk90_recal_dis      : 1;  /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
5688         uint64_t dclk90_byp_sel        : 1;  /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
5689         uint64_t dclk90_byp_setting    : 9;  /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
5690         uint64_t dll_fast              : 1;  /**< [ 30: 30](RO/H) Reserved; must be zero.
5691                                                                  Internal:
5692                                                                  DLL lock, 0=DLL locked. */
5693         uint64_t dll90_setting         : 9;  /**< [ 29: 21](RO/H) Reserved; must be zero.
5694                                                                  Internal:
5695                                                                  Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
5696         uint64_t fine_tune_mode        : 1;  /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
5697                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
5698         uint64_t dll_mode              : 1;  /**< [ 19: 19](R/W) Reserved; must be zero.
5699                                                                  Internal:
5700                                                                  DLL mode. */
5701         uint64_t dll90_byte_sel        : 4;  /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
5702                                                                  0x0 = byte 0.
5703                                                                  0x1 = byte 1.
5704                                                                  ...
5705                                                                  0x8 = byte 8.
5706                                                                  0x9-0xF = Reserved. */
5707         uint64_t offset_ena            : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
5708                                                                  Internal:
5709                                                                  Offset enable. 1=enable. */
5710         uint64_t load_offset           : 1;  /**< [ 13: 13](WO) Reserved; must be zero.
5711                                                                  Internal:
5712                                                                  Load offset. 0=disable, 1=generate a one cycle pulse to
5713                                                                  the PHY. This field is a oneshot and clears itself each time it is set. */
5714         uint64_t mode_sel              : 2;  /**< [ 12: 11](R/W) Reserved; must be zero.
5715                                                                  Internal:
5716                                                                  Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
5717                                                                  write and read. */
5718         uint64_t byte_sel              : 4;  /**< [ 10:  7](R/W) Reserved; must be zero.
5719                                                                  Internal:
5720                                                                  Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
5721                                                                  byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
5722         uint64_t offset                : 7;  /**< [  6:  0](R/W) Reserved; must be zero.
5723                                                                  Internal:
5724                                                                  Write/read offset setting. \<5:0\>: offset (not
5725                                                                  two's-complement), \<5\>: 0 = increment, 1 = decrement. */
5726 #else /* Word 0 - Little Endian */
5727         uint64_t offset                : 7;  /**< [  6:  0](R/W) Reserved; must be zero.
5728                                                                  Internal:
5729                                                                  Write/read offset setting. \<5:0\>: offset (not
5730                                                                  two's-complement), \<5\>: 0 = increment, 1 = decrement. */
5731         uint64_t byte_sel              : 4;  /**< [ 10:  7](R/W) Reserved; must be zero.
5732                                                                  Internal:
5733                                                                  Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
5734                                                                  byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
5735         uint64_t mode_sel              : 2;  /**< [ 12: 11](R/W) Reserved; must be zero.
5736                                                                  Internal:
5737                                                                  Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
5738                                                                  write and read. */
5739         uint64_t load_offset           : 1;  /**< [ 13: 13](WO) Reserved; must be zero.
5740                                                                  Internal:
5741                                                                  Load offset. 0=disable, 1=generate a one cycle pulse to
5742                                                                  the PHY. This field is a oneshot and clears itself each time it is set. */
5743         uint64_t offset_ena            : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
5744                                                                  Internal:
5745                                                                  Offset enable. 1=enable. */
5746         uint64_t dll90_byte_sel        : 4;  /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
5747                                                                  0x0 = byte 0.
5748                                                                  0x1 = byte 1.
5749                                                                  ...
5750                                                                  0x8 = byte 8.
5751                                                                  0x9-0xF = Reserved. */
5752         uint64_t dll_mode              : 1;  /**< [ 19: 19](R/W) Reserved; must be zero.
5753                                                                  Internal:
5754                                                                  DLL mode. */
5755         uint64_t fine_tune_mode        : 1;  /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
5756                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
5757         uint64_t dll90_setting         : 9;  /**< [ 29: 21](RO/H) Reserved; must be zero.
5758                                                                  Internal:
5759                                                                  Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
5760         uint64_t dll_fast              : 1;  /**< [ 30: 30](RO/H) Reserved; must be zero.
5761                                                                  Internal:
5762                                                                  DLL lock, 0=DLL locked. */
5763         uint64_t dclk90_byp_setting    : 9;  /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
5764         uint64_t dclk90_byp_sel        : 1;  /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
5765         uint64_t dclk90_recal_dis      : 1;  /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
5766         uint64_t ddr_90_dly_byp        : 1;  /**< [ 42: 42](R/W) Reserved; must be zero.
5767                                                                  Internal:
5768                                                                  Bypass DDR90_DLY in clock tree. */
5769         uint64_t dclk90_fwd            : 1;  /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
5770                                                                  Lock Initialization step for the LMC bring-up sequence.
5771 
5772                                                                  Internal:
5773                                                                  Generate a one cycle pulse to forward setting. This is a oneshot and clears
5774                                                                  itself each time it is set. */
5775         uint64_t bit_select            : 4;  /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
5776                                                                  0x8 = Selects dbi for write deskew setting assignment.
5777                                                                  0x9 = No-op.
5778                                                                  0xA = Reuse deskew setting on.
5779                                                                  0xB = Reuse deskew setting off.
5780                                                                  0xC-0xE = Reserved.
5781                                                                  0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
5782                                                                  Also sets Vref bypass to off and deskew reuse setting to off. */
5783         uint64_t wr_deskew_ld          : 1;  /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
5784                                                                  the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
5785                                                                  for write bit deskew. This is a oneshot and clears itself each time
5786                                                                  it is set. */
5787         uint64_t wr_deskew_ena         : 1;  /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
5788         uint64_t reserved_50_63        : 14;
5789 #endif /* Word 0 - End */
5790     } cn88xxp1;
5791     struct bdk_lmcx_dll_ctl3_cn9
5792     {
5793 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5794         uint64_t reserved_62_63        : 2;
5795         uint64_t rd_deskew_mem_sel_dis : 1;  /**< [ 61: 61](R/W) By default, LMC always selects per-rank deskew settings that are stored inside
5796                                                                  the PHY. Set this field to one to disable this feature so that the common
5797                                                                  deskew setting inside the PHY's state machine is selected instead. */
5798         uint64_t wr_deskew_mem_sel     : 1;  /**< [ 60: 60](R/W) Reserved.
5799                                                                  Internal:
5800                                                                  Only relevant when [WR_DESKEW_ENA] is set.
5801                                                                  0 = Selects the common deskew settings stored in each DQ bit. All writes to any package
5802                                                                  rank use these common settings to deskew the data bits.
5803                                                                  1 = Selects the stored per-package rank deskew settings. Writes to a particular
5804                                                                  package rank use the corresponding stored setting for that rank. */
5805         uint64_t wr_deskew_mem_ld      : 1;  /**< [ 59: 59](WO) Reserved.
5806                                                                  Internal:
5807                                                                  When set, all DQ bit deskew settings in the DDR PHY are loaded into their corresponding
5808                                                                  rank deskew storage. The rank is chosen by the CSR LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a
5809                                                                  oneshot operation and clears itself each time it is set. Note this has to be done during
5810                                                                  the bringup state where there isn't yet any traffic to DRAM. */
5811         uint64_t offset                : 9;  /**< [ 58: 50](R/W) Reserved; must be zero.
5812                                                                  Internal:
5813                                                                  Write/read offset setting. \<8:0\>: offset (not
5814                                                                  two's-complement), \<8\>: 0 = increment, 1 = decrement. */
5815         uint64_t wr_deskew_ena         : 1;  /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
5816         uint64_t wr_deskew_ld          : 1;  /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
5817                                                                  the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
5818                                                                  for write bit deskew. This is a oneshot and clears itself each time
5819                                                                  it is set. */
5820         uint64_t bit_select            : 4;  /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
5821                                                                  0x8 = Selects dbi for write deskew setting assignment.
5822                                                                  0x9 = No-op.
5823                                                                  0xA = Reuse deskew setting on.
5824                                                                  0xB = Reuse deskew setting off.
5825                                                                  0xC = Vref bypass setting load.
5826                                                                  0xD = Vref bypass on.
5827                                                                  0xE = Vref bypass off.
5828                                                                  0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
5829                                                                  Also sets Vref bypass to off and deskew reuse setting to off. */
5830         uint64_t reserved_31_43        : 13;
5831         uint64_t dll_fast              : 1;  /**< [ 30: 30](RO/H) Reserved; must be zero.
5832                                                                  Internal:
5833                                                                  DLL lock, 0=DLL locked. */
5834         uint64_t dll90_setting         : 9;  /**< [ 29: 21](RO/H) Reserved; must be zero.
5835                                                                  Internal:
5836                                                                  Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
5837         uint64_t fine_tune_mode        : 1;  /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
5838                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
5839         uint64_t dll_mode              : 1;  /**< [ 19: 19](R/W) Reserved; must be zero.
5840                                                                  Internal:
5841                                                                  DLL mode. */
5842         uint64_t dll90_byte_sel        : 4;  /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
5843                                                                  0x0 = byte 0.
5844                                                                  0x1 = byte 1.
5845                                                                  ...
5846                                                                  0x8 = ECC byte.
5847                                                                  0x9-0xF = Reserved. */
5848         uint64_t offset_ena            : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
5849                                                                  Internal:
5850                                                                  Offset enable. 1=enable. */
5851         uint64_t load_offset           : 1;  /**< [ 13: 13](WO) Reserved; must be zero.
5852                                                                  Internal:
5853                                                                  Load offset. 0=disable, 1=generate a one cycle pulse to
5854                                                                  the PHY. This field is a oneshot and clears itself each time it is set. */
5855         uint64_t mode_sel              : 2;  /**< [ 12: 11](R/W) Reserved; must be zero.
5856                                                                  Internal:
5857                                                                  Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
5858                                                                  write and read. */
5859         uint64_t byte_sel              : 4;  /**< [ 10:  7](R/W) Reserved; must be zero.
5860                                                                  Internal:
5861                                                                  Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
5862                                                                  byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
5863         uint64_t reserved_0_6          : 7;
5864 #else /* Word 0 - Little Endian */
5865         uint64_t reserved_0_6          : 7;
5866         uint64_t byte_sel              : 4;  /**< [ 10:  7](R/W) Reserved; must be zero.
5867                                                                  Internal:
5868                                                                  Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
5869                                                                  byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
5870         uint64_t mode_sel              : 2;  /**< [ 12: 11](R/W) Reserved; must be zero.
5871                                                                  Internal:
5872                                                                  Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
5873                                                                  write and read. */
5874         uint64_t load_offset           : 1;  /**< [ 13: 13](WO) Reserved; must be zero.
5875                                                                  Internal:
5876                                                                  Load offset. 0=disable, 1=generate a one cycle pulse to
5877                                                                  the PHY. This field is a oneshot and clears itself each time it is set. */
5878         uint64_t offset_ena            : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
5879                                                                  Internal:
5880                                                                  Offset enable. 1=enable. */
5881         uint64_t dll90_byte_sel        : 4;  /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
5882                                                                  0x0 = byte 0.
5883                                                                  0x1 = byte 1.
5884                                                                  ...
5885                                                                  0x8 = ECC byte.
5886                                                                  0x9-0xF = Reserved. */
5887         uint64_t dll_mode              : 1;  /**< [ 19: 19](R/W) Reserved; must be zero.
5888                                                                  Internal:
5889                                                                  DLL mode. */
5890         uint64_t fine_tune_mode        : 1;  /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
5891                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
5892         uint64_t dll90_setting         : 9;  /**< [ 29: 21](RO/H) Reserved; must be zero.
5893                                                                  Internal:
5894                                                                  Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
5895         uint64_t dll_fast              : 1;  /**< [ 30: 30](RO/H) Reserved; must be zero.
5896                                                                  Internal:
5897                                                                  DLL lock, 0=DLL locked. */
5898         uint64_t reserved_31_43        : 13;
5899         uint64_t bit_select            : 4;  /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
5900                                                                  0x8 = Selects dbi for write deskew setting assignment.
5901                                                                  0x9 = No-op.
5902                                                                  0xA = Reuse deskew setting on.
5903                                                                  0xB = Reuse deskew setting off.
5904                                                                  0xC = Vref bypass setting load.
5905                                                                  0xD = Vref bypass on.
5906                                                                  0xE = Vref bypass off.
5907                                                                  0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
5908                                                                  Also sets Vref bypass to off and deskew reuse setting to off. */
5909         uint64_t wr_deskew_ld          : 1;  /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
5910                                                                  the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
5911                                                                  for write bit deskew. This is a oneshot and clears itself each time
5912                                                                  it is set. */
5913         uint64_t wr_deskew_ena         : 1;  /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
5914         uint64_t offset                : 9;  /**< [ 58: 50](R/W) Reserved; must be zero.
5915                                                                  Internal:
5916                                                                  Write/read offset setting. \<8:0\>: offset (not
5917                                                                  two's-complement), \<8\>: 0 = increment, 1 = decrement. */
5918         uint64_t wr_deskew_mem_ld      : 1;  /**< [ 59: 59](WO) Reserved.
5919                                                                  Internal:
5920                                                                  When set, all DQ bit deskew settings in the DDR PHY are loaded into their corresponding
5921                                                                  rank deskew storage. The rank is chosen by the CSR LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a
5922                                                                  oneshot operation and clears itself each time it is set. Note this has to be done during
5923                                                                  the bringup state where there isn't yet any traffic to DRAM. */
5924         uint64_t wr_deskew_mem_sel     : 1;  /**< [ 60: 60](R/W) Reserved.
5925                                                                  Internal:
5926                                                                  Only relevant when [WR_DESKEW_ENA] is set.
5927                                                                  0 = Selects the common deskew settings stored in each DQ bit. All writes to any package
5928                                                                  rank use these common settings to deskew the data bits.
5929                                                                  1 = Selects the stored per-package rank deskew settings. Writes to a particular
5930                                                                  package rank use the corresponding stored setting for that rank. */
5931         uint64_t rd_deskew_mem_sel_dis : 1;  /**< [ 61: 61](R/W) By default, LMC always selects per-rank deskew settings that are stored inside
5932                                                                  the PHY. Set this field to one to disable this feature so that the common
5933                                                                  deskew setting inside the PHY's state machine is selected instead. */
5934         uint64_t reserved_62_63        : 2;
5935 #endif /* Word 0 - End */
5936     } cn9;
5937     struct bdk_lmcx_dll_ctl3_cn81xx
5938     {
5939 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
5940         uint64_t reserved_50_63        : 14;
5941         uint64_t wr_deskew_ena         : 1;  /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
5942         uint64_t wr_deskew_ld          : 1;  /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
5943                                                                  the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
5944                                                                  for write bit deskew. This is a oneshot and clears itself each time
5945                                                                  it is set. */
5946         uint64_t bit_select            : 4;  /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
5947                                                                  0x8 = Selects dbi for write deskew setting assignment.
5948                                                                  0x9 = No-op.
5949                                                                  0xA = Reuse deskew setting on.
5950                                                                  0xB = Reuse deskew setting off.
5951                                                                  0xC = Vref bypass setting load.
5952                                                                  0xD = Vref bypass on.
5953                                                                  0xE = Vref bypass off.
5954                                                                  0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
5955                                                                  Also sets Vref bypass to off and deskew reuse setting to off. */
5956         uint64_t dclk90_fwd            : 1;  /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
5957                                                                  Lock Initialization step for the LMC bring-up sequence.
5958 
5959                                                                  Internal:
5960                                                                  Generate a one cycle pulse to forward setting. This is a oneshot and clears
5961                                                                  itself each time it is set. */
5962         uint64_t ddr_90_dly_byp        : 1;  /**< [ 42: 42](R/W) Reserved; must be zero.
5963                                                                  Internal:
5964                                                                  Bypass DDR90_DLY in clock tree. */
5965         uint64_t dclk90_recal_dis      : 1;  /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
5966         uint64_t dclk90_byp_sel        : 1;  /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
5967         uint64_t dclk90_byp_setting    : 9;  /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
5968         uint64_t dll_fast              : 1;  /**< [ 30: 30](RO/H) Reserved; must be zero.
5969                                                                  Internal:
5970                                                                  DLL lock, 0=DLL locked. */
5971         uint64_t dll90_setting         : 9;  /**< [ 29: 21](RO/H) Reserved; must be zero.
5972                                                                  Internal:
5973                                                                  Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
5974         uint64_t fine_tune_mode        : 1;  /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
5975                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
5976         uint64_t dll_mode              : 1;  /**< [ 19: 19](R/W) Reserved; must be zero.
5977                                                                  Internal:
5978                                                                  DLL mode. */
5979         uint64_t dll90_byte_sel        : 4;  /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
5980                                                                  0x0 = byte 0.
5981                                                                  0x1 = byte 1.
5982                                                                  ...
5983                                                                  0x8 = byte 8.
5984                                                                  0x9-0xF = Reserved. */
5985         uint64_t offset_ena            : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
5986                                                                  Internal:
5987                                                                  Offset enable. 1=enable. */
5988         uint64_t load_offset           : 1;  /**< [ 13: 13](WO) Reserved; must be zero.
5989                                                                  Internal:
5990                                                                  Load offset. 0=disable, 1=generate a one cycle pulse to
5991                                                                  the PHY. This field is a oneshot and clears itself each time it is set. */
5992         uint64_t mode_sel              : 2;  /**< [ 12: 11](R/W) Reserved; must be zero.
5993                                                                  Internal:
5994                                                                  Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
5995                                                                  write and read. */
5996         uint64_t byte_sel              : 4;  /**< [ 10:  7](R/W) Reserved; must be zero.
5997                                                                  Internal:
5998                                                                  Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
5999                                                                  byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
6000         uint64_t offset                : 7;  /**< [  6:  0](R/W) Reserved; must be zero.
6001                                                                  Internal:
6002                                                                  Write/read offset setting. \<5:0\>: offset (not
6003                                                                  two's-complement), \<5\>: 0 = increment, 1 = decrement. */
6004 #else /* Word 0 - Little Endian */
6005         uint64_t offset                : 7;  /**< [  6:  0](R/W) Reserved; must be zero.
6006                                                                  Internal:
6007                                                                  Write/read offset setting. \<5:0\>: offset (not
6008                                                                  two's-complement), \<5\>: 0 = increment, 1 = decrement. */
6009         uint64_t byte_sel              : 4;  /**< [ 10:  7](R/W) Reserved; must be zero.
6010                                                                  Internal:
6011                                                                  Byte select. 0x0 = no byte, 0x1 = byte 0, ..., 0x9 =
6012                                                                  byte 8, 0xA = all bytes, 0xB-0xF = Reserved. */
6013         uint64_t mode_sel              : 2;  /**< [ 12: 11](R/W) Reserved; must be zero.
6014                                                                  Internal:
6015                                                                  Mode select. 0x0 = reset, 0x1 = write, 0x2 = read, 0x3 =
6016                                                                  write and read. */
6017         uint64_t load_offset           : 1;  /**< [ 13: 13](WO) Reserved; must be zero.
6018                                                                  Internal:
6019                                                                  Load offset. 0=disable, 1=generate a one cycle pulse to
6020                                                                  the PHY. This field is a oneshot and clears itself each time it is set. */
6021         uint64_t offset_ena            : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
6022                                                                  Internal:
6023                                                                  Offset enable. 1=enable. */
6024         uint64_t dll90_byte_sel        : 4;  /**< [ 18: 15](R/W) Observe DLL settings for selected byte.
6025                                                                  0x0 = byte 0.
6026                                                                  0x1 = byte 1.
6027                                                                  ...
6028                                                                  0x8 = byte 8.
6029                                                                  0x9-0xF = Reserved. */
6030         uint64_t dll_mode              : 1;  /**< [ 19: 19](R/W) Reserved; must be zero.
6031                                                                  Internal:
6032                                                                  DLL mode. */
6033         uint64_t fine_tune_mode        : 1;  /**< [ 20: 20](R/W) DLL fine tune mode. 0 = disabled; 1 = enabled. When enabled, calibrate internal PHY DLL
6034                                                                  every LMC()_CONFIG[REF_ZQCS_INT] CK cycles. */
6035         uint64_t dll90_setting         : 9;  /**< [ 29: 21](RO/H) Reserved; must be zero.
6036                                                                  Internal:
6037                                                                  Encoded DLL settings. Works in conjunction with [DLL90_BYTE_SEL]. */
6038         uint64_t dll_fast              : 1;  /**< [ 30: 30](RO/H) Reserved; must be zero.
6039                                                                  Internal:
6040                                                                  DLL lock, 0=DLL locked. */
6041         uint64_t dclk90_byp_setting    : 9;  /**< [ 39: 31](R/W) Bypass setting for DDR90 delay line. */
6042         uint64_t dclk90_byp_sel        : 1;  /**< [ 40: 40](R/W) Bypass setting select for DDR90 delay line. */
6043         uint64_t dclk90_recal_dis      : 1;  /**< [ 41: 41](R/W) Disable periodic recalibration of the DDR90 delay line. */
6044         uint64_t ddr_90_dly_byp        : 1;  /**< [ 42: 42](R/W) Reserved; must be zero.
6045                                                                  Internal:
6046                                                                  Bypass DDR90_DLY in clock tree. */
6047         uint64_t dclk90_fwd            : 1;  /**< [ 43: 43](WO) When set to one, clock-delay information is forwarded to the neighboring LMC. See LMC CK
6048                                                                  Lock Initialization step for the LMC bring-up sequence.
6049 
6050                                                                  Internal:
6051                                                                  Generate a one cycle pulse to forward setting. This is a oneshot and clears
6052                                                                  itself each time it is set. */
6053         uint64_t bit_select            : 4;  /**< [ 47: 44](R/W) 0x0-0x7 = Selects bit 0 - bit 8 for write deskew setting assignment.
6054                                                                  0x8 = Selects dbi for write deskew setting assignment.
6055                                                                  0x9 = No-op.
6056                                                                  0xA = Reuse deskew setting on.
6057                                                                  0xB = Reuse deskew setting off.
6058                                                                  0xC = Vref bypass setting load.
6059                                                                  0xD = Vref bypass on.
6060                                                                  0xE = Vref bypass off.
6061                                                                  0xF = Bit select reset. Clear write deskew settings to default value 0x40 in each DQ bit.
6062                                                                  Also sets Vref bypass to off and deskew reuse setting to off. */
6063         uint64_t wr_deskew_ld          : 1;  /**< [ 48: 48](WO) When set, the bit deskew settings in LMC()_DLL_CTL3[OFFSET] get loaded to
6064                                                                  the designated byte LMC()_DLL_CTL3[BYTE_SEL] and bit LMC()_DLL_CTL3[BIT_SELECT]
6065                                                                  for write bit deskew. This is a oneshot and clears itself each time
6066                                                                  it is set. */
6067         uint64_t wr_deskew_ena         : 1;  /**< [ 49: 49](R/W) When set, it enables the write bit deskew feature. */
6068         uint64_t reserved_50_63        : 14;
6069 #endif /* Word 0 - End */
6070     } cn81xx;
6071     /* struct bdk_lmcx_dll_ctl3_cn81xx cn83xx; */
6072     /* struct bdk_lmcx_dll_ctl3_cn81xx cn88xxp2; */
6073 };
6074 typedef union bdk_lmcx_dll_ctl3 bdk_lmcx_dll_ctl3_t;
6075 
6076 static inline uint64_t BDK_LMCX_DLL_CTL3(unsigned long a) __attribute__ ((pure, always_inline));
6077 static inline uint64_t BDK_LMCX_DLL_CTL3(unsigned long a)
6078 {
6079     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
6080         return 0x87e088000218ll + 0x1000000ll * ((a) & 0x0);
6081     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
6082         return 0x87e088000218ll + 0x1000000ll * ((a) & 0x1);
6083     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
6084         return 0x87e088000218ll + 0x1000000ll * ((a) & 0x3);
6085     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
6086         return 0x87e088000218ll + 0x1000000ll * ((a) & 0x3);
6087     __bdk_csr_fatal("LMCX_DLL_CTL3", 1, a, 0, 0, 0);
6088 }
6089 
6090 #define typedef_BDK_LMCX_DLL_CTL3(a) bdk_lmcx_dll_ctl3_t
6091 #define bustype_BDK_LMCX_DLL_CTL3(a) BDK_CSR_TYPE_RSL
6092 #define basename_BDK_LMCX_DLL_CTL3(a) "LMCX_DLL_CTL3"
6093 #define device_bar_BDK_LMCX_DLL_CTL3(a) 0x0 /* PF_BAR0 */
6094 #define busnum_BDK_LMCX_DLL_CTL3(a) (a)
6095 #define arguments_BDK_LMCX_DLL_CTL3(a) (a),-1,-1,-1
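
/* Usage sketch (illustrative only, not part of the auto-generated CSR data):
 * LMC()_DLL_CTL3 is normally accessed through the generic BDK CSR helpers
 * (BDK_CSR_MODIFY, BDK_CSR_INIT, ... from bdk-csr.h). Assuming those helpers,
 * a valid node and LMC index, and that [WR_DESKEW_ENA] handling is done
 * elsewhere, loading one write-deskew value and observing a per-byte DLL
 * setting might look roughly like the following. Note that the OFFSET field
 * sits at different bit positions per chip (<6:0> on CN81XX/CN88XX, <58:50>
 * on CN9XXX), so a model-specific view is used for it here; treat this as a
 * sketch, not a verified bring-up procedure.
 *
 *   // Select byte 1, DQ bit 3, and the deskew value, then pulse WR_DESKEW_LD.
 *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL3(lmc),
 *       c.s.byte_sel = 0x2;            // 0x2 = byte 1 (0x0 means no byte)
 *       c.s.bit_select = 0x3;          // DQ bit 3 of the selected byte
 *       c.cn81xx.offset = deskew);     // value to load (CN81XX/CN88XX layout)
 *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL3(lmc),
 *       c.s.wr_deskew_ld = 1);         // oneshot; clears itself
 *
 *   // Read back the encoded DLL setting for byte 0.
 *   BDK_CSR_MODIFY(c, node, BDK_LMCX_DLL_CTL3(lmc), c.s.dll90_byte_sel = 0);
 *   BDK_CSR_INIT(ctl3, node, BDK_LMCX_DLL_CTL3(lmc));
 *   int dll90 = ctl3.s.dll90_setting;
 */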
6096 
6097 /**
6098  * Register (RSL) lmc#_dual_memcfg
6099  *
6100  * LMC Dual Memory Configuration Register
6101  * This register controls certain parameters of dual-memory configuration.
6102  *
6103  * This register enables the design to have two separate memory configurations, selected
6104  * dynamically by the reference address. Note however, that both configurations share
6105  * LMC()_CONTROL[XOR_BANK], LMC()_CONFIG[PBANK_LSB], LMC()_CONFIG[RANK_ENA], and all
6106  * timing parameters.
6107  *
6108  * In this description:
6109  * * config0 refers to the normal memory configuration that is defined by the
6110  * LMC()_CONFIG[ROW_LSB] parameter
6111  * * config1 refers to the dual (or second) memory configuration that is defined by this
6112  * register.
6113  */
6114 union bdk_lmcx_dual_memcfg
6115 {
6116     uint64_t u;
6117     struct bdk_lmcx_dual_memcfg_s
6118     {
6119 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
6120         uint64_t reserved_19_63        : 45;
6121         uint64_t row_lsb               : 3;  /**< [ 18: 16](R/W) Encoding used to determine which memory address bit position represents the low order DDR
6122                                                                  ROW address. Refer to
6123                                                                  LMC()_CONFIG[ROW_LSB].
6124                                                                  Refer to cache block read transaction example. */
6125         uint64_t reserved_4_15         : 12;
6126         uint64_t cs_mask               : 4;  /**< [  3:  0](R/W) Chip select mask. This mask corresponds to the four chip-select signals for a memory
6127                                                                  configuration. Each reference address asserts one of the chip-select signals. If that
6128                                                                  chip select signal has its corresponding [CS_MASK] bit set, then the config1 parameters are
6129                                                                  used, otherwise the config0 parameters are used. */
6130 #else /* Word 0 - Little Endian */
6131         uint64_t cs_mask               : 4;  /**< [  3:  0](R/W) Chip select mask. This mask corresponds to the four chip-select signals for a memory
6132                                                                  configuration. Each reference address asserts one of the chip-select signals. If that
6133                                                                  chip select signal has its corresponding [CS_MASK] bit set, then the config1 parameters are
6134                                                                  used, otherwise the config0 parameters are used. */
6135         uint64_t reserved_4_15         : 12;
6136         uint64_t row_lsb               : 3;  /**< [ 18: 16](R/W) Encoding used to determine which memory address bit position represents the low order DDR
6137                                                                  ROW address. Refer to
6138                                                                  LMC()_CONFIG[ROW_LSB].
6139                                                                  Refer to cache block read transaction example. */
6140         uint64_t reserved_19_63        : 45;
6141 #endif /* Word 0 - End */
6142     } s;
6143     /* struct bdk_lmcx_dual_memcfg_s cn; */
6144 };
6145 typedef union bdk_lmcx_dual_memcfg bdk_lmcx_dual_memcfg_t;
6146 
6147 static inline uint64_t BDK_LMCX_DUAL_MEMCFG(unsigned long a) __attribute__ ((pure, always_inline));
6148 static inline uint64_t BDK_LMCX_DUAL_MEMCFG(unsigned long a)
6149 {
6150     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
6151         return 0x87e088000098ll + 0x1000000ll * ((a) & 0x0);
6152     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
6153         return 0x87e088000098ll + 0x1000000ll * ((a) & 0x1);
6154     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
6155         return 0x87e088000098ll + 0x1000000ll * ((a) & 0x3);
6156     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
6157         return 0x87e088000098ll + 0x1000000ll * ((a) & 0x3);
6158     __bdk_csr_fatal("LMCX_DUAL_MEMCFG", 1, a, 0, 0, 0);
6159 }
6160 
6161 #define typedef_BDK_LMCX_DUAL_MEMCFG(a) bdk_lmcx_dual_memcfg_t
6162 #define bustype_BDK_LMCX_DUAL_MEMCFG(a) BDK_CSR_TYPE_RSL
6163 #define basename_BDK_LMCX_DUAL_MEMCFG(a) "LMCX_DUAL_MEMCFG"
6164 #define device_bar_BDK_LMCX_DUAL_MEMCFG(a) 0x0 /* PF_BAR0 */
6165 #define busnum_BDK_LMCX_DUAL_MEMCFG(a) (a)
6166 #define arguments_BDK_LMCX_DUAL_MEMCFG(a) (a),-1,-1,-1
6167 
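/* Example usage (illustrative sketch, not part of the auto-generated definitions):
 * programming the dual (config1) memory geometry so that references decoding to chip
 * selects 2 and 3 use a different ROW_LSB than config0. This assumes the
 * BDK_CSR_MODIFY() helper from libbdk-arch/bdk-csr.h is visible; the cs_mask and
 * row_lsb values below are placeholders and depend on the attached DIMMs.
 *
 *   static void example_dual_memcfg(bdk_node_t node, int lmc)
 *   {
 *       BDK_CSR_MODIFY(c, node, BDK_LMCX_DUAL_MEMCFG(lmc),
 *           c.s.cs_mask = 0xc;   // CS2/CS3 references use the config1 parameters
 *           c.s.row_lsb = 0x2);  // ROW_LSB encoding for the config1 geometry
 *   }
 */
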
6168 /**
6169  * Register (RSL) lmc#_ecc_parity_test
6170  *
6171  * LMC ECC Parity Test Registers
6172  * This register has bits to control the generation of ECC and command address parity errors.
6173  * An ECC error is generated by enabling [ECC_CORRUPT_ENA] and selecting, through
6174  * [ECC_CORRUPT_IDX], the index of the dataword within the cacheline to be corrupted.
6175  * The user selects which bit of the 128-bit dataword to corrupt by asserting any of the
6176  * CHAR_MASK0 and CHAR_MASK2 bits. (CHAR_MASK0 and CHAR_MASK2 correspond to the lower and upper
6177  * 64-bit signals that can corrupt any individual bit of the data.)
6178  *
6179  * Command address parity error is generated by enabling [CA_PARITY_CORRUPT_ENA] and
6180  * selecting the DDR command that the parity is to be corrupted with through [CA_PARITY_SEL].
6181  */
6182 union bdk_lmcx_ecc_parity_test
6183 {
6184     uint64_t u;
6185     struct bdk_lmcx_ecc_parity_test_s
6186     {
6187 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
6188         uint64_t reserved_12_63        : 52;
6189         uint64_t ecc_corrupt_ena       : 1;  /**< [ 11: 11](R/W/H) Enables the ECC data corruption. */
6190         uint64_t ecc_corrupt_idx       : 3;  /**< [ 10:  8](R/W) Selects the cacheline index with which the dataword is to be corrupted. */
6191         uint64_t reserved_6_7          : 2;
6192         uint64_t ca_parity_corrupt_ena : 1;  /**< [  5:  5](R/W/H) Enables the CA parity bit corruption. */
6193         uint64_t ca_parity_sel         : 5;  /**< [  4:  0](R/W) Selects the type of DDR command to corrupt the parity bit.
6194                                                                  0x0  = No command selected.
6195                                                                  0x1  = NOP.
6196                                                                  0x2  = ACT.
6197                                                                  0x3  = REF.
6198                                                                  0x4  = WRS4.
6199                                                                  0x5  = WRS8.
6200                                                                  0x6  = WRAS4.
6201                                                                  0x7  = WRAS8.
6202                                                                  0x8  = RDS4.
6203                                                                  0x9  = RDS8.
6204                                                                  0xa  = RDAS4.
6205                                                                  0xb  = RDAS8.
6206                                                                  0xc  = SRE.
6207                                                                  0xd  = SRX.
6208                                                                  0xe  = PRE.
6209                                                                  0xf  = PREA.
6210                                                                  0x10 = MRS.
6211                                                                  0x11-0x13 = Reserved.
6212                                                                  0x14 = ZQCL.
6213                                                                  0x15 = ZQCS.
6214                                                                  0x16 = Reserved. */
6215 #else /* Word 0 - Little Endian */
6216         uint64_t ca_parity_sel         : 5;  /**< [  4:  0](R/W) Selects the type of DDR command to corrupt the parity bit.
6217                                                                  0x0  = No command selected.
6218                                                                  0x1  = NOP.
6219                                                                  0x2  = ACT.
6220                                                                  0x3  = REF.
6221                                                                  0x4  = WRS4.
6222                                                                  0x5  = WRS8.
6223                                                                  0x6  = WRAS4.
6224                                                                  0x7  = WRAS8.
6225                                                                  0x8  = RDS4.
6226                                                                  0x9  = RDS8.
6227                                                                  0xa  = RDAS4.
6228                                                                  0xb  = RDAS8.
6229                                                                  0xc  = SRE.
6230                                                                  0xd  = SRX.
6231                                                                  0xe  = PRE.
6232                                                                  0xf  = PREA.
6233                                                                  0x10 = MRS.
6234                                                                  0x11-0x13 = Reserved.
6235                                                                  0x14 = ZQCL.
6236                                                                  0x15 = ZQCS.
6237                                                                  0x16 = Reserved. */
6238         uint64_t ca_parity_corrupt_ena : 1;  /**< [  5:  5](R/W/H) Enables the CA parity bit corruption. */
6239         uint64_t reserved_6_7          : 2;
6240         uint64_t ecc_corrupt_idx       : 3;  /**< [ 10:  8](R/W) Selects the cacheline index with which the dataword is to be corrupted. */
6241         uint64_t ecc_corrupt_ena       : 1;  /**< [ 11: 11](R/W/H) Enables the ECC data corruption. */
6242         uint64_t reserved_12_63        : 52;
6243 #endif /* Word 0 - End */
6244     } s;
6245     /* struct bdk_lmcx_ecc_parity_test_s cn; */
6246 };
6247 typedef union bdk_lmcx_ecc_parity_test bdk_lmcx_ecc_parity_test_t;
6248 
6249 static inline uint64_t BDK_LMCX_ECC_PARITY_TEST(unsigned long a) __attribute__ ((pure, always_inline));
6250 static inline uint64_t BDK_LMCX_ECC_PARITY_TEST(unsigned long a)
6251 {
6252     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
6253         return 0x87e088000108ll + 0x1000000ll * ((a) & 0x0);
6254     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
6255         return 0x87e088000108ll + 0x1000000ll * ((a) & 0x1);
6256     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
6257         return 0x87e088000108ll + 0x1000000ll * ((a) & 0x3);
6258     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
6259         return 0x87e088000108ll + 0x1000000ll * ((a) & 0x3);
6260     __bdk_csr_fatal("LMCX_ECC_PARITY_TEST", 1, a, 0, 0, 0);
6261 }
6262 
6263 #define typedef_BDK_LMCX_ECC_PARITY_TEST(a) bdk_lmcx_ecc_parity_test_t
6264 #define bustype_BDK_LMCX_ECC_PARITY_TEST(a) BDK_CSR_TYPE_RSL
6265 #define basename_BDK_LMCX_ECC_PARITY_TEST(a) "LMCX_ECC_PARITY_TEST"
6266 #define device_bar_BDK_LMCX_ECC_PARITY_TEST(a) 0x0 /* PF_BAR0 */
6267 #define busnum_BDK_LMCX_ECC_PARITY_TEST(a) (a)
6268 #define arguments_BDK_LMCX_ECC_PARITY_TEST(a) (a),-1,-1,-1
6269 
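/* Example usage (illustrative sketch, not part of the auto-generated definitions):
 * arming a single ECC data corruption for test purposes. This assumes the
 * BDK_CSR_MODIFY() helper from libbdk-arch/bdk-csr.h is available; which bit of the
 * 128-bit dataword gets corrupted is selected separately through the
 * LMC()_CHAR_MASK0/LMC()_CHAR_MASK2 registers, as described above.
 *
 *   static void example_inject_ecc_error(bdk_node_t node, int lmc)
 *   {
 *       BDK_CSR_MODIFY(c, node, BDK_LMCX_ECC_PARITY_TEST(lmc),
 *           c.s.ecc_corrupt_idx = 0;   // corrupt dataword 0 of the cacheline
 *           c.s.ecc_corrupt_ena = 1);  // corruption applies to subsequent writes
 *   }
 */
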
6270 /**
6271  * Register (RSL) lmc#_ecc_synd
6272  *
6273  * LMC MRD ECC Syndromes Register
6274  */
6275 union bdk_lmcx_ecc_synd
6276 {
6277     uint64_t u;
6278     struct bdk_lmcx_ecc_synd_s
6279     {
6280 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
6281         uint64_t reserved_32_63        : 32;
6282         uint64_t mrdsyn3               : 8;  /**< [ 31: 24](RO/H) MRD ECC syndrome quad 3. [MRDSYN3] corresponds to DQ[63:0]_c1_p1, or in 32-bit mode
6283                                                                  DQ[31:0]_c3_p1/0, where _cC_pP denotes cycle C and phase P. */
6284         uint64_t mrdsyn2               : 8;  /**< [ 23: 16](RO/H) MRD ECC syndrome quad 2. [MRDSYN2] corresponds to DQ[63:0]_c1_p0, or in 32-bit mode
6285                                                                  DQ[31:0]_c2_p1/0, where _cC_pP denotes cycle C and phase P. */
6286         uint64_t mrdsyn1               : 8;  /**< [ 15:  8](RO/H) MRD ECC syndrome quad 1. [MRDSYN1] corresponds to DQ[63:0]_c0_p1, or in 32-bit mode
6287                                                                  DQ[31:0]_c1_p1/0, where _cC_pP denotes cycle C and phase P. */
6288         uint64_t mrdsyn0               : 8;  /**< [  7:  0](RO/H) MRD ECC syndrome quad 0. [MRDSYN0] corresponds to DQ[63:0]_c0_p0, or in 32-bit mode
6289                                                                  DQ[31:0]_c0_p1/0, where _cC_pP denotes cycle C and phase P. */
6290 #else /* Word 0 - Little Endian */
6291         uint64_t mrdsyn0               : 8;  /**< [  7:  0](RO/H) MRD ECC syndrome quad 0. [MRDSYN0] corresponds to DQ[63:0]_c0_p0, or in 32-bit mode
6292                                                                  DQ[31:0]_c0_p1/0, where _cC_pP denotes cycle C and phase P. */
6293         uint64_t mrdsyn1               : 8;  /**< [ 15:  8](RO/H) MRD ECC syndrome quad 1. [MRDSYN1] corresponds to DQ[63:0]_c0_p1, or in 32-bit mode
6294                                                                  DQ[31:0]_c1_p1/0, where _cC_pP denotes cycle C and phase P. */
6295         uint64_t mrdsyn2               : 8;  /**< [ 23: 16](RO/H) MRD ECC syndrome quad 2. [MRDSYN2] corresponds to DQ[63:0]_c1_p0, or in 32-bit mode
6296                                                                  DQ[31:0]_c2_p1/0, where _cC_pP denotes cycle C and phase P. */
6297         uint64_t mrdsyn3               : 8;  /**< [ 31: 24](RO/H) MRD ECC syndrome quad 3. [MRDSYN3] corresponds to DQ[63:0]_c1_p1, or in 32-bit mode
6298                                                                  DQ[31:0]_c3_p1/0, where _cC_pP denotes cycle C and phase P. */
6299         uint64_t reserved_32_63        : 32;
6300 #endif /* Word 0 - End */
6301     } s;
6302     /* struct bdk_lmcx_ecc_synd_s cn; */
6303 };
6304 typedef union bdk_lmcx_ecc_synd bdk_lmcx_ecc_synd_t;
6305 
6306 static inline uint64_t BDK_LMCX_ECC_SYND(unsigned long a) __attribute__ ((pure, always_inline));
6307 static inline uint64_t BDK_LMCX_ECC_SYND(unsigned long a)
6308 {
6309     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
6310         return 0x87e088000038ll + 0x1000000ll * ((a) & 0x0);
6311     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
6312         return 0x87e088000038ll + 0x1000000ll * ((a) & 0x1);
6313     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
6314         return 0x87e088000038ll + 0x1000000ll * ((a) & 0x3);
6315     __bdk_csr_fatal("LMCX_ECC_SYND", 1, a, 0, 0, 0);
6316 }
6317 
6318 #define typedef_BDK_LMCX_ECC_SYND(a) bdk_lmcx_ecc_synd_t
6319 #define bustype_BDK_LMCX_ECC_SYND(a) BDK_CSR_TYPE_RSL
6320 #define basename_BDK_LMCX_ECC_SYND(a) "LMCX_ECC_SYND"
6321 #define device_bar_BDK_LMCX_ECC_SYND(a) 0x0 /* PF_BAR0 */
6322 #define busnum_BDK_LMCX_ECC_SYND(a) (a)
6323 #define arguments_BDK_LMCX_ECC_SYND(a) (a),-1,-1,-1
6324 
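/* Example usage (illustrative sketch, not part of the auto-generated definitions):
 * dumping the four MRD ECC syndromes after an ECC event, e.g. from an error
 * handler. This assumes the BDK_CSR_INIT() helper from libbdk-arch/bdk-csr.h and a
 * printf()-style output routine are available.
 *
 *   static void example_dump_ecc_synd(bdk_node_t node, int lmc)
 *   {
 *       BDK_CSR_INIT(synd, node, BDK_LMCX_ECC_SYND(lmc));
 *       printf("LMC%d MRD syndromes: %02x %02x %02x %02x\n", lmc,
 *              (unsigned)synd.s.mrdsyn0, (unsigned)synd.s.mrdsyn1,
 *              (unsigned)synd.s.mrdsyn2, (unsigned)synd.s.mrdsyn3);
 *   }
 */
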
6325 /**
6326  * Register (RSL) lmc#_ext_config
6327  *
6328  * LMC Extended Configuration Register
6329  * This register has additional configuration and control bits for the LMC.
6330  */
6331 union bdk_lmcx_ext_config
6332 {
6333     uint64_t u;
6334     struct bdk_lmcx_ext_config_s
6335     {
6336 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
6337         uint64_t ref_rank_all          : 1;  /**< [ 63: 63](R/W) Reserved.
6338                                                                  Internal:
6339                                                                  For diagnostic use only.
6340                                                                    When set, cycles through all ranks during the refresh sequence disregarding
6341                                                                    rank availability status. */
6342         uint64_t ref_mode              : 2;  /**< [ 62: 61](R/W) Selects the refresh mode.
6343                                                                  0x0 = All ranks get refreshed together at the end of TREFI and all traffic is halted.
6344                                                                  0x1 = Ranks are refreshed in pairs during TREFI window. At TREFI/2, ranks 1 & 3
6345                                                                      are refreshed while allowing traffic to 0 & 2. At TREFI, ranks 0 & 2 are
6346                                                                      refreshed while allowing traffic to 1 & 3.
6347                                                                  0x2 = Ranks are refreshed in pairs during TREFI window. All traffic is halted
6348                                                                      whenever each pair is refreshed. */
6349         uint64_t reserved_59_60        : 2;
6350         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
6351                                                                  0 = MRS command is sent to the A side of an RDIMM.
6352                                                                  1 = MRS command is sent to the B side of an RDIMM. */
6353         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
6354                                                                  When set, MRS commands are directed to either the A or B
6355                                                                  side of the RCD.
6356 
6357                                                                  PDA operation is NOT allowed when this bit is set. In
6358                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
6359                                                                  must be cleared before running MRW sequence with this
6360                                                                  bit turned on. */
6361         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
6362                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
6363                                                                  command to the B side of the RDIMM.
6364                                                                  When set, make sure that the RCD's control word
6365                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
6366                                                                  the DDR4 RCD. */
6367         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
6368                                                                  the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
6369                                                                  When this bit is set to one, it disables this default behavior.
6370                                                                  This configuration has lower priority compared to
6371                                                                  [DIMM_SEL_FORCE_INVERT]. */
6372         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) Reserved.
6373                                                                  Internal:
6374                                                                  When set to 1, this bit forces the pbank bit to be inverted
6375                                                                  when in coalesce_address_mode. That is, pbank value of 0 selects
6376                                                                  DIMM1 instead of DIMM0.
6377                                                                  Intended to be used for the case of DIMM1 having bigger rank/s
6378                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
6379         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
6380                                                                  to create a uniform memory space that is free from holes in
6381                                                                  between ranks. When different size DIMMs are used, the DIMM with
6382                                                                  the higher capacity is mapped to the lower address space. */
6383         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
6384                                                                  Internal:
6385                                                                  DIMM1 configuration bits that represent the number of chip
6386                                                                  IDs of the DRAM. This value is used for decoding the address
6387                                                                  as well as routing Chip IDs to the appropriate output
6388                                                                  pins.
6389                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
6390                                                                  0x1 = 1 Chip ID  (2H 3DS).
6391                                                                  0x2 = 2 Chip IDs (4H 3DS).
6392                                                                  0x3 = 3 Chip IDs (8H 3DS). */
6393         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
6394                                                                  Internal:
6395                                                                  DIMM0 configuration bits that represent the number of chip
6396                                                                  IDs of the DRAM. This value is used for decoding the address
6397                                                                  as well as routing Chip IDs to the appropriate output
6398                                                                  pins.
6399                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
6400                                                                  0x1 = 1 Chip ID  (2H 3DS).
6401                                                                  0x2 = 2 Chip IDs (4H 3DS).
6402                                                                  0x3 = 3 Chip IDs (8H 3DS). */
6403         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
6404                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
6405                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
6406                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
6407         uint64_t reserved_46_47        : 2;
6408         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
6409         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
6410                                                                  signalled on
6411                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
6412                                                                  edge of DDR*_ERROR_ALERT_L. */
6413         uint64_t sref_seq_stop_clock   : 1;  /**< [ 43: 43](R/W) When enabled, LMC disables all clock output pins to the DIMM at the end of
6414                                                                  LMC_SEQ_SEL_E::SREF_ENTRY
6415                                                                  sequence. In RDIMM applications, this implies that the RCD will be programmed into Clock
6416                                                                  Stop Power Down mode at the end of the LMC_SEQ_SEL_E::SREF_ENTRY sequence.
6417                                                                  It also automatically enables all clock outputs at the start of LMC_SEQ_SEL_E::SREF_EXIT sequence. */
6418         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
6419                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
6420         uint64_t reserved_38_39        : 2;
6421         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
6422                                                                  If this bit is set, the override behavior is governed by the control field
6423                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
6424 
6425                                                                  If this bit is cleared, select operation where signals other than CS are active before
6426                                                                  and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
6427         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
6428                                                                  and
6429                                                                  RCW commands.
6430 
6431                                                                  When this bit is clear, select operation where signals other than CS are active before and
6432                                                                  after the DDR_CS_L active cycle.
6433 
6434                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
6435                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
6436                                                                  DDR_CS_L is also active. */
6437         uint64_t reserved_33_35        : 3;
6438         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
6439                                                                  effectively uses the scramble logic to instead invert all the data, so this bit must not
6440                                                                  be set if data scrambling is enabled. May be useful if data inversion will result in lower
6441                                                                  power. */
6442         uint64_t reserved_30_31        : 2;
6443         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
6444                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
6445                                                                  interface after an active command, rather than only forcing the CS inactive between
6446                                                                  commands. */
6447         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
6448                                                                  set [CAL_ENA]. */
6449         uint64_t reserved_27           : 1;
6450         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
6451         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
6452         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
6453                                                                  register or DRAM devices. */
6454         uint64_t reserved_21_23        : 3;
6455         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
6456                                                                  sequence into the deskew training sequence. */
6457         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
6458         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
6459         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
6460                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
6461                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
6462                                                                  write to 1. */
6463         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
6464                                                                 The refresh interval is expressed in increments of 512 CK cycles and is controlled by
6465                                                                 LMC()_CONFIG[REF_ZQCS_INT]. A finer-grained refresh interval (in units of
6466                                                                 one CK cycle) can be achieved by setting this field to a nonzero value. */
6467         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
6468         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
6469         uint64_t dlcram_flip_synd      : 2;  /**< [  5:  4](R/W) Reserved.
6470                                                                  Internal:
6471                                                                  DLC RAM flip syndrome control bits. */
6472         uint64_t dlcram_cor_dis        : 1;  /**< [  3:  3](R/W) Reserved.
6473                                                                  Internal:
6474                                                                  DLC RAM correction disable control. */
6475         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) Reserved.
6476                                                                  Internal:
6477                                                                  When set, enable NXM events for HFA read operations.
6478                                                                  Default is disabled, but
6479                                                                  could be useful for debug of DLC/DFA accesses. */
6480         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
6481                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
6482                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
6483                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
6484         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
6485                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
6486                                                                  failure, so [L2C_NXM_WR] can generally be set. */
6487 #else /* Word 0 - Little Endian */
6488         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
6489                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
6490                                                                  failure, so [L2C_NXM_WR] can generally be set. */
6491         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
6492                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
6493                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
6494                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
6495         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) Reserved.
6496                                                                  Internal:
6497                                                                  When set, enable NXM events for HFA read operations.
6498                                                                  Default is disabled, but
6499                                                                  could be useful for debug of DLC/DFA accesses. */
6500         uint64_t dlcram_cor_dis        : 1;  /**< [  3:  3](R/W) Reserved.
6501                                                                  Internal:
6502                                                                  DLC RAM correction disable control. */
6503         uint64_t dlcram_flip_synd      : 2;  /**< [  5:  4](R/W) Reserved.
6504                                                                  Internal:
6505                                                                  DLC RAM flip syndrome control bits. */
6506         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
6507         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
6508         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
6509                                                                 The refresh interval is expressed in increments of 512 CK cycles and is controlled by
6510                                                                 LMC()_CONFIG[REF_ZQCS_INT]. A finer-grained refresh interval (in units of
6511                                                                 one CK cycle) can be achieved by setting this field to a nonzero value. */
6512         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
6513                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
6514                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
6515                                                                  write to 1. */
6516         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
6517         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
6518         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
6519                                                                  sequence into the deskew training sequence. */
6520         uint64_t reserved_21_23        : 3;
6521         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
6522                                                                  register or DRAM devices. */
6523         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
6524         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
6525         uint64_t reserved_27           : 1;
6526         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
6527                                                                  set [CAL_ENA]. */
6528         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
6529                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
6530                                                                  interface after an active command, rather than only forcing the CS inactive between
6531                                                                  commands. */
6532         uint64_t reserved_30_31        : 2;
6533         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
6534                                                                  effectively uses the scramble logic to instead invert all the data, so this bit must not
6535                                                                  be set if data scrambling is enabled. May be useful if data inversion will result in lower
6536                                                                  power. */
6537         uint64_t reserved_33_35        : 3;
6538         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
6539                                                                  and
6540                                                                  RCW commands.
6541 
6542                                                                  When this bit is clear, select operation where signals other than CS are active before and
6543                                                                  after the DDR_CS_L active cycle.
6544 
6545                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
6546                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
6547                                                                  DDR_CS_L is also active. */
6548         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
6549                                                                  If this bit is set, the override behavior is governed by the control field
6550                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
6551 
6552                                                                  If this bit is cleared, select operation where signals other than CS are active before
6553                                                                  and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
6554         uint64_t reserved_38_39        : 2;
6555         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
6556                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
6557         uint64_t sref_seq_stop_clock   : 1;  /**< [ 43: 43](R/W) When enabled, LMC disables all clock output pins to the DIMM at the end of
6558                                                                  LMC_SEQ_SEL_E::SREF_ENTRY
6559                                                                  sequence. In RDIMM applications, this implies that the RCD will be programmed into Clock
6560                                                                  Stop Power Down mode at the end of the LMC_SEQ_SEL_E::SREF_ENTRY sequence.
6561                                                                  It also automatically enables all clock outputs at the start of LMC_SEQ_SEL_E::SREF_EXIT sequence. */
6562         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
6563                                                                  signalled on
6564                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
6565                                                                  edge of DDR*_ERROR_ALERT_L. */
6566         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
6567         uint64_t reserved_46_47        : 2;
6568         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
6569                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
6570                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
6571                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
6572         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
6573                                                                  Internal:
6574                                                                  DIMM0 configuration bits that represent the number of chip
6575                                                                  IDs of the DRAM. This value is used for decoding the address
6576                                                                  as well as routing Chip IDs to the appropriate output
6577                                                                  pins.
6578                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
6579                                                                  0x1 = 1 Chip ID  (2H 3DS).
6580                                                                  0x2 = 2 Chip IDs (4H 3DS).
6581                                                                  0x3 = 3 Chip IDs (8H 3DS). */
6582         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
6583                                                                  Internal:
6584                                                                  DIMM1 configuration bits that represent the number of chip
6585                                                                  IDs of the DRAM. This value is used for decoding the address
6586                                                                  as well as routing Chip IDs to the appropriate output
6587                                                                  pins.
6588                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
6589                                                                  0x1 = 1 Chip ID  (2H 3DS).
6590                                                                  0x2 = 2 Chip IDs (4H 3DS).
6591                                                                  0x3 = 3 Chip IDs (8H 3DS). */
6592         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
6593                                                                  to create a uniform memory space that is free from holes in
6594                                                                  between ranks. When different size DIMMs are used, the DIMM with
6595                                                                  the higher capacity is mapped to the lower address space. */
6596         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) Reserved.
6597                                                                  Internal:
6598                                                                  When set to 1, this bit forces the pbank bit to be inverted
6599                                                                  when in coalesce_address_mode. That is, pbank value of 0 selects
6600                                                                  DIMM1 instead of DIMM0.
6601                                                                  Intended to be used for the case of DIMM1 having bigger rank/s
6602                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
6603         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
6604                                                                  the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
6605                                                                  When this bit is set to one, it disables this default behavior.
6606                                                                  This configuration has lower priority compared to
6607                                                                  [DIMM_SEL_FORCE_INVERT]. */
6608         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
6609                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
6610                                                                  command to the B side of the RDIMM.
6611                                                                  When set, make sure that the RCD's control word
6612                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
6613                                                                  the DDR4 RCD. */
6614         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
6615                                                                  When set, MRS commands are directed to either the A or B
6616                                                                  side of the RCD.
6617 
6618                                                                  PDA operation is NOT allowed when this bit is set. In
6619                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
6620                                                                  must be cleared before running MRW sequence with this
6621                                                                  bit turned on. */
6622         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
6623                                                                  0 = MRS command is sent to the A side of an RDIMM.
6624                                                                  1 = MRS command is sent to the B side of an RDIMM. */
6625         uint64_t reserved_59_60        : 2;
6626         uint64_t ref_mode              : 2;  /**< [ 62: 61](R/W) Selects the refresh mode.
6627                                                                  0x0 = All ranks get refreshed together at the end of TREFI and all traffic is halted.
6628                                                                  0x1 = Ranks are refreshed in pairs during TREFI window. At TREFI/2, ranks 1 & 3
6629                                                                      are refreshed while allowing traffic to 0 & 2. At TREFI, ranks 0 & 2 are
6630                                                                      refreshed while allowing traffic to 1 & 3.
6631                                                                  0x2 = Ranks are refreshed in pairs during TREFI window. All traffic is halted
6632                                                                      whenever each pair is refreshed. */
6633         uint64_t ref_rank_all          : 1;  /**< [ 63: 63](R/W) Reserved.
6634                                                                  Internal:
6635                                                                  For diagnostic use only.
6636                                                                    When set, cycles through all ranks during the refresh sequence disregarding
6637                                                                    rank availability status. */
6638 #endif /* Word 0 - End */
6639     } s;
6640     struct bdk_lmcx_ext_config_cn9
6641     {
6642 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
6643         uint64_t ref_rank_all          : 1;  /**< [ 63: 63](R/W) Reserved.
6644                                                                  Internal:
6645                                                                  For diagnostic use only.
6646                                                                    When set, cycles through all ranks during the refresh sequence disregarding
6647                                                                    rank availability status. */
6648         uint64_t ref_mode              : 2;  /**< [ 62: 61](R/W) Selects the refresh mode.
6649                                                                  0x0 = All ranks get refreshed together at the end of TREFI and all traffic is halted.
6650                                                                  0x1 = Ranks are refreshed in pairs during TREFI window. At TREFI/2, ranks 1 & 3
6651                                                                      are refreshed while allowing traffic to 0 & 2. At TREFI, ranks 0 & 2 are
6652                                                                      refreshed while allowing traffic to 1 & 3.
6653                                                                  0x2 = Ranks are refreshed in pairs during TREFI window. All traffic is halted
6654                                                                      whenever each pair is refreshed. */
6655         uint64_t ref_block             : 1;  /**< [ 60: 60](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
6656                                                                  allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT0] or
6657                                                                  LMC()_REF_STATUS[REF_COUNT1] has reached the maximum value of 0x7. */
6658         uint64_t bc4_dqs_ena           : 1;  /**< [ 59: 59](R/W) Reserved.
6659                                                                  Internal:
6660                                                                  For diagnostic use only.
6661                                                                    0 = LMC produces the full bursts of DQS transitions,
6662                                                                    even for BC4 Write ops.
6663                                                                    1 = LMC produces only three cycles of DQS transitions
6664                                                                    every time it sends out a BC4 Write operation. */
6665         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
6666                                                                  0 = MRS command is sent to the A side of an RDIMM.
6667                                                                  1 = MRS command is sent to the B side of an RDIMM. */
6668         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
6669                                                                  When set, MRS commands are directed to either the A or B
6670                                                                  side of the RCD.
6671 
6672                                                                  PDA operation is NOT allowed when this bit is set. In
6673                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
6674                                                                  must be cleared before running MRW sequence with this
6675                                                                  bit turned on. */
6676         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
6677                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
6678                                                                  command to the B side of the RDIMM.
6679                                                                  When set, make sure that the RCD's control word
6680                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
6681                                                                  the DDR4 RCD. */
6682         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
6683                                                                  the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
6684                                                                  When this bit is set to one, it disables this default behavior.
6685                                                                  This configuration has lower priority compared to
6686                                                                  [DIMM_SEL_FORCE_INVERT]. */
6687         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
6688                                                                  when in coalesce_address_mode. That is, pbank value of zero selects
6689                                                                  DIMM1 instead of DIMM0.
6690                                                                  Intended to be used for the case of DIMM1 having bigger rank/s
6691                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
6692         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
6693                                                                  to create a uniform memory space that is free from holes in
6694                                                                  between ranks. When different size DIMMs are used, the DIMM with
6695                                                                  the higher capacity is mapped to the lower address space. */
6696         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
6697                                                                  Internal:
6698                                                                  DIMM1 configuration bits that represent the number of chip
6699                                                                  IDs of the DRAM. This value is used for decoding the address
6700                                                                  as well as routing Chip IDs to the appropriate output
6701                                                                  pins.
6702                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
6703                                                                  0x1 = 1 Chip ID  (2H 3DS).
6704                                                                  0x2 = 2 Chip IDs (4H 3DS).
6705                                                                  0x3 = 3 Chip IDs (8H 3DS). */
6706         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
6707                                                                  Internal:
6708                                                                  DIMM0 configuration bits that represent the number of chip
6709                                                                  IDs of the DRAM. This value is used for decoding the address
6710                                                                  as well as routing Chip IDs to the appropriate output
6711                                                                  pins.
6712                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
6713                                                                  0x1 = 1 Chip ID  (2H 3DS).
6714                                                                  0x2 = 2 Chip IDs (4H 3DS).
6715                                                                  0x3 = 3 Chip IDs (8H 3DS). */
6716         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
6717                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
6718                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
6719                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
6720         uint64_t reserved_46_47        : 2;
6721         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
6722         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
6723                                                                  signalled on
6724                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
6725                                                                  edge of DDR*_ERROR_ALERT_L. */
6726         uint64_t sref_seq_stop_clock   : 1;  /**< [ 43: 43](R/W) When enabled, LMC disables all clock output pins to the DIMM at the end of
6727                                                                  the LMC_SEQ_SEL_E::SREF_ENTRY
6728                                                                  sequence. In RDIMM applications, this implies that the RCD will be programmed into Clock
6729                                                                  Stop Power Down mode at the end of the LMC_SEQ_SEL_E::SREF_ENTRY sequence.
6730                                                                  It also automatically enables all clock outputs at the start of the LMC_SEQ_SEL_E::SREF_EXIT sequence. */
6731         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
6732                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
6733         uint64_t reserved_38_39        : 2;
6734         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
6735                                                                  If this bit is set, the override behavior is governed by the control field
6736                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
6737 
6738                                                                  If this bit is cleared, select operation where signals other than CS are active before
6739                                                                  and after the DDR_CS_L active cycle. */
6740         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
6741                                                                  and RCW commands.
6742 
6743                                                                  When this bit is clear, select operation where signals other than CS are active before and
6744                                                                  after the DDR_CS_L active cycle.
6745 
6746                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
6747                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
6748                                                                  DDR_CS_L is also active. */
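        /* Summary of how the two fields above combine, with an illustrative
         * setting (sketch only; hypothetical helpers):
         *
         *   [MRS_CMD_OVERRIDE]=0                      default style: command signals active
         *                                             before and after the DDR_CS_L cycle.
         *   [MRS_CMD_OVERRIDE]=1, [MRS_CMD_SELECT]=0  same style, selected explicitly.
         *   [MRS_CMD_OVERRIDE]=1, [MRS_CMD_SELECT]=1  command signals active only in the
         *                                             cycle where DDR_CS_L is active.
         *
         *   bdk_lmcx_ext_config_t ext;
         *   ext.u = lmc_csr_read(node, lmc, LMC_EXT_CONFIG);
         *   ext.cn9.mrs_cmd_override = 1;
         *   ext.cn9.mrs_cmd_select   = 1;             // CS-aligned MRS/RCW commands
         *   lmc_csr_write(node, lmc, LMC_EXT_CONFIG, ext.u);
         */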
6749         uint64_t reserved_33_35        : 3;
6750         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. May
6751                                                                  be useful if data inversion will result in lower power. */
6752         uint64_t reserved_30_31        : 2;
6753         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
6754                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
6755                                                                  interface after an active command, rather than only forcing the CS inactive between
6756                                                                  commands. */
6757         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
6758                                                                  set [CAL_ENA]. */
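        /* Illustrative ordering for enabling CAL mode (hypothetical helpers;
         * bdk_lmcx_modereg_params3_t and its field spelling are assumed to match
         * the LMC()_MODEREG_PARAMS3 typedef elsewhere in this file):
         *
         *   bdk_lmcx_modereg_params3_t p3;
         *   p3.u = lmc_csr_read(node, lmc, LMC_MODEREG_PARAMS3);
         *   p3.s.cal = 1;                                         // 1. program CAL in the mode register first
         *   lmc_csr_write(node, lmc, LMC_MODEREG_PARAMS3, p3.u);
         *
         *   bdk_lmcx_ext_config_t ext;
         *   ext.u = lmc_csr_read(node, lmc, LMC_EXT_CONFIG);
         *   ext.cn9.cal_ena = 1;                                  // 2. only then enable CAL mode in LMC
         *   lmc_csr_write(node, lmc, LMC_EXT_CONFIG, ext.u);
         */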
6759         uint64_t reserved_27           : 1;
6760         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
6761         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
6762         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
6763                                                                  register or DRAM devices. */
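        /* Illustrative parity bring-up using the three fields above (sketch only,
         * hypothetical helpers; whether A17/BG1 participate depends on the DRAM
         * organization):
         *
         *   bdk_lmcx_ext_config_t ext;
         *   ext.u = lmc_csr_read(node, lmc, LMC_EXT_CONFIG);
         *   ext.cn9.par_include_a17 = 1;   // only if the parts actually have A17
         *   ext.cn9.par_include_bg1 = 1;   // only if BG1 is present (x4/x8 DDR4)
         *   ext.cn9.gen_par         = 1;   // enable parity generation in LMC first...
         *   lmc_csr_write(node, lmc, LMC_EXT_CONFIG, ext.u);
         *   // ...then enable parity checking in the register/DRAM devices.
         */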
6764         uint64_t reserved_21_23        : 3;
6765         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
6766                                                                  sequence into the deskew training sequence. */
6767         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
6768         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
6769         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
6770                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
6771                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
6772                                                                  write to 1. */
6773         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
6774                                                                  Refresh interval is represented in number of 512 CK cycle increments and is controlled by
6775                                                                  LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval, however (in numbers of
6776                                                                  one CK cycle), can be achieved by setting this field to a nonzero value. */
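        /* Worked example of the presumed interpretation of the text above: the
         * coarse interval is some number N of 512 CK-cycle increments (taken from
         * LMC()_CONFIG[REF_ZQCS_INT]) and this field adds up to 511 extra CK
         * cycles of resolution, i.e. roughly
         *
         *   refresh_interval_ck = N * 512 + REF_INT_LSBS;
         *
         * e.g. N = 24 and REF_INT_LSBS = 256 gives 24 * 512 + 256 = 12544 CK cycles.
         */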
6777         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
6778         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
6779         uint64_t reserved_3_5          : 3;
6780         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) Reserved.
6781                                                                  Internal:
6782                                                                  When set, enable NXM events for HFA read operations.
6783                                                                  Default is disabled, but
6784                                                                  could be useful for debug of DLC/DFA accesses. */
6785         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
6786                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
6787                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
6788                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
6789         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
6790                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
6791                                                                  failure, so [L2C_NXM_WR] can generally be set. */
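        /* Typical setting suggested by the two NXM descriptions above (sketch
         * only, hypothetical helpers): report non-existent-memory writes, but
         * leave read reporting off so harmless prefetches do not mask write errors.
         *
         *   bdk_lmcx_ext_config_t ext;
         *   ext.u = lmc_csr_read(node, lmc, LMC_EXT_CONFIG);
         *   ext.cn9.l2c_nxm_wr = 1;   // NXM writes usually indicate a real failure
         *   ext.cn9.l2c_nxm_rd = 0;   // NXM reads can occur due to prefetching
         *   lmc_csr_write(node, lmc, LMC_EXT_CONFIG, ext.u);
         */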
6792 #else /* Word 0 - Little Endian */
6793         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
6794                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
6795                                                                  failure, so [L2C_NXM_WR] can generally be set. */
6796         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
6797                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
6798                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
6799                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
6800         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) Reserved.
6801                                                                  Internal:
6802                                                                  When set, enable NXM events for HFA read operations.
6803                                                                  Default is disabled, but
6804                                                                  could be useful for debug of DLC/DFA accesses. */
6805         uint64_t reserved_3_5          : 3;
6806         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
6807         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
6808         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
6809                                                                  Refresh interval is represented in number of 512 CK cycle increments and is controlled by
6810                                                                  LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval, however (in numbers of
6811                                                                  one CK cycle), can be achieved by setting this field to a nonzero value. */
6812         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
6813                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
6814                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
6815                                                                  write to 1. */
6816         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
6817         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
6818         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
6819                                                                  sequence into the deskew training sequence. */
6820         uint64_t reserved_21_23        : 3;
6821         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
6822                                                                  register or DRAM devices. */
6823         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
6824         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
6825         uint64_t reserved_27           : 1;
6826         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
6827                                                                  set [CAL_ENA]. */
6828         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
6829                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
6830                                                                  interface after an active command, rather than only forcing the CS inactive between
6831                                                                  commands. */
6832         uint64_t reserved_30_31        : 2;
6833         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. May
6834                                                                  be useful if data inversion will result in lower power. */
6835         uint64_t reserved_33_35        : 3;
6836         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
6837                                                                  and RCW commands.
6838 
6839                                                                  When this bit is clear, select operation where signals other than CS are active before and
6840                                                                  after the DDR_CS_L active cycle.
6841 
6842                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
6843                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
6844                                                                  DDR_CS_L is also active. */
6845         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
6846                                                                  If this bit is set, the override behavior is governed by the control field
6847                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
6848 
6849                                                                  If this bit is cleared, select operation where signals other than CS are active before
6850                                                                  and after the DDR_CS_L active cycle. */
6851         uint64_t reserved_38_39        : 2;
6852         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
6853                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
6854         uint64_t sref_seq_stop_clock   : 1;  /**< [ 43: 43](R/W) When enabled, LMC disables all clock output pins to the DIMM at the end of
6855                                                                  the LMC_SEQ_SEL_E::SREF_ENTRY
6856                                                                  sequence. In RDIMM applications, this implies that the RCD will be programmed into Clock
6857                                                                  Stop Power Down mode at the end of the LMC_SEQ_SEL_E::SREF_ENTRY sequence.
6858                                                                  It also automatically enables all clock outputs at the start of the LMC_SEQ_SEL_E::SREF_EXIT sequence. */
6859         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
6860                                                                  signalled on
6861                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
6862                                                                  edge of DDR*_ERROR_ALERT_L. */
6863         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
6864         uint64_t reserved_46_47        : 2;
6865         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
6866                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
6867                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
6868                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
6869         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
6870                                                                  Internal:
6871                                                                  DIMM0 configuration bits that represent the number of the chip
6872                                                                  ID of the DRAM. This value is used for decoding the address
6873                                                                  as well as routing Chip IDs to the appropriate output
6874                                                                  pins.
6875                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
6876                                                                  0x1 = 1 Chip ID  (2H 3DS).
6877                                                                  0x2 = 2 Chip IDs (4H 3DS).
6878                                                                  0x3 = 3 Chip IDs (8H 3DS). */
6879         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
6880                                                                  Internal:
6881                                                                  DIMM1 configuration bits that represent the number of the chip
6882                                                                  ID of the DRAM. This value is used for decoding the address
6883                                                                  as well as routing Chip IDs to the appropriate output
6884                                                                  pins.
6885                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
6886                                                                  0x1 = 1 Chip ID  (2H 3DS).
6887                                                                  0x2 = 2 Chip IDs (4H 3DS).
6888                                                                  0x3 = 3 Chip IDs (8H 3DS). */
6889         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
6890                                                                  to create a uniform memory space that is free from holes in
6891                                                                  between ranks. When different size DIMMs are used, the DIMM with
6892                                                                  the higher capacity is mapped to the lower address space. */
6893         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
6894                                                                  when in coalesce_address_mode. That is, pbank value of zero selects
6895                                                                  DIMM1 instead of DIMM0.
6896                                                                  Intended to be used for the case of DIMM1 having bigger rank/s
6897                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
6898         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
6899                                                                  the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
6900                                                                  When this bit is set to one, it disables this default behavior.
6901                                                                  This configuration has lower priority compared to
6902                                                                  [DIMM_SEL_FORCE_INVERT]. */
6903         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
6904                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
6905                                                                  command to the B side of the RDIMM.
6906                                                                  When set, make sure that the RCD's control word
6907                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
6908                                                                  the DDR4 RCD. */
6909         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
6910                                                                  When set, MRS commands are directed to either the A or B
6911                                                                  side of the RCD.
6912 
6913                                                                  PDA operation is NOT allowed when this bit is set. In
6914                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
6915                                                                  must be cleared before running MRW sequence with this
6916                                                                  bit turned on. */
6917         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
6918                                                                  0 = MRS command is sent to the A side of an RDIMM.
6919                                                                  1 = MRS command is sent to the B side of an RDIMM. */
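        /* Sketch of the constraint for one-sided MRS described above (hypothetical
         * helpers; bdk_lmcx_mr_mpr_ctl_t and its field spelling are assumed for
         * LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]):
         *
         *   bdk_lmcx_mr_mpr_ctl_t mpr;
         *   mpr.u = lmc_csr_read(node, lmc, LMC_MR_MPR_CTL);
         *   mpr.s.mr_wr_pda_enable = 0;                  // PDA must be off before a one-sided MRW sequence
         *   lmc_csr_write(node, lmc, LMC_MR_MPR_CTL, mpr.u);
         *
         *   bdk_lmcx_ext_config_t ext;
         *   ext.u = lmc_csr_read(node, lmc, LMC_EXT_CONFIG);
         *   ext.cn9.mrs_one_side = 1;
         *   ext.cn9.mrs_side     = 1;                    // direct the MRS to the RCD B side
         *   lmc_csr_write(node, lmc, LMC_EXT_CONFIG, ext.u);
         */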
6920         uint64_t bc4_dqs_ena           : 1;  /**< [ 59: 59](R/W) Reserved.
6921                                                                  Internal:
6922                                                                  For diagnostic use only.
6923                                                                    0 = LMC produces the full bursts of DQS transitions,
6924                                                                    even for BC4 Write ops.
6925                                                                    1 = LMC produces only three cycles of DQS transitions
6926                                                                    every time it sends out a BC4 Write operation. */
6927         uint64_t ref_block             : 1;  /**< [ 60: 60](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
6928                                                                  allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT0] or
6929                                                                  LMC()_REF_STATUS[REF_COUNT1] has reached the maximum value of 0x7. */
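        /* Illustrative use of [REF_BLOCK] (sketch only, hypothetical helpers):
         * hold refreshes off around a latency-critical window, knowing LMC still
         * forces one once LMC()_REF_STATUS[REF_COUNT0/1] saturates at 0x7.
         *
         *   bdk_lmcx_ext_config_t ext;
         *   ext.u = lmc_csr_read(node, lmc, LMC_EXT_CONFIG);
         *   ext.cn9.ref_block = 1;                            // postpone refresh sequences
         *   lmc_csr_write(node, lmc, LMC_EXT_CONFIG, ext.u);
         *   // ... latency-critical traffic ...
         *   ext.cn9.ref_block = 0;                            // resume normal refresh scheduling
         *   lmc_csr_write(node, lmc, LMC_EXT_CONFIG, ext.u);
         */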
6930         uint64_t ref_mode              : 2;  /**< [ 62: 61](R/W) Selects the refresh mode.
6931                                                                  0x0 = All ranks get refreshed together at the end of TREFI and all traffic is halted.
6932                                                                  0x1 = Ranks are refreshed in pairs during TREFI window. At TREFI/2, ranks 1 & 3
6933                                                                      are refreshed while allowing traffic to 0 & 2. At TREFI, ranks 0 & 2 are
6934                                                                      refreshed while allowing traffic to 1 & 3.
6935                                                                  0x2 = Ranks are refreshed in pairs during TREFI window. All traffic is halted
6936                                                                      whenever each pair is refreshed. */
6937         uint64_t ref_rank_all          : 1;  /**< [ 63: 63](R/W) Reserved.
6938                                                                  Internal:
6939                                                                  For diagnostic use only.
6940                                                                    When set, cycles through all ranks during the refresh sequence disregarding
6941                                                                    rank availability status. */
6942 #endif /* Word 0 - End */
6943     } cn9;
6944     struct bdk_lmcx_ext_config_cn81xx
6945     {
6946 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
6947         uint64_t reserved_61_63        : 3;
6948         uint64_t bc4_dqs_ena           : 1;  /**< [ 60: 60](R/W) Reserved.
6949                                                                  Internal:
6950                                                                  For diagnostic use only.
6951                                                                    0 = LMC produces the full bursts of DQS transitions,
6952                                                                    even for BC4 Write ops.
6953                                                                    1 = LMC produces only three cycles of DQS transitions
6954                                                                    every time it sends out a BC4 Write operation. */
6955         uint64_t ref_block             : 1;  /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
6956                                                                  allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
6957                                                                  reached the maximum value of 0x7. */
6958         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
6959                                                                  0 = MRS command is sent to the A side of an RDIMM.
6960                                                                  1 = MRS command is sent to the B side of an RDIMM. */
6961         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
6962                                                                  When set, MRS commands are directed to either the A or B
6963                                                                  side of the RCD.
6964 
6965                                                                  PDA operation is NOT allowed when this bit is set. In
6966                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
6967                                                                  must be cleared before running MRW sequence with this
6968                                                                  bit turned on. */
6969         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
6970                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
6971                                                                  command to the B side of the RDIMM.
6972                                                                  When set, make sure that the RCD's control word
6973                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
6974                                                                  the DDR4 RCD. */
6975         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
6976                                                                  the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
6977                                                                  When this bit is set to one, it disables this default behavior.
6978                                                                  This configuration has lower priority compared to
6979                                                                  [DIMM_SEL_FORCE_INVERT]. */
6980         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) Reserved.
6981                                                                  Internal:
6982                                                                  When set to 1, this bit forces the pbank bit to be inverted
6983                                                                  when in coalesce_address_mode. That is, pbank value of 0 selects
6984                                                                  DIMM1 instead of DIMM0.
6985                                                                  Intended to be used for the case of DIMM1 having bigger rank/s
6986                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
6987         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
6988                                                                  to create a uniform memory space that is free from holes in
6989                                                                  between ranks. When different size DIMMs are used, the DIMM with
6990                                                                  the higher capacity is mapped to the lower address space. */
6991         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
6992                                                                  Internal:
6993                                                                  DIMM1 configuration bits that represent the number of the chip
6994                                                                  ID of the DRAM. This value is used for decoding the address
6995                                                                  as well as routing Chip IDs to the appropriate output
6996                                                                  pins.
6997                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
6998                                                                  0x1 = 1 Chip ID  (2H 3DS).
6999                                                                  0x2 = 2 Chip IDs (4H 3DS).
7000                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7001         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
7002                                                                  Internal:
7003                                                                  DIMM0 configuration bits that represent the number of the chip
7004                                                                  ID of the DRAM. This value is used for decoding the address
7005                                                                  as well as routing Chip IDs to the appropriate output
7006                                                                  pins.
7007                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7008                                                                  0x1 = 1 Chip ID  (2H 3DS).
7009                                                                  0x2 = 2 Chip IDs (4H 3DS).
7010                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7011         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
7012                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
7013                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
7014                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
7015         uint64_t reserved_46_47        : 2;
7016         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
7017         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
7018                                                                  signalled on
7019                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
7020                                                                  edge of DDR*_ERROR_ALERT_L. */
7021         uint64_t reserved_43           : 1;
7022         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
7023                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
7024         uint64_t reserved_38_39        : 2;
7025         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
7026                                                                  If this bit is set, the override behavior is governed by the control field
7027                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
7028 
7029                                                                  If this bit is cleared, select operation where signals other than CS are active before
7030                                                                  and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
7031         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
7032                                                                  and
7033                                                                  RCW commands.
7034 
7035                                                                  When this bit is clear, select operation where signals other than CS are active before and
7036                                                                  after the DDR_CS_L active cycle.
7037 
7038                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
7039                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
7040                                                                  DDR_CS_L is also active. */
7041         uint64_t reserved_33_35        : 3;
7042         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
7043                                                                  effectively uses the scramble logic to instead invert all the data, so this bit must not
7044                                                                  be set if data scrambling is enabled. May be useful if data inversion will result in lower
7045                                                                  power. */
7046         uint64_t reserved_30_31        : 2;
7047         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
7048                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
7049                                                                  interface after an active command, rather than only forcing the CS inactive between
7050                                                                  commands. */
7051         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
7052                                                                  set [CAL_ENA]. */
7053         uint64_t reserved_27           : 1;
7054         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
7055         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
7056         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
7057                                                                  register or DRAM devices. */
7058         uint64_t reserved_21_23        : 3;
7059         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
7060                                                                  sequence into the deskew training sequence. */
7061         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
7062         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
7063         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
7064                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
7065                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
7066                                                                  write to 1. */
7067         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
7068                                                                  Refresh interval is represented in number of 512 CK cycle increments and is controlled by
7069                                                                  LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval, however (in numbers of
7070                                                                  one CK cycle), can be achieved by setting this field to a nonzero value. */
7071         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
7072         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
7073         uint64_t dlcram_flip_synd      : 2;  /**< [  5:  4](R/W) Reserved.
7074                                                                  Internal:
7075                                                                  DLC RAM flip syndrome control bits. */
7076         uint64_t dlcram_cor_dis        : 1;  /**< [  3:  3](R/W) Reserved.
7077                                                                  Internal:
7078                                                                  DLC RAM correction disable control. */
7079         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) Reserved.
7080                                                                  Internal:
7081                                                                  When set, enable NXM events for HFA read operations.
7082                                                                  Default is disabled, but
7083                                                                  could be useful for debug of DLC/DFA accesses. */
7084         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7085                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
7086                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
7087                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
7088         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7089                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
7090                                                                  failure, so [L2C_NXM_WR] can generally be set. */
7091 #else /* Word 0 - Little Endian */
7092         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7093                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
7094                                                                  failure, so [L2C_NXM_WR] can generally be set. */
7095         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7096                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
7097                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
7098                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
7099         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) Reserved.
7100                                                                  Internal:
7101                                                                  When set, enable NXM events for HFA read operations.
7102                                                                  Default is disabled, but
7103                                                                  could be useful for debug of DLC/DFA accesses. */
7104         uint64_t dlcram_cor_dis        : 1;  /**< [  3:  3](R/W) Reserved.
7105                                                                  Internal:
7106                                                                  DLC RAM correction disable control. */
7107         uint64_t dlcram_flip_synd      : 2;  /**< [  5:  4](R/W) Reserved.
7108                                                                  Internal:
7109                                                                  DLC RAM flip syndrome control bits. */
7110         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
7111         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
7112         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
7113                                                                  Refresh interval is represented in number of 512 CK cycle increments and is controlled by
7114                                                                  LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval, however (in numbers of
7115                                                                  one CK cycle), can be achieved by setting this field to a nonzero value. */
7116         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
7117                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
7118                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
7119                                                                  write to 1. */
7120         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
7121         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
7122         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
7123                                                                  sequence into the deskew training sequence. */
7124         uint64_t reserved_21_23        : 3;
7125         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
7126                                                                  register or DRAM devices. */
7127         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
7128         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
7129         uint64_t reserved_27           : 1;
7130         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
7131                                                                  set [CAL_ENA]. */
7132         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
7133                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
7134                                                                  interface after an active command, rather than only forcing the CS inactive between
7135                                                                  commands. */
7136         uint64_t reserved_30_31        : 2;
7137         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
7138                                                                  effectively uses the scramble logic to instead invert all the data, so this bit must not
7139                                                                  be set if data scrambling is enabled. May be useful if data inversion will result in lower
7140                                                                  power. */
7141         uint64_t reserved_33_35        : 3;
7142         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
7143                                                                  and
7144                                                                  RCW commands.
7145 
7146                                                                  When this bit is clear, select operation where signals other than CS are active before and
7147                                                                  after the DDR_CS_L active cycle.
7148 
7149                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
7150                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
7151                                                                  DDR_CS_L is also active. */
7152         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
7153                                                                  If this bit is set, the override behavior is governed by the control field
7154                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
7155 
7156                                                                  If this bit is cleared, select operation where signals other than CS are active before
7157                                                                  and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
7158         uint64_t reserved_38_39        : 2;
7159         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
7160                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
7161         uint64_t reserved_43           : 1;
7162         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
7163                                                                  signalled on
7164                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
7165                                                                  edge of DDR*_ERROR_ALERT_L. */
7166         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
7167         uint64_t reserved_46_47        : 2;
7168         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
7169                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
7170                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
7171                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
7172         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
7173                                                                  Internal:
7174                                                                  DIMM0 configuration bits that represent the number of the chip
7175                                                                  ID of the DRAM. This value is used for decoding the address
7176                                                                  as well as routing Chip IDs to the appropriate output
7177                                                                  pins.
7178                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7179                                                                  0x1 = 1 Chip ID  (2H 3DS).
7180                                                                  0x2 = 2 Chip IDs (4H 3DS).
7181                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7182         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
7183                                                                  Internal:
7184                                                                  DIMM1 configuration bits that represent the number of the chip
7185                                                                  ID of the DRAM. This value is used for decoding the address
7186                                                                  as well as routing Chip IDs to the appropriate output
7187                                                                  pins.
7188                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7189                                                                  0x1 = 1 Chip ID  (2H 3DS).
7190                                                                  0x2 = 2 Chip IDs (4H 3DS).
7191                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7192         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
7193                                                                  to create a uniform memory space that is free from holes in
7194                                                                  between ranks. When different size DIMMs are used, the DIMM with
7195                                                                  the higher capacity is mapped to the lower address space. */
7196         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) Reserved.
7197                                                                  Internal:
7198                                                                  When set to 1, this bit forces the pbank bit to be inverted
7199                                                                  when in coalesce_address_mode. That is, pbank value of 0 selects
7200                                                                  DIMM1 instead of DIMM0.
7201                                                                  Intended to be used for the case of DIMM1 having bigger rank/s
7202                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
7203         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
7204                                                                  the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
7205                                                                  When this bit is set to one, it disables this default behavior.
7206                                                                  This configuration has lower priority compared to
7207                                                                  [DIMM_SEL_FORCE_INVERT]. */
7208         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
7209                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
7210                                                                  command to the B side of the RDIMM.
7211                                                                  When set, make sure that the RCD's control word
7212                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
7213                                                                  the DDR4 RCD. */
7214         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
7215                                                                  When set, MRS commands are directed to either the A or B
7216                                                                  side of the RCD.
7217 
7218                                                                  PDA operation is NOT allowed when this bit is set. In
7219                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
7220                                                                  must be cleared before running MRW sequence with this
7221                                                                  bit turned on. */
7222         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
7223                                                                  0 = MRS command is sent to the A side of an RDIMM.
7224                                                                  1 = MRS command is sent to the B side of an RDIMM. */
7225         uint64_t ref_block             : 1;  /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
7226                                                                  allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
7227                                                                  reached the maximum value of 0x7. */
7228         uint64_t bc4_dqs_ena           : 1;  /**< [ 60: 60](R/W) Reserved.
7229                                                                  Internal:
7230                                                                  For diagnostic use only.
7231                                                                    0 = LMC produces the full bursts of DQS transitions,
7232                                                                    even for BC4 Write ops.
7233                                                                    1 = LMC produces only three cycles of DQS transitions
7234                                                                    every time it sends out a BC4 Write operation. */
7235         uint64_t reserved_61_63        : 3;
7236 #endif /* Word 0 - End */
7237     } cn81xx;
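    /* Usage sketch: per the [MRS_ONE_SIDE]/[MRS_SIDE] descriptions above, a
     * one-sided MRS to a DDR4 RDIMM requires PDA mode to be off first. This
     * is a minimal illustration assuming the BDK_CSR_MODIFY() accessor and
     * the field names defined in this file, with "node" and "lmc" standing
     * in for the target node and LMC index:
     *
     *   BDK_CSR_MODIFY(c, node, BDK_LMCX_MR_MPR_CTL(lmc),
     *                  c.s.mr_wr_pda_enable = 0);  // PDA off before a one-sided MRW sequence
     *   BDK_CSR_MODIFY(c, node, BDK_LMCX_EXT_CONFIG(lmc),
     *                  c.s.mrs_one_side = 1;
     *                  c.s.mrs_side = 0);          // 0 = A side, 1 = B side
     */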
7238     struct bdk_lmcx_ext_config_cn88xx
7239     {
7240 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
7241         uint64_t reserved_61_63        : 3;
7242         uint64_t bc4_dqs_ena           : 1;  /**< [ 60: 60](RO) Reserved. */
7243         uint64_t ref_block             : 1;  /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
7244                                                                  allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
7245                                                                  reached the maximum value of 0x7. */
7246         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
7247                                                                  0 = MRS command is sent to the A side of an RDIMM.
7248                                                                  1 = MRS command is sent to the B side of an RDIMM. */
7249         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
7250                                                                  When set, MRS commands are directed to either the A or B
7251                                                                  side of the RCD.
7252 
7253                                                                  PDA operation is NOT allowed when this bit is set. In
7254                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
7255                                                                  must be cleared before running MRW sequence with this
7256                                                                  bit turned on. */
7257         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
7258                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
7259                                                                  command to the B side of the RDIMM.
7260                                                                  When set, make sure that the RCD's control word
7261                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
7262                                                                  the DDR4 RCD. */
7263         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
7264                                                                  the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
7265                                                                  When this bit is set to one, it disables this default behavior.
7266                                                                  This configuration has lower priority compared to
7267                                                                  [DIMM_SEL_FORCE_INVERT]. */
7268         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
7269                                                                  when in coalesce_address_mode. That is, pbank value of zero selects
7270                                                                  DIMM1 instead of DIMM0.
7271                                                                  Intended for the case where DIMM1 has larger rank(s)
7272                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF].
7273         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
7274                                                                  to create a uniform memory space that is free from holes in
7275                                                                  between ranks. When different size DIMMs are used, the DIMM with
7276                                                                  the higher capacity is mapped to the lower address space. */
7277         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
7278                                                                  Internal:
7279                                                                  DIMM1 configuration bits that represent the number of the chip
7280                                                                  ID of the DRAM. This value is used for decoding the address
7281                                                                  as well as routing Chip IDs to the appropriate output
7282                                                                  pins.
7283                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7284                                                                  0x1 = 1 Chip ID  (2H 3DS).
7285                                                                  0x2 = 2 Chip IDs (4H 3DS).
7286                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7287         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
7288                                                                  Internal:
7289                                                                  DIMM0 configuration bits that represent the number of the chip
7290                                                                  ID of the DRAM. This value is used for decoding the address
7291                                                                  as well as routing Chip IDs to the appropriate output
7292                                                                  pins.
7293                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7294                                                                  0x1 = 1 Chip ID  (2H 3DS).
7295                                                                  0x2 = 2 Chip IDs (4H 3DS).
7296                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7297         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
7298                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
7299                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
7300                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
7301         uint64_t reserved_46_47        : 2;
7302         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
7303         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
7304                                                                  signalled on
7305                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
7306                                                                  edge of DDR*_ERROR_ALERT_L. */
7307         uint64_t reserved_43           : 1;
7308         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
7309                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
7310         uint64_t reserved_38_39        : 2;
7311         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
7312                                                                  If this bit is set, the override behavior is governed by the control field
7313                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
7314 
7315                                                                  If this bit is cleared, select operation where signals other than CS are active before
7316                                                                  and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
7317         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
7318                                                                  and
7319                                                                  RCW commands.
7320 
7321                                                                  When this bit is clear, select operation where signals other than CS are active before and
7322                                                                  after the DDR_CS_L active cycle.
7323 
7324                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
7325                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
7326                                                                  DDR_CS_L is also active. */
7327         uint64_t reserved_33_35        : 3;
7328         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
7329                                                                  effectively uses the scramble logic to instead invert all the data, so this bit must not
7330                                                                  be set if data scrambling is enabled. May be useful if data inversion will result in lower
7331                                                                  power. */
7332         uint64_t reserved_30_31        : 2;
7333         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
7334                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
7335                                                                  interface after an active command, rather than only forcing the CS inactive between
7336                                                                  commands. */
7337         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
7338                                                                  set [CAL_ENA]. */
7339         uint64_t reserved_27           : 1;
7340         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
7341         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
7342         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
7343                                                                  register or DRAM devices. */
7344         uint64_t reserved_21_23        : 3;
7345         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
7346                                                                  sequence into the deskew training sequence. */
7347         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
7348         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
7349         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
7350                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
7351                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
7352                                                                  write to 1. */
7353         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
7354                                                                  Refresh interval is represented in number of 512 CK cycle increments and is controlled by
7355                                                                  LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (in units of one
7356                                                                  CK cycle), however, can be achieved by setting this field to a nonzero value.
7357         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
7358         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
7359         uint64_t dlcram_flip_synd      : 2;  /**< [  5:  4](R/W) Reserved.
7360                                                                  Internal:
7361                                                                  DLC RAM flip syndrome control bits. */
7362         uint64_t dlcram_cor_dis        : 1;  /**< [  3:  3](R/W) Reserved.
7363                                                                  Internal:
7364                                                                  DLC RAM correction disable control. */
7365         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) When set, enable NXM events for HFA read operations.
7366                                                                  Internal:
7367                                                                  Default is disabled, but
7368                                                                  could be useful for debug of DLC/DFA accesses. */
7369         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7370                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
7371                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
7372                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
7373         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7374                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
7375                                                                  failure, so [L2C_NXM_WR] can generally be set. */
7376 #else /* Word 0 - Little Endian */
7377         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7378                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
7379                                                                  failure, so [L2C_NXM_WR] can generally be set. */
7380         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7381                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
7382                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
7383                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
7384         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) When set, enable NXM events for HFA read operations.
7385                                                                  Internal:
7386                                                                  Default is disabled, but
7387                                                                  could be useful for debug of DLC/DFA accesses. */
7388         uint64_t dlcram_cor_dis        : 1;  /**< [  3:  3](R/W) Reserved.
7389                                                                  Internal:
7390                                                                  DLC RAM correction disable control. */
7391         uint64_t dlcram_flip_synd      : 2;  /**< [  5:  4](R/W) Reserved.
7392                                                                  Internal:
7393                                                                  DLC RAM flip syndrome control bits. */
7394         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
7395         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
7396         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
7397                                                                  Refresh interval is represented in number of 512 CK cycle increments and is controlled by
7398                                                                  LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (in units of one
7399                                                                  CK cycle), however, can be achieved by setting this field to a nonzero value.
7400         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
7401                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
7402                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
7403                                                                  write to 1. */
7404         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
7405         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
7406         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
7407                                                                  sequence into the deskew training sequence. */
7408         uint64_t reserved_21_23        : 3;
7409         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
7410                                                                  register or DRAM devices. */
7411         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
7412         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
7413         uint64_t reserved_27           : 1;
7414         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
7415                                                                  set [CAL_ENA]. */
7416         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
7417                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
7418                                                                  interface after an active command, rather than only forcing the CS inactive between
7419                                                                  commands. */
7420         uint64_t reserved_30_31        : 2;
7421         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
7422                                                                  effectively uses the scramble logic to instead invert all the data, so this bit must not
7423                                                                  be set if data scrambling is enabled. May be useful if data inversion will result in lower
7424                                                                  power. */
7425         uint64_t reserved_33_35        : 3;
7426         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
7427                                                                  and
7428                                                                  RCW commands.
7429 
7430                                                                  When this bit is clear, select operation where signals other than CS are active before and
7431                                                                  after the DDR_CS_L active cycle.
7432 
7433                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
7434                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
7435                                                                  DDR_CS_L is also active. */
7436         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
7437                                                                  If this bit is set, the override behavior is governed by the control field
7438                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
7439 
7440                                                                  If this bit is cleared, select operation where signals other than CS are active before
7441                                                                  and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
7442         uint64_t reserved_38_39        : 2;
7443         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
7444                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
7445         uint64_t reserved_43           : 1;
7446         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
7447                                                                  signalled on
7448                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
7449                                                                  edge of DDR*_ERROR_ALERT_L. */
7450         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
7451         uint64_t reserved_46_47        : 2;
7452         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
7453                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
7454                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
7455                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
7456         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
7457                                                                  Internal:
7458                                                                  DIMM0 configuration bits that represent the number of the chip
7459                                                                  ID of the DRAM. This value is used for decoding the address
7460                                                                  as well as routing Chip IDs to the appropriate output
7461                                                                  pins.
7462                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7463                                                                  0x1 = 1 Chip ID  (2H 3DS).
7464                                                                  0x2 = 2 Chip IDs (4H 3DS).
7465                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7466         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
7467                                                                  Internal:
7468                                                                  DIMM1 configuration bits that represent the number of the chip
7469                                                                  ID of the DRAM. This value is used for decoding the address
7470                                                                  as well as routing Chip IDs to the appropriate output
7471                                                                  pins.
7472                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7473                                                                  0x1 = 1 Chip ID  (2H 3DS).
7474                                                                  0x2 = 2 Chip IDs (4H 3DS).
7475                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7476         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
7477                                                                  to create a uniform memory space that is free from holes in
7478                                                                  between ranks. When different size DIMMs are used, the DIMM with
7479                                                                  the higher capacity is mapped to the lower address space. */
7480         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
7481                                                                  when in coalesce_address_mode. That is, pbank value of zero selects
7482                                                                  DIMM1 instead of DIMM0.
7483                                                                  Intended for the case where DIMM1 has larger rank(s)
7484                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF].
7485         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
7486                                                                  the pbank bit whenever LMC()_NXM[MEM_MSB_D1_R0] \> LMC()_NXM[MEM_MSB_D0_R0].
7487                                                                  When this bit is set to one, it disables this default behavior.
7488                                                                  This configuration has lower priority compared to
7489                                                                  [DIMM_SEL_FORCE_INVERT]. */
7490         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
7491                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
7492                                                                  command to the B side of the RDIMM.
7493                                                                  When set, make sure that the RCD's control word
7494                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
7495                                                                  the DDR4 RCD. */
7496         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
7497                                                                  When set, MRS commands are directed to either the A or B
7498                                                                  side of the RCD.
7499 
7500                                                                  PDA operation is NOT allowed when this bit is set. In
7501                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
7502                                                                  must be cleared before running MRW sequence with this
7503                                                                  bit turned on. */
7504         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
7505                                                                  0 = MRS command is sent to the A side of an RDIMM.
7506                                                                  1 = MRS command is sent to the B side of an RDIMM. */
7507         uint64_t ref_block             : 1;  /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
7508                                                                  allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
7509                                                                  reached the maximum value of 0x7. */
7510         uint64_t bc4_dqs_ena           : 1;  /**< [ 60: 60](RO) Reserved. */
7511         uint64_t reserved_61_63        : 3;
7512 #endif /* Word 0 - End */
7513     } cn88xx;
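    /* Usage sketch: the [CAL_ENA] description above requires CAL to be enabled
     * in LMC()_MODEREG_PARAMS3 before [CAL_ENA] is set. A minimal sketch,
     * assuming the BDK_CSR_MODIFY() accessor and the field names defined in
     * this file, with "node" and "lmc" standing in for the target node and
     * LMC index:
     *
     *   BDK_CSR_MODIFY(c, node, BDK_LMCX_MODEREG_PARAMS3(lmc), c.s.cal = 1);
     *   BDK_CSR_MODIFY(c, node, BDK_LMCX_EXT_CONFIG(lmc), c.s.cal_ena = 1);
     */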
7514     struct bdk_lmcx_ext_config_cn83xx
7515     {
7516 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
7517         uint64_t reserved_61_63        : 3;
7518         uint64_t bc4_dqs_ena           : 1;  /**< [ 60: 60](R/W) Reserved.
7519                                                                  Internal:
7520                                                                  For diagnostic use only.
7521                                                                    0 = LMC produces the full bursts of DQS transitions,
7522                                                                    even for BC4 Write ops.
7523                                                                    1 = LMC produces only three cycles of DQS transitions
7524                                                                    every time it sends out a BC4 Write operation. */
7525         uint64_t ref_block             : 1;  /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
7526                                                                  allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
7527                                                                  reached the maximum value of 0x7. */
7528         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
7529                                                                  0 = MRS command is sent to the A side of an RDIMM.
7530                                                                  1 = MRS command is sent to the B side of an RDIMM. */
7531         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
7532                                                                  When set, MRS commands are directed to either the A or B
7533                                                                  side of the RCD.
7534 
7535                                                                  PDA operation is NOT allowed when this bit is set. In
7536                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
7537                                                                  must be cleared before running MRW sequence with this
7538                                                                  bit turned on. */
7539         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
7540                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
7541                                                                  command to the B side of the RDIMM.
7542                                                                  When set, make sure that the RCD's control word
7543                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
7544                                                                  the DDR4 RCD. */
7545         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
7546                                                                  the pbank bit whenever [MEM_MSB_D1_R0] \> [MEM_MSB_D0_R0].
7547                                                                  When this bit is set to one, it disables this default behavior.
7548                                                                  This configuration has lower priority compared to
7549                                                                  [DIMM_SEL_FORCE_INVERT]. */
7550         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
7551                                                                  when in coalesce_address_mode. That is, pbank value of zero selects
7552                                                                  DIMM1 instead of DIMM0.
7553                                                                  Intended for the case where DIMM1 has larger rank(s)
7554                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF].
7555         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
7556                                                                  to create a uniform memory space that is free from holes in
7557                                                                  between ranks. When different size DIMMs are used, the DIMM with
7558                                                                  the higher capacity is mapped to the lower address space. */
7559         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
7560                                                                  Internal:
7561                                                                  DIMM1 configuration bits that represent the number of the chip
7562                                                                  ID of the DRAM. This value is used for decoding the address
7563                                                                  as well as routing Chip IDs to the appropriate output
7564                                                                  pins.
7565                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7566                                                                  0x1 = 1 Chip ID  (2H 3DS).
7567                                                                  0x2 = 2 Chip IDs (4H 3DS).
7568                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7569         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
7570                                                                  Internal:
7571                                                                  DIMM0 configuration bits that represent the number of the chip
7572                                                                  ID of the DRAM. This value is used for decoding the address
7573                                                                  as well as routing Chip IDs to the appropriate output
7574                                                                  pins.
7575                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7576                                                                  0x1 = 1 Chip ID  (2H 3DS).
7577                                                                  0x2 = 2 Chip IDs (4H 3DS).
7578                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7579         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
7580                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
7581                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
7582                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
7583         uint64_t reserved_46_47        : 2;
7584         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
7585         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
7586                                                                  signalled on
7587                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
7588                                                                  edge of DDR*_ERROR_ALERT_L. */
7589         uint64_t reserved_43           : 1;
7590         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
7591                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
7592         uint64_t reserved_38_39        : 2;
7593         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
7594                                                                  If this bit is set, the override behavior is governed by the control field
7595                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
7596 
7597                                                                  If this bit is cleared, select operation where signals other than CS are active before
7598                                                                  and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
7599         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
7600                                                                  and
7601                                                                  RCW commands.
7602 
7603                                                                  When this bit is clear, select operation where signals other than CS are active before and
7604                                                                  after the DDR_CS_L active cycle.
7605 
7606                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
7607                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
7608                                                                  DDR_CS_L is also active. */
7609         uint64_t reserved_33_35        : 3;
7610         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
7611                                                                  effectively uses the scramble logic to instead invert all the data, so this bit must not
7612                                                                  be set if data scrambling is enabled. May be useful if data inversion will result in lower
7613                                                                  power. */
7614         uint64_t reserved_30_31        : 2;
7615         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
7616                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
7617                                                                  interface after an active command, rather than only forcing the CS inactive between
7618                                                                  commands. */
7619         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
7620                                                                  set [CAL_ENA]. */
7621         uint64_t reserved_27           : 1;
7622         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
7623         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
7624         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
7625                                                                  register or DRAM devices. */
7626         uint64_t reserved_21_23        : 3;
7627         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
7628                                                                  sequence into the deskew training sequence. */
7629         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
7630         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
7631         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
7632                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
7633                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
7634                                                                  write to 1. */
7635         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
7636                                                                  Refresh interval is represented in number of 512 CK cycle increments and is controlled by
7637                                                                  LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (in units of one
7638                                                                  CK cycle), however, can be achieved by setting this field to a nonzero value.
7639         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
7640         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
7641         uint64_t dlcram_flip_synd      : 2;  /**< [  5:  4](R/W) Reserved.
7642                                                                  Internal:
7643                                                                  DLC RAM flip syndrome control bits. */
7644         uint64_t dlcram_cor_dis        : 1;  /**< [  3:  3](R/W) Reserved.
7645                                                                  Internal:
7646                                                                  DLC RAM correction disable control. */
7647         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) Reserved.
7648                                                                  Internal:
7649                                                                  When set, enable NXM events for HFA read operations.
7650                                                                  Default is disabled, but
7651                                                                  could be useful for debug of DLC/DFA accesses. */
7652         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7653                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
7654                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
7655                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
7656         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7657                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
7658                                                                  failure, so [L2C_NXM_WR] can generally be set. */
7659 #else /* Word 0 - Little Endian */
7660         uint64_t l2c_nxm_wr            : 1;  /**< [  0:  0](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7661                                                                  loaded for L2C NXM write operations. NXM writes are generally an indication of
7662                                                                  failure, so [L2C_NXM_WR] can generally be set. */
7663         uint64_t l2c_nxm_rd            : 1;  /**< [  1:  1](R/W) When set, corresponding LMC()_INT[NXM_WR_ERR] will be set and LMC()_NXM_FADR will be
7664                                                                  loaded for L2C NXM read operations. NXM read operations may occur during normal operation
7665                                                                  (due to prefetches), so [L2C_NXM_RD] should not be set during normal operation to allow
7666                                                                  LMC()_INT[NXM_WR_ERR] to indicate NXM writes. */
7667         uint64_t dlc_nxm_rd            : 1;  /**< [  2:  2](R/W) Reserved.
7668                                                                  Internal:
7669                                                                  When set, enable NXM events for HFA read operations.
7670                                                                  Default is disabled, but
7671                                                                  could be useful for debug of DLC/DFA accesses. */
7672         uint64_t dlcram_cor_dis        : 1;  /**< [  3:  3](R/W) Reserved.
7673                                                                  Internal:
7674                                                                  DLC RAM correction disable control. */
7675         uint64_t dlcram_flip_synd      : 2;  /**< [  5:  4](R/W) Reserved.
7676                                                                  Internal:
7677                                                                  DLC RAM flip syndrome control bits. */
7678         uint64_t drive_ena_fprch       : 1;  /**< [  6:  6](R/W) Drive DQx starting one cycle earlier than normal during write operations. */
7679         uint64_t drive_ena_bprch       : 1;  /**< [  7:  7](R/W) Drive DQx for one cycle longer than normal during write operations. */
7680         uint64_t ref_int_lsbs          : 9;  /**< [ 16:  8](R/W) The refresh interval value least significant bits. The default is 0x0.
7681                                                                  Refresh interval is represented in number of 512 CK cycle increments and is controlled by
7682                                                                  LMC()_CONFIG[REF_ZQCS_INT]. A more precise refresh interval (in units of one
7683                                                                  CK cycle), however, can be achieved by setting this field to a nonzero value.
7684         uint64_t slot_ctl_reset_force  : 1;  /**< [ 17: 17](WO) Write 1 to reset the slot-control override for all slot-control registers. After writing a
7685                                                                  1 to this bit, slot-control registers will update with changes made to other timing-
7686                                                                  control registers. This is a one-shot operation; it automatically returns to 0 after a
7687                                                                  write to 1. */
7688         uint64_t read_ena_fprch        : 1;  /**< [ 18: 18](R/W) Enable pad receiver starting one cycle earlier than normal during read operations. */
7689         uint64_t read_ena_bprch        : 1;  /**< [ 19: 19](R/W) Enable pad receiver one cycle longer than normal during read operations. */
7690         uint64_t vrefint_seq_deskew    : 1;  /**< [ 20: 20](R/W) Personality bit to change the operation of what is normally the internal Vref training
7691                                                                  sequence into the deskew training sequence. */
7692         uint64_t reserved_21_23        : 3;
7693         uint64_t gen_par               : 1;  /**< [ 24: 24](R/W) Enable parity generation in the DRAM commands; must be set prior to enabling parity in
7694                                                                  register or DRAM devices. */
7695         uint64_t par_include_bg1       : 1;  /**< [ 25: 25](R/W) If set, include BG1 in parity calculations in DDR4 mode. */
7696         uint64_t par_include_a17       : 1;  /**< [ 26: 26](R/W) If set, include A17 in parity calculations in DDR4 mode. */
7697         uint64_t reserved_27           : 1;
7698         uint64_t cal_ena               : 1;  /**< [ 28: 28](R/W) Set to cause LMC to operate in CAL mode. First set LMC()_MODEREG_PARAMS3[CAL], then
7699                                                                  set [CAL_ENA]. */
7700         uint64_t cmd_rti               : 1;  /**< [ 29: 29](R/W) Set this bit to change the behavior of the LMC to return to a completely idle command (no
7701                                                                  CS active, no command pins active, and address/bank address/bank group all low) on the
7702                                                                  interface after an active command, rather than only forcing the CS inactive between
7703                                                                  commands. */
7704         uint64_t reserved_30_31        : 2;
7705         uint64_t invert_data           : 1;  /**< [ 32: 32](R/W) Set this bit to cause all data to be inverted before writing or reading to/from DRAM. This
7706                                                                  effectively uses the scramble logic to instead invert all the data, so this bit must not
7707                                                                  be set if data scrambling is enabled. May be useful if data inversion will result in lower
7708                                                                  power. */
7709         uint64_t reserved_33_35        : 3;
7710         uint64_t mrs_cmd_select        : 1;  /**< [ 36: 36](R/W) When [MRS_CMD_OVERRIDE] is set, use this bit to select which style of operation for MRS
7711                                                                  and
7712                                                                  RCW commands.
7713 
7714                                                                  When this bit is clear, select operation where signals other than CS are active before and
7715                                                                  after the DDR_CS_L active cycle.
7716 
7717                                                                  When this bit is set, select the operation where the other command signals (DDR*_RAS_L,
7718                                                                  DDR*_CAS_L, DDR*_WE_L, DDR*_A\<15:0\>, etc.) all are active only during the cycle where the
7719                                                                  DDR_CS_L is also active. */
7720         uint64_t mrs_cmd_override      : 1;  /**< [ 37: 37](R/W) Set to override the behavior of MRS and RCW operations.
7721                                                                  If this bit is set, the override behavior is governed by the control field
7722                                                                  [MRS_CMD_SELECT]. See LMC()_EXT_CONFIG[MRS_CMD_SELECT] for detail.
7723 
7724                                                                  If this bit is cleared, select operation where signals other than CS are active before
7725                                                                  and after the CS_N active cycle (except for the case when interfacing with DDR3 RDIMM). */
7726         uint64_t reserved_38_39        : 2;
7727         uint64_t par_addr_mask         : 3;  /**< [ 42: 40](R/W) Mask applied to parity for address bits \<14:12\>. Clear to exclude these address
7728                                                                  bits from the parity calculation, necessary if the DRAM device does not have these pins. */
7729         uint64_t reserved_43           : 1;
7730         uint64_t ea_int_polarity       : 1;  /**< [ 44: 44](R/W) Set to invert the DDR*_ERROR_ALERT_L interrupt polarity. When clear, interrupt is
7731                                                                  signalled on
7732                                                                  the rising edge of DDR*_ERROR_ALERT_L. When set, interrupt is signalled on the falling
7733                                                                  edge of DDR*_ERROR_ALERT_L. */
7734         uint64_t error_alert_n_sample  : 1;  /**< [ 45: 45](RO) Read to get a sample of the DDR*_ERROR_ALERT_L signal. */
7735         uint64_t reserved_46_47        : 2;
7736         uint64_t rcd_parity_check      : 1;  /**< [ 48: 48](R/W) Enables the one cycle delay of the CA parity output. This MUST be set to one
7737                                                                  when using DDR4 RDIMM AND parity checking in RCD is enabled (RC0E DA0 = 1). Set
7738                                                                  this to zero otherwise. To enable the parity checking in RCD, set this bit first
7739                                                                  BEFORE issuing the RCW write RC0E DA0 = 1. */
7740         uint64_t dimm0_cid             : 2;  /**< [ 50: 49](R/W) Reserved.
7741                                                                  Internal:
7742                                                                  DIMM0 configuration bits that represent the number of the chip
7743                                                                  ID of the DRAM. This value is used for decoding the address
7744                                                                  as well as routing Chip IDs to the appropriate output
7745                                                                  pins.
7746                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7747                                                                  0x1 = 1 Chip ID  (2H 3DS).
7748                                                                  0x2 = 2 Chip IDs (4H 3DS).
7749                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7750         uint64_t dimm1_cid             : 2;  /**< [ 52: 51](R/W) Reserved.
7751                                                                  Internal:
7752                                                                  DIMM1 configuration bits that represent the number of the chip
7753                                                                  ID of the DRAM. This value is used for decoding the address
7754                                                                  as well as routing Chip IDs to the appropriate output
7755                                                                  pins.
7756                                                                  0x0 = 0 Chip ID  (Mono-Die stack).
7757                                                                  0x1 = 1 Chip ID  (2H 3DS).
7758                                                                  0x2 = 2 Chip IDs (4H 3DS).
7759                                                                  0x3 = 3 Chip IDs (8H 3DS). */
7760         uint64_t coalesce_address_mode : 1;  /**< [ 53: 53](R/W) When set to one, LMC coalesces the L2C+LMC internal address mapping
7761                                                                  to create a uniform memory space that is free from holes in
7762                                                                  between ranks. When different size DIMMs are used, the DIMM with
7763                                                                  the higher capacity is mapped to the lower address space. */
7764         uint64_t dimm_sel_force_invert : 1;  /**< [ 54: 54](R/W) When set to one, this bit forces the pbank bit to be inverted
7765                                                                  when in coalesce_address_mode. That is, pbank value of zero selects
7766                                                                  DIMM1 instead of DIMM0.
7767                                                                  Intended to be used for the case of DIMM1 having bigger rank/s
7768                                                                  than DIMM0. This bit has priority over [DIMM_SEL_INVERT_OFF]. */
7769         uint64_t dimm_sel_invert_off   : 1;  /**< [ 55: 55](R/W) During coalesce_address_mode, the default logic would be to invert
7770                                                                  the pbank bit whenever [MEM_MSB_D1_R0] \> [MEM_MSB_D0_R0].
7771                                                                  When this bit is set to 1, it disables this default behavior.
7772                                                                  This configuration has lower priority compared to
7773                                                                  [DIMM_SEL_FORCE_INVERT]. */
7774         uint64_t mrs_bside_invert_disable : 1;/**< [ 56: 56](R/W) When set, the command decoder cancels the auto inversion of
7775                                                                  A3-A9, A11, A13, A17, BA0, BA1 and BG0 during MRS/MRS_PDA
7776                                                                  command to the B side of the RDIMM.
7777                                                                  When set, make sure that the RCD's control word
7778                                                                  RC00 DA[0] = 1 so that the output inversion is disabled in
7779                                                                  the DDR4 RCD. */
7780         uint64_t mrs_one_side          : 1;  /**< [ 57: 57](R/W) Only applies to DDR4 RDIMM.
7781                                                                  When set, MRS commands are directed to either the A or B
7782                                                                  side of the RCD.
7783 
7784                                                                  PDA operation is NOT allowed when this bit is set. In
7785                                                                  other words, LMC()_MR_MPR_CTL[MR_WR_PDA_ENABLE]
7786                                                                  must be cleared before running MRW sequence with this
7787                                                                  bit turned on. */
7788         uint64_t mrs_side              : 1;  /**< [ 58: 58](R/W) Specifies the RDIMM side. Only applies when [MRS_ONE_SIDE] is set.
7789                                                                  0 = MRS command is sent to the A side of an RDIMM.
7790                                                                  1 = MRS command is sent to the B side of an RDIMM. */
7791         uint64_t ref_block             : 1;  /**< [ 59: 59](R/W) When set, LMC is blocked from initiating any refresh sequence. LMC then only
7792                                                                  allows a refresh sequence to start when LMC()_REF_STATUS[REF_COUNT] has
7793                                                                  reached the maximum value of 0x7. */
7794         uint64_t bc4_dqs_ena           : 1;  /**< [ 60: 60](R/W) Reserved.
7795                                                                  Internal:
7796                                                                  For diagnostic use only.
7797                                                                    0 = LMC produces the full bursts of DQS transitions,
7798                                                                    even for BC4 Write ops.
7799                                                                    1 = LMC produces only three cycles of DQS transitions
7800                                                                    every time it sends out a BC4 Write operation. */
7801         uint64_t reserved_61_63        : 3;
7802 #endif /* Word 0 - End */
7803     } cn83xx;
7804 };
7805 typedef union bdk_lmcx_ext_config bdk_lmcx_ext_config_t;
7806 
7807 static inline uint64_t BDK_LMCX_EXT_CONFIG(unsigned long a) __attribute__ ((pure, always_inline));
7808 static inline uint64_t BDK_LMCX_EXT_CONFIG(unsigned long a)
7809 {
7810     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
7811         return 0x87e088000030ll + 0x1000000ll * ((a) & 0x0);
7812     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
7813         return 0x87e088000030ll + 0x1000000ll * ((a) & 0x1);
7814     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
7815         return 0x87e088000030ll + 0x1000000ll * ((a) & 0x3);
7816     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
7817         return 0x87e088000030ll + 0x1000000ll * ((a) & 0x3);
7818     __bdk_csr_fatal("LMCX_EXT_CONFIG", 1, a, 0, 0, 0);
7819 }
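
/* Worked example of the address computation above: on CN83XX, the EXT_CONFIG
 * register of LMC1 is at 0x87e088000030 + 0x1000000 * 1 = 0x87e089000030. */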
7820 
7821 #define typedef_BDK_LMCX_EXT_CONFIG(a) bdk_lmcx_ext_config_t
7822 #define bustype_BDK_LMCX_EXT_CONFIG(a) BDK_CSR_TYPE_RSL
7823 #define basename_BDK_LMCX_EXT_CONFIG(a) "LMCX_EXT_CONFIG"
7824 #define device_bar_BDK_LMCX_EXT_CONFIG(a) 0x0 /* PF_BAR0 */
7825 #define busnum_BDK_LMCX_EXT_CONFIG(a) (a)
7826 #define arguments_BDK_LMCX_EXT_CONFIG(a) (a),-1,-1,-1
7827 
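/* Usage sketch: read-modify-write of LMC(0)_EXT_CONFIG to turn on DRAM command
 * parity generation before parity is enabled in the RCD/DRAM devices, per the
 * [GEN_PAR] and [RCD_PARITY_CHECK] notes above. Illustrative only; it assumes
 * the BDK_CSR_INIT()/BDK_CSR_WRITE() helpers from bdk-csr.h, a node handle from
 * bdk_numa_local(), and that the generic .s view exposes these fields (use a
 * model-specific view such as .cn83xx otherwise).
 *
 *     bdk_node_t node = bdk_numa_local();
 *     BDK_CSR_INIT(ext_cfg, node, BDK_LMCX_EXT_CONFIG(0)); // ext_cfg.u = current value
 *     ext_cfg.s.gen_par = 1;           // generate parity on DRAM commands first
 *     ext_cfg.s.rcd_parity_check = 1;  // set before issuing the RCW write RC0E DA0 = 1
 *     BDK_CSR_WRITE(node, BDK_LMCX_EXT_CONFIG(0), ext_cfg.u);
 */
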
7828 /**
7829  * Register (RSL) lmc#_ext_config2
7830  *
7831  * LMC Extended Configuration Register
7832  * This register has additional configuration and control bits for the LMC.
7833  */
7834 union bdk_lmcx_ext_config2
7835 {
7836     uint64_t u;
7837     struct bdk_lmcx_ext_config2_s
7838     {
7839 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
7840         uint64_t reserved_27_63        : 37;
7841         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](RO) Reserved. */
7842         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](RO) Reserved. */
7843         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](RO) Reserved. */
7844         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](RO) Reserved. */
7845         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](RO) Reserved. */
7846         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](RO) Reserved. */
7847         uint64_t early_dqx2            : 1;  /**< [ 16: 16](RO) Reserved. */
7848         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](RO) Reserved. */
7849         uint64_t reserved_10_11        : 2;
7850         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
7851                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
7852                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
7853                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
7854                                                                  It is recommended to set this bit to one when TRR_ON is set. */
7855         uint64_t trr_on                : 1;  /**< [  8:  8](R/W) When set, enables counting of DRAM row activates, which is
7856                                                                  used in target row refresh (TRR) mode. This bit can
7857                                                                  be safely set after LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE]
7858                                                                  has a value of 1. */
7859         uint64_t reserved_0_7          : 8;
7860 #else /* Word 0 - Little Endian */
7861         uint64_t reserved_0_7          : 8;
7862         uint64_t trr_on                : 1;  /**< [  8:  8](R/W) When set, enables counting of DRAM row activates, which is
7863                                                                  used in target row refresh (TRR) mode. This bit can
7864                                                                  be safely set after LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE]
7865                                                                  has a value of 1. */
7866         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
7867                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
7868                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
7869                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
7870                                                                  It is recommended to set this bit to one when TRR_ON is set. */
7871         uint64_t reserved_10_11        : 2;
7872         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](RO) Reserved. */
7873         uint64_t early_dqx2            : 1;  /**< [ 16: 16](RO) Reserved. */
7874         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](RO) Reserved. */
7875         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](RO) Reserved. */
7876         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](RO) Reserved. */
7877         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](RO) Reserved. */
7878         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](RO) Reserved. */
7879         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](RO) Reserved. */
7880         uint64_t reserved_27_63        : 37;
7881 #endif /* Word 0 - End */
7882     } s;
7883     struct bdk_lmcx_ext_config2_cn88xxp1
7884     {
7885 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
7886         uint64_t reserved_27_63        : 37;
7887         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](RO) Reserved. */
7888         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](RO) Reserved. */
7889         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](RO) Reserved. */
7890         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](RO) Reserved. */
7891         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](RO) Reserved. */
7892         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](RO) Reserved. */
7893         uint64_t early_dqx2            : 1;  /**< [ 16: 16](RO) Reserved. */
7894         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](RO) Reserved. */
7895         uint64_t reserved_10_11        : 2;
7896         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
7897                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
7898                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
7899                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
7900                                                                  It is recommended to set this bit to one when TRR_ON is set. */
7901         uint64_t trr_on                : 1;  /**< [  8:  8](R/W) When set, enables counting of DRAM row activates, which is
7902                                                                  used in target row refresh (TRR) mode. This bit can
7903                                                                  be safely set after LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE]
7904                                                                  has a value of 1. */
7905         uint64_t mac                   : 3;  /**< [  7:  5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
7906                                                                  0x0 = 100K.
7907                                                                  0x1 = 400K/2.
7908                                                                  0x2 = 500K/2.
7909                                                                  0x3 = 600K/2.
7910                                                                  0x4 = 700K/2.
7911                                                                  0x5 = 800K/2.
7912                                                                  0x6 = 900K/2.
7913                                                                  0x7 = 1000K/2. */
7914         uint64_t macram_scrub_done     : 1;  /**< [  4:  4](RO/H) Maximum activate count memory scrub complete indication;
7915                                                                  1 means the memory has been scrubbed to all zero. */
7916         uint64_t macram_scrub          : 1;  /**< [  3:  3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
7917                                                                  should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
7918                                                                  This is a one-shot operation; it automatically returns to 0 after a write to 1. */
7919         uint64_t macram_flip_synd      : 2;  /**< [  2:  1](R/W) Reserved.
7920                                                                  Internal:
7921                                                                  MAC RAM flip syndrome control bits. */
7922         uint64_t macram_cor_dis        : 1;  /**< [  0:  0](R/W) Reserved.
7923                                                                  Internal:
7924                                                                  MAC RAM correction disable control. */
7925 #else /* Word 0 - Little Endian */
7926         uint64_t macram_cor_dis        : 1;  /**< [  0:  0](R/W) Reserved.
7927                                                                  Internal:
7928                                                                  MAC RAM correction disable control. */
7929         uint64_t macram_flip_synd      : 2;  /**< [  2:  1](R/W) Reserved.
7930                                                                  Internal:
7931                                                                  MAC RAM flip syndrome control bits. */
7932         uint64_t macram_scrub          : 1;  /**< [  3:  3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
7933                                                                  should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
7934                                                                  This is a one-shot operation; it automatically returns to 0 after a write to 1. */
7935         uint64_t macram_scrub_done     : 1;  /**< [  4:  4](RO/H) Maximum activate count memory scrub complete indication;
7936                                                                  1 means the memory has been scrubbed to all zero. */
7937         uint64_t mac                   : 3;  /**< [  7:  5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
7938                                                                  0x0 = 100K.
7939                                                                  0x1 = 400K/2.
7940                                                                  0x2 = 500K/2.
7941                                                                  0x3 = 600K/2.
7942                                                                  0x4 = 700K/2.
7943                                                                  0x5 = 800K/2.
7944                                                                  0x6 = 900K/2.
7945                                                                  0x7 = 1000K/2. */
7946         uint64_t trr_on                : 1;  /**< [  8:  8](R/W) When set, enables counting of DRAM row activates, which is
7947                                                                  used in target row refresh (TRR) mode. This bit can
7948                                                                  be safely set after LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE]
7949                                                                  has a value of 1. */
7950         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
7951                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
7952                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
7953                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
7954                                                                  It is recommended to set this bit to one when TRR_ON is set. */
7955         uint64_t reserved_10_11        : 2;
7956         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](RO) Reserved. */
7957         uint64_t early_dqx2            : 1;  /**< [ 16: 16](RO) Reserved. */
7958         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](RO) Reserved. */
7959         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](RO) Reserved. */
7960         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](RO) Reserved. */
7961         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](RO) Reserved. */
7962         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](RO) Reserved. */
7963         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](RO) Reserved. */
7964         uint64_t reserved_27_63        : 37;
7965 #endif /* Word 0 - End */
7966     } cn88xxp1;
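    /* Illustrative TRR enable order implied by the [MACRAM_SCRUB], [MACRAM_SCRUB_DONE]
     * and [TRR_ON] notes above; a sketch only, assuming the BDK_CSR_READ()/BDK_CSR_WRITE()
     * helpers from bdk-csr.h, a valid node handle, and the CN88XX pass 1.x field layout:
     *
     *     bdk_lmcx_ext_config2_t c;
     *     c.u = BDK_CSR_READ(node, BDK_LMCX_EXT_CONFIG2(0));
     *     c.cn88xxp1.mac = 0x3;                   // 600K/2 activates per tMAW interval
     *     c.cn88xxp1.macram_scrub = 1;            // one-shot scrub of the MAC RAM to zero
     *     BDK_CSR_WRITE(node, BDK_LMCX_EXT_CONFIG2(0), c.u);
     *     do { c.u = BDK_CSR_READ(node, BDK_LMCX_EXT_CONFIG2(0)); }
     *     while (!c.cn88xxp1.macram_scrub_done);  // wait for the scrub to complete
     *     c.cn88xxp1.trr_on = 1;                  // safe once MACRAM_SCRUB_DONE reads 1
     *     BDK_CSR_WRITE(node, BDK_LMCX_EXT_CONFIG2(0), c.u);
     */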
7967     struct bdk_lmcx_ext_config2_cn9
7968     {
7969 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
7970         uint64_t reserved_27_63        : 37;
7971         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](R/W) Self-refresh idle threshold.
7972                                                                  Enter self-refresh mode after the memory controller has been idle for
7973                                                                  2^([SREF_AUTO_IDLE_THRES]-1) * TREFI,
7974                                                                  where TREFI is controlled by LMC()_CONFIG[REF_ZQCS_INT]\<6:0\>.
7975 
7976                                                                  0x0 = Automatic self refresh interval is controlled by
7977                                                                  2^(2+LMC()_CONFIG[IDLEPOWER]) CK cycles instead. Self refresh has priority
7978                                                                  over precharge power-down.
7979 
7980                                                                  Only valid when LMC()_EXT_CONFIG2[SREF_AUTO_ENABLE] is set. */
7981         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](R/W) Enable automatic self-refresh mode.
7982                                                                  This field should only be set after initialization.
7983                                                                  When set, software must not issue self-refresh entry commands (LMC_SEQ_SEL_E::SREF_ENTRY).
7984 
7985                                                                  Internal:
7986                                                                  FIXME The LMC_SEQ_SEL_E::SREF_ENTRY requirement can be removed in 98xx when bug28110 is closed. */
7987         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](R/W) Reserved, MBZ.
7988                                                                  Internal:
7989                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
7990                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
7991         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](R/W) Reserved, MBZ.
7992                                                                  Internal:
7993                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
7994                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
7995         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](R/W) Reserved, MBZ.
7996                                                                  Internal:
7997                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
7998                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
7999         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](R/W) Reserved, MBZ.
8000                                                                  Internal:
8001                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8002                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
8003         uint64_t early_dqx2            : 1;  /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
8004                                                                  signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
8005         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
8006                                                                  L2C-LMC address bits are used to XOR the bank bits with.
8007                                                                  0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
8008                                                                  0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
8009                                                                  0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
8010                                                                  0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
8011                                                                  0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
8012                                                                  0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
8013                                                                  0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
8014                                                                  0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
8015                                                                  0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
8016                                                                  0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
8017                                                                  0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
8018                                                                  0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
8019                                                                  0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
8020                                                                  0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
8021                                                                  0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
8022                                                                  0xF: Reserved. */
8023         uint64_t reserved_10_11        : 2;
8024         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
8025                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
8026                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
8027                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
8028                                                                  It is recommended to set this bit to one when TRR_ON is set. */
8029         uint64_t reserved_8            : 1;
8030         uint64_t throttle_wr           : 4;  /**< [  7:  4](R/W) When nonzero, throttles command bandwidth in 1/16th increments by limiting IFB usage.
8031                                                                  0x0 = Full bandwidth,    32 IFBs available.
8032                                                                  0x1 = 1/16th bandwidth,   2 IFBs available.
8033                                                                  0x2 = 2/16th bandwidth,   4 IFBs available.
8034                                                                  ...
8035                                                                  0xF = 15/16th bandwidth, 30 IFBs available. */
8036         uint64_t throttle_rd           : 4;  /**< [  3:  0](R/W) When nonzero, throttles command bandwidth in 1/16th increments by limiting IFB usage.
8037                                                                  0x0 = Full bandwidth,    32 IFBs available.
8038                                                                  0x1 = 1/16th bandwidth,   2 IFBs available.
8039                                                                  0x2 = 2/16th bandwidth,   4 IFBs available.
8040                                                                  ...
8041                                                                  0xF = 15/16th bandwidth, 30 IFBs available. */
8042 #else /* Word 0 - Little Endian */
8043         uint64_t throttle_rd           : 4;  /**< [  3:  0](R/W) When nonzero, throttles command bandwidth in 1/16th increments by limiting IFB usage.
8044                                                                  0x0 = Full bandwidth,    32 IFBs available.
8045                                                                  0x1 = 1/16th bandwidth,   2 IFBs available.
8046                                                                  0x2 = 2/16th bandwidth,   4 IFBs available.
8047                                                                  ...
8048                                                                  0xF = 15/16th bandwidth, 30 IFBs available. */
8049         uint64_t throttle_wr           : 4;  /**< [  7:  4](R/W) When nonzero, throttles command bandwidth in 1/16th increments by limiting IFB usage.
8050                                                                  0x0 = Full bandwidth,    32 IFBs available.
8051                                                                  0x1 = 1/16th bandwidth,   2 IFBs available.
8052                                                                  0x2 = 2/16th bandwidth,   4 IFBs available.
8053                                                                  ...
8054                                                                  0xF = 15/16th bandwidth, 30 IFBs available. */
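        /* Illustration: [THROTTLE_RD] = 0x4 limits reads to 4/16th of command bandwidth
         * (8 IFBs); [THROTTLE_WR] applies the same encoding to writes. */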
8055         uint64_t reserved_8            : 1;
8056         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
8057                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
8058                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
8059                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
8060                                                                  It is recommended to set this bit to one when TRR_ON is set. */
8061         uint64_t reserved_10_11        : 2;
8062         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
8063                                                                  L2C-LMC address bits are used to XOR the bank bits with.
8064                                                                  0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
8065                                                                  0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
8066                                                                  0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
8067                                                                  0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
8068                                                                  0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
8069                                                                  0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
8070                                                                  0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
8071                                                                  0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
8072                                                                  0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
8073                                                                  0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
8074                                                                  0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
8075                                                                  0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
8076                                                                  0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
8077                                                                  0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
8078                                                                  0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
8079                                                                  0xF: Reserved. */
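        /* Illustration: with LMC()_CONTROL[XOR_BANK] = 1 and [XOR_BANK_SEL] = 0x1, an
         * L2C-LMC address of 0x12345678 gives bank<3:0> = address<10:7> ^ address<13:10>
         * = 0xC ^ 0x5 = 0x9. */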
8080         uint64_t early_dqx2            : 1;  /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
8081                                                                  signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
8082         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](R/W) Reserved, MBZ.
8083                                                                  Internal:
8084                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8085                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
8086         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](R/W) Reserved, MBZ.
8087                                                                  Internal:
8088                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8089                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
8090         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](R/W) Reserved, MBZ.
8091                                                                  Internal:
8092                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8093                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
8094         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](R/W) Reserved, MBZ.
8095                                                                  Internal:
8096                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8097                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
8098         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](R/W) Enable automatic self-refresh mode.
8099                                                                  This field should only be set after initialization.
8100                                                                  When set, software must not issue self-refresh entry commands (LMC_SEQ_SEL_E::SREF_ENTRY).
8101 
8102                                                                  Internal:
8103                                                                  FIXME The LMC_SEQ_SEL_E::SREF_ENTRY requirement can be removed in 98xx when bug28110 is closed. */
8104         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](R/W) Self-refresh idle threshold.
8105                                                                  Enter self-refresh mode after the memory controller has been idle for
8106                                                                  2^([SREF_AUTO_IDLE_THRES]-1) * TREFI,
8107                                                                  where TREFI is controlled by LMC()_CONFIG[REF_ZQCS_INT]\<6:0\>.
8108 
8109                                                                  0x0 = Automatic self refresh interval is controlled by
8110                                                                  2^(2+LMC()_CONFIG[IDLEPOWER]) CK cycles instead. Self refresh has priority
8111                                                                  over precharge power-down.
8112 
8113                                                                  Only valid when LMC()_EXT_CONFIG2[SREF_AUTO_ENABLE] is set. */
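        /* Illustration: with [SREF_AUTO_ENABLE] = 1 and [SREF_AUTO_IDLE_THRES] = 5, LMC
         * enters self-refresh after 2^(5-1) = 16 TREFI intervals of controller idle time. */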
8114         uint64_t reserved_27_63        : 37;
8115 #endif /* Word 0 - End */
8116     } cn9;
8117     struct bdk_lmcx_ext_config2_cn81xx
8118     {
8119 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8120         uint64_t reserved_27_63        : 37;
8121         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](R/W) Self-refresh idle threshold.
8122                                                                  Enter self-refresh mode after the memory controller has been idle for
8123                                                                  2^([SREF_AUTO_IDLE_THRES]-1) * TREFI,
8124                                                                  where TREFI is controlled by LMC()_CONFIG[REF_ZQCS_INT]\<6:0\>.
8125 
8126                                                                  0x0 = Automatic self refresh interval is controlled by
8127                                                                  2^(2+LMC()_CONFIG[IDLEPOWER]) CK cycles instead. Self refresh has priority
8128                                                                  over precharge power-down.
8129 
8130                                                                  Only valid when LMC()_EXT_CONFIG2[SREF_AUTO_ENABLE] is set. */
8131         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](R/W) Enable automatic self-refresh mode.
8132                                                                  This field should only be set after initialization.
8133                                                                  When set, software must not issue self-refresh entry commands (LMC_SEQ_SEL_E::SREF_ENTRY).
8134 
8135                                                                  Internal:
8136                                                                  FIXME The LMC_SEQ_SEL_E::SREF_ENTRY requirement can be removed in 98xx when bug28110 is closed. */
8137         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](R/W) Reserved, MBZ.
8138                                                                  Internal:
8139                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8140                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
8141         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](R/W) Reserved, MBZ.
8142                                                                  Internal:
8143                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8144                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
8145         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](R/W) Reserved, MBZ.
8146                                                                  Internal:
8147                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8148                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
8149         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](R/W) Reserved, MBZ.
8150                                                                  Internal:
8151                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8152                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
8153         uint64_t early_dqx2            : 1;  /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
8154                                                                  signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
8155         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
8156                                                                  L2C-LMC address bits are used to XOR the bank bits with.
8157                                                                  0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
8158                                                                  0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
8159                                                                  0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
8160                                                                  0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
8161                                                                  0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
8162                                                                  0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
8163                                                                  0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
8164                                                                  0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
8165                                                                  0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
8166                                                                  0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
8167                                                                  0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
8168                                                                  0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
8169                                                                  0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
8170                                                                  0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
8171                                                                  0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
8172                                                                  0xF: Reserved. */
8173         uint64_t reserved_10_11        : 2;
8174         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
8175                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
8176                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
8177                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
8178                                                                  It is recommended to set this bit to one when TRR_ON is set. */
8179         uint64_t trr_on                : 1;  /**< [  8:  8](R/W) When set, enables counting of DRAM row activates, which is
8180                                                                  used in target row refresh (TRR) mode. This bit can
8181                                                                  be safely set after LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE]
8182                                                                  has a value of 1. */
8183         uint64_t mac                   : 3;  /**< [  7:  5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
8184                                                                  0x0 = 100K.
8185                                                                  0x1 = 400K/2.
8186                                                                  0x2 = 500K/2.
8187                                                                  0x3 = 600K/2.
8188                                                                  0x4 = 700K/2.
8189                                                                  0x5 = 800K/2.
8190                                                                  0x6 = 900K/2.
8191                                                                  0x7 = 1000K/2. */
8192         uint64_t macram_scrub_done     : 1;  /**< [  4:  4](RO/H) Maximum activate count memory scrub complete indication;
8193                                                                  1 means the memory has been scrubbed to all zero. */
8194         uint64_t macram_scrub          : 1;  /**< [  3:  3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
8195                                                                  should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
8196                                                                  This is a one-shot operation; it automatically returns to 0 after a write to 1. */
8197         uint64_t macram_flip_synd      : 2;  /**< [  2:  1](R/W) Reserved.
8198                                                                  Internal:
8199                                                                  MAC RAM flip syndrome control bits. */
8200         uint64_t macram_cor_dis        : 1;  /**< [  0:  0](R/W) Reserved.
8201                                                                  Internal:
8202                                                                  MAC RAM correction disable control. */
8203 #else /* Word 0 - Little Endian */
8204         uint64_t macram_cor_dis        : 1;  /**< [  0:  0](R/W) Reserved.
8205                                                                  Internal:
8206                                                                  MAC RAM correction disable control. */
8207         uint64_t macram_flip_synd      : 2;  /**< [  2:  1](R/W) Reserved.
8208                                                                  Internal:
8209                                                                  MAC RAM flip syndrome control bits. */
8210         uint64_t macram_scrub          : 1;  /**< [  3:  3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
8211                                                                  should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
8212                                                                  This is a one-shot operation; it automatically returns to 0 after a write to 1. */
8213         uint64_t macram_scrub_done     : 1;  /**< [  4:  4](RO/H) Maximum activate count memory scrub complete indication;
8214                                                                  1 means the memory has been scrubbed to all zero. */
8215         uint64_t mac                   : 3;  /**< [  7:  5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
8216                                                                  0x0 = 100K.
8217                                                                  0x1 = 400K/2.
8218                                                                  0x2 = 500K/2.
8219                                                                  0x3 = 600K/2.
8220                                                                  0x4 = 700K/2.
8221                                                                  0x5 = 800K/2.
8222                                                                  0x6 = 900K/2.
8223                                                                  0x7 = 1000K/2. */
8224         uint64_t trr_on                : 1;  /**< [  8:  8](R/W) When set, enables counting of DRAM row activates, which is
8225                                                                  used in target row refresh (TRR) mode. This bit can
8226                                                                  be safely set after LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE]
8227                                                                  has a value of 1. */
8228         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
8229                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
8230                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
8231                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
8232                                                                  It is recommended to set this bit to one when TRR_ON is set. */
8233         uint64_t reserved_10_11        : 2;
8234         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
8235                                                                  L2C-LMC address bits are used to XOR the bank bits with.
8236                                                                  0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
8237                                                                  0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
8238                                                                  0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
8239                                                                  0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
8240                                                                  0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
8241                                                                  0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
8242                                                                  0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
8243                                                                  0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
8244                                                                  0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
8245                                                                  0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
8246                                                                  0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
8247                                                                  0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
8248                                                                  0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
8249                                                                  0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
8250                                                                  0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
8251                                                                  0xF: Reserved. */
8252         uint64_t early_dqx2            : 1;  /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
8253                                                                  signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
8254         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](R/W) Reserved, MBZ.
8255                                                                  Internal:
8256                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8257                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
8258         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](R/W) Reserved, MBZ.
8259                                                                  Internal:
8260                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8261                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
8262         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](R/W) Reserved, MBZ.
8263                                                                  Internal:
8264                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8265                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
8266         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](R/W) Reserved, MBZ.
8267                                                                  Internal:
8268                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8269                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
8270         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](R/W) Enable automatic self-refresh mode.
8271                                                                  This field should only be set after initialization.
8272                                                                  When set, software must not issue self-refresh entry commands (LMC_SEQ_SEL_E::SREF_ENTRY).
8273 
8274                                                                  Internal:
8275                                                                  FIXME The LMC_SEQ_SEL_E::SREF_ENTRY requirement can be removed in 98xx when bug28110 is closed. */
8276         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](R/W) Self-refresh idle threshold.
8277                                                                  Enter self-refresh mode after the memory controller has been idle for
8278                                                                  2^([SREF_AUTO_IDLE_THRES]-1) * TREFI,
8279                                                                  where the TREFI time is controlled by LMC()_CONFIG[REF_ZQCS_INT]\<6:0\>.
8280 
8281                                                                  0x0 = Automatic self refresh interval is controlled by
8282                                                                  2^(2+LMC()_CONFIG[IDLEPOWER]) CK cycles instead. Self refresh has priority
8283                                                                  over precharge power-down.
8284 
8285                                                                  Only valid when LMC()_EXT_CONFIG2[SREF_AUTO_ENABLE] is set. */
8286         uint64_t reserved_27_63        : 37;
8287 #endif /* Word 0 - End */
8288     } cn81xx;
8289     /* struct bdk_lmcx_ext_config2_cn81xx cn83xx; */
8290     struct bdk_lmcx_ext_config2_cn88xxp2
8291     {
8292 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8293         uint64_t reserved_27_63        : 37;
8294         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](RO) Reserved. */
8295         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](RO) Reserved. */
8296         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](R/W) Reserved, MBZ.
8297                                                                  Internal:
8298                                                                  When set, unload the PHY silo one cycle later for Rank 3 reads.
8299                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
8300         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](R/W) Reserved, MBZ.
8301                                                                  Internal:
8302                                                                  When set, unload the PHY silo one cycle later for Rank 2 reads.
8303                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
8304         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](R/W) Reserved, MBZ.
8305                                                                  Internal:
8306                                                                  When set, unload the PHY silo one cycle later for Rank 1 reads.
8307                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
8308         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](R/W) Reserved, MBZ.
8309                                                                  Internal:
8310                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8311                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
8312         uint64_t early_dqx2            : 1;  /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
8313                                                                  signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
8314         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
8315                                                                  L2C-LMC address bits are used to XOR the bank bits with.
8316                                                                  0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
8317                                                                  0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
8318                                                                  0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
8319                                                                  0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
8320                                                                  0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
8321                                                                  0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
8322                                                                  0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
8323                                                                  0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
8324                                                                  0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
8325                                                                  0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
8326                                                                  0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
8327                                                                  0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
8328                                                                  0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
8329                                                                  0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
8330                                                                  0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
8331                                                                  0xF: Reserved. */
8332         uint64_t reserved_10_11        : 2;
8333         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
8334                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
8335                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
8336                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
8337                                                                  It is recommended to set this bit to one when TRR_ON is set. */
8338         uint64_t trr_on                : 1;  /**< [  8:  8](R/W) When set, this enables counting of the row activates of the
8339                                                                  DRAM, as used in target row refresh mode. This bit can
8340                                                                  be safely set after LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE]
8341                                                                  has a value of 1. */
8342         uint64_t mac                   : 3;  /**< [  7:  5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
8343                                                                  0x0 = 100K.
8344                                                                  0x1 = 400K/2.
8345                                                                  0x2 = 500K/2.
8346                                                                  0x3 = 600K/2.
8347                                                                  0x4 = 700K/2.
8348                                                                  0x5 = 800K/2.
8349                                                                  0x6 = 900K/2.
8350                                                                  0x7 = 1000K/2. */
8351         uint64_t macram_scrub_done     : 1;  /**< [  4:  4](RO/H) Maximum activate count memory scrub complete indication;
8352                                                                  1 means the memory has been scrubbed to all zero. */
8353         uint64_t macram_scrub          : 1;  /**< [  3:  3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
8354                                                                  should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
8355                                                                  This is a one-shot operation; it automatically returns to 0 after a write to 1. */
8356         uint64_t macram_flip_synd      : 2;  /**< [  2:  1](R/W) Reserved.
8357                                                                  Internal:
8358                                                                  MAC RAM flip syndrome control bits. */
8359         uint64_t macram_cor_dis        : 1;  /**< [  0:  0](R/W) Reserved.
8360                                                                  Internal:
8361                                                                  MAC RAM correction disable control. */
8362 #else /* Word 0 - Little Endian */
8363         uint64_t macram_cor_dis        : 1;  /**< [  0:  0](R/W) Reserved.
8364                                                                  Internal:
8365                                                                  MAC RAM correction disable control. */
8366         uint64_t macram_flip_synd      : 2;  /**< [  2:  1](R/W) Reserved.
8367                                                                  Internal:
8368                                                                  MAC RAM flip syndrome control bits. */
8369         uint64_t macram_scrub          : 1;  /**< [  3:  3](WO) When set, the maximum activate count memory will be scrubbed to all zero values. This
8370                                                                  should be done before enabling TRR mode by setting LMC()_EXT_CONFIG2[TRR_ON].
8371                                                                  This is a one-shot operation; it automatically returns to 0 after a write to 1. */
8372         uint64_t macram_scrub_done     : 1;  /**< [  4:  4](RO/H) Maximum activate count memory scrub complete indication;
8373                                                                  1 means the memory has been scrubbed to all zero. */
8374         uint64_t mac                   : 3;  /**< [  7:  5](R/W) Sets the maximum number of activates allowed within a tMAW interval.
8375                                                                  0x0 = 100K.
8376                                                                  0x1 = 400K/2.
8377                                                                  0x2 = 500K/2.
8378                                                                  0x3 = 600K/2.
8379                                                                  0x4 = 700K/2.
8380                                                                  0x5 = 800K/2.
8381                                                                  0x6 = 900K/2.
8382                                                                  0x7 = 1000K/2. */
8383         uint64_t trr_on                : 1;  /**< [  8:  8](R/W) When set, this enables counting of the row activates of the
8384                                                                  DRAM, as used in target row refresh mode. This bit can
8385                                                                  be safely set after LMC()_EXT_CONFIG2[MACRAM_SCRUB_DONE]
8386                                                                  has a value of 1. */
8387         uint64_t row_col_switch        : 1;  /**< [  9:  9](R/W) When set, the memory address bit position that represents bit 4 of the COLUMN
8388                                                                  address (bit 5 in 32-bit mode) becomes the low order DDR ROW address bit.
8389                                                                  The upper DDR COLUMN address portion is selected using LMC()_CONFIG[ROW_LSB]
8390                                                                  (and LMC()_DUAL_MEMCFG[ROW_LSB] for dual-memory configuration).
8391                                                                  It is recommended to set this bit to one when TRR_ON is set. */
8392         uint64_t reserved_10_11        : 2;
8393         uint64_t xor_bank_sel          : 4;  /**< [ 15: 12](R/W) When LMC()_CONTROL[XOR_BANK] is set to one, this field selects which
8394                                                                  L2C-LMC address bits are used to XOR the bank bits with.
8395                                                                  0x0: bank\<3:0\> = address\<10:7\> ^ address\<15:12\>.
8396                                                                  0x1: bank\<3:0\> = address\<10:7\> ^ address\<13:10\>.
8397                                                                  0x2: bank\<3:0\> = address\<10:7\> ^ address\<14:11\>.
8398                                                                  0x3: bank\<3:0\> = address\<10:7\> ^ address\<16:13\>.
8399                                                                  0x4: bank\<3:0\> = address\<10:7\> ^ address\<17:14\>.
8400                                                                  0x5: bank\<3:0\> = address\<10:7\> ^ address\<18:15\>.
8401                                                                  0x6: bank\<3:0\> = address\<10:7\> ^ address\<22:19\>.
8402                                                                  0x7: bank\<3:0\> = address\<10:7\> ^ address\<23:20\>.
8403                                                                  0x8: bank\<3:0\> = address\<10:7\> ^ address\<26:23\>.
8404                                                                  0x9: bank\<3:0\> = address\<10:7\> ^ address\<27:24\>.
8405                                                                  0xA: bank\<3:0\> = address\<10:7\> ^ address\<30:27\>.
8406                                                                  0xB: bank\<3:0\> = address\<10:7\> ^ address\<31:28\>.
8407                                                                  0xC: bank\<3:0\> = address\<10:7\> ^ address\<32:29\>.
8408                                                                  0xD: bank\<3:0\> = address\<10:7\> ^ address\<35:32\>.
8409                                                                  0xE: bank\<3:0\> = address\<10:7\> ^ address\<36:33\>.
8410                                                                  0xF: Reserved. */
8411         uint64_t early_dqx2            : 1;  /**< [ 16: 16](R/W) Similar to LMC()_CONFIG[EARLY_DQX]. This field provides an additional setting to send DQx
8412                                                                  signals one more CK cycle earlier on top of LMC()_CONFIG[EARLY_DQX]. */
8413         uint64_t delay_unload_r0       : 1;  /**< [ 17: 17](R/W) Reserved, MBZ.
8414                                                                  Internal:
8415                                                                  When set, unload the PHY silo one cycle later for Rank 0 reads.
8416                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R0]. */
8417         uint64_t delay_unload_r1       : 1;  /**< [ 18: 18](R/W) Reserved, MBZ.
8418                                                                  Internal:
8419                                                                  When set, unload the PHY silo one cycle later for Rank 1 reads.
8420                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D0_R1]. */
8421         uint64_t delay_unload_r2       : 1;  /**< [ 19: 19](R/W) Reserved, MBZ.
8422                                                                  Internal:
8423                                                                  When set, unload the PHY silo one cycle later for Rank 2 reads.
8424                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R0]. */
8425         uint64_t delay_unload_r3       : 1;  /**< [ 20: 20](R/W) Reserved, MBZ.
8426                                                                  Internal:
8427                                                                  When set, unload the PHY silo one cycle later for Rank 3 reads.
8428                                                                  Setting this field has priority over LMC()_CONFIG[EARLY_UNLOAD_D1_R1]. */
8429         uint64_t sref_auto_enable      : 1;  /**< [ 21: 21](RO) Reserved. */
8430         uint64_t sref_auto_idle_thres  : 5;  /**< [ 26: 22](RO) Reserved. */
8431         uint64_t reserved_27_63        : 37;
8432 #endif /* Word 0 - End */
8433     } cn88xxp2;
8434 };
8435 typedef union bdk_lmcx_ext_config2 bdk_lmcx_ext_config2_t;
8436 
8437 static inline uint64_t BDK_LMCX_EXT_CONFIG2(unsigned long a) __attribute__ ((pure, always_inline));
8438 static inline uint64_t BDK_LMCX_EXT_CONFIG2(unsigned long a)
8439 {
8440     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
8441         return 0x87e088000090ll + 0x1000000ll * ((a) & 0x0);
8442     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
8443         return 0x87e088000090ll + 0x1000000ll * ((a) & 0x1);
8444     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
8445         return 0x87e088000090ll + 0x1000000ll * ((a) & 0x3);
8446     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
8447         return 0x87e088000090ll + 0x1000000ll * ((a) & 0x3);
8448     __bdk_csr_fatal("LMCX_EXT_CONFIG2", 1, a, 0, 0, 0);
8449 }
8450 
8451 #define typedef_BDK_LMCX_EXT_CONFIG2(a) bdk_lmcx_ext_config2_t
8452 #define bustype_BDK_LMCX_EXT_CONFIG2(a) BDK_CSR_TYPE_RSL
8453 #define basename_BDK_LMCX_EXT_CONFIG2(a) "LMCX_EXT_CONFIG2"
8454 #define device_bar_BDK_LMCX_EXT_CONFIG2(a) 0x0 /* PF_BAR0 */
8455 #define busnum_BDK_LMCX_EXT_CONFIG2(a) (a)
8456 #define arguments_BDK_LMCX_EXT_CONFIG2(a) (a),-1,-1,-1
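
/* Editor-added illustrative sketch, not part of the auto-generated register
 * definitions: the [XOR_BANK_SEL] table above pairs each encoding with the
 * address window that is XORed into L2C-LMC address bits 10:7 to form bank
 * bits 3:0 when LMC()_CONTROL[XOR_BANK] is set. The helpers below only
 * reproduce that arithmetic (and the [SREF_AUTO_IDLE_THRES] formula) in
 * software so the tables are easier to follow; the function names are
 * hypothetical and no hardware access is implied. */
static inline int bdk_lmcx_ext_config2_example_xor_bank(uint64_t address,
                                                        unsigned xor_bank_sel)
{
    /* Low bit of the second XOR operand for encodings 0x0..0xE, taken from the
     * field description: 0x0 selects address bits 15:12, 0x1 selects 13:10,
     * ..., 0xE selects 36:33. */
    static const unsigned char xor_lsb[15] = {
        12, 10, 11, 13, 14, 15, 19, 20, 23, 24, 27, 28, 29, 32, 33
    };
    if (xor_bank_sel > 0xE)
        return -1; /* 0xF is documented as reserved. */
    return (int)(((address >> 7) ^ (address >> xor_lsb[xor_bank_sel])) & 0xF);
}

/* Companion sketch for [SREF_AUTO_IDLE_THRES]: idle time before automatic
 * self-refresh entry, expressed as a multiple of TREFI. A threshold of 0
 * instead selects the 2^(2+LMC()_CONFIG[IDLEPOWER]) CK-cycle behavior, which
 * is not modeled here. */
static inline uint64_t bdk_lmcx_ext_config2_example_sref_trefi_multiple(unsigned thres)
{
    return (thres == 0) ? 0 : (1ull << (thres - 1));
}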
8457 
8458 /**
8459  * Register (RSL) lmc#_fadr
8460  *
8461  * LMC Failing (SEC/DED/NXM) Address Register
8462  * This register captures only the first transaction with ECC errors. A DED error can overwrite
8463  * this register with its failing address if the first error was a SEC. Writing a one to
8464  * LMC()_INT[SEC_ERR] or LMC()_INT[DED_ERR] clears the error bits and allows the next failing
8465  * address to be captured. If [FDIMM] is 1, the error is in the high DIMM.
8466  * LMC()_FADR captures the failing prescrambled address location (split into DIMM, bunk,
8467  * bank, etc.). If scrambling is off, then LMC()_FADR will also capture the failing physical
8468  * location in the DRAM parts. LMC()_SCRAMBLED_FADR captures the actual failing address
8469  * location in the physical DRAM parts, i.e.:
8470  * * If scrambling is on, LMC()_SCRAMBLED_FADR contains the failing physical location in the
8471  * DRAM parts (split into DIMM, bunk, bank, etc.).
8472  * * If scrambling is off, the prescramble and postscramble addresses are the same, and so the
8473  * contents of LMC()_SCRAMBLED_FADR match the contents of LMC()_FADR.
8474  */
8475 union bdk_lmcx_fadr
8476 {
8477     uint64_t u;
8478     struct bdk_lmcx_fadr_s
8479     {
8480 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8481         uint64_t reserved_43_63        : 21;
8482         uint64_t fcid                  : 3;  /**< [ 42: 40](RO/H) Reserved.
8483                                                                  Internal:
8484                                                                  Failing CID number. This field is only valid when interfacing with 3DS DRAMs (i.e., when
8485                                                                  LMC()_EXT_CONFIG[DIMM0_CID] is nonzero). Returns a value of zero otherwise. */
8486         uint64_t fill_order            : 2;  /**< [ 39: 38](RO/H) Fill order for failing transaction. */
8487         uint64_t fdimm                 : 1;  /**< [ 37: 37](RO/H) Failing DIMM number. */
8488         uint64_t fbunk                 : 1;  /**< [ 36: 36](RO/H) Failing rank number. */
8489         uint64_t fbank                 : 4;  /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
8490         uint64_t frow                  : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
8491         uint64_t fcol                  : 14; /**< [ 13:  0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 64b data that
8492                                                                  had an ECC error, i.e. FCOL[0] is always 0. Can be used in conjunction with
8493                                                                  LMC()_INT[DED_ERR] to isolate the 64b chunk of data in error. */
8494 #else /* Word 0 - Little Endian */
8495         uint64_t fcol                  : 14; /**< [ 13:  0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 64b data that
8496                                                                  had an ECC error, i.e. FCOL[0] is always 0. Can be used in conjunction with
8497                                                                  LMC()_INT[DED_ERR] to isolate the 64b chunk of data in error. */
8498         uint64_t frow                  : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
8499         uint64_t fbank                 : 4;  /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
8500         uint64_t fbunk                 : 1;  /**< [ 36: 36](RO/H) Failing rank number. */
8501         uint64_t fdimm                 : 1;  /**< [ 37: 37](RO/H) Failing DIMM number. */
8502         uint64_t fill_order            : 2;  /**< [ 39: 38](RO/H) Fill order for failing transaction. */
8503         uint64_t fcid                  : 3;  /**< [ 42: 40](RO/H) Reserved.
8504                                                                  Internal:
8505                                                                  Failing CID number. This field is only valid when interfacing with 3DS DRAMs (i.e., when
8506                                                                  LMC()_EXT_CONFIG[DIMM0_CID] is nonzero). Returns a value of zero otherwise. */
8507         uint64_t reserved_43_63        : 21;
8508 #endif /* Word 0 - End */
8509     } s;
8510     /* struct bdk_lmcx_fadr_s cn81xx; */
8511     struct bdk_lmcx_fadr_cn88xx
8512     {
8513 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8514         uint64_t reserved_43_63        : 21;
8515         uint64_t fcid                  : 3;  /**< [ 42: 40](RO/H) Reserved.
8516                                                                  Internal:
8517                                                                  Failing CID number. This field is only valid when interfacing with 3DS DRAMs
8518                                                                  (i.e. when either LMC()_EXT_CONFIG[DIMM0_CID] or LMC()_EXT_CONFIG[DIMM1_CID] is
8519                                                                  nonzero). Returns a value of zero otherwise. */
8520         uint64_t fill_order            : 2;  /**< [ 39: 38](RO/H) Fill order for failing transaction. */
8521         uint64_t fdimm                 : 1;  /**< [ 37: 37](RO/H) Failing DIMM number. */
8522         uint64_t fbunk                 : 1;  /**< [ 36: 36](RO/H) Failing rank number. */
8523         uint64_t fbank                 : 4;  /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
8524         uint64_t frow                  : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
8525         uint64_t fcol                  : 14; /**< [ 13:  0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 64b data that
8526                                                                  had an ECC error, i.e. FCOL[0] is always 0. Can be used in conjunction with
8527                                                                  LMC()_INT[DED_ERR] to isolate the 64b chunk of data in error. */
8528 #else /* Word 0 - Little Endian */
8529         uint64_t fcol                  : 14; /**< [ 13:  0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 64b data that
8530                                                                  had an ECC error, i.e. FCOL[0] is always 0. Can be used in conjunction with
8531                                                                  LMC()_INT[DED_ERR] to isolate the 64b chunk of data in error. */
8532         uint64_t frow                  : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
8533         uint64_t fbank                 : 4;  /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
8534         uint64_t fbunk                 : 1;  /**< [ 36: 36](RO/H) Failing rank number. */
8535         uint64_t fdimm                 : 1;  /**< [ 37: 37](RO/H) Failing DIMM number. */
8536         uint64_t fill_order            : 2;  /**< [ 39: 38](RO/H) Fill order for failing transaction. */
8537         uint64_t fcid                  : 3;  /**< [ 42: 40](RO/H) Reserved.
8538                                                                  Internal:
8539                                                                  Failing CID number. This field is only valid when interfacing with 3DS DRAMs
8540                                                                  (i.e. when either LMC()_EXT_CONFIG[DIMM0_CID] or LMC()_EXT_CONFIG[DIMM1_CID] is
8541                                                                  nonzero). Returns a value of zero otherwise. */
8542         uint64_t reserved_43_63        : 21;
8543 #endif /* Word 0 - End */
8544     } cn88xx;
8545     /* struct bdk_lmcx_fadr_cn88xx cn83xx; */
8546 };
8547 typedef union bdk_lmcx_fadr bdk_lmcx_fadr_t;
8548 
8549 static inline uint64_t BDK_LMCX_FADR(unsigned long a) __attribute__ ((pure, always_inline));
8550 static inline uint64_t BDK_LMCX_FADR(unsigned long a)
8551 {
8552     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
8553         return 0x87e088000020ll + 0x1000000ll * ((a) & 0x0);
8554     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
8555         return 0x87e088000020ll + 0x1000000ll * ((a) & 0x1);
8556     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
8557         return 0x87e088000020ll + 0x1000000ll * ((a) & 0x3);
8558     __bdk_csr_fatal("LMCX_FADR", 1, a, 0, 0, 0);
8559 }
8560 
8561 #define typedef_BDK_LMCX_FADR(a) bdk_lmcx_fadr_t
8562 #define bustype_BDK_LMCX_FADR(a) BDK_CSR_TYPE_RSL
8563 #define basename_BDK_LMCX_FADR(a) "LMCX_FADR"
8564 #define device_bar_BDK_LMCX_FADR(a) 0x0 /* PF_BAR0 */
8565 #define busnum_BDK_LMCX_FADR(a) (a)
8566 #define arguments_BDK_LMCX_FADR(a) (a),-1,-1,-1
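
/* Editor-added illustrative sketch, not part of the auto-generated register
 * definitions: the LMC()_FADR description above explains how a captured value
 * splits into DIMM, rank (bunk), bank, row and column. The helper below only
 * decodes an already-read 64-bit value through the bitfield view defined
 * above; the function name is hypothetical, and reading the CSR itself is
 * left to the caller's usual CSR access routines. */
static inline void bdk_lmcx_fadr_example_decode(uint64_t raw,
                                                unsigned *dimm, unsigned *rank,
                                                unsigned *bank, unsigned *row,
                                                unsigned *col)
{
    bdk_lmcx_fadr_t fadr;
    fadr.u = raw;           /* View the captured value through the common layout. */
    *dimm  = fadr.s.fdimm;  /* 1 means the error is in the high DIMM. */
    *rank  = fadr.s.fbunk;
    *bank  = fadr.s.fbank;
    *row   = fadr.s.frow;
    *col   = fadr.s.fcol;   /* FCOL bit 0 is always 0 (64-bit data granularity). */
}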
8567 
8568 /**
8569  * Register (RSL) lmc#_ffe_ctle_ctl
8570  *
8571  * LMC FFE & CTLE Control Register
8572  */
8573 union bdk_lmcx_ffe_ctle_ctl
8574 {
8575     uint64_t u;
8576     struct bdk_lmcx_ffe_ctle_ctl_s
8577     {
8578 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8579         uint64_t reserved_27_63        : 37;
8580         uint64_t ctle_mem_ld           : 9;  /**< [ 26: 18](WO) Reserved.
8581                                                                  Internal:
8582                                                                  When set, the CTLE settings are loaded into the chosen rank's byte CTLE
8583                                                                  storage. Bits 18-25 correspond to bytes 0-7, bit 26 corresponds to ECC. The byte
8584                                                                  targeted will load its corresponding value from the CSR
8585                                                                  LMC()_FFE_CTLE_SETTINGS[CTLE*]. The rank is chosen by the CSR
8586                                                                  LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a oneshot operation and clears itself each
8587                                                                  time it is set. Note this has to be done during the bringup state where there
8588                                                                  isn't yet any traffic to DRAM. */
8589         uint64_t ffe_mem_ld            : 9;  /**< [ 17:  9](WO) Reserved.
8590                                                                  Internal:
8591                                                                  When set, the FFE settings are loaded into the chosen rank's byte FFE
8592                                                                  storage. Bits 9-16 correspond to bytes 0-7, bit 17 corresponds to ECC. The byte
8593                                                                  targeted will load its corresponding value from the CSR
8594                                                                  LMC()_FFE_CTLE_SETTINGS[FFE*]. The rank is chosen by the CSR
8595                                                                  LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a oneshot operation and clears itself each
8596                                                                  time it is set. Note this has to be done during the bringup state where there
8597                                                                  isn't yet any traffic to DRAM. */
8598         uint64_t ffe_enable            : 9;  /**< [  8:  0](R/W) When set, it enables the FFE feature per byte.
8599                                                                  Bits 0-7 correspond to bytes 0-7, bit 8 corresponds to ECC. */
8600 #else /* Word 0 - Little Endian */
8601         uint64_t ffe_enable            : 9;  /**< [  8:  0](R/W) When set, it enables the FFE feature per byte.
8602                                                                  Bits 0-7 correspond to bytes 0-7, bit 8 corresponds to ECC. */
8603         uint64_t ffe_mem_ld            : 9;  /**< [ 17:  9](WO) Reserved.
8604                                                                  Internal:
8605                                                                  When set, the FFE settings are loaded into the chosen rank's byte FFE
8606                                                                  storage. Bits 9-16 correspond to bytes 0-7, bit 17 corresponds to ECC. The byte
8607                                                                  targeted will load its corresponding value from the CSR
8608                                                                  LMC()_FFE_CTLE_SETTINGS[FFE*]. The rank is chosen by the CSR
8609                                                                  LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a oneshot operation and clears itself each
8610                                                                  time it is set. Note this has to be done during the bringup state where there
8611                                                                  isn't yet any traffic to DRAM. */
8612         uint64_t ctle_mem_ld           : 9;  /**< [ 26: 18](WO) Reserved.
8613                                                                  Internal:
8614                                                                  When set, the CTLE settings are loaded into the chosen rank's byte CTLE
8615                                                                  storage. Bits 18-25 correspond to bytes 0-7, bit 26 corresponds to ECC. The byte
8616                                                                  targeted will load its corresponding value from the CSR
8617                                                                  LMC()_FFE_CTLE_SETTINGS[CTLE*]. The rank is chosen by the CSR
8618                                                                  LMC()_MR_MPR_CTL[MR_WR_RANK]. This is a oneshot operation and clears itself each
8619                                                                  time it is set. Note this has to be done during the bringup state where there
8620                                                                  isn't yet any traffic to DRAM. */
8621         uint64_t reserved_27_63        : 37;
8622 #endif /* Word 0 - End */
8623     } s;
8624     /* struct bdk_lmcx_ffe_ctle_ctl_s cn; */
8625 };
8626 typedef union bdk_lmcx_ffe_ctle_ctl bdk_lmcx_ffe_ctle_ctl_t;
8627 
8628 static inline uint64_t BDK_LMCX_FFE_CTLE_CTL(unsigned long a) __attribute__ ((pure, always_inline));
8629 static inline uint64_t BDK_LMCX_FFE_CTLE_CTL(unsigned long a)
8630 {
8631     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
8632         return 0x87e0880002f0ll + 0x1000000ll * ((a) & 0x3);
8633     __bdk_csr_fatal("LMCX_FFE_CTLE_CTL", 1, a, 0, 0, 0);
8634 }
8635 
8636 #define typedef_BDK_LMCX_FFE_CTLE_CTL(a) bdk_lmcx_ffe_ctle_ctl_t
8637 #define bustype_BDK_LMCX_FFE_CTLE_CTL(a) BDK_CSR_TYPE_RSL
8638 #define basename_BDK_LMCX_FFE_CTLE_CTL(a) "LMCX_FFE_CTLE_CTL"
8639 #define device_bar_BDK_LMCX_FFE_CTLE_CTL(a) 0x0 /* PF_BAR0 */
8640 #define busnum_BDK_LMCX_FFE_CTLE_CTL(a) (a)
8641 #define arguments_BDK_LMCX_FFE_CTLE_CTL(a) (a),-1,-1,-1
8642 
8643 /**
8644  * Register (RSL) lmc#_ffe_ctle_settings
8645  *
8646  * LMC FFE & CTLE Settings Register
8647  */
8648 union bdk_lmcx_ffe_ctle_settings
8649 {
8650     uint64_t u;
8651     struct bdk_lmcx_ffe_ctle_settings_s
8652     {
8653 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8654         uint64_t reserved_45_63        : 19;
8655         uint64_t ctle8                 : 2;  /**< [ 44: 43](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8656         uint64_t ctle7                 : 2;  /**< [ 42: 41](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8657         uint64_t ctle6                 : 2;  /**< [ 40: 39](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8658         uint64_t ctle5                 : 2;  /**< [ 38: 37](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8659         uint64_t ctle4                 : 2;  /**< [ 36: 35](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8660         uint64_t ctle3                 : 2;  /**< [ 34: 33](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8661         uint64_t ctle2                 : 2;  /**< [ 32: 31](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8662         uint64_t ctle1                 : 2;  /**< [ 30: 29](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8663         uint64_t ctle0                 : 2;  /**< [ 28: 27](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8664         uint64_t ffe8                  : 3;  /**< [ 26: 24](R/W) Provides FFE TX calibration store per byte per rank. */
8665         uint64_t ffe7                  : 3;  /**< [ 23: 21](R/W) Provides FFE TX calibration store per byte per rank. */
8666         uint64_t ffe6                  : 3;  /**< [ 20: 18](R/W) Provides FFE TX calibration store per byte per rank. */
8667         uint64_t ffe5                  : 3;  /**< [ 17: 15](R/W) Provides FFE TX calibration store per byte per rank. */
8668         uint64_t ffe4                  : 3;  /**< [ 14: 12](R/W) Provides FFE TX calibration store per byte per rank. */
8669         uint64_t ffe3                  : 3;  /**< [ 11:  9](R/W) Provides FFE TX calibration store per byte per rank. */
8670         uint64_t ffe2                  : 3;  /**< [  8:  6](R/W) Provides FFE TX calibration store per byte per rank. */
8671         uint64_t ffe1                  : 3;  /**< [  5:  3](R/W) Provides FFE TX calibration store per byte per rank. */
8672         uint64_t ffe0                  : 3;  /**< [  2:  0](R/W) Provides FFE TX calibration store per byte per rank. */
8673 #else /* Word 0 - Little Endian */
8674         uint64_t ffe0                  : 3;  /**< [  2:  0](R/W) Provides FFE TX calibration store per byte per rank. */
8675         uint64_t ffe1                  : 3;  /**< [  5:  3](R/W) Provides FFE TX calibration store per byte per rank. */
8676         uint64_t ffe2                  : 3;  /**< [  8:  6](R/W) Provides FFE TX calibration store per byte per rank. */
8677         uint64_t ffe3                  : 3;  /**< [ 11:  9](R/W) Provides FFE TX calibration store per byte per rank. */
8678         uint64_t ffe4                  : 3;  /**< [ 14: 12](R/W) Provides FFE TX calibration store per byte per rank. */
8679         uint64_t ffe5                  : 3;  /**< [ 17: 15](R/W) Provides FFE TX calibration store per byte per rank. */
8680         uint64_t ffe6                  : 3;  /**< [ 20: 18](R/W) Provides FFE TX calibration store per byte per rank. */
8681         uint64_t ffe7                  : 3;  /**< [ 23: 21](R/W) Provides FFE TX calibration store per byte per rank. */
8682         uint64_t ffe8                  : 3;  /**< [ 26: 24](R/W) Provides FFE TX calibration store per byte per rank. */
8683         uint64_t ctle0                 : 2;  /**< [ 28: 27](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8684         uint64_t ctle1                 : 2;  /**< [ 30: 29](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8685         uint64_t ctle2                 : 2;  /**< [ 32: 31](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8686         uint64_t ctle3                 : 2;  /**< [ 34: 33](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8687         uint64_t ctle4                 : 2;  /**< [ 36: 35](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8688         uint64_t ctle5                 : 2;  /**< [ 38: 37](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8689         uint64_t ctle6                 : 2;  /**< [ 40: 39](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8690         uint64_t ctle7                 : 2;  /**< [ 42: 41](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8691         uint64_t ctle8                 : 2;  /**< [ 44: 43](R/W) This register will set the RX CTLE setting for both DQ and DQS per byte and per rank. */
8692         uint64_t reserved_45_63        : 19;
8693 #endif /* Word 0 - End */
8694     } s;
8695     /* struct bdk_lmcx_ffe_ctle_settings_s cn; */
8696 };
8697 typedef union bdk_lmcx_ffe_ctle_settings bdk_lmcx_ffe_ctle_settings_t;
8698 
8699 static inline uint64_t BDK_LMCX_FFE_CTLE_SETTINGS(unsigned long a) __attribute__ ((pure, always_inline));
8700 static inline uint64_t BDK_LMCX_FFE_CTLE_SETTINGS(unsigned long a)
8701 {
8702     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
8703         return 0x87e0880002e0ll + 0x1000000ll * ((a) & 0x3);
8704     __bdk_csr_fatal("LMCX_FFE_CTLE_SETTINGS", 1, a, 0, 0, 0);
8705 }
8706 
8707 #define typedef_BDK_LMCX_FFE_CTLE_SETTINGS(a) bdk_lmcx_ffe_ctle_settings_t
8708 #define bustype_BDK_LMCX_FFE_CTLE_SETTINGS(a) BDK_CSR_TYPE_RSL
8709 #define basename_BDK_LMCX_FFE_CTLE_SETTINGS(a) "LMCX_FFE_CTLE_SETTINGS"
8710 #define device_bar_BDK_LMCX_FFE_CTLE_SETTINGS(a) 0x0 /* PF_BAR0 */
8711 #define busnum_BDK_LMCX_FFE_CTLE_SETTINGS(a) (a)
8712 #define arguments_BDK_LMCX_FFE_CTLE_SETTINGS(a) (a),-1,-1,-1
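
/* Editor-added illustrative sketch, not part of the auto-generated register
 * definitions: per the descriptions above, the per-rank FFE/CTLE load flow is
 * to program the per-byte codes in LMC()_FFE_CTLE_SETTINGS, select the target
 * rank through LMC()_MR_MPR_CTL[MR_WR_RANK], and then pulse the one-shot
 * [FFE_MEM_LD]/[CTLE_MEM_LD] bits in LMC()_FFE_CTLE_CTL, all before any DRAM
 * traffic. The helper below only builds the two register values (applying a
 * single FFE code and a single CTLE code to all nine byte lanes); the function
 * and parameter names are hypothetical, and the CSR writes plus the rank
 * selection are left to the caller. */
static inline void bdk_lmcx_ffe_ctle_example_build(unsigned ffe_code,
                                                   unsigned ctle_code,
                                                   uint64_t *settings_val,
                                                   uint64_t *ctl_val)
{
    bdk_lmcx_ffe_ctle_settings_t settings = { .u = 0 };
    bdk_lmcx_ffe_ctle_ctl_t ctl = { .u = 0 };

    /* Apply one 3-bit FFE code and one 2-bit CTLE code to bytes 0-7 plus ECC. */
    settings.s.ffe0 = settings.s.ffe1 = settings.s.ffe2 = ffe_code & 0x7;
    settings.s.ffe3 = settings.s.ffe4 = settings.s.ffe5 = ffe_code & 0x7;
    settings.s.ffe6 = settings.s.ffe7 = settings.s.ffe8 = ffe_code & 0x7;
    settings.s.ctle0 = settings.s.ctle1 = settings.s.ctle2 = ctle_code & 0x3;
    settings.s.ctle3 = settings.s.ctle4 = settings.s.ctle5 = ctle_code & 0x3;
    settings.s.ctle6 = settings.s.ctle7 = settings.s.ctle8 = ctle_code & 0x3;

    /* One-shot load request for all nine byte lanes, both FFE and CTLE storage. */
    ctl.s.ffe_mem_ld  = 0x1FF;
    ctl.s.ctle_mem_ld = 0x1FF;

    *settings_val = settings.u;
    *ctl_val = ctl.u;
}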
8713 
8714 /**
8715  * Register (RSL) lmc#_general_purpose0
8716  *
8717  * LMC General Purpose Register
8718  */
8719 union bdk_lmcx_general_purpose0
8720 {
8721     uint64_t u;
8722     struct bdk_lmcx_general_purpose0_s
8723     {
8724 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8725         uint64_t data                  : 64; /**< [ 63:  0](R/W) General purpose data register.  See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
8726 #else /* Word 0 - Little Endian */
8727         uint64_t data                  : 64; /**< [ 63:  0](R/W) General purpose data register.  See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
8728 #endif /* Word 0 - End */
8729     } s;
8730     /* struct bdk_lmcx_general_purpose0_s cn; */
8731 };
8732 typedef union bdk_lmcx_general_purpose0 bdk_lmcx_general_purpose0_t;
8733 
8734 static inline uint64_t BDK_LMCX_GENERAL_PURPOSE0(unsigned long a) __attribute__ ((pure, always_inline));
8735 static inline uint64_t BDK_LMCX_GENERAL_PURPOSE0(unsigned long a)
8736 {
8737     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
8738         return 0x87e088000340ll + 0x1000000ll * ((a) & 0x0);
8739     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
8740         return 0x87e088000340ll + 0x1000000ll * ((a) & 0x1);
8741     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
8742         return 0x87e088000340ll + 0x1000000ll * ((a) & 0x3);
8743     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
8744         return 0x87e088000340ll + 0x1000000ll * ((a) & 0x3);
8745     __bdk_csr_fatal("LMCX_GENERAL_PURPOSE0", 1, a, 0, 0, 0);
8746 }
8747 
8748 #define typedef_BDK_LMCX_GENERAL_PURPOSE0(a) bdk_lmcx_general_purpose0_t
8749 #define bustype_BDK_LMCX_GENERAL_PURPOSE0(a) BDK_CSR_TYPE_RSL
8750 #define basename_BDK_LMCX_GENERAL_PURPOSE0(a) "LMCX_GENERAL_PURPOSE0"
8751 #define device_bar_BDK_LMCX_GENERAL_PURPOSE0(a) 0x0 /* PF_BAR0 */
8752 #define busnum_BDK_LMCX_GENERAL_PURPOSE0(a) (a)
8753 #define arguments_BDK_LMCX_GENERAL_PURPOSE0(a) (a),-1,-1,-1
8754 
8755 /**
8756  * Register (RSL) lmc#_general_purpose1
8757  *
8758  * LMC General Purpose 1 Register
8759  */
8760 union bdk_lmcx_general_purpose1
8761 {
8762     uint64_t u;
8763     struct bdk_lmcx_general_purpose1_s
8764     {
8765 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8766         uint64_t data                  : 64; /**< [ 63:  0](R/W) General purpose data register.  See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
8767 #else /* Word 0 - Little Endian */
8768         uint64_t data                  : 64; /**< [ 63:  0](R/W) General purpose data register.  See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
8769 #endif /* Word 0 - End */
8770     } s;
8771     /* struct bdk_lmcx_general_purpose1_s cn; */
8772 };
8773 typedef union bdk_lmcx_general_purpose1 bdk_lmcx_general_purpose1_t;
8774 
8775 static inline uint64_t BDK_LMCX_GENERAL_PURPOSE1(unsigned long a) __attribute__ ((pure, always_inline));
8776 static inline uint64_t BDK_LMCX_GENERAL_PURPOSE1(unsigned long a)
8777 {
8778     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
8779         return 0x87e088000348ll + 0x1000000ll * ((a) & 0x0);
8780     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
8781         return 0x87e088000348ll + 0x1000000ll * ((a) & 0x1);
8782     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
8783         return 0x87e088000348ll + 0x1000000ll * ((a) & 0x3);
8784     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
8785         return 0x87e088000348ll + 0x1000000ll * ((a) & 0x3);
8786     __bdk_csr_fatal("LMCX_GENERAL_PURPOSE1", 1, a, 0, 0, 0);
8787 }
8788 
8789 #define typedef_BDK_LMCX_GENERAL_PURPOSE1(a) bdk_lmcx_general_purpose1_t
8790 #define bustype_BDK_LMCX_GENERAL_PURPOSE1(a) BDK_CSR_TYPE_RSL
8791 #define basename_BDK_LMCX_GENERAL_PURPOSE1(a) "LMCX_GENERAL_PURPOSE1"
8792 #define device_bar_BDK_LMCX_GENERAL_PURPOSE1(a) 0x0 /* PF_BAR0 */
8793 #define busnum_BDK_LMCX_GENERAL_PURPOSE1(a) (a)
8794 #define arguments_BDK_LMCX_GENERAL_PURPOSE1(a) (a),-1,-1,-1
8795 
8796 /**
8797  * Register (RSL) lmc#_general_purpose2
8798  *
8799  * LMC General Purpose 2 Register
8800  */
8801 union bdk_lmcx_general_purpose2
8802 {
8803     uint64_t u;
8804     struct bdk_lmcx_general_purpose2_s
8805     {
8806 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8807         uint64_t reserved_16_63        : 48;
8808         uint64_t data                  : 16; /**< [ 15:  0](R/W) General purpose data register.  See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
8809 #else /* Word 0 - Little Endian */
8810         uint64_t data                  : 16; /**< [ 15:  0](R/W) General purpose data register.  See LMC()_PPR_CTL and LMC()_DBTRAIN_CTL[RW_TRAIN]. */
8811         uint64_t reserved_16_63        : 48;
8812 #endif /* Word 0 - End */
8813     } s;
8814     /* struct bdk_lmcx_general_purpose2_s cn; */
8815 };
8816 typedef union bdk_lmcx_general_purpose2 bdk_lmcx_general_purpose2_t;
8817 
8818 static inline uint64_t BDK_LMCX_GENERAL_PURPOSE2(unsigned long a) __attribute__ ((pure, always_inline));
8819 static inline uint64_t BDK_LMCX_GENERAL_PURPOSE2(unsigned long a)
8820 {
8821     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
8822         return 0x87e088000350ll + 0x1000000ll * ((a) & 0x0);
8823     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
8824         return 0x87e088000350ll + 0x1000000ll * ((a) & 0x1);
8825     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
8826         return 0x87e088000350ll + 0x1000000ll * ((a) & 0x3);
8827     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
8828         return 0x87e088000350ll + 0x1000000ll * ((a) & 0x3);
8829     __bdk_csr_fatal("LMCX_GENERAL_PURPOSE2", 1, a, 0, 0, 0);
8830 }
8831 
8832 #define typedef_BDK_LMCX_GENERAL_PURPOSE2(a) bdk_lmcx_general_purpose2_t
8833 #define bustype_BDK_LMCX_GENERAL_PURPOSE2(a) BDK_CSR_TYPE_RSL
8834 #define basename_BDK_LMCX_GENERAL_PURPOSE2(a) "LMCX_GENERAL_PURPOSE2"
8835 #define device_bar_BDK_LMCX_GENERAL_PURPOSE2(a) 0x0 /* PF_BAR0 */
8836 #define busnum_BDK_LMCX_GENERAL_PURPOSE2(a) (a)
8837 #define arguments_BDK_LMCX_GENERAL_PURPOSE2(a) (a),-1,-1,-1
8838 
8839 /**
8840  * Register (RSL) lmc#_ifb_cnt
8841  *
8842  * LMC IFB Performance Counter Register
8843  */
8844 union bdk_lmcx_ifb_cnt
8845 {
8846     uint64_t u;
8847     struct bdk_lmcx_ifb_cnt_s
8848     {
8849 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8850         uint64_t ifbcnt                : 64; /**< [ 63:  0](RO/H) Performance counter. 64-bit counter that increments every CK cycle that there is something
8851                                                                  in the in-flight buffer. */
8852 #else /* Word 0 - Little Endian */
8853         uint64_t ifbcnt                : 64; /**< [ 63:  0](RO/H) Performance counter. 64-bit counter that increments every CK cycle that there is something
8854                                                                  in the in-flight buffer. */
8855 #endif /* Word 0 - End */
8856     } s;
8857     /* struct bdk_lmcx_ifb_cnt_s cn; */
8858 };
8859 typedef union bdk_lmcx_ifb_cnt bdk_lmcx_ifb_cnt_t;
8860 
8861 static inline uint64_t BDK_LMCX_IFB_CNT(unsigned long a) __attribute__ ((pure, always_inline));
8862 static inline uint64_t BDK_LMCX_IFB_CNT(unsigned long a)
8863 {
8864     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
8865         return 0x87e0880001d0ll + 0x1000000ll * ((a) & 0x0);
8866     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
8867         return 0x87e0880001d0ll + 0x1000000ll * ((a) & 0x1);
8868     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
8869         return 0x87e0880001d0ll + 0x1000000ll * ((a) & 0x3);
8870     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
8871         return 0x87e0880001d0ll + 0x1000000ll * ((a) & 0x3);
8872     __bdk_csr_fatal("LMCX_IFB_CNT", 1, a, 0, 0, 0);
8873 }
8874 
8875 #define typedef_BDK_LMCX_IFB_CNT(a) bdk_lmcx_ifb_cnt_t
8876 #define bustype_BDK_LMCX_IFB_CNT(a) BDK_CSR_TYPE_RSL
8877 #define basename_BDK_LMCX_IFB_CNT(a) "LMCX_IFB_CNT"
8878 #define device_bar_BDK_LMCX_IFB_CNT(a) 0x0 /* PF_BAR0 */
8879 #define busnum_BDK_LMCX_IFB_CNT(a) (a)
8880 #define arguments_BDK_LMCX_IFB_CNT(a) (a),-1,-1,-1
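
/* Editor-added illustrative sketch, not part of the auto-generated register
 * definitions: as described above, LMC()_IFB_CNT increments on every CK cycle
 * in which the in-flight buffer is non-empty, so the delta between two samples
 * over a known CK-cycle window gives a rough utilization figure. The helper
 * below only performs that arithmetic; sampling the CSR twice and measuring
 * the elapsed CK cycles are left to the caller, and the function name is
 * hypothetical. */
static inline unsigned bdk_lmcx_ifb_cnt_example_busy_percent(uint64_t first_sample,
                                                             uint64_t second_sample,
                                                             uint64_t ck_cycles)
{
    if (ck_cycles == 0)
        return 0;
    /* The counter is 64 bits wide, so wrap-around is not a practical concern. */
    return (unsigned)(((second_sample - first_sample) * 100) / ck_cycles);
}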
8881 
8882 /**
8883  * Register (RSL) lmc#_int
8884  *
8885  * LMC Interrupt Register
8886  * This register contains the different interrupt-summary bits of the LMC.
8887  */
8888 union bdk_lmcx_int
8889 {
8890     uint64_t u;
8891     struct bdk_lmcx_int_s
8892     {
8893 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
8894         uint64_t reserved_15_63        : 49;
8895         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1C/H) Error threshold set by LMC()_RETRY_CONFIG[MAX_ERRORS] is reached. */
8896         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reserved. */
8897         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reserved. */
8898         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt.
8899                                                                  Asserts whenever the corresponding DDR*_ERROR_ALERT_L pin (e.g. DDR4 ALERT_n)
8900                                                                  asserts.
8901 
8902                                                                  If LMC is auto-retrying address parity and/or write CRC errors, i.e. if
8903                                                                  LMC()_RETRY_CONFIG[RETRY_ENABLE,AUTO_ERROR_CONTINUE]=1,1
8904                                                                  (LMC()_MODEREG_PARAMS3[CA_PAR_PERS] should also be set - the DRAM should
8905                                                                  be in persistent parity error mode), then the DDR_ERR interrupt routine
8906                                                                  should:
8907                                                                    \<pre\>
8908                                                                    X=LMC()_RETRY_STATUS[ERROR_COUNT]
8909                                                                    do {
8910                                                                    Y = X
8911                                                                    Wait approximately 100ns
8912                                                                    Write a one to [DDR_ERR] to clear it (if set)
8913                                                                    X = LMC()_RETRY_STATUS[ERROR_COUNT]
8914                                                                    } while (X != Y);
8915                                                                    Write LMC()_RETRY_STATUS[CLEAR_ERROR_COUNT]=1 (to clear
8916                                                                    LMC()_RETRY_STATUS[ERROR_COUNT])
8917                                                                    \</pre\>
8918 
8919                                                                  If X \< LMC()_RETRY_CONFIG[MAX_ERRORS] after this sequence, assume that
8920                                                                  the hardware successfully corrected the error - software may
8921                                                                  choose to count the number of these errors. Else consider the error
8922                                                                  to be uncorrected and possibly fatal.
8923 
8924                                                                  Otherwise, if LMC is not auto-retrying, a [DDR_ERR] error may always be
8925                                                                  considered fatal. */
8926         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reserved. */
8927         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reserved. */
8928         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Double-bit error detected on a DRAM read. Generally an indication of DRAM
8929                                                                  corruption and may be considered fatal.
8930 
8931                                                                  In 64b mode:
8932                                                                  _ \<5\> corresponds to DQ[63:0]_c0_p0.
8933                                                                  _ \<6\> corresponds to DQ[63:0]_c0_p1.
8934                                                                  _ \<7\> corresponds to DQ[63:0]_c1_p0.
8935                                                                  _ \<8\> corresponds to DQ[63:0]_c1_p1.
8936                                                                  _ where _cC_pP denotes cycle C and phase P.
8937 
8938                                                                  In 32b mode, each bit corresponds to 2 phases:
8939                                                                  _ \<5\> corresponds to DQ[31:0]_c0_p1/0.
8940                                                                  _ \<6\> corresponds to DQ[31:0]_c1_p1/0.
8941                                                                  _ \<7\> corresponds to DQ[31:0]_c2_p1/0.
8942                                                                  _ \<8\> corresponds to DQ[31:0]_c3_p1/0. */
8943         uint64_t reserved_1_4          : 4;
8944         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes,
8945                                                                  but LMC()_EXT_CONFIG[L2C_NXM_RD,L2C_NXM_WR] actually determine whether NXM reads and
8946                                                                  writes (respectively) participate in [NXM_WR_ERR]. NXM writes are generally an indication
8947                                                                  of failure. When [NXM_WR_ERR] is set, LMC()_NXM_FADR indicates the NXM address. */
8948 #else /* Word 0 - Little Endian */
8949         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes,
8950                                                                  but LMC()_EXT_CONFIG[L2C_NXM_RD,L2C_NXM_WR] actually determine whether NXM reads and
8951                                                                  writes (respectively) participate in [NXM_WR_ERR]. NXM writes are generally an indication
8952                                                                  of failure. When [NXM_WR_ERR] is set, LMC()_NXM_FADR indicates the NXM address. */
8953         uint64_t reserved_1_4          : 4;
8954         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Double-bit error detected on a DRAM read. Generally an indication of DRAM
8955                                                                  corruption and may be considered fatal.
8956 
8957                                                                  In 64b mode:
8958                                                                  _ \<5\> corresponds to DQ[63:0]_c0_p0.
8959                                                                  _ \<6\> corresponds to DQ[63:0]_c0_p1.
8960                                                                  _ \<7\> corresponds to DQ[63:0]_c1_p0.
8961                                                                  _ \<8\> corresponds to DQ[63:0]_c1_p1.
8962                                                                  _ where _cC_pP denotes cycle C and phase P.
8963 
8964                                                                  In 32b mode, each bit corresponds to 2 phases:
8965                                                                  _ \<5\> corresponds to DQ[31:0]_c0_p1/0.
8966                                                                  _ \<6\> corresponds to DQ[31:0]_c1_p1/0.
8967                                                                  _ \<7\> corresponds to DQ[31:0]_c2_p1/0.
8968                                                                  _ \<8\> corresponds to DQ[31:0]_c3_p1/0. */
8969         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reserved. */
8970         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reserved. */
8971         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt.
8972                                                                  Asserts whenever the corresponding DDR*_ERROR_ALERT_L pin (e.g. DDR4 ALERT_n)
8973                                                                  asserts.
8974 
8975                                                                  If LMC is auto-retrying address parity and/or write CRC errors, i.e. if
8976                                                                  LMC()_RETRY_CONFIG[RETRY_ENABLE,AUTO_ERROR_CONTINUE]=1,1
8977                                                                  (LMC()_MODEREG_PARAMS3[CA_PAR_PERS] should also be set - the DRAM should
8978                                                                  be in persistent parity error mode), then the DDR_ERR interrupt routine
8979                                                                  should:
8980                                                                    \<pre\>
8981                                                                    X=LMC()_RETRY_STATUS[ERROR_COUNT]
8982                                                                    do {
8983                                                                    Y = X
8984                                                                    Wait approximately 100ns
8985                                                                    Write a one to [DDR_ERR] to clear it (if set)
8986                                                                    X = LMC()_RETRY_STATUS[ERROR_COUNT]
8987                                                                    } while (X != Y);
8988                                                                    Write LMC()_RETRY_STATUS[CLEAR_ERROR_COUNT]=1 (to clear
8989                                                                    LMC()_RETRY_STATUS[ERROR_COUNT])
8990                                                                    \</pre\>
8991 
8992                                                                  If X \< LMC()_RETRY_CONFIG[MAX_ERRORS] after this sequence, assume that
8993                                                                  the hardware successfully corrected the error - software may
8994                                                                  choose to count the number of these errors. Else consider the error
8995                                                                  to be uncorrected and possibly fatal.
8996 
8997                                                                  Otherwise, if LMC is not auto-retrying, a [DDR_ERR] error may always be
8998                                                                  considered fatal. */
8999         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reserved. */
9000         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reserved. */
9001         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1C/H) Error threshold set by LMC()_RETRY_CONFIG[MAX_ERRORS] is reached. */
9002         uint64_t reserved_15_63        : 49;
9003 #endif /* Word 0 - End */
9004     } s;
9005     struct bdk_lmcx_int_cn8
9006     {
9007 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9008         uint64_t reserved_14_63        : 50;
9009         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reserved. */
9010         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reserved. */
9011         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt.
9012                                                                  Asserts whenever the corresponding DDR*_ERROR_ALERT_L pin (e.g. DDR4 ALERT_n)
9013                                                                  asserts.
9014 
9015                                                                  If LMC is auto-retrying address parity and/or write CRC errors, i.e. if
9016                                                                  LMC()_RETRY_CONFIG[RETRY_ENABLE,AUTO_ERROR_CONTINUE]=1,1
9017                                                                  (LMC()_MODEREG_PARAMS3[CA_PAR_PERS] should also be set - the DRAM should
9018                                                                  be in persistent parity error mode), then the DDR_ERR interrupt routine
9019                                                                  should:
9020                                                                    \<pre\>
9021                                                                    X=LMC()_RETRY_STATUS[ERROR_COUNT]
9022                                                                    do {
9023                                                                    Y = X
9024                                                                    Wait approximately 100ns
9025                                                                    Write a one to [DDR_ERR] to clear it (if set)
9026                                                                    X = LMC()_RETRY_STATUS[ERROR_COUNT]
9027                                                                    } while (X != Y);
9028                                                                    Write LMC()_RETRY_STATUS[CLEAR_ERROR_COUNT]=1 (to clear
9029                                                                    LMC()_RETRY_STATUS[ERROR_COUNT])
9030                                                                    \</pre\>
9031 
9032                                                                  If X \< LMC()_RETRY_CONFIG[MAX_ERRORS] after this sequence, assume that
9033                                                                  the hardware successfully corrected the error - software may
9034                                                                  choose to count the number of these errors. Else consider the error
9035                                                                  to be uncorrected and possibly fatal.
9036 
9037                                                                  Otherwise, if LMC is not auto-retrying, a [DDR_ERR] error may always be
9038                                                                  considered fatal. */
9039         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reserved. */
9040         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reserved. */
9041         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Double-bit error detected on a DRAM read. Generally an indication of DRAM
9042                                                                  corruption and may be considered fatal.
9043 
9044                                                                  In 64b mode:
9045                                                                  _ \<5\> corresponds to DQ[63:0]_c0_p0.
9046                                                                  _ \<6\> corresponds to DQ[63:0]_c0_p1.
9047                                                                  _ \<7\> corresponds to DQ[63:0]_c1_p0.
9048                                                                  _ \<8\> corresponds to DQ[63:0]_c1_p1.
9049                                                                  _ where _cC_pP denotes cycle C and phase P.
9050 
9051                                                                  In 32b mode, each bit corresponds to 2 phases:
9052                                                                  _ \<5\> corresponds to DQ[31:0]_c0_p1/0.
9053                                                                  _ \<6\> corresponds to DQ[31:0]_c1_p1/0.
9054                                                                  _ \<7\> corresponds to DQ[31:0]_c2_p1/0.
9055                                                                  _ \<8\> corresponds to DQ[31:0]_c3_p1/0. */
9056         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1C/H) Single-bit error detected on a DRAM read.
9057                                                                  When any of [SEC_ERR\<3:0\>] are set, hardware corrected the error before using the value,
9058                                                                  but did not correct any stored value. When any of [SEC_ERR\<3:0\>] are set, software should
9059                                                                  scrub the memory location whose address is in LMC()_SCRAMBLED_FADR before clearing the
9060                                                                  [SEC_ERR\<3:0\>] bits. Otherwise, hardware may encounter the error again the next time the
9061                                                                  same memory location is referenced. We recommend that the entire 128-byte cache block be
9062                                                                  scrubbed via load-exclusive/store-release instructions, but other methods are possible.
9063                                                                  Software may also choose to count the number of these single-bit errors.
9064 
9065                                                                  In 64b mode:
9066                                                                  _ \<1\> corresponds to DQ[63:0]_c0_p0.
9067                                                                  _ \<2\> corresponds to DQ[63:0]_c0_p1.
9068                                                                  _ \<3\> corresponds to DQ[63:0]_c1_p0.
9069                                                                  _ \<4\> corresponds to DQ[63:0]_c1_p1.
9070                                                                  _ where _cC_pP denotes cycle C and phase P.
9071 
9072                                                                  In 32b mode, each bit corresponds to 2 phases:
9073                                                                  _ \<1\> corresponds to DQ[31:0]_c0_p1/0.
9074                                                                  _ \<2\> corresponds to DQ[31:0]_c1_p1/0.
9075                                                                  _ \<3\> corresponds to DQ[31:0]_c2_p1/0.
9076                                                                  _ \<4\> corresponds to DQ[31:0]_c3_p1/0. */
9077         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes set
9078                                                                  this bit, but LMC()_EXT_CONFIG[L2C_NXM_RD,L2C_NXM_WR] actually determine whether NXM reads and
9079                                                                  writes (respectively) participate in [NXM_WR_ERR]. NXM writes are generally an indication
9080                                                                  of failure. When [NXM_WR_ERR] is set, LMC()_NXM_FADR indicates the NXM address. */
9081 #else /* Word 0 - Little Endian */
9082         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes set
9083                                                                  this bit, but LMC()_EXT_CONFIG[L2C_NXM_RD,L2C_NXM_WR] actually determine whether NXM reads and
9084                                                                  writes (respectively) participate in [NXM_WR_ERR]. NXM writes are generally an indication
9085                                                                  of failure. When [NXM_WR_ERR] is set, LMC()_NXM_FADR indicates the NXM address. */
9086         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1C/H) Single-bit error detected on a DRAM read.
9087                                                                  When any of [SEC_ERR\<3:0\>] are set, hardware corrected the error before using the value,
9088                                                                  but did not correct any stored value. When any of [SEC_ERR\<3:0\>] are set, software should
9089                                                                  scrub the memory location whose address is in LMC()_SCRAMBLED_FADR before clearing the
9090                                                                  [SEC_ERR\<3:0\>] bits. Otherwise, hardware may encounter the error again the next time the
9091                                                                  same memory location is referenced. We recommend that the entire 128-byte cache block be
9092                                                                  scrubbed via load-exclusive/store-release instructions, but other methods are possible.
9093                                                                  Software may also choose to count the number of these single-bit errors.
9094 
9095                                                                  In 64b mode:
9096                                                                  _ \<1\> corresponds to DQ[63:0]_c0_p0.
9097                                                                  _ \<2\> corresponds to DQ[63:0]_c0_p1.
9098                                                                  _ \<3\> corresponds to DQ[63:0]_c1_p0.
9099                                                                  _ \<4\> corresponds to DQ[63:0]_c1_p1.
9100                                                                  _ where _cC_pP denotes cycle C and phase P.
9101 
9102                                                                  In 32b mode, each bit corresponds to 2 phases:
9103                                                                  _ \<1\> corresponds to DQ[31:0]_c0_p1/0.
9104                                                                  _ \<2\> corresponds to DQ[31:0]_c1_p1/0.
9105                                                                  _ \<3\> corresponds to DQ[31:0]_c2_p1/0.
9106                                                                  _ \<4\> corresponds to DQ[31:0]_c3_p1/0. */
9107         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Double-bit error detected on a DRAM read. Generally an indication of DRAM
9108                                                                  corruption and may be considered fatal.
9109 
9110                                                                  In 64b mode:
9111                                                                  _ \<5\> corresponds to DQ[63:0]_c0_p0.
9112                                                                  _ \<6\> corresponds to DQ[63:0]_c0_p1.
9113                                                                  _ \<7\> corresponds to DQ[63:0]_c1_p0.
9114                                                                  _ \<8\> corresponds to DQ[63:0]_c1_p1.
9115                                                                  _ where _cC_pP denotes cycle C and phase P.
9116 
9117                                                                  In 32b mode, each bit corresponds to 2 phases:
9118                                                                  _ \<5\> corresponds to DQ[31:0]_c0_p1/0.
9119                                                                  _ \<6\> corresponds to DQ[31:0]_c1_p1/0.
9120                                                                  _ \<7\> corresponds to DQ[31:0]_c2_p1/0.
9121                                                                  _ \<8\> corresponds to DQ[31:0]_c3_p1/0. */
9122         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reserved. */
9123         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reserved. */
9124         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt.
9125                                                                  Asserts whenever the corresponding DDR*_ERROR_ALERT_L pin (e.g. DDR4 ALERT_n)
9126                                                                  asserts.
9127 
9128                                                                  If LMC is auto-retrying address parity and/or write CRC errors, i.e. if
9129                                                                  LMC()_RETRY_CONFIG[RETRY_ENABLE,AUTO_ERROR_CONTINUE]=1,1
9130                                                                  (LMC()_MODEREG_PARAMS3[CA_PAR_PERS] should also be set - the DRAM should
9131                                                                  be in persistent parity error mode), then the DDR_ERR interrupt routine
9132                                                                  should:
9133                                                                    \<pre\>
9134                                                                    X=LMC()_RETRY_STATUS[ERROR_COUNT]
9135                                                                    do {
9136                                                                    Y = X
9137                                                                    Wait approximately 100ns
9138                                                                    Write a one to [DDR_ERR] to clear it (if set)
9139                                                                    X = LMC()_RETRY_STATUS[ERROR_COUNT]
9140                                                                    } while (X != Y);
9141                                                                    Write LMC()_RETRY_STATUS[CLEAR_ERROR_COUNT]=1 (to clear
9142                                                                    LMC()_RETRY_STATUS[ERROR_COUNT])
9143                                                                    \</pre\>
9144 
9145                                                                  If X \< LMC()_RETRY_CONFIG[MAX_ERRORS] after this sequence, assume that
9146                                                                  the hardware successfully corrected the error - software may
9147                                                                  choose to count the number of these errors. Else consider the error
9148                                                                  to be uncorrected and possibly fatal.
9149 
9150                                                                  Otherwise, if LMC is not auto-retrying, a [DDR_ERR] error may always be
9151                                                                  considered fatal. */
9152         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reserved. */
9153         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reserved. */
9154         uint64_t reserved_14_63        : 50;
9155 #endif /* Word 0 - End */
9156     } cn8;
9157     struct bdk_lmcx_int_cn9
9158     {
9159 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9160         uint64_t reserved_15_63        : 49;
9161         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1C/H) Error threshold set by LMC()_RETRY_CONFIG[MAX_ERRORS] is reached. */
9162         uint64_t reserved_12_13        : 2;
9163         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt. */
9164         uint64_t reserved_3_10         : 8;
9165         uint64_t ref_pend_max1         : 1;  /**< [  2:  2](R/W1C/H) Indicates that the number of pending refreshes has reached 7.
9166                                                                  Software needs to clear the interrupt before a new max can be detected.
9167                                                                  This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
9168         uint64_t ref_pend_max0         : 1;  /**< [  1:  1](R/W1C/H) Indicates that the number of pending refreshes has reached 7.
9169                                                                  Software needs to clear the interrupt before a new max can be detected.
9170                                                                  This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
9171         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes set
9172                                                                  this bit, but LMC()_EXT_CONFIG[L2C_NXM_RD], LMC()_EXT_CONFIG[L2C_NXM_WR] actually
9173                                                                  determine whether NXM reads and writes (respectively) participate in
9174                                                                  [NXM_WR_ERR]. NXM writes are generally an indication of failure. LMC()_NXM_FADR
9175                                                                  indicates the NXM address. */
9176 #else /* Word 0 - Little Endian */
9177         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) When set, indicates an access to nonexistent memory. Normally only NXM writes set
9178                                                                  this bit, but LMC()_EXT_CONFIG[L2C_NXM_RD], LMC()_EXT_CONFIG[L2C_NXM_WR] actually
9179                                                                  determine whether NXM reads and writes (respectively) participate in
9180                                                                  [NXM_WR_ERR]. NXM writes are generally an indication of failure. LMC()_NXM_FADR
9181                                                                  indicates the NXM address. */
9182         uint64_t ref_pend_max0         : 1;  /**< [  1:  1](R/W1C/H) Indicates that the number of pending refreshes has reached 7.
9183                                                                  Software needs to clear the interrupt before a new max can be detected.
9184                                                                  This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
9185         uint64_t ref_pend_max1         : 1;  /**< [  2:  2](R/W1C/H) Indicates that the number of pending refreshes has reached 7.
9186                                                                  Software needs to clear the interrupt before a new max can be detected.
9187                                                                  This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
9188         uint64_t reserved_3_10         : 8;
9189         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) DDR RAM error alert interrupt. */
9190         uint64_t reserved_12_13        : 2;
9191         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1C/H) Error threshold set by LMC()_RETRY_CONFIG[MAX_ERRORS] is reached. */
9192         uint64_t reserved_15_63        : 49;
9193 #endif /* Word 0 - End */
9194     } cn9;
9195 };
9196 typedef union bdk_lmcx_int bdk_lmcx_int_t;
9197 
9198 static inline uint64_t BDK_LMCX_INT(unsigned long a) __attribute__ ((pure, always_inline));
9199 static inline uint64_t BDK_LMCX_INT(unsigned long a)
9200 {
9201     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
9202         return 0x87e0880001f0ll + 0x1000000ll * ((a) & 0x0);
9203     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
9204         return 0x87e0880001f0ll + 0x1000000ll * ((a) & 0x1);
9205     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
9206         return 0x87e0880001f0ll + 0x1000000ll * ((a) & 0x3);
9207     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
9208         return 0x87e0880001f0ll + 0x1000000ll * ((a) & 0x3);
9209     __bdk_csr_fatal("LMCX_INT", 1, a, 0, 0, 0);
9210 }
9211 
9212 #define typedef_BDK_LMCX_INT(a) bdk_lmcx_int_t
9213 #define bustype_BDK_LMCX_INT(a) BDK_CSR_TYPE_RSL
9214 #define basename_BDK_LMCX_INT(a) "LMCX_INT"
9215 #define device_bar_BDK_LMCX_INT(a) 0x0 /* PF_BAR0 */
9216 #define busnum_BDK_LMCX_INT(a) (a)
9217 #define arguments_BDK_LMCX_INT(a) (a),-1,-1,-1
9218 
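/* Illustrative sketch (not part of the auto-generated CSR definitions): one possible
 * shape for the LMC()_INT service code described in the field comments above, namely
 * the [DDR_ERR] retry polling sequence and the recommended [SEC_ERR] cache-block
 * scrub. The example_* helpers are assumptions standing in for the platform's raw
 * RSL CSR accessors, delay primitive, and the LMC()_RETRY_STATUS / LMC()_RETRY_CONFIG
 * accessors defined elsewhere in this file; adapt them to the BDK environment in use. */
extern uint64_t example_csr_read64(uint64_t address);                  /* assumed raw CSR read */
extern void     example_csr_write64(uint64_t address, uint64_t value); /* assumed raw CSR write */
extern void     example_wait_ns(unsigned long ns);                     /* assumed short delay */
extern uint64_t example_retry_error_count(unsigned long lmc);          /* LMC()_RETRY_STATUS[ERROR_COUNT] */
extern void     example_retry_clear_error_count(unsigned long lmc);    /* LMC()_RETRY_STATUS[CLEAR_ERROR_COUNT]=1 */
extern uint64_t example_retry_max_errors(unsigned long lmc);           /* LMC()_RETRY_CONFIG[MAX_ERRORS] */

/* [DDR_ERR] service routine for the auto-retry case. Returns 0 if the hardware
 * appears to have corrected the error(s), -1 if the error should be treated as
 * uncorrected and possibly fatal. */
static inline int example_lmc_handle_ddr_err(unsigned long lmc)
{
    uint64_t x = example_retry_error_count(lmc);
    uint64_t y;

    do {
        y = x;
        example_wait_ns(100);                        /* wait approximately 100 ns */

        /* Write a one to [DDR_ERR] to clear it, if it is set (LMC()_INT is W1C). */
        bdk_lmcx_int_t lmc_int;
        lmc_int.u = example_csr_read64(BDK_LMCX_INT(lmc));
        if (lmc_int.s.ddr_err) {
            bdk_lmcx_int_t clr = { .u = 0 };
            clr.s.ddr_err = 1;
            example_csr_write64(BDK_LMCX_INT(lmc), clr.u);
        }

        x = example_retry_error_count(lmc);
    } while (x != y);                                /* loop until the count is stable */

    example_retry_clear_error_count(lmc);            /* clear LMC()_RETRY_STATUS[ERROR_COUNT] */

    return (x < example_retry_max_errors(lmc)) ? 0 : -1;
}

#if defined(__aarch64__)
/* [SEC_ERR] scrub of one 128-byte cache block, performed before clearing the
 * [SEC_ERR] bits. Each 64-bit word is read with a load-exclusive (returning the
 * hardware-corrected value) and written back with a store-release exclusive, so the
 * corrected data is rewritten to DRAM when the block is eventually evicted. The
 * caller supplies the address derived from LMC()_SCRAMBLED_FADR. */
static inline void example_lmc_scrub_cache_block(volatile uint64_t *block)
{
    for (int i = 0; i < 16; i++) {                   /* 128 bytes = 16 x 8 bytes */
        uint64_t val;
        uint32_t fail;
        do {
            asm volatile("ldxr  %0, [%2]\n\t"
                         "stlxr %w1, %0, [%2]"
                         : "=&r"(val), "=&r"(fail)
                         : "r"(&block[i])
                         : "memory");
        } while (fail);                              /* retry if the exclusive was lost */
    }
}
#endif
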
9219 /**
9220  * Register (RSL) lmc#_int_en
9221  *
9222  * INTERNAL: LMC Legacy Interrupt Enable Register
9223  *
9224  * Internal:
9225  * Deprecated and unused CSR.
9226  */
9227 union bdk_lmcx_int_en
9228 {
9229     uint64_t u;
9230     struct bdk_lmcx_int_en_s
9231     {
9232 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9233         uint64_t reserved_6_63         : 58;
9234         uint64_t ddr_error_alert_ena   : 1;  /**< [  5:  5](R/W) DDR4 error alert interrupt enable bit. */
9235         uint64_t dlcram_ded_ena        : 1;  /**< [  4:  4](R/W) DLC RAM ECC double error detect (DED) interrupt enable bit. */
9236         uint64_t dlcram_sec_ena        : 1;  /**< [  3:  3](R/W) DLC RAM ECC single error correct (SEC) interrupt enable bit. */
9237         uint64_t intr_ded_ena          : 1;  /**< [  2:  2](R/W) ECC double error detect (DED) interrupt enable bit. When set, the memory controller raises
9238                                                                  a processor interrupt on detecting an uncorrectable double-bit ECC error. */
9239         uint64_t intr_sec_ena          : 1;  /**< [  1:  1](R/W) ECC single error correct (SEC) interrupt enable bit. When set, the memory controller
9240                                                                  raises a processor interrupt on detecting a correctable single-bit ECC error. */
9241         uint64_t intr_nxm_wr_ena       : 1;  /**< [  0:  0](R/W) NXM write error interrupt enable bit. When set, the memory controller raises a processor
9242                                                                  interrupt on detecting a nonexistent memory write. */
9243 #else /* Word 0 - Little Endian */
9244         uint64_t intr_nxm_wr_ena       : 1;  /**< [  0:  0](R/W) NXM write error interrupt enable bit. When set, the memory controller raises a processor
9245                                                                  interrupt on detecting a nonexistent memory write. */
9246         uint64_t intr_sec_ena          : 1;  /**< [  1:  1](R/W) ECC single error correct (SEC) interrupt enable bit. When set, the memory controller
9247                                                                  raises a processor interrupt on detecting a correctable single-bit ECC error. */
9248         uint64_t intr_ded_ena          : 1;  /**< [  2:  2](R/W) ECC double error detect (DED) interrupt enable bit. When set, the memory controller raises
9249                                                                  a processor interrupt on detecting an uncorrectable double-bit ECC error. */
9250         uint64_t dlcram_sec_ena        : 1;  /**< [  3:  3](R/W) DLC RAM ECC single error correct (SEC) interrupt enable bit. */
9251         uint64_t dlcram_ded_ena        : 1;  /**< [  4:  4](R/W) DLC RAM ECC double error detect (DED) interrupt enable bit. */
9252         uint64_t ddr_error_alert_ena   : 1;  /**< [  5:  5](R/W) DDR4 error alert interrupt enable bit. */
9253         uint64_t reserved_6_63         : 58;
9254 #endif /* Word 0 - End */
9255     } s;
9256     /* struct bdk_lmcx_int_en_s cn; */
9257 };
9258 typedef union bdk_lmcx_int_en bdk_lmcx_int_en_t;
9259 
9260 static inline uint64_t BDK_LMCX_INT_EN(unsigned long a) __attribute__ ((pure, always_inline));
9261 static inline uint64_t BDK_LMCX_INT_EN(unsigned long a)
9262 {
9263     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
9264         return 0x87e0880001e8ll + 0x1000000ll * ((a) & 0x3);
9265     __bdk_csr_fatal("LMCX_INT_EN", 1, a, 0, 0, 0);
9266 }
9267 
9268 #define typedef_BDK_LMCX_INT_EN(a) bdk_lmcx_int_en_t
9269 #define bustype_BDK_LMCX_INT_EN(a) BDK_CSR_TYPE_RSL
9270 #define basename_BDK_LMCX_INT_EN(a) "LMCX_INT_EN"
9271 #define device_bar_BDK_LMCX_INT_EN(a) 0x0 /* PF_BAR0 */
9272 #define busnum_BDK_LMCX_INT_EN(a) (a)
9273 #define arguments_BDK_LMCX_INT_EN(a) (a),-1,-1,-1
9274 
9275 /**
9276  * Register (RSL) lmc#_int_ena_w1c
9277  *
9278  * LMC Interrupt Enable Clear Register
9279  * This register clears interrupt enable bits.
9280  */
9281 union bdk_lmcx_int_ena_w1c
9282 {
9283     uint64_t u;
9284     struct bdk_lmcx_int_ena_w1c_s
9285     {
9286 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9287         uint64_t reserved_15_63        : 49;
9288         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
9289         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
9290         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9291         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DDR_ERR]. */
9292         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9293         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9294         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DED_ERR]. */
9295         uint64_t reserved_1_4          : 4;
9296         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[NXM_WR_ERR]. */
9297 #else /* Word 0 - Little Endian */
9298         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[NXM_WR_ERR]. */
9299         uint64_t reserved_1_4          : 4;
9300         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DED_ERR]. */
9301         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9302         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9303         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DDR_ERR]. */
9304         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9305         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
9306         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
9307         uint64_t reserved_15_63        : 49;
9308 #endif /* Word 0 - End */
9309     } s;
9310     struct bdk_lmcx_int_ena_w1c_cn9
9311     {
9312 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9313         uint64_t reserved_15_63        : 49;
9314         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
9315         uint64_t reserved_12_13        : 2;
9316         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ERR]. */
9317         uint64_t reserved_3_10         : 8;
9318         uint64_t ref_pend_max1         : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[REF_PEND_MAX1]. */
9319         uint64_t ref_pend_max0         : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[REF_PEND_MAX0]. */
9320         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[NXM_WR_ERR]. */
9321 #else /* Word 0 - Little Endian */
9322         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[NXM_WR_ERR]. */
9323         uint64_t ref_pend_max0         : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[REF_PEND_MAX0]. */
9324         uint64_t ref_pend_max1         : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[REF_PEND_MAX1]. */
9325         uint64_t reserved_3_10         : 8;
9326         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ERR]. */
9327         uint64_t reserved_12_13        : 2;
9328         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1C/H) Reads or clears enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
9329         uint64_t reserved_15_63        : 49;
9330 #endif /* Word 0 - End */
9331     } cn9;
9332     struct bdk_lmcx_int_ena_w1c_cn81xx
9333     {
9334 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9335         uint64_t reserved_14_63        : 50;
9336         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0)_INT[MACRAM_DED_ERR]. */
9337         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0)_INT[MACRAM_SEC_ERR]. */
9338         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0)_INT[DDR_ERR]. */
9339         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0)_INT[DLCRAM_DED_ERR]. */
9340         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for LMC(0)_INT[DLCRAM_SEC_ERR]. */
9341         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Reads or clears enable for LMC(0)_INT[DED_ERR]. */
9342         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1C/H) Reads or clears enable for LMC(0)_INT[SEC_ERR]. */
9343         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0)_INT[NXM_WR_ERR]. */
9344 #else /* Word 0 - Little Endian */
9345         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0)_INT[NXM_WR_ERR]. */
9346         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1C/H) Reads or clears enable for LMC(0)_INT[SEC_ERR]. */
9347         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Reads or clears enable for LMC(0)_INT[DED_ERR]. */
9348         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for LMC(0)_INT[DLCRAM_SEC_ERR]. */
9349         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0)_INT[DLCRAM_DED_ERR]. */
9350         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0)_INT[DDR_ERR]. */
9351         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0)_INT[MACRAM_SEC_ERR]. */
9352         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0)_INT[MACRAM_DED_ERR]. */
9353         uint64_t reserved_14_63        : 50;
9354 #endif /* Word 0 - End */
9355     } cn81xx;
9356     struct bdk_lmcx_int_ena_w1c_cn88xx
9357     {
9358 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9359         uint64_t reserved_14_63        : 50;
9360         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
9361         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9362         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DDR_ERR]. */
9363         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9364         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9365         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DED_ERR]. */
9366         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[SEC_ERR]. */
9367         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[NXM_WR_ERR]. */
9368 #else /* Word 0 - Little Endian */
9369         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[NXM_WR_ERR]. */
9370         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[SEC_ERR]. */
9371         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DED_ERR]. */
9372         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9373         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9374         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[DDR_ERR]. */
9375         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9376         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
9377         uint64_t reserved_14_63        : 50;
9378 #endif /* Word 0 - End */
9379     } cn88xx;
9380     struct bdk_lmcx_int_ena_w1c_cn83xx
9381     {
9382 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9383         uint64_t reserved_14_63        : 50;
9384         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[MACRAM_DED_ERR]. */
9385         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[MACRAM_SEC_ERR]. */
9386         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DDR_ERR]. */
9387         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DLCRAM_DED_ERR]. */
9388         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
9389         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DED_ERR]. */
9390         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[SEC_ERR]. */
9391         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[NXM_WR_ERR]. */
9392 #else /* Word 0 - Little Endian */
9393         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[NXM_WR_ERR]. */
9394         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[SEC_ERR]. */
9395         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DED_ERR]. */
9396         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
9397         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DLCRAM_DED_ERR]. */
9398         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[DDR_ERR]. */
9399         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[MACRAM_SEC_ERR]. */
9400         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for LMC(0..1)_INT[MACRAM_DED_ERR]. */
9401         uint64_t reserved_14_63        : 50;
9402 #endif /* Word 0 - End */
9403     } cn83xx;
9404 };
9405 typedef union bdk_lmcx_int_ena_w1c bdk_lmcx_int_ena_w1c_t;
9406 
9407 static inline uint64_t BDK_LMCX_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
9408 static inline uint64_t BDK_LMCX_INT_ENA_W1C(unsigned long a)
9409 {
9410     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
9411         return 0x87e088000158ll + 0x1000000ll * ((a) & 0x0);
9412     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
9413         return 0x87e088000158ll + 0x1000000ll * ((a) & 0x1);
9414     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
9415         return 0x87e088000158ll + 0x1000000ll * ((a) & 0x3);
9416     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
9417         return 0x87e088000158ll + 0x1000000ll * ((a) & 0x3);
9418     __bdk_csr_fatal("LMCX_INT_ENA_W1C", 1, a, 0, 0, 0);
9419 }
9420 
9421 #define typedef_BDK_LMCX_INT_ENA_W1C(a) bdk_lmcx_int_ena_w1c_t
9422 #define bustype_BDK_LMCX_INT_ENA_W1C(a) BDK_CSR_TYPE_RSL
9423 #define basename_BDK_LMCX_INT_ENA_W1C(a) "LMCX_INT_ENA_W1C"
9424 #define device_bar_BDK_LMCX_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
9425 #define busnum_BDK_LMCX_INT_ENA_W1C(a) (a)
9426 #define arguments_BDK_LMCX_INT_ENA_W1C(a) (a),-1,-1,-1
9427 
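/* Illustrative sketch (not part of the auto-generated CSR definitions): disabling
 * (masking) a single LMC interrupt source by writing a one to the corresponding bit
 * of LMC()_INT_ENA_W1C. example_csr_write64() is an assumption standing in for the
 * platform's raw RSL CSR write routine. */
extern void example_csr_write64(uint64_t address, uint64_t value);     /* assumed raw CSR write */

static inline void example_lmc_mask_ddr_err(unsigned long lmc)
{
    bdk_lmcx_int_ena_w1c_t mask = { .u = 0 };
    mask.s.ddr_err = 1;                              /* clear only the [DDR_ERR] enable */
    example_csr_write64(BDK_LMCX_INT_ENA_W1C(lmc), mask.u);
}
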
9428 /**
9429  * Register (RSL) lmc#_int_ena_w1s
9430  *
9431  * LMC Interrupt Enable Set Register
9432  * This register sets interrupt enable bits.
9433  */
9434 union bdk_lmcx_int_ena_w1s
9435 {
9436     uint64_t u;
9437     struct bdk_lmcx_int_ena_w1s_s
9438     {
9439 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9440         uint64_t reserved_15_63        : 49;
9441         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
9442         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
9443         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9444         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DDR_ERR]. */
9445         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9446         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9447         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DED_ERR]. */
9448         uint64_t reserved_1_4          : 4;
9449         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[NXM_WR_ERR]. */
9450 #else /* Word 0 - Little Endian */
9451         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[NXM_WR_ERR]. */
9452         uint64_t reserved_1_4          : 4;
9453         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DED_ERR]. */
9454         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9455         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9456         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DDR_ERR]. */
9457         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9458         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
9459         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
9460         uint64_t reserved_15_63        : 49;
9461 #endif /* Word 0 - End */
9462     } s;
9463     struct bdk_lmcx_int_ena_w1s_cn9
9464     {
9465 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9466         uint64_t reserved_15_63        : 49;
9467         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
9468         uint64_t reserved_12_13        : 2;
9469         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ERR]. */
9470         uint64_t reserved_3_10         : 8;
9471         uint64_t ref_pend_max1         : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[REF_PEND_MAX1]. */
9472         uint64_t ref_pend_max0         : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[REF_PEND_MAX0]. */
9473         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[NXM_WR_ERR]. */
9474 #else /* Word 0 - Little Endian */
9475         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[NXM_WR_ERR]. */
9476         uint64_t ref_pend_max0         : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[REF_PEND_MAX0]. */
9477         uint64_t ref_pend_max1         : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[REF_PEND_MAX1]. */
9478         uint64_t reserved_3_10         : 8;
9479         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ERR]. */
9480         uint64_t reserved_12_13        : 2;
9481         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1S/H) Reads or sets enable for LMC(0..2)_INT[DDR_ALERT_SAT]. */
9482         uint64_t reserved_15_63        : 49;
9483 #endif /* Word 0 - End */
9484     } cn9;
9485     struct bdk_lmcx_int_ena_w1s_cn81xx
9486     {
9487 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9488         uint64_t reserved_14_63        : 50;
9489         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0)_INT[MACRAM_DED_ERR]. */
9490         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0)_INT[MACRAM_SEC_ERR]. */
9491         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0)_INT[DDR_ERR]. */
9492         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0)_INT[DLCRAM_DED_ERR]. */
9493         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets enable for LMC(0)_INT[DLCRAM_SEC_ERR]. */
9494         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets enable for LMC(0)_INT[DED_ERR]. */
9495         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets enable for LMC(0)_INT[SEC_ERR]. */
9496         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0)_INT[NXM_WR_ERR]. */
9497 #else /* Word 0 - Little Endian */
9498         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0)_INT[NXM_WR_ERR]. */
9499         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets enable for LMC(0)_INT[SEC_ERR]. */
9500         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets enable for LMC(0)_INT[DED_ERR]. */
9501         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets enable for LMC(0)_INT[DLCRAM_SEC_ERR]. */
9502         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0)_INT[DLCRAM_DED_ERR]. */
9503         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0)_INT[DDR_ERR]. */
9504         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0)_INT[MACRAM_SEC_ERR]. */
9505         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0)_INT[MACRAM_DED_ERR]. */
9506         uint64_t reserved_14_63        : 50;
9507 #endif /* Word 0 - End */
9508     } cn81xx;
9509     struct bdk_lmcx_int_ena_w1s_cn88xx
9510     {
9511 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9512         uint64_t reserved_14_63        : 50;
9513         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
9514         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9515         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DDR_ERR]. */
9516         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9517         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9518         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DED_ERR]. */
9519         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[SEC_ERR]. */
9520         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[NXM_WR_ERR]. */
9521 #else /* Word 0 - Little Endian */
9522         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[NXM_WR_ERR]. */
9523         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[SEC_ERR]. */
9524         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DED_ERR]. */
9525         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9526         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9527         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[DDR_ERR]. */
9528         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9529         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..3)_INT[MACRAM_DED_ERR]. */
9530         uint64_t reserved_14_63        : 50;
9531 #endif /* Word 0 - End */
9532     } cn88xx;
9533     struct bdk_lmcx_int_ena_w1s_cn83xx
9534     {
9535 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9536         uint64_t reserved_14_63        : 50;
9537         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[MACRAM_DED_ERR]. */
9538         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[MACRAM_SEC_ERR]. */
9539         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DDR_ERR]. */
9540         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DLCRAM_DED_ERR]. */
9541         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
9542         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DED_ERR]. */
9543         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[SEC_ERR]. */
9544         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[NXM_WR_ERR]. */
9545 #else /* Word 0 - Little Endian */
9546         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[NXM_WR_ERR]. */
9547         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[SEC_ERR]. */
9548         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DED_ERR]. */
9549         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
9550         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DLCRAM_DED_ERR]. */
9551         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[DDR_ERR]. */
9552         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[MACRAM_SEC_ERR]. */
9553         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets enable for LMC(0..1)_INT[MACRAM_DED_ERR]. */
9554         uint64_t reserved_14_63        : 50;
9555 #endif /* Word 0 - End */
9556     } cn83xx;
9557 };
9558 typedef union bdk_lmcx_int_ena_w1s bdk_lmcx_int_ena_w1s_t;
9559 
9560 static inline uint64_t BDK_LMCX_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
9561 static inline uint64_t BDK_LMCX_INT_ENA_W1S(unsigned long a)
9562 {
9563     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
9564         return 0x87e088000160ll + 0x1000000ll * ((a) & 0x0);
9565     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
9566         return 0x87e088000160ll + 0x1000000ll * ((a) & 0x1);
9567     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
9568         return 0x87e088000160ll + 0x1000000ll * ((a) & 0x3);
9569     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
9570         return 0x87e088000160ll + 0x1000000ll * ((a) & 0x3);
9571     __bdk_csr_fatal("LMCX_INT_ENA_W1S", 1, a, 0, 0, 0);
9572 }
9573 
9574 #define typedef_BDK_LMCX_INT_ENA_W1S(a) bdk_lmcx_int_ena_w1s_t
9575 #define bustype_BDK_LMCX_INT_ENA_W1S(a) BDK_CSR_TYPE_RSL
9576 #define basename_BDK_LMCX_INT_ENA_W1S(a) "LMCX_INT_ENA_W1S"
9577 #define device_bar_BDK_LMCX_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
9578 #define busnum_BDK_LMCX_INT_ENA_W1S(a) (a)
9579 #define arguments_BDK_LMCX_INT_ENA_W1S(a) (a),-1,-1,-1
9580 
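/* Illustrative sketch (not part of the auto-generated CSR definitions): enabling a
 * single LMC interrupt source by writing a one to the corresponding bit of
 * LMC()_INT_ENA_W1S, the set-side companion of LMC()_INT_ENA_W1C above.
 * example_csr_write64() is an assumption standing in for the platform's raw RSL CSR
 * write routine. */
extern void example_csr_write64(uint64_t address, uint64_t value);     /* assumed raw CSR write */

static inline void example_lmc_unmask_ddr_err(unsigned long lmc)
{
    bdk_lmcx_int_ena_w1s_t ena = { .u = 0 };
    ena.s.ddr_err = 1;                               /* set only the [DDR_ERR] enable */
    example_csr_write64(BDK_LMCX_INT_ENA_W1S(lmc), ena.u);
}
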
9581 /**
9582  * Register (RSL) lmc#_int_w1s
9583  *
9584  * LMC Interrupt Set Register
9585  * This register sets interrupt bits.
9586  */
9587 union bdk_lmcx_int_w1s
9588 {
9589     uint64_t u;
9590     struct bdk_lmcx_int_w1s_s
9591     {
9592 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9593         uint64_t reserved_15_63        : 49;
9594         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ALERT_SAT]. */
9595         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_DED_ERR]. */
9596         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9597         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..3)_INT[DDR_ERR]. */
9598         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9599         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9600         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets LMC(0..3)_INT[DED_ERR]. */
9601         uint64_t reserved_1_4          : 4;
9602         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0..3)_INT[NXM_WR_ERR]. */
9603 #else /* Word 0 - Little Endian */
9604         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0..3)_INT[NXM_WR_ERR]. */
9605         uint64_t reserved_1_4          : 4;
9606         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets LMC(0..3)_INT[DED_ERR]. */
9607         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9608         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9609         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..3)_INT[DDR_ERR]. */
9610         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9611         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_DED_ERR]. */
9612         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ALERT_SAT]. */
9613         uint64_t reserved_15_63        : 49;
9614 #endif /* Word 0 - End */
9615     } s;
9616     struct bdk_lmcx_int_w1s_cn9
9617     {
9618 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9619         uint64_t reserved_15_63        : 49;
9620         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ALERT_SAT]. */
9621         uint64_t reserved_12_13        : 2;
9622         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ERR]. */
9623         uint64_t reserved_3_10         : 8;
9624         uint64_t ref_pend_max1         : 1;  /**< [  2:  2](R/W1S/H) Reads or sets LMC(0..2)_INT[REF_PEND_MAX1]. */
9625         uint64_t ref_pend_max0         : 1;  /**< [  1:  1](R/W1S/H) Reads or sets LMC(0..2)_INT[REF_PEND_MAX0]. */
9626         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0..2)_INT[NXM_WR_ERR]. */
9627 #else /* Word 0 - Little Endian */
9628         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0..2)_INT[NXM_WR_ERR]. */
9629         uint64_t ref_pend_max0         : 1;  /**< [  1:  1](R/W1S/H) Reads or sets LMC(0..2)_INT[REF_PEND_MAX0]. */
9630         uint64_t ref_pend_max1         : 1;  /**< [  2:  2](R/W1S/H) Reads or sets LMC(0..2)_INT[REF_PEND_MAX1]. */
9631         uint64_t reserved_3_10         : 8;
9632         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ERR]. */
9633         uint64_t reserved_12_13        : 2;
9634         uint64_t ddr_alert_sat         : 1;  /**< [ 14: 14](R/W1S/H) Reads or sets LMC(0..2)_INT[DDR_ALERT_SAT]. */
9635         uint64_t reserved_15_63        : 49;
9636 #endif /* Word 0 - End */
9637     } cn9;
9638     struct bdk_lmcx_int_w1s_cn81xx
9639     {
9640 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9641         uint64_t reserved_14_63        : 50;
9642         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0)_INT[MACRAM_DED_ERR]. */
9643         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0)_INT[MACRAM_SEC_ERR]. */
9644         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0)_INT[DDR_ERR]. */
9645         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0)_INT[DLCRAM_DED_ERR]. */
9646         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets LMC(0)_INT[DLCRAM_SEC_ERR]. */
9647         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets LMC(0)_INT[DED_ERR]. */
9648         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets LMC(0)_INT[SEC_ERR]. */
9649         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0)_INT[NXM_WR_ERR]. */
9650 #else /* Word 0 - Little Endian */
9651         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0)_INT[NXM_WR_ERR]. */
9652         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets LMC(0)_INT[SEC_ERR]. */
9653         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets LMC(0)_INT[DED_ERR]. */
9654         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets LMC(0)_INT[DLCRAM_SEC_ERR]. */
9655         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0)_INT[DLCRAM_DED_ERR]. */
9656         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0)_INT[DDR_ERR]. */
9657         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0)_INT[MACRAM_SEC_ERR]. */
9658         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0)_INT[MACRAM_DED_ERR]. */
9659         uint64_t reserved_14_63        : 50;
9660 #endif /* Word 0 - End */
9661     } cn81xx;
9662     struct bdk_lmcx_int_w1s_cn88xx
9663     {
9664 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9665         uint64_t reserved_14_63        : 50;
9666         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_DED_ERR]. */
9667         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9668         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..3)_INT[DDR_ERR]. */
9669         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9670         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9671         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets LMC(0..3)_INT[DED_ERR]. */
9672         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets LMC(0..3)_INT[SEC_ERR]. */
9673         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0..3)_INT[NXM_WR_ERR]. */
9674 #else /* Word 0 - Little Endian */
9675         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0..3)_INT[NXM_WR_ERR]. */
9676         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets LMC(0..3)_INT[SEC_ERR]. */
9677         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets LMC(0..3)_INT[DED_ERR]. */
9678         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_SEC_ERR]. */
9679         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..3)_INT[DLCRAM_DED_ERR]. */
9680         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..3)_INT[DDR_ERR]. */
9681         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_SEC_ERR]. */
9682         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..3)_INT[MACRAM_DED_ERR]. */
9683         uint64_t reserved_14_63        : 50;
9684 #endif /* Word 0 - End */
9685     } cn88xx;
9686     struct bdk_lmcx_int_w1s_cn83xx
9687     {
9688 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9689         uint64_t reserved_14_63        : 50;
9690         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..1)_INT[MACRAM_DED_ERR]. */
9691         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..1)_INT[MACRAM_SEC_ERR]. */
9692         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..1)_INT[DDR_ERR]. */
9693         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..1)_INT[DLCRAM_DED_ERR]. */
9694         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
9695         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets LMC(0..1)_INT[DED_ERR]. */
9696         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets LMC(0..1)_INT[SEC_ERR]. */
9697         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0..1)_INT[NXM_WR_ERR]. */
9698 #else /* Word 0 - Little Endian */
9699         uint64_t nxm_wr_err            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets LMC(0..1)_INT[NXM_WR_ERR]. */
9700         uint64_t sec_err               : 4;  /**< [  4:  1](R/W1S/H) Reads or sets LMC(0..1)_INT[SEC_ERR]. */
9701         uint64_t ded_err               : 4;  /**< [  8:  5](R/W1S/H) Reads or sets LMC(0..1)_INT[DED_ERR]. */
9702         uint64_t dlcram_sec_err        : 1;  /**< [  9:  9](R/W1S/H) Reads or sets LMC(0..1)_INT[DLCRAM_SEC_ERR]. */
9703         uint64_t dlcram_ded_err        : 1;  /**< [ 10: 10](R/W1S/H) Reads or sets LMC(0..1)_INT[DLCRAM_DED_ERR]. */
9704         uint64_t ddr_err               : 1;  /**< [ 11: 11](R/W1S/H) Reads or sets LMC(0..1)_INT[DDR_ERR]. */
9705         uint64_t macram_sec_err        : 1;  /**< [ 12: 12](R/W1S/H) Reads or sets LMC(0..1)_INT[MACRAM_SEC_ERR]. */
9706         uint64_t macram_ded_err        : 1;  /**< [ 13: 13](R/W1S/H) Reads or sets LMC(0..1)_INT[MACRAM_DED_ERR]. */
9707         uint64_t reserved_14_63        : 50;
9708 #endif /* Word 0 - End */
9709     } cn83xx;
9710 };
9711 typedef union bdk_lmcx_int_w1s bdk_lmcx_int_w1s_t;
9712 
9713 static inline uint64_t BDK_LMCX_INT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
9714 static inline uint64_t BDK_LMCX_INT_W1S(unsigned long a)
9715 {
9716     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
9717         return 0x87e088000150ll + 0x1000000ll * ((a) & 0x0);
9718     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
9719         return 0x87e088000150ll + 0x1000000ll * ((a) & 0x1);
9720     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
9721         return 0x87e088000150ll + 0x1000000ll * ((a) & 0x3);
9722     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
9723         return 0x87e088000150ll + 0x1000000ll * ((a) & 0x3);
9724     __bdk_csr_fatal("LMCX_INT_W1S", 1, a, 0, 0, 0);
9725 }
9726 
9727 #define typedef_BDK_LMCX_INT_W1S(a) bdk_lmcx_int_w1s_t
9728 #define bustype_BDK_LMCX_INT_W1S(a) BDK_CSR_TYPE_RSL
9729 #define basename_BDK_LMCX_INT_W1S(a) "LMCX_INT_W1S"
9730 #define device_bar_BDK_LMCX_INT_W1S(a) 0x0 /* PF_BAR0 */
9731 #define busnum_BDK_LMCX_INT_W1S(a) (a)
9732 #define arguments_BDK_LMCX_INT_W1S(a) (a),-1,-1,-1
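/* Usage sketch (illustrative only, not part of the generated register
 * description): compose the value that sets LMC(0)_INT[NXM_WR_ERR] through
 * the write-1-to-set alias above. The address comes from BDK_LMCX_INT_W1S();
 * for example, on CN88XX with a = 2 it resolves to
 * 0x87e088000150 + 2 * 0x1000000 = 0x87e08a000150. Performing the actual
 * 64-bit store to that address is assumed to be done by the BDK's CSR write
 * helpers and is not shown here. */
static inline uint64_t bdk_lmcx_int_w1s_nxm_set_value(void)
{
    bdk_lmcx_int_w1s_t w1s;
    w1s.u = 0;            /* zero bits are ignored by a W1S write */
    w1s.s.nxm_wr_err = 1; /* writing 1 sets LMC()_INT[NXM_WR_ERR] */
    return w1s.u;         /* store this value at BDK_LMCX_INT_W1S(0) */
}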
9733 
9734 /**
9735  * Register (RSL) lmc#_lane#_crc_swiz
9736  *
9737  * LMC Lane CRC Swizzle Register
9738  * This register contains the CRC bit swizzle for even and odd ranks.
9739  */
9740 union bdk_lmcx_lanex_crc_swiz
9741 {
9742     uint64_t u;
9743     struct bdk_lmcx_lanex_crc_swiz_s
9744     {
9745 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9746         uint64_t reserved_56_63        : 8;
9747         uint64_t r1_swiz7              : 3;  /**< [ 55: 53](R/W) Bit select for odd rank, bit 7. */
9748         uint64_t r1_swiz6              : 3;  /**< [ 52: 50](R/W) Bit select for odd rank, bit 6. */
9749         uint64_t r1_swiz5              : 3;  /**< [ 49: 47](R/W) Bit select for odd rank, bit 5. */
9750         uint64_t r1_swiz4              : 3;  /**< [ 46: 44](R/W) Bit select for odd rank, bit 4. */
9751         uint64_t r1_swiz3              : 3;  /**< [ 43: 41](R/W) Bit select for odd rank, bit 3. */
9752         uint64_t r1_swiz2              : 3;  /**< [ 40: 38](R/W) Bit select for odd rank, bit 2. */
9753         uint64_t r1_swiz1              : 3;  /**< [ 37: 35](R/W) Bit select for odd rank, bit 1. */
9754         uint64_t r1_swiz0              : 3;  /**< [ 34: 32](R/W) Bit select for odd rank, bit 0. */
9755         uint64_t reserved_24_31        : 8;
9756         uint64_t r0_swiz7              : 3;  /**< [ 23: 21](R/W) Bit select for even rank, bit 7. */
9757         uint64_t r0_swiz6              : 3;  /**< [ 20: 18](R/W) Bit select for even rank, bit 6. */
9758         uint64_t r0_swiz5              : 3;  /**< [ 17: 15](R/W) Bit select for even rank, bit 5. */
9759         uint64_t r0_swiz4              : 3;  /**< [ 14: 12](R/W) Bit select for even rank, bit 4. */
9760         uint64_t r0_swiz3              : 3;  /**< [ 11:  9](R/W) Bit select for even rank, bit 3. */
9761         uint64_t r0_swiz2              : 3;  /**< [  8:  6](R/W) Bit select for even rank, bit 2. */
9762         uint64_t r0_swiz1              : 3;  /**< [  5:  3](R/W) Bit select for even rank, bit 1. */
9763         uint64_t r0_swiz0              : 3;  /**< [  2:  0](R/W) Bit select for even rank, bit 0. */
9764 #else /* Word 0 - Little Endian */
9765         uint64_t r0_swiz0              : 3;  /**< [  2:  0](R/W) Bit select for even rank, bit 0. */
9766         uint64_t r0_swiz1              : 3;  /**< [  5:  3](R/W) Bit select for even rank, bit 1. */
9767         uint64_t r0_swiz2              : 3;  /**< [  8:  6](R/W) Bit select for even rank, bit 2. */
9768         uint64_t r0_swiz3              : 3;  /**< [ 11:  9](R/W) Bit select for even rank, bit 3. */
9769         uint64_t r0_swiz4              : 3;  /**< [ 14: 12](R/W) Bit select for even rank, bit 4. */
9770         uint64_t r0_swiz5              : 3;  /**< [ 17: 15](R/W) Bit select for even rank, bit 5. */
9771         uint64_t r0_swiz6              : 3;  /**< [ 20: 18](R/W) Bit select for even rank, bit 6. */
9772         uint64_t r0_swiz7              : 3;  /**< [ 23: 21](R/W) Bit select for even rank, bit 7. */
9773         uint64_t reserved_24_31        : 8;
9774         uint64_t r1_swiz0              : 3;  /**< [ 34: 32](R/W) Bit select for odd rank, bit 0. */
9775         uint64_t r1_swiz1              : 3;  /**< [ 37: 35](R/W) Bit select for odd rank, bit 1. */
9776         uint64_t r1_swiz2              : 3;  /**< [ 40: 38](R/W) Bit select for odd rank, bit 2. */
9777         uint64_t r1_swiz3              : 3;  /**< [ 43: 41](R/W) Bit select for odd rank, bit 3. */
9778         uint64_t r1_swiz4              : 3;  /**< [ 46: 44](R/W) Bit select for odd rank, bit 4. */
9779         uint64_t r1_swiz5              : 3;  /**< [ 49: 47](R/W) Bit select for odd rank, bit 5. */
9780         uint64_t r1_swiz6              : 3;  /**< [ 52: 50](R/W) Bit select for odd rank, bit 6. */
9781         uint64_t r1_swiz7              : 3;  /**< [ 55: 53](R/W) Bit select for odd rank, bit 7. */
9782         uint64_t reserved_56_63        : 8;
9783 #endif /* Word 0 - End */
9784     } s;
9785     /* struct bdk_lmcx_lanex_crc_swiz_s cn; */
9786 };
9787 typedef union bdk_lmcx_lanex_crc_swiz bdk_lmcx_lanex_crc_swiz_t;
9788 
9789 static inline uint64_t BDK_LMCX_LANEX_CRC_SWIZ(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
9790 static inline uint64_t BDK_LMCX_LANEX_CRC_SWIZ(unsigned long a, unsigned long b)
9791 {
9792     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=8)))
9793         return 0x87e088000380ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0xf);
9794     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=8)))
9795         return 0x87e088000380ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xf);
9796     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=8)))
9797         return 0x87e088000380ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
9798     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=8)))
9799         return 0x87e088000380ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xf);
9800     __bdk_csr_fatal("LMCX_LANEX_CRC_SWIZ", 2, a, b, 0, 0);
9801 }
9802 
9803 #define typedef_BDK_LMCX_LANEX_CRC_SWIZ(a,b) bdk_lmcx_lanex_crc_swiz_t
9804 #define bustype_BDK_LMCX_LANEX_CRC_SWIZ(a,b) BDK_CSR_TYPE_RSL
9805 #define basename_BDK_LMCX_LANEX_CRC_SWIZ(a,b) "LMCX_LANEX_CRC_SWIZ"
9806 #define device_bar_BDK_LMCX_LANEX_CRC_SWIZ(a,b) 0x0 /* PF_BAR0 */
9807 #define busnum_BDK_LMCX_LANEX_CRC_SWIZ(a,b) (a)
9808 #define arguments_BDK_LMCX_LANEX_CRC_SWIZ(a,b) (a),(b),-1,-1
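/* Usage sketch (illustrative only): build an identity CRC bit swizzle, i.e.
 * CRC bit N of both the even (r0) and odd (r1) rank selects bit N. Each
 * 3-bit field holds a value of 0-7. The resulting value would be written to
 * BDK_LMCX_LANEX_CRC_SWIZ(controller, lane) for each of the nine lanes
 * (b = 0..8); the CSR write itself is assumed to happen elsewhere. */
static inline uint64_t bdk_lmcx_lanex_crc_swiz_identity(void)
{
    bdk_lmcx_lanex_crc_swiz_t swiz;
    swiz.u = 0;
    swiz.s.r0_swiz0 = 0; swiz.s.r1_swiz0 = 0; /* even/odd rank, CRC bit 0 */
    swiz.s.r0_swiz1 = 1; swiz.s.r1_swiz1 = 1;
    swiz.s.r0_swiz2 = 2; swiz.s.r1_swiz2 = 2;
    swiz.s.r0_swiz3 = 3; swiz.s.r1_swiz3 = 3;
    swiz.s.r0_swiz4 = 4; swiz.s.r1_swiz4 = 4;
    swiz.s.r0_swiz5 = 5; swiz.s.r1_swiz5 = 5;
    swiz.s.r0_swiz6 = 6; swiz.s.r1_swiz6 = 6;
    swiz.s.r0_swiz7 = 7; swiz.s.r1_swiz7 = 7; /* even/odd rank, CRC bit 7 */
    return swiz.u;
}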
9809 
9810 /**
9811  * Register (RSL) lmc#_modereg_params0
9812  *
9813  * LMC Mode Register Parameters 0 Register
9814  * These parameters are written into the DDR4 MR0, MR1, MR2 and MR3 registers.
9815  */
9816 union bdk_lmcx_modereg_params0
9817 {
9818     uint64_t u;
9819     struct bdk_lmcx_modereg_params0_s
9820     {
9821 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
9822         uint64_t reserved_28_63        : 36;
9823         uint64_t wrp_ext               : 1;  /**< [ 27: 27](R/W) A 1-bit extension to the WRP register. */
9824         uint64_t cl_ext                : 1;  /**< [ 26: 26](R/W) Reserved; must be zero.
9825                                                                  Internal:
9826                                                                  The extended bit for the proposed CAS Latency spec change. The new
9827                                                                  CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
9828                                                                  the A12 bit.
9829 
9830                                                                  See LMC()_MODEREG_PARAMS0[CL]. */
9831         uint64_t al_ext                : 1;  /**< [ 25: 25](R/W) Reserved; must be zero.
9832                                                                  Internal:
9833                                                                  The extended bit for the new Additive latency settings for DDR4 3DS.
9834                                                                  Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
9835                                                                  of up to CL-6.
9836 
9837                                                                  0: CL - (LMC()_MODEREG_PARAMS0[AL])
9838                                                                  1: CL - (LMC()_MODEREG_PARAMS0[AL] + 4)
9839 
9840                                                                  See LMC()_MODEREG_PARAMS0[AL]. */
9841         uint64_t ppd                   : 1;  /**< [ 24: 24](R/W) DLL control for precharge powerdown.
9842                                                                  0 = Slow exit (DLL off).
9843                                                                  1 = Fast exit (DLL on).
9844 
9845                                                                  LMC writes this value to MR0[PPD] in the selected DDR3/DDR4 parts during power-up/init
9846                                                                  and, if
9847                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
9848                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
9849                                                                  equal
9850                                                                  the MR0[PPD] value in all the DDR3/DDR4 parts attached to all ranks during normal
9851                                                                  operation. */
9852         uint64_t wrp                   : 3;  /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
9853                                                                  RNDUP[TWR(ns) / Tcyc(ns)].
9854 
9855                                                                  DDR3:
9856                                                                  0x0 = 16.
9857                                                                  0x1 = 5.
9858                                                                  0x2 = 6.
9859                                                                  0x3 = 7.
9860                                                                  0x4 = 8.
9861                                                                  0x5 = 10.
9862                                                                  0x6 = 12.
9863                                                                  0x7 = 14.
9864 
9865                                                                  DDR4:
9866                                                                  0x0 = 10.
9867                                                                  0x1 = 12.
9868                                                                  0x2 = 14.
9869                                                                  0x3 = 16.
9870                                                                  0x4 = 18.
9871                                                                  0x5 = 20.
9872                                                                  0x6 = 24.
9873                                                                  0x7 = 22.
9874                                                                  0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
9875                                                                  0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
9876 
9877                                                                  LMC writes this value to MR0[WR] in the selected DDR3/DDR4 parts during power-up/init and,
9878                                                                  if
9879                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
9880                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
9881                                                                  equal
9882                                                                  the MR0[WR] value in all the DDR3/DDR4 parts attached to all ranks during normal
9883                                                                  operation. */
9884         uint64_t dllr                  : 1;  /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL] in the selected DDR3/DDR4 parts during power-
9885                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
9886                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
9887                                                                  The MR0[DLL] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during
9888                                                                  normal operation. */
9889         uint64_t tm                    : 1;  /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR3/DDR4 parts during power-
9890                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
9891                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
9892                                                                  The MR0[TM] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during normal
9893                                                                  operation. */
9894         uint64_t rbt                   : 1;  /**< [ 18: 18](RO) Read burst. Type 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
9895                                                                  selected DDR3/DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set,
9896                                                                  self-refresh exit instruction sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,
9897                                                                  RANKMASK]. The MR0[RBT] value must be 1 in all the DDR3/DDR4 parts attached to all ranks
9898                                                                  during normal operation. */
9899         uint64_t cl                    : 4;  /**< [ 17: 14](R/W) CAS latency.
9900 
9901                                                                  In DDR3 mode:
9902 
9903                                                                  0x2 = 5. 0x1 = 12.
9904                                                                  0x4 = 6. 0x3 = 13.
9905                                                                  0x6 = 7. 0x5 = 14.
9906                                                                  0x8 = 8. 0x7 = 15.
9907                                                                  0xA = 9. 0x9 = 16.
9908                                                                  0xC = 10.
9909                                                                  0xE = 11.
9910                                                                  0x0, 0xB, 0xD, 0xF = Reserved.
9911 
9912                                                                  In DDR4 mode:
9913 
9914                                                                  0x0 =  9. 0x1 = 10.
9915                                                                  0x2 = 11. 0x3 = 12.
9916                                                                  0x4 = 13. 0x5 = 14.
9917                                                                  0x6 = 15. 0x7 = 16.
9918                                                                  0x8 = 18. 0x9 = 20.
9919                                                                  0xA = 22. 0xB = 24.
9920                                                                  0xD = 17, 0xE = 19.
9921                                                                  0xF = 21, 0xC = Reserved.
9922 
9923                                                                  LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts during power-
9924                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
9925                                                                  sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,RANKMASK]. This value must equal the
9926                                                                  MR0[CAS Latency / CL] value in all the DDR3/4 parts attached to all ranks during normal
9927                                                                  operation.
9928 
9929                                                                  tCL must be programmed to greater than or equal to tCWL for proper LMC operation. */
9930         uint64_t bl                    : 2;  /**< [ 13: 12](R/W) Burst length.
9931                                                                  0x0 = 8 (fixed).
9932                                                                  0x1 = 4 or 8 (on-the-fly).
9933 
9934                                                                  LMC writes this value to MR0[BL] in the selected DDR3 parts during power-up/init and, if
9935                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
9936                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
9937                                                                  must be 1 in all the DDR3/4 parts attached to all ranks during normal operation. */
9938         uint64_t qoff                  : 1;  /**< [ 11: 11](R/W) Qoff enable. 0: enable; 1: disable.
9939                                                                  LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks during power-
9940                                                                  up/init, write-leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
9941                                                                  and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
9942                                                                  LMC()_CONFIG[RANKMASK,INIT_STATUS] and LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. The
9943                                                                  MR1[Qoff] value must be 0 in all the DDR3 parts attached to all ranks during normal
9944                                                                  operation. */
9945         uint64_t tdqs                  : 1;  /**< [ 10: 10](R/W) TDQS enable. 0: disable. LMC writes this value to MR1[TDQS] in the DDR3 parts in the
9946                                                                  selected ranks during power-up/init, write-leveling, and, if
9947                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
9948                                                                  See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK,INIT_STATUS] and
9949                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
9950         uint64_t wlev                  : 1;  /**< [  9:  9](RO) Write leveling enable. 0: disable. LMC writes MR1[Level]=0 in the DDR3 parts in the
9951                                                                  selected ranks during power-up/init, write-leveling, and, if
9952                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
9953                                                                  leveling can only be initiated via the write leveling instruction sequence.) See
9954                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK,INIT_STATUS] and
9955                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
9956         uint64_t al                    : 2;  /**< [  8:  7](R/W) Reserved; must be zero.
9957                                                                  Internal:
9958                                                                  Additive latency:
9959                                                                  0x0: 0.
9960                                                                  0x1: CL-1.
9961                                                                  0x2: CL - 2.
9962                                                                  0x3: Reserved.
9963                                                                  LMC writes this value to MR1[AL] in the selected DDR3 parts during power-up/init, write
9964                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
9965                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
9966                                                                  LMC()_CONFIG[RANKMASK]
9967                                                                  and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value must equal the MR1[AL] value in
9968                                                                  all
9969                                                                  the DDR3 parts attached to all ranks during normal operation. See also
9970                                                                  LMC()_CONTROL[POCAS]. */
9971         uint64_t dll                   : 1;  /**< [  6:  6](R/W) DLL Enable. 0: enable; 1: disable. LMC writes this value to MR1[DLL] in the selected DDR3
9972                                                                  parts during power-up/init, write-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
9973                                                                  set, self-refresh entry and exit instruction sequences. See
9974                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START]
9975                                                                  and LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value
9976                                                                  must equal the MR1[DLL] value in all the DDR3 parts attached to all ranks during normal
9977                                                                  operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6, respectively, as per
9978                                                                  the JEDEC DDR3 specifications. */
9979         uint64_t mpr                   : 1;  /**< [  5:  5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR3 parts during power-up/init,
9980                                                                  read-leveling, and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit
9981                                                                  instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read-leveling
9982                                                                  instruction sequence. Read-leveling should only be initiated via the read-leveling
9983                                                                  instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
9984                                                                  LMC()_CONFIG[RANKMASK].
9985                                                                  The MR3[MPR] value must be 0 in all the DDR3 parts attached to all ranks during normal
9986                                                                  operation. */
9987         uint64_t mprloc                : 2;  /**< [  4:  3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts during
9988                                                                  power-up/init, read-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
9989                                                                  exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
9990                                                                  leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
9991                                                                  LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be 0 in all the DDR3 parts attached
9992                                                                  to all ranks during normal operation. */
9993         uint64_t cwl                   : 3;  /**< [  2:  0](R/W) CAS write latency.
9994 
9995                                                                  In DDR3 mode:
9996                                                                  0x0 = 5.
9997                                                                  0x1 = 6.
9998                                                                  0x2 = 7.
9999                                                                  0x3 = 8.
10000                                                                  0x4 = 9.
10001                                                                  0x5 = 10.
10002                                                                  0x6 = 11.
10003                                                                  0x7 = 12.
10004 
10005                                                                  In DDR4 mode:
10006                                                                  0x0 = 9.
10007                                                                  0x1 = 10.
10008                                                                  0x2 = 11.
10009                                                                  0x3 = 12.
10010                                                                  0x4 = 13.
10011                                                                  0x5 = 16.
10012                                                                  0x6 = 18.
10013                                                                  0x7 = Reserved.
10014 
10015                                                                  LMC writes this value to MR2[CWL] in the selected DDR3 parts during power-up/init, write
10016                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10017                                                                  instruction sequences. See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK] and
10018                                                                  LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. This value must equal the MR2[CWL] value in
10019                                                                  all the DDR3 parts attached to all ranks during normal operation.
10020                                                                  tCWL must be programmed to less than or equal to tCL for proper LMC operation. */
10021 #else /* Word 0 - Little Endian */
10022         uint64_t cwl                   : 3;  /**< [  2:  0](R/W) CAS write latency.
10023 
10024                                                                  In DDR3 mode:
10025                                                                  0x0 = 5.
10026                                                                  0x1 = 6.
10027                                                                  0x2 = 7.
10028                                                                  0x3 = 8.
10029                                                                  0x4 = 9.
10030                                                                  0x5 = 10.
10031                                                                  0x6 = 11.
10032                                                                  0x7 = 12.
10033 
10034                                                                  In DDR4 mode:
10035                                                                  0x0 = 9.
10036                                                                  0x1 = 10.
10037                                                                  0x2 = 11.
10038                                                                  0x3 = 12.
10039                                                                  0x4 = 13.
10040                                                                  0x5 = 16.
10041                                                                  0x6 = 18.
10042                                                                  0x7 = Reserved.
10043 
10044                                                                  LMC writes this value to MR2[CWL] in the selected DDR3 parts during power-up/init, write
10045                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10046                                                                  instruction sequences. See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK] and
10047                                                                  LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. This value must equal the MR2[CWL] value in
10048                                                                  all the DDR3 parts attached to all ranks during normal operation.
10049                                                                  tCWL must be programmed to less than or equal to tCL for proper LMC operation. */
10050         uint64_t mprloc                : 2;  /**< [  4:  3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts during
10051                                                                  power-up/init, read-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
10052                                                                  exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
10053                                                                  leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10054                                                                  LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be 0 in all the DDR3 parts attached
10055                                                                  to all ranks during normal operation. */
10056         uint64_t mpr                   : 1;  /**< [  5:  5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR3 parts during power-up/init,
10057                                                                  read-leveling, and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit
10058                                                                  instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read-leveling
10059                                                                  instruction sequence. Read-leveling should only be initiated via the read-leveling
10060                                                                  instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10061                                                                  LMC()_CONFIG[RANKMASK].
10062                                                                  The MR3[MPR] value must be 0 in all the DDR3 parts attached to all ranks during normal
10063                                                                  operation. */
10064         uint64_t dll                   : 1;  /**< [  6:  6](R/W) DLL Enable. 0: enable; 1: disable. LMC writes this value to MR1[DLL] in the selected DDR3
10065                                                                  parts during power-up/init, write-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
10066                                                                  set, self-refresh entry and exit instruction sequences. See
10067                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START]
10068                                                                  and LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value
10069                                                                  must equal the MR1[DLL] value in all the DDR3 parts attached to all ranks during normal
10070                                                                  operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6, respectively, as per
10071                                                                  the JEDEC DDR3 specifications. */
10072         uint64_t al                    : 2;  /**< [  8:  7](R/W) Reserved; must be zero.
10073                                                                  Internal:
10074                                                                  Additive latency:
10075                                                                  0x0: 0.
10076                                                                  0x1: CL-1.
10077                                                                  0x2: CL - 2.
10078                                                                  0x3: Reserved.
10079                                                                  LMC writes this value to MR1[AL] in the selected DDR3 parts during power-up/init, write
10080                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10081                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10082                                                                  LMC()_CONFIG[RANKMASK]
10083                                                                  and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value must equal the MR1[AL] value in
10084                                                                  all
10085                                                                  the DDR3 parts attached to all ranks during normal operation. See also
10086                                                                  LMC()_CONTROL[POCAS]. */
10087         uint64_t wlev                  : 1;  /**< [  9:  9](RO) Write leveling enable. 0: disable. LMC writes MR1[Level]=0 in the DDR3 parts in the
10088                                                                  selected ranks during power-up/init, write-leveling, and, if
10089                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
10090                                                                  leveling can only be initiated via the write leveling instruction sequence.) See
10091                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK,INIT_STATUS] and
10092                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
10093         uint64_t tdqs                  : 1;  /**< [ 10: 10](R/W) TDQS enable. 0: disable. LMC writes this value to MR1[TDQS] in the DDR3 parts in the
10094                                                                  selected ranks during power-up/init, write-leveling, and, if
10095                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
10096                                                                  See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK,INIT_STATUS] and
10097                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
10098         uint64_t qoff                  : 1;  /**< [ 11: 11](R/W) Qoff enable. 0: enable; 1: disable.
10099                                                                  LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks during power-
10100                                                                  up/init, write-leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
10101                                                                  and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10102                                                                  LMC()_CONFIG[RANKMASK,INIT_STATUS] and LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. The
10103                                                                  MR1[Qoff] value must be 0 in all the DDR3 parts attached to all ranks during normal
10104                                                                  operation. */
10105         uint64_t bl                    : 2;  /**< [ 13: 12](R/W) Burst length.
10106                                                                  0x0 = 8 (fixed).
10107                                                                  0x1 = 4 or 8 (on-the-fly).
10108 
10109                                                                  LMC writes this value to MR0[BL] in the selected DDR3 parts during power-up/init and, if
10110                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10111                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
10112                                                                  must be 1 in all the DDR3/4 parts attached to all ranks during normal operation. */
10113         uint64_t cl                    : 4;  /**< [ 17: 14](R/W) CAS latency.
10114 
10115                                                                  In DDR3 mode:
10116 
10117                                                                  0x2 = 5. 0x1 = 12.
10118                                                                  0x4 = 6. 0x3 = 13.
10119                                                                  0x6 = 7. 0x5 = 14.
10120                                                                  0x8 = 8. 0x7 = 15.
10121                                                                  0xA = 9. 0x9 = 16.
10122                                                                  0xC = 10.
10123                                                                  0xE = 11.
10124                                                                  0x0, 0xB, 0xD, 0xF = Reserved.
10125 
10126                                                                  In DDR4 mode:
10127 
10128                                                                  0x0 =  9. 0x1 = 10.
10129                                                                  0x2 = 11. 0x3 = 12.
10130                                                                  0x4 = 13. 0x5 = 14.
10131                                                                  0x6 = 15. 0x7 = 16.
10132                                                                  0x8 = 18. 0x9 = 20.
10133                                                                  0xA = 22. 0xB = 24.
10134                                                                  0xD = 17, 0xE = 19.
10135                                                                  0xF = 21, 0xC = Reserved.
10136 
10137                                                                  LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts during power-
10138                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
10139                                                                  sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,RANKMASK]. This value must equal the
10140                                                                  MR0[CAS Latency / CL] value in all the DDR3/4 parts attached to all ranks during normal
10141                                                                  operation.
10142 
10143                                                                  tCL must be programmed to greater than or equal to tCWL for proper LMC operation. */
10144         uint64_t rbt                   : 1;  /**< [ 18: 18](RO) Read burst. Type 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
10145                                                                  selected DDR3/DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set,
10146                                                                  self-refresh exit instruction sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,
10147                                                                  RANKMASK]. The MR0[RBT] value must be 1 in all the DDR3/DDR4 parts attached to all ranks
10148                                                                  during normal operation. */
10149         uint64_t tm                    : 1;  /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR3/DDR4 parts during power-
10150                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
10151                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
10152                                                                  The MR0[TM] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during normal
10153                                                                  operation. */
10154         uint64_t dllr                  : 1;  /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL] in the selected DDR3/DDR4 parts during power-
10155                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
10156                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
10157                                                                  The MR0[DLL] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during
10158                                                                  normal operation. */
10159         uint64_t wrp                   : 3;  /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
10160                                                                  RNDUP[TWR(ns) / Tcyc(ns)].
10161 
10162                                                                  DDR3:
10163                                                                  0x0 = 16.
10164                                                                  0x1 = 5.
10165                                                                  0x2 = 6.
10166                                                                  0x3 = 7.
10167                                                                  0x4 = 8.
10168                                                                  0x5 = 10.
10169                                                                  0x6 = 12.
10170                                                                  0x7 = 14.
10171 
10172                                                                  DDR4:
10173                                                                  0x0 = 10.
10174                                                                  0x1 = 12.
10175                                                                  0x2 = 14.
10176                                                                  0x3 = 16.
10177                                                                  0x4 = 18.
10178                                                                  0x5 = 20.
10179                                                                  0x6 = 24.
10180                                                                  0x7 = 22.
10181                                                                  0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10182                                                                  0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10183 
10184                                                                  LMC writes this value to MR0[WR] in the selected DDR3/DDR4 parts during power-up/init and,
10185                                                                  if
10186                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10187                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
10188                                                                  equal
10189                                                                  the MR0[WR] value in all the DDR3/DDR4 parts attached to all ranks during normal
10190                                                                  operation. */
10191         uint64_t ppd                   : 1;  /**< [ 24: 24](R/W) DLL control for precharge powerdown.
10192                                                                  0 = Slow exit (DLL off).
10193                                                                  1 = Fast exit (DLL on).
10194 
10195                                                                  LMC writes this value to MR0[PPD] in the selected DDR3/DDR4 parts during power-up/init
10196                                                                  and, if
10197                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10198                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
10199                                                                  equal
10200                                                                  the MR0[PPD] value in all the DDR3/DDR4 parts attached to all ranks during normal
10201                                                                  operation. */
10202         uint64_t al_ext                : 1;  /**< [ 25: 25](R/W) Reserved; must be zero.
10203                                                                  Internal:
10204                                                                  The extended bit for the new Additive latency settings for DDR4 3DS.
10205                                                                  Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
10206                                                                  of up to CL-6.
10207 
10208                                                                  0: CL - (LMC()_MODEREG_PARAMS0[AL])
10209                                                                  1: CL - (LMC()_MODEREG_PARAMS0[AL] + 4)
10210 
10211                                                                  See LMC()_MODEREG_PARAMS0[AL]. */
10212         uint64_t cl_ext                : 1;  /**< [ 26: 26](R/W) Reserved; must be zero.
10213                                                                  Internal:
10214                                                                  The extended bit for the proposed CAS Latency spec change. The new
10215                                                                  CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
10216                                                                  the A12 bit.
10217 
10218                                                                  See LMC()_MODEREG_PARAMS0[CL]. */
10219         uint64_t wrp_ext               : 1;  /**< [ 27: 27](R/W) A 1-bit extension to the WRP register. */
10220         uint64_t reserved_28_63        : 36;
10221 #endif /* Word 0 - End */
10222     } s;
10223     struct bdk_lmcx_modereg_params0_cn88xxp1
10224     {
10225 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
10226         uint64_t reserved_28_63        : 36;
10227         uint64_t wrp_ext               : 1;  /**< [ 27: 27](RO) Reserved. */
10228         uint64_t cl_ext                : 1;  /**< [ 26: 26](R/W) Reserved; must be zero.
10229                                                                  Internal:
10230                                                                  The extended bit for the proposed CAS Latency spec change. The new
10231                                                                  CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
10232                                                                  the A12 bit.
10233 
10234                                                                  See LMC()_MODEREG_PARAMS0[CL]. */
10235         uint64_t al_ext                : 1;  /**< [ 25: 25](R/W) Reserved; must be zero.
10236                                                                  Internal:
10237                                                                  The extended bit for the new Additive latency settings for DDR4 3DS.
10238                                                                  Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
10239                                                                  of up to CL-6.
10240 
10241                                                                  0: CL - (LMC()_MODEREG_PARAMS0[AL])
10242                                                                  1: CL - (LMC()_MODEREG_PARAMS0[AL] + 4)
10243 
10244                                                                  See LMC()_MODEREG_PARAMS0[AL]. */
10245         uint64_t ppd                   : 1;  /**< [ 24: 24](R/W) DLL control for precharge powerdown.
10246                                                                  0 = Slow exit (DLL off).
10247                                                                  1 = Fast exit (DLL on).
10248 
10249                                                                  LMC writes this value to MR0[PPD] in the selected DDR3/DDR4 parts during power-up/init
10250                                                                  and, if
10251                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10252                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
10253                                                                  equal
10254                                                                  the MR0[PPD] value in all the DDR3/DDR4 parts attached to all ranks during normal
10255                                                                  operation. */
10256         uint64_t wrp                   : 3;  /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
10257                                                                  RNDUP[TWR(ns) / Tcyc(ns)].
10258 
10259                                                                  DDR3:
10260                                                                  0x0 = 16.
10261                                                                  0x1 = 5.
10262                                                                  0x2 = 6.
10263                                                                  0x3 = 7.
10264                                                                  0x4 = 8.
10265                                                                  0x5 = 10.
10266                                                                  0x6 = 12.
10267                                                                  0x7 = 14.
10268 
10269                                                                  DDR4:
10270                                                                  0x0 = 10.
10271                                                                  0x1 = 12.
10272                                                                  0x2 = 14.
10273                                                                  0x3 = 16.
10274                                                                  0x4 = 18.
10275                                                                  0x5 = 20.
10276                                                                  0x6 = 24.
10277                                                                  0x7 = 22.
10278                                                                  0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10279                                                                  0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10280 
10281                                                                  LMC writes this value to MR0[WR] in the selected DDR3/DDR4 parts during power-up/init and,
10282                                                                  if
10283                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10284                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
10285                                                                  equal
10286                                                                  the MR0[WR] value in all the DDR3/DDR4 parts attached to all ranks during normal
10287                                                                  operation. */
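        /* Example (non-normative sketch, helper name hypothetical): selecting
         * the DDR4 encoding for this field from the required write recovery of
         * RNDUP[TWR(ns) / Tcyc(ns)] cycles, using the DDR4 table above. The
         * table is not monotonic in the encoding (0x6 = 24 but 0x7 = 22), and
         * 0x8 (26 cycles) is only reachable with
         * LMC()_MODEREG_PARAMS0[WRP_EXT] = 1, so the value below is the
         * combined {WRP_EXT, WRP} setting.
         *
         *   // Smallest encoding whose cycle count covers wr_cycles; -1 if none fits.
         *   static inline int ddr4_wrp_encode(int wr_cycles)
         *   {
         *       static const int cyc[9] = { 10, 12, 14, 16, 18, 20, 24, 22, 26 };
         *       int best = -1;
         *       for (int e = 0; e < 9; e++)
         *           if (cyc[e] >= wr_cycles && (best < 0 || cyc[e] < cyc[best]))
         *               best = e;
         *       return best;
         *   }
         *
         * For example, TWR = 15 ns at Tcyc = 1.25 ns gives RNDUP[15 / 1.25] = 12,
         * which selects encoding 0x1. */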
10288         uint64_t dllr                  : 1;  /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL] in the selected DDR3/DDR4 parts during power-
10289                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
10290                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
10291                                                                  The MR0[DLL] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during
10292                                                                  normal operation. */
10293         uint64_t tm                    : 1;  /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR3/DDR4 parts during power-
10294                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
10295                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
10296                                                                  The MR0[TM] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during normal
10297                                                                  operation. */
10298         uint64_t rbt                   : 1;  /**< [ 18: 18](RO) Read burst type. 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
10299                                                                  selected DDR3/DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set,
10300                                                                  self-refresh exit instruction sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,
10301                                                                  RANKMASK]. The MR0[RBT] value must be 1 in all the DDR3/DDR4 parts attached to all ranks
10302                                                                  during normal operation. */
10303         uint64_t cl                    : 4;  /**< [ 17: 14](R/W) CAS latency.
10304 
10305                                                                  In DDR3 mode:
10306 
10307                                                                  0x2 = 5. 0x1 = 12.
10308                                                                  0x4 = 6. 0x3 = 13.
10309                                                                  0x6 = 7. 0x5 = 14.
10310                                                                  0x8 = 8. 0x7 = 15.
10311                                                                  0xA = 9. 0x9 = 16.
10312                                                                  0xC = 10.
10313                                                                  0xE = 11.
10314                                                                  0x0, 0xB, 0xD, 0xF = Reserved.
10315 
10316                                                                  In DDR4 mode:
10317 
10318                                                                  0x0 =  9. 0x1 = 10.
10319                                                                  0x2 = 11. 0x3 = 12.
10320                                                                  0x4 = 13. 0x5 = 14.
10321                                                                  0x6 = 15. 0x7 = 16.
10322                                                                  0x8 = 18. 0x9 = 20.
10323                                                                  0xA = 22. 0xB = 24.
10324                                                                  0xD = 17, 0xE = 19.
10325                                                                  0xF = 21, 0xC = Reserved.
10326 
10327                                                                  LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts during power-
10328                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
10329                                                                  sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,RANKMASK]. This value must equal the
10330                                                                  MR0[CAS Latency / CL] value in all the DDR3/4 parts attached to all ranks during normal
10331                                                                  operation.
10332 
10333                                                                  tCL must be programmed to be greater than or equal to tCWL for proper LMC operation. */
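        /* Example (non-normative sketch, array name hypothetical): the DDR4
         * portion of the table above as a decode array, indexed by the 4-bit
         * field value; -1 marks the reserved encoding 0xC. A decode like this
         * is convenient when verifying the tCL >= tCWL requirement.
         *
         *   static const int ddr4_cl_from_enc[16] = {
         *        9, 10, 11, 12, 13, 14, 15, 16,
         *       18, 20, 22, 24, -1, 17, 19, 21
         *   };
         */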
10334         uint64_t bl                    : 2;  /**< [ 13: 12](R/W) Burst length.
10335                                                                  0x0 = 8 (fixed).
10336                                                                  0x1 = 4 or 8 (on-the-fly).
10337 
10338                                                                  LMC writes this value to MR0[BL] in the selected DDR3 parts during power-up/init and, if
10339                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10340                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
10341                                                                  must be 1 in all the DDR3/4 parts attached to all ranks during normal operation. */
10342         uint64_t qoff                  : 1;  /**< [ 11: 11](R/W) Qoff enable. 0: enable; 1: disable.
10343                                                                  LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks during power-
10344                                                                  up/init, write-leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
10345                                                                  and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10346                                                                  LMC()_CONFIG[RANKMASK,INIT_STATUS] and LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. The
10347                                                                  MR1[Qoff] value must be 0 in all the DDR3 parts attached to all ranks during normal
10348                                                                  operation. */
10349         uint64_t tdqs                  : 1;  /**< [ 10: 10](R/W) TDQS enable. 0: disable. LMC writes this value to MR1[TDQS] in the DDR3 parts in the
10350                                                                  selected ranks during power-up/init, write-leveling, and, if
10351                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
10352                                                                  See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK,INIT_STATUS] and
10353                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
10354         uint64_t wlev                  : 1;  /**< [  9:  9](RO) Write leveling enable. 0: disable. LMC writes MR1[Level]=0 in the DDR3 parts in the
10355                                                                  selected ranks during power-up/init, write-leveling, and, if
10356                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
10357                                                                  leveling can only be initiated via the write leveling instruction sequence.) See
10358                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK,INIT_STATUS] and
10359                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
10360         uint64_t al                    : 2;  /**< [  8:  7](R/W) Reserved; must be zero.
10361                                                                  Internal:
10362                                                                  Additive latency:
10363                                                                  0x0: 0.
10364                                                                  0x1: CL-1.
10365                                                                  0x2: CL - 2.
10366                                                                  0x3: Reserved.
10367                                                                  LMC writes this value to MR1[AL] in the selected DDR3 parts during power-up/init, write
10368                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10369                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10370                                                                  LMC()_CONFIG[RANKMASK]
10371                                                                  and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value must equal the MR1[AL] value in
10372                                                                  all
10373                                                                  the DDR3 parts attached to all ranks during normal operation. See also
10374                                                                  LMC()_CONTROL[POCAS]. */
10375         uint64_t dll                   : 1;  /**< [  6:  6](R/W) DLL Enable. 0: enable; 1: disable. LMC writes this value to MR1[DLL] in the selected DDR3
10376                                                                  parts during power-up/init, write-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
10377                                                                  set, self-refresh entry and exit instruction sequences. See
10378                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START]
10379                                                                  and LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value
10380                                                                  must equal the MR1[DLL] value in all the DDR3 parts attached to all ranks during normal
10381                                                                  operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6, respectively, as per
10382                                                                  the JEDEC DDR3 specifications. */
10383         uint64_t mpr                   : 1;  /**< [  5:  5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR3 parts during power-up/init,
10384                                                                  read-leveling, and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit
10385                                                                  instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read-leveling
10386                                                                  instruction sequence. Read-leveling should only be initiated via the read-leveling
10387                                                                  instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10388                                                                  LMC()_CONFIG[RANKMASK].
10389                                                                  The MR3[MPR] value must be 0 in all the DDR3 parts attached to all ranks during normal
10390                                                                  operation. */
10391         uint64_t mprloc                : 2;  /**< [  4:  3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts during
10392                                                                  power-up/init, read-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
10393                                                                  exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
10394                                                                  leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10395                                                                  LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be 0 in all the DDR3 parts attached
10396                                                                  to all ranks during normal operation. */
10397         uint64_t cwl                   : 3;  /**< [  2:  0](R/W) CAS write latency.
10398 
10399                                                                  In DDR3 mode:
10400                                                                  0x0 = 5.
10401                                                                  0x1 = 6.
10402                                                                  0x2 = 7.
10403                                                                  0x3 = 8.
10404                                                                  0x4 = 9.
10405                                                                  0x5 = 10.
10406                                                                  0x6 = 11.
10407                                                                  0x7 = 12.
10408 
10409                                                                  In DDR4 mode:
10410                                                                  0x0 = 9.
10411                                                                  0x1 = 10.
10412                                                                  0x2 = 11.
10413                                                                  0x3 = 12.
10414                                                                  0x4 = 14.
10415                                                                  0x5 = 16.
10416                                                                  0x6 = 18.
10417                                                                  0x7 = Reserved.
10418 
10419                                                                  LMC writes this value to MR2[CWL] in the selected DDR3 parts during power-up/init, write
10420                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10421                                                                  instruction sequences. See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK] and
10422                                                                  LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. This value must equal the MR2[CWL] value in
10423                                                                  all the DDR3 parts attached to all ranks during normal operation.
10424                                                                  tCWL must be programmed to be less than or equal to tCL for proper LMC operation. */
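        /* Example (non-normative sketch, names hypothetical): decoding the
         * 3-bit DDR4 [CWL] list above and checking it against the [CL] decode
         * sketched earlier, since tCWL must not exceed tCL.
         *
         *   static const int ddr4_cwl_from_enc[8] = { 9, 10, 11, 12, 14, 16, 18, -1 };
         *
         *   // Nonzero when both encodings are valid and tCL >= tCWL.
         *   static inline int lmc_latencies_ok(int cl_enc, int cwl_enc)
         *   {
         *       int cl  = ddr4_cl_from_enc[cl_enc & 0xf];
         *       int cwl = ddr4_cwl_from_enc[cwl_enc & 0x7];
         *       return cl > 0 && cwl > 0 && cl >= cwl;
         *   }
         */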
10425 #else /* Word 0 - Little Endian */
10426         uint64_t cwl                   : 3;  /**< [  2:  0](R/W) CAS write latency.
10427 
10428                                                                  In DDR3 mode:
10429                                                                  0x0 = 5.
10430                                                                  0x1 = 6.
10431                                                                  0x2 = 7.
10432                                                                  0x3 = 8.
10433                                                                  0x4 = 9.
10434                                                                  0x5 = 10.
10435                                                                  0x6 = 11.
10436                                                                  0x7 = 12.
10437 
10438                                                                  In DDR4 mode:
10439                                                                  0x0 = 9.
10440                                                                  0x1 = 10.
10441                                                                  0x2 = 11.
10442                                                                  0x3 = 12.
10443                                                                  0x4 = 14.
10444                                                                  0x5 = 16.
10445                                                                  0x6 = 18.
10446                                                                  0x7 = Reserved.
10447 
10448                                                                  LMC writes this value to MR2[CWL] in the selected DDR3 parts during power-up/init, write
10449                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10450                                                                  instruction sequences. See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK] and
10451                                                                  LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. This value must equal the MR2[CWL] value in
10452                                                                  all the DDR3 parts attached to all ranks during normal operation.
10453                                                                  tCWL must be programmed to be less than or equal to tCL for proper LMC operation. */
10454         uint64_t mprloc                : 2;  /**< [  4:  3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR3 parts during
10455                                                                  power-up/init, read-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
10456                                                                  exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
10457                                                                  leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10458                                                                  LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be 0 in all the DDR3 parts attached
10459                                                                  to all ranks during normal operation. */
10460         uint64_t mpr                   : 1;  /**< [  5:  5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR3 parts during power-up/init,
10461                                                                  read-leveling, and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit
10462                                                                  instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read-leveling
10463                                                                  instruction sequence. Read-leveling should only be initiated via the read-leveling
10464                                                                  instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10465                                                                  LMC()_CONFIG[RANKMASK].
10466                                                                  The MR3[MPR] value must be 0 in all the DDR3 parts attached to all ranks during normal
10467                                                                  operation. */
10468         uint64_t dll                   : 1;  /**< [  6:  6](R/W) DLL Enable. 0: enable; 1: disable. LMC writes this value to MR1[DLL] in the selected DDR3
10469                                                                  parts during power-up/init, write-leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
10470                                                                  set, self-refresh entry and exit instruction sequences. See
10471                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START]
10472                                                                  and LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value
10473                                                                  must equal the MR1[DLL] value in all the DDR3 parts attached to all ranks during normal
10474                                                                  operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6, respectively, as per
10475                                                                  the JEDEC DDR3 specifications. */
10476         uint64_t al                    : 2;  /**< [  8:  7](R/W) Reserved; must be zero.
10477                                                                  Internal:
10478                                                                  Additive latency:
10479                                                                  0x0: 0.
10480                                                                  0x1: CL-1.
10481                                                                  0x2: CL - 2.
10482                                                                  0x3: Reserved.
10483                                                                  LMC writes this value to MR1[AL] in the selected DDR3 parts during power-up/init, write
10484                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10485                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10486                                                                  LMC()_CONFIG[RANKMASK]
10487                                                                  and LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. This value must equal the MR1[AL] value in
10488                                                                  all
10489                                                                  the DDR3 parts attached to all ranks during normal operation. See also
10490                                                                  LMC()_CONTROL[POCAS]. */
10491         uint64_t wlev                  : 1;  /**< [  9:  9](RO) Write leveling enable. 0: disable. LMC writes MR1[Level]=0 in the DDR3 parts in the
10492                                                                  selected ranks during power-up/init, write-leveling, and, if
10493                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
10494                                                                  leveling can only be initiated via the write leveling instruction sequence.) See
10495                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK,INIT_STATUS] and
10496                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
10497         uint64_t tdqs                  : 1;  /**< [ 10: 10](R/W) TDQS enable. 0: disable. LMC writes this value to MR1[TDQS] in the DDR3 parts in the
10498                                                                  selected ranks during power-up/init, write-leveling, and, if
10499                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
10500                                                                  See LMC()_CONFIG[SEQ_SEL, INIT_START,RANKMASK,INIT_STATUS] and
10501                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
10502         uint64_t qoff                  : 1;  /**< [ 11: 11](R/W) Qoff enable. 0: enable; 1: disable.
10503                                                                  LMC writes this value to MR1[Qoff] in the DDR3 parts in the selected ranks during power-
10504                                                                  up/init, write-leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
10505                                                                  and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and
10506                                                                  LMC()_CONFIG[RANKMASK,INIT_STATUS] and LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. The
10507                                                                  MR1[Qoff] value must be 0 in all the DDR3 parts attached to all ranks during normal
10508                                                                  operation. */
10509         uint64_t bl                    : 2;  /**< [ 13: 12](R/W) Burst length.
10510                                                                  0x0 = 8 (fixed).
10511                                                                  0x1 = 4 or 8 (on-the-fly).
10512 
10513                                                                  LMC writes this value to MR0[BL] in the selected DDR3 parts during power-up/init and, if
10514                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10515                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
10516                                                                  must be 1 in all the DDR3/4 parts attached to all ranks during normal operation. */
10517         uint64_t cl                    : 4;  /**< [ 17: 14](R/W) CAS latency.
10518 
10519                                                                  In DDR3 mode:
10520 
10521                                                                  0x2 = 5. 0x1 = 12.
10522                                                                  0x4 = 6. 0x3 = 13.
10523                                                                  0x6 = 7. 0x5 = 14.
10524                                                                  0x8 = 8. 0x7 = 15.
10525                                                                  0xA = 9. 0x9 = 16.
10526                                                                  0xC = 10.
10527                                                                  0xE = 11.
10528                                                                  0x0, 0xB, 0xD, 0xF = Reserved.
10529 
10530                                                                  In DDR4 mode:
10531 
10532                                                                  0x0 =  9. 0x1 = 10.
10533                                                                  0x2 = 11. 0x3 = 12.
10534                                                                  0x4 = 13. 0x5 = 14.
10535                                                                  0x6 = 15. 0x7 = 16.
10536                                                                  0x8 = 18. 0x9 = 20.
10537                                                                  0xA = 22. 0xB = 24.
10538                                                                  0xD = 17, 0xE = 19.
10539                                                                  0xF = 21, 0xC = Reserved.
10540 
10541                                                                  LMC writes this value to MR0[CAS Latency / CL] in the selected DDR3 parts during power-
10542                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
10543                                                                  sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,RANKMASK]. This value must equal the
10544                                                                  MR0[CAS Latency / CL] value in all the DDR3/4 parts attached to all ranks during normal
10545                                                                  operation.
10546 
10547                                                                  tCL must be programmed to be greater than or equal to tCWL for proper LMC operation. */
10548         uint64_t rbt                   : 1;  /**< [ 18: 18](RO) Read burst type. 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
10549                                                                  selected DDR3/DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set,
10550                                                                  self-refresh exit instruction sequences. See LMC()_CONFIG[SEQ_SEL,INIT_START,
10551                                                                  RANKMASK]. The MR0[RBT] value must be 1 in all the DDR3/DDR4 parts attached to all ranks
10552                                                                  during normal operation. */
10553         uint64_t tm                    : 1;  /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR3/DDR4 parts during power-
10554                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
10555                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
10556                                                                  The MR0[TM] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during normal
10557                                                                  operation. */
10558         uint64_t dllr                  : 1;  /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL] in the selected DDR3/DDR4 parts during power-
10559                                                                  up/init and, if LMC()_CONFIG [SREF_WITH_DLL] is set, self-refresh exit instruction
10560                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK].
10561                                                                  The MR0[DLL] value must be 0 in all the DDR3/DDR4 parts attached to all ranks during
10562                                                                  normal operation. */
10563         uint64_t wrp                   : 3;  /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
10564                                                                  RNDUP[TWR(ns) / Tcyc(ns)].
10565 
10566                                                                  DDR3:
10567                                                                  0x0 = 16.
10568                                                                  0x1 = 5.
10569                                                                  0x2 = 6.
10570                                                                  0x3 = 7.
10571                                                                  0x4 = 8.
10572                                                                  0x5 = 10.
10573                                                                  0x6 = 12.
10574                                                                  0x7 = 14.
10575 
10576                                                                  DDR4:
10577                                                                  0x0 = 10.
10578                                                                  0x1 = 12.
10579                                                                  0x2 = 14.
10580                                                                  0x3 = 16.
10581                                                                  0x4 = 18.
10582                                                                  0x5 = 20.
10583                                                                  0x6 = 24.
10584                                                                  0x7 = 22.
10585                                                                  0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10586                                                                  0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10587 
10588                                                                  LMC writes this value to MR0[WR] in the selected DDR3/DDR4 parts during power-up/init and,
10589                                                                  if
10590                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10591                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
10592                                                                  equal
10593                                                                  the MR0[WR] value in all the DDR3/DDR4 parts attached to all ranks during normal
10594                                                                  operation. */
10595         uint64_t ppd                   : 1;  /**< [ 24: 24](R/W) DLL control for precharge powerdown.
10596                                                                  0 = Slow exit (DLL off).
10597                                                                  1 = Fast exit (DLL on).
10598 
10599                                                                  LMC writes this value to MR0[PPD] in the selected DDR3/DDR4 parts during power-up/init
10600                                                                  and, if
10601                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10602                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
10603                                                                  equal
10604                                                                  the MR0[PPD] value in all the DDR3/DDR4 parts attached to all ranks during normal
10605                                                                  operation. */
10606         uint64_t al_ext                : 1;  /**< [ 25: 25](R/W) Reserved; must be zero.
10607                                                                  Internal:
10608                                                                  The extended bit for the new Additive latency settings for DDR4 3DS.
10609                                                                  Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
10610                                                                  of up to CL-6.
10611 
10612                                                                  0: CL - (LMC()_MODEREG_PARAMS0[AL])
10613                                                                  1: CL - (LMC()_MODEREG_PARAMS0[AL] + 4)
10614 
10615                                                                  See LMC()_MODEREG_PARAMS0[AL]. */
10616         uint64_t cl_ext                : 1;  /**< [ 26: 26](R/W) Reserved; must be zero.
10617                                                                  Internal:
10618                                                                  The extended bit for the proposed CAS Latency spec change. The new
10619                                                                  CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
10620                                                                  the A12 bit.
10621 
10622                                                                  See LMC()_MODEREG_PARAMS0[CL]. */
10623         uint64_t wrp_ext               : 1;  /**< [ 27: 27](RO) Reserved. */
10624         uint64_t reserved_28_63        : 36;
10625 #endif /* Word 0 - End */
10626     } cn88xxp1;
10627     struct bdk_lmcx_modereg_params0_cn9
10628     {
10629 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
10630         uint64_t reserved_28_63        : 36;
10631         uint64_t wrp_ext               : 1;  /**< [ 27: 27](R/W) A 1-bit extension to the WRP register. */
10632         uint64_t cl_ext                : 1;  /**< [ 26: 26](R/W) Reserved; must be zero.
10633                                                                  Internal:
10634                                                                  The extended bit for the proposed CAS Latency spec change. The new
10635                                                                  CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
10636                                                                  the A12 bit.
10637 
10638                                                                  See LMC()_MODEREG_PARAMS0[CL]. */
10639         uint64_t al_ext                : 1;  /**< [ 25: 25](R/W) Reserved; must be zero.
10640                                                                  Internal:
10641                                                                  The extended bit for the new Additive latency settings for DDR4 3DS.
10642                                                                  Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
10643                                                                  of up to CL-6.
10644 
10645                                                                  0 = CL - (LMC()_MODEREG_PARAMS0[AL]).
10646                                                                  1 = CL - (LMC()_MODEREG_PARAMS0[AL] + 4).
10647 
10648                                                                  See LMC()_MODEREG_PARAMS0[AL]. */
10649         uint64_t ppd                   : 1;  /**< [ 24: 24](R/W) DLL control for precharge powerdown.
10650                                                                  0 = Slow exit (DLL off).
10651                                                                  1 = Fast exit (DLL on).
10652 
10653                                                                  LMC writes this value to MR0[PPD] in the selected DDR4 parts during power-up/init and, if
10654                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10655                                                                  LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
10656                                                                  equal the MR0[PPD] value in all the DDR4 parts attached to all ranks during normal
10657                                                                  operation. */
10658         uint64_t wrp                   : 3;  /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
10659                                                                  RNDUP[TWR(ns) / Tcyc(ns)].
10660 
10661                                                                  0x0 = 10.
10662                                                                  0x1 = 12.
10663                                                                  0x2 = 14.
10664                                                                  0x3 = 16.
10665                                                                  0x4 = 18.
10666                                                                  0x5 = 20.
10667                                                                  0x6 = 24.
10668                                                                  0x7 = 22.
10669                                                                  0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10670                                                                  0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10671 
10672                                                                  LMC writes this value to MR0[WR] in the selected DDR4 parts during power-up/init and, if
10673                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences.
10674                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
10675                                                                  This value must equal the MR0[WR] value in all the DDR4 parts attached to all ranks during
10676                                                                  normal operation. */
10677         uint64_t dllr                  : 1;  /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL Reset] in the selected DDR4 parts during power-
10678                                                                  up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
10679                                                                  sequences.
10680                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
10681                                                                  The MR0[DLL Reset] value must be zero in all the DDR4 parts attached to all ranks during normal
10682                                                                  operation. */
10683         uint64_t tm                    : 1;  /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR4 parts during power-
10684                                                                  up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
10685                                                                  sequences.
10686                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
10687                                                                  The MR0[TM] value must be zero in all the DDR4 parts attached to all ranks during normal
10688                                                                  operation. */
10689         uint64_t rbt                   : 1;  /**< [ 18: 18](RO) Read burst type. 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
10690                                                                  selected DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-
10691                                                                  refresh exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
10692                                                                  LMC()_CONFIG[RANKMASK]. The MR0[RBT] value must be one in all the DDR4 parts attached to all ranks
10693                                                                  during normal operation. */
10694         uint64_t cl                    : 4;  /**< [ 17: 14](R/W) CAS latency. Together with the [CL_EXT] field, the following shows all possible CAS latency
10695                                                                  values.
10696 
10697                                                                  0x0 =  9. 0x1 = 10.
10698                                                                  0x2 = 11. 0x3 = 12.
10699                                                                  0x4 = 13. 0x5 = 14.
10700                                                                  0x6 = 15. 0x7 = 16.
10701                                                                  0x8 = 18. 0x9 = 20.
10702                                                                  0xA = 22. 0xB = 24.
10703                                                                  0xC = 23. 0xD = 17.
10704                                                                  0xE = 19. 0xF = 21.
10705                                                                  0x10 = 25. 0x11 = 26.
10706                                                                  0x12 = 27. 0x13 = 28.
10707                                                                  0x14 = 29. 0x15 = 30.
10708                                                                  0x16 = 31. 0x17 = 32.
10709                                                                  0x18-0x1F = Reserved.
10710 
10711                                                                  LMC writes this value to MR0[CAS Latency / CL] in the selected DDR4 parts during power-
10712                                                                  up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
10713                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
10714                                                                  LMC()_CONFIG[RANKMASK]. This value must equal the
10715                                                                  MR0[CAS Latency / CL] value in all the DDR4 parts attached to all ranks during normal
10716                                                                  operation.
10717 
10718                                                                  tCL must be programmed to be greater than or equal to tCWL for proper LMC operation. */
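        /* Example (non-normative sketch, array name hypothetical): treating
         * {[CL_EXT], [CL]} as a single 5-bit index into the table above; the
         * reserved encodings 0x18-0x1F are marked with -1.
         *
         *   static const int ddr4_cl5_from_enc[32] = {
         *        9, 10, 11, 12, 13, 14, 15, 16,
         *       18, 20, 22, 24, 23, 17, 19, 21,
         *       25, 26, 27, 28, 29, 30, 31, 32,
         *       -1, -1, -1, -1, -1, -1, -1, -1
         *   };
         */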
10719         uint64_t bl                    : 2;  /**< [ 13: 12](R/W) Burst length.
10720                                                                  0x0 = 8 (fixed).
10721                                                                  0x1 = 4 or 8 (on-the-fly).
10722 
10723                                                                  LMC writes this value to MR0[BL] in the selected DDR4 parts during power-up/init and, if
10724                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10725                                                                  LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
10726                                                                  must be one in all the DDR4 parts attached to all ranks during normal operation. */
10727         uint64_t qoff                  : 1;  /**< [ 11: 11](R/W) Qoff enable. 0 = enable; 1 = disable.
10728                                                                  LMC writes this value to MR1[Qoff] in the DDR4 parts in the selected ranks during power
10729                                                                  up/init, write leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
10730                                                                  and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
10731                                                                  LMC()_CONFIG[RANKMASK], LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. The
10732                                                                  MR1[Qoff] value must be zero in all the DDR4 parts attached to all ranks during normal
10733                                                                  operation. */
10734         uint64_t tdqs                  : 1;  /**< [ 10: 10](R/W) TDQS enable. 0 = disable. LMC writes this value to MR1[TDQS] in the DDR4 parts in the
10735                                                                  selected ranks during power-up/init, write leveling, and, if
10736                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
10737                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK],
10738                                                                  LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
10739         uint64_t wlev                  : 1;  /**< [  9:  9](RO) Write leveling enable. 0 = disable. LMC writes MR1[Level]=0 in the DDR4 parts in the
10740                                                                  selected ranks during power-up/init, write leveling, and, if
10741                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
10742                                                                  leveling can only be initiated via the write leveling instruction sequence.) See
10743                                                                  LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK],
10744                                                                  LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
10745         uint64_t al                    : 2;  /**< [  8:  7](R/W) Reserved; must be zero.
10746                                                                  Internal:
10747                                                                  Additive latency:
10748                                                                  0x0: 0.
10749                                                                  0x1: CL-1.
10750                                                                  0x2: CL - 2.
10751                                                                  0x3: Reserved.
10752                                                                  LMC writes this value to MR1[AL] in the selected DDR4 parts during power-up/init, write
10753                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10754                                                                  instruction sequences.
10755                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK] and
10756                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. This value must equal the MR1[AL] value in all the DDR4
10757                                                                  parts attached to all ranks during normal operation. See also LMC()_CONTROL[POCAS]. */
10758         uint64_t dll                   : 1;  /**< [  6:  6](R/W) DLL Enable. 0 = enable; 1 = disable. LMC writes this value to MR1[DLL] in the selected
10759                                                                  DDR4
10760                                                                  parts during power-up/init, write leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
10761                                                                  set, self-refresh entry and exit instruction sequences.
10762                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK] and
10763                                                                  LMC()_RESET_CTL[DDR4PDOMAIN].
10764                                                                  This value must equal the MR1[DLL] value in all the DDR4 parts attached to all ranks
10765                                                                  during normal operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6,
10766                                                                  respectively, as per the JEDEC DDR4 specifications. */
10767         uint64_t mpr                   : 1;  /**< [  5:  5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR4 parts during power-up/init,
10768                                                                  read leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit
10769                                                                  instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read leveling
10770                                                                  instruction sequence. Read leveling should only be initiated via the read leveling
10771                                                                  instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
10772                                                                  LMC()_CONFIG[RANKMASK].
10773                                                                  The MR3[MPR] value must be zero in all the DDR4 parts attached to all ranks during normal
10774                                                                  operation. */
10775         uint64_t mprloc                : 2;  /**< [  4:  3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR4 parts during
10776                                                                  power-up/init, read leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
10777                                                                  exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
10778                                                                  leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
10779                                                                  LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be zero in all the DDR4 parts attached
10780                                                                  to all ranks during normal operation. */
10781         uint64_t cwl                   : 3;  /**< [  2:  0](R/W) CAS write latency.
10782 
10783                                                                  0x0 = 9.
10784                                                                  0x1 = 10.
10785                                                                  0x2 = 11.
10786                                                                  0x3 = 12.
10787                                                                  0x4 = 14.
10788                                                                  0x5 = 16.
10789                                                                  0x6 = 18.
10790                                                                  0x7 = 20.
10791 
10792                                                                  LMC writes this value to MR2[CWL] in the selected DDR4 parts during power-up/init, write
10793                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10794                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
10795                                                                  LMC()_CONFIG[RANKMASK] and
10796                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. This value must equal the MR2[CWL] value in
10797                                                                  all the DDR4 parts attached to all ranks during normal operation.
10798                                                                  tCWL must be programmed to be less than or equal to tCL for proper LMC operation. */
10799 #else /* Word 0 - Little Endian */
10800         uint64_t cwl                   : 3;  /**< [  2:  0](R/W) CAS write latency.
10801 
10802                                                                  0x0 = 9.
10803                                                                  0x1 = 10.
10804                                                                  0x2 = 11.
10805                                                                  0x3 = 12.
10806                                                                  0x4 = 14.
10807                                                                  0x5 = 16.
10808                                                                  0x6 = 18.
10809                                                                  0x7 = 20.
10810 
10811                                                                  LMC writes this value to MR2[CWL] in the selected DDR4 parts during power-up/init, write
10812                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10813                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
10814                                                                  LMC()_CONFIG[RANKMASK] and
10815                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. This value must equal the MR2[CWL] value in
10816                                                                  all the DDR4 parts attached to all ranks during normal operation.
10817                                                                  tCWL must be programmed to be less than or equal to tCL for proper LMC operation. */
10818         uint64_t mprloc                : 2;  /**< [  4:  3](R/W) MPR location. LMC writes this value to MR3[MPRLoc] in the selected DDR4 parts during
10819                                                                  power-up/init, read leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh
10820                                                                  exit instruction sequences. (LMC also writes MR3[MPRLoc] = 0 at the beginning of the read-
10821                                                                  leveling instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
10822                                                                  LMC()_CONFIG[RANKMASK]. The MR3[MPRLoc] value must be zero in all the DDR4 parts attached
10823                                                                  to all ranks during normal operation. */
10824         uint64_t mpr                   : 1;  /**< [  5:  5](R/W) MPR. LMC writes this value to MR3[MPR] in the selected DDR4 parts during power-up/init,
10825                                                                  read leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit
10826                                                                  instruction sequences. (LMC also writes MR3[MPR] = 1 at the beginning of the read leveling
10827                                                                  instruction sequence. Read leveling should only be initiated via the read leveling
10828                                                                  instruction sequence.) See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
10829                                                                  LMC()_CONFIG[RANKMASK].
10830                                                                  The MR3[MPR] value must be zero in all the DDR4 parts attached to all ranks during normal
10831                                                                  operation. */
10832         uint64_t dll                   : 1;  /**< [  6:  6](R/W) DLL Enable. 0 = enable; 1 = disable. LMC writes this value to MR1[DLL] in the selected
10833                                                                  DDR4
10834                                                                  parts during power-up/init, write leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is
10835                                                                  set, self-refresh entry and exit instruction sequences.
10836                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK] and
10837                                                                  LMC()_RESET_CTL[DDR4PDOMAIN].
10838                                                                  This value must equal the MR1[DLL] value in all the DDR4 parts attached to all ranks
10839                                                                  during normal operation. In DLL-off mode, CL/CWL must be programmed equal to 6/6,
10840                                                                  respectively, as per the JEDEC DDR4 specifications. */
10841         uint64_t al                    : 2;  /**< [  8:  7](R/W) Reserved; must be zero.
10842                                                                  Internal:
10843                                                                  Additive latency:
10844                                                                  0x0: 0.
10845                                                                  0x1: CL-1.
10846                                                                  0x2: CL - 2.
10847                                                                  0x3: Reserved.
10848                                                                  LMC writes this value to MR1[AL] in the selected DDR4 parts during power-up/init, write
10849                                                                  leveling, and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
10850                                                                  instruction sequences.
10851                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK] and
10852                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. This value must equal the MR1[AL] value in all the DDR4
10853                                                                  parts attached to all ranks during normal operation. See also LMC()_CONTROL[POCAS]. */
10854         uint64_t wlev                  : 1;  /**< [  9:  9](RO) Write leveling enable. 0 = disable. LMC writes MR1[Level]=0 in the DDR4 parts in the
10855                                                                  selected ranks during power-up/init, write leveling, and, if
10856                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit sequences. (Write
10857                                                                  leveling can only be initiated via the write leveling instruction sequence.) See
10858                                                                  LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK],
10859                                                                  LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
10860         uint64_t tdqs                  : 1;  /**< [ 10: 10](R/W) TDQS enable. 0 = disable. LMC writes this value to MR1[TDQS] in the DDR4 parts in the
10861                                                                  selected ranks during power-up/init, write leveling, and, if
10862                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
10863                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START], LMC()_CONFIG[RANKMASK],
10864                                                                  LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
10865         uint64_t qoff                  : 1;  /**< [ 11: 11](R/W) Qoff enable. 0 = enable; 1 = disable.
10866                                                                  LMC writes this value to MR1[Qoff] in the DDR4 parts in the selected ranks during power
10867                                                                  up/init, write leveling, and if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry
10868                                                                  and exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
10869                                                                  LMC()_CONFIG[RANKMASK], LMC()_CONFIG[INIT_STATUS] and LMC()_RESET_CTL[DDR4PDOMAIN]. The
10870                                                                  MR1[Qoff] value must be zero in all the DDR4 parts attached to all ranks during normal
10871                                                                  operation. */
10872         uint64_t bl                    : 2;  /**< [ 13: 12](R/W) Burst length.
10873                                                                  0x0 = 8 (fixed).
10874                                                                  0x1 = 4 or 8 (on-the-fly).
10875 
10876                                                                  LMC writes this value to MR0[BL] in the selected DDR4 parts during power-up/init and, if
10877                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10878                                                                  LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK]. The MR0[BL] value
10879                                                                  must be one in all the DDR4 parts attached to all ranks during normal operation. */
10880         uint64_t cl                    : 4;  /**< [ 17: 14](R/W) CAS latency. Together with the [CL_EXT] field, the following shows all possible CAS latency
10881                                                                  values.
10882 
10883                                                                  0x0 =  9. 0x1 = 10.
10884                                                                  0x2 = 11. 0x3 = 12.
10885                                                                  0x4 = 13. 0x5 = 14.
10886                                                                  0x6 = 15. 0x7 = 16.
10887                                                                  0x8 = 18. 0x9 = 20.
10888                                                                  0xA = 22. 0xB = 24.
10889                                                                  0xC = 23. 0xD = 17.
10890                                                                  0xE = 19. 0xF = 21.
10891                                                                  0x10 = 25. 0x11 = 26.
10892                                                                  0x12 = 27. 0x13 = 28.
10893                                                                  0x14 = 29. 0x15 = 30.
10894                                                                  0x16 = 31. 0x17 = 32.
10895                                                                  0x18-0x1F = Reserved.
10896 
10897                                                                  LMC writes this value to MR0[CAS Latency / CL] in the selected DDR4 parts during power-
10898                                                                  up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
10899                                                                  sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
10900                                                                  LMC()_CONFIG[RANKMASK]. This value must equal the
10901                                                                  MR0[CAS Latency / CL] value in all the DDR4 parts attached to all ranks during normal
10902                                                                  operation.
10903 
10904                                                                  tCL must be programmed to be greater than or equal to tCWL for proper LMC operation. */
10905         uint64_t rbt                   : 1;  /**< [ 18: 18](RO) Read burst. Type 1 = interleaved (fixed). LMC writes this value to MR0[RBT] in the
10906                                                                  selected DDR4 parts during power-up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-
10907                                                                  refresh exit instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START],
10908                                                                  LMC()_CONFIG[RANKMASK]. The MR0[RBT] value must be one in all the DDR4 parts attached to all ranks
10909                                                                  during normal operation. */
10910         uint64_t tm                    : 1;  /**< [ 19: 19](R/W) Test mode. LMC writes this value to MR0[TM] in the selected DDR4 parts during power-
10911                                                                  up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
10912                                                                  sequences.
10913                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
10914                                                                  The MR0[TM] value must be zero in all the DDR4 parts attached to all ranks during normal
10915                                                                  operation. */
10916         uint64_t dllr                  : 1;  /**< [ 20: 20](R/W) DLL reset. LMC writes this value to MR0[DLL Reset] in the selected DDR4 parts during power-
10917                                                                  up/init and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction
10918                                                                  sequences.
10919                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
10920                                                                  The MR0[DLL Reset] value must be zero in all the DDR4 parts attached to all ranks during normal
10921                                                                  operation. */
10922         uint64_t wrp                   : 3;  /**< [ 23: 21](R/W) Write recovery for auto precharge. Should be programmed to be equal to or greater than
10923                                                                  RNDUP[TWR(ns) / Tcyc(ns)].
10924 
10925                                                                  0x0 = 10.
10926                                                                  0x1 = 12.
10927                                                                  0x2 = 14.
10928                                                                  0x3 = 16.
10929                                                                  0x4 = 18.
10930                                                                  0x5 = 20.
10931                                                                  0x6 = 24.
10932                                                                  0x7 = 22.
10933                                                                  0x8 = 26. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10934                                                                  0x9-0xf = Reserved. (Note that LMC()_MODEREG_PARAMS0[WRP_EXT] = 1).
10935 
10936                                                                  LMC writes this value to MR0[WR] in the selected DDR4 parts during power-up/init and, if
10937                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences.
10938                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK].
10939                                                                  This value must equal the MR0[WR] value in all the DDR4 parts attached to all ranks during
10940                                                                  normal operation. */
10941         uint64_t ppd                   : 1;  /**< [ 24: 24](R/W) DLL control for precharge powerdown.
10942                                                                  0 = Slow exit (DLL off).
10943                                                                  1 = Fast exit (DLL on).
10944 
10945                                                                  LMC writes this value to MR0[PPD] in the selected DDR4 parts during power-up/init and, if
10946                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh exit instruction sequences. See
10947                                                                  LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK]. This value must
10948                                                                  equal the MR0[PPD] value in all the DDR4 parts attached to all ranks during normal
10949                                                                  operation. */
10950         uint64_t al_ext                : 1;  /**< [ 25: 25](R/W) Reserved; must be zero.
10951                                                                  Internal:
10952                                                                  The extended bit for the new Additive latency settings for DDR4 3DS.
10953                                                                  Together with LMC()_MODEREG_PARAMS0[AL], this covers additive latency settings
10954                                                                  of up to CL-6.
10955 
10956                                                                  0 = CL - (LMC()_MODEREG_PARAMS0[AL]).
10957                                                                  1 = CL - (LMC()_MODEREG_PARAMS0[AL] + 4).
10958 
10959                                                                  See LMC()_MODEREG_PARAMS0[AL]. */
10960         uint64_t cl_ext                : 1;  /**< [ 26: 26](R/W) Reserved; must be zero.
10961                                                                  Internal:
10962                                                                  The extended bit for the proposed CAS Latency spec change. The new
10963                                                                  CAS Latency in DDR4 DRAM is defined in MR0(A12,A6,A5,A4,A2). This bit sets
10964                                                                  the A12 bit.
10965 
10966                                                                  See LMC()_MODEREG_PARAMS0[CL]. */
10967         uint64_t wrp_ext               : 1;  /**< [ 27: 27](R/W) A 1-bit extension to the WRP register. */
10968         uint64_t reserved_28_63        : 36;
10969 #endif /* Word 0 - End */
10970     } cn9;
10971     /* struct bdk_lmcx_modereg_params0_s cn81xx; */
10972     /* struct bdk_lmcx_modereg_params0_s cn83xx; */
10973     /* struct bdk_lmcx_modereg_params0_s cn88xxp2; */
10974 };
10975 typedef union bdk_lmcx_modereg_params0 bdk_lmcx_modereg_params0_t;
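
/* Illustrative helpers (not part of the auto-generated register definitions,
 * and the function names are hypothetical): minimal sketches of how the CWL
 * and CL encodings documented in the CN9 field comments above map back to
 * cycle counts, starting from a raw 64-bit register value. They only assume
 * the cn9 view and the raw "u" word of the union defined above; production
 * code would normally obtain the raw value through the regular BDK CSR
 * accessors. */
static inline unsigned bdk_lmcx_modereg_params0_example_cwl(uint64_t raw)
{
    /* CWL encoding (3 bits): 0x0..0x7 = 9, 10, 11, 12, 14, 16, 18, 20. */
    static const unsigned char cwl_tab[8] = { 9, 10, 11, 12, 14, 16, 18, 20 };
    bdk_lmcx_modereg_params0_t p;
    p.u = raw;
    return cwl_tab[p.cn9.cwl];
}

static inline unsigned bdk_lmcx_modereg_params0_example_cl(uint64_t raw)
{
    /* CL encoding (4 bits plus [CL_EXT] as bit 4). Note the table is not
     * monotonic (0xC = 23, 0xD = 17, 0xE = 19, 0xF = 21); reserved codes
     * 0x18-0x1F are returned here as 0. */
    static const unsigned char cl_tab[32] = {
         9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24, 23, 17, 19, 21,
        25, 26, 27, 28, 29, 30, 31, 32,  0,  0,  0,  0,  0,  0,  0,  0,
    };
    bdk_lmcx_modereg_params0_t p;
    p.u = raw;
    return cl_tab[(p.cn9.cl_ext << 4) | p.cn9.cl];
}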
10976 
10977 static inline uint64_t BDK_LMCX_MODEREG_PARAMS0(unsigned long a) __attribute__ ((pure, always_inline));
10978 static inline uint64_t BDK_LMCX_MODEREG_PARAMS0(unsigned long a)
10979 {
10980     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
10981         return 0x87e0880001a8ll + 0x1000000ll * ((a) & 0x0);
10982     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
10983         return 0x87e0880001a8ll + 0x1000000ll * ((a) & 0x1);
10984     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
10985         return 0x87e0880001a8ll + 0x1000000ll * ((a) & 0x3);
10986     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
10987         return 0x87e0880001a8ll + 0x1000000ll * ((a) & 0x3);
10988     __bdk_csr_fatal("LMCX_MODEREG_PARAMS0", 1, a, 0, 0, 0);
10989 }
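
/* Example of the address arithmetic above (descriptive comment only): each LMC
 * instance is one 16 MB PF_BAR0 window apart, so on CN88XX LMC(2) this CSR
 * resolves to 0x87e0880001a8 + 2 * 0x1000000 = 0x87e08a0001a8. */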
10990 
10991 #define typedef_BDK_LMCX_MODEREG_PARAMS0(a) bdk_lmcx_modereg_params0_t
10992 #define bustype_BDK_LMCX_MODEREG_PARAMS0(a) BDK_CSR_TYPE_RSL
10993 #define basename_BDK_LMCX_MODEREG_PARAMS0(a) "LMCX_MODEREG_PARAMS0"
10994 #define device_bar_BDK_LMCX_MODEREG_PARAMS0(a) 0x0 /* PF_BAR0 */
10995 #define busnum_BDK_LMCX_MODEREG_PARAMS0(a) (a)
10996 #define arguments_BDK_LMCX_MODEREG_PARAMS0(a) (a),-1,-1,-1
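
/* Illustrative sketch (not generated content; the helper name is
 * hypothetical): turning the WRP programming rule from the field description
 * above, i.e. a value of at least RNDUP[TWR(ns) / Tcyc(ns)], into the
 * MR0[WR]/[WRP] encoding. The encoding is not monotonic (0x6 = 24 but
 * 0x7 = 22), and selecting 26 cycles additionally requires
 * LMC()_MODEREG_PARAMS0[WRP_EXT] = 1. */
static inline int bdk_lmcx_modereg_params0_example_wrp(unsigned twr_ns, unsigned tcyc_ps)
{
    /* Round tWR up to whole cycles, e.g. 15 ns at a 750 ps clock -> 20 cycles. */
    unsigned cycles = (twr_ns * 1000u + tcyc_ps - 1u) / tcyc_ps;
    static const struct { unsigned char cycles, encoding; } wrp_tab[] = {
        {10, 0x0}, {12, 0x1}, {14, 0x2}, {16, 0x3}, {18, 0x4},
        {20, 0x5}, {22, 0x7}, {24, 0x6}, {26, 0x8},
    };
    for (unsigned i = 0; i < sizeof(wrp_tab) / sizeof(wrp_tab[0]); i++)
        if (wrp_tab[i].cycles >= cycles)
            return wrp_tab[i].encoding;
    return -1; /* tWR larger than any supported write recovery encoding. */
}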
10997 
10998 /**
10999  * Register (RSL) lmc#_modereg_params1
11000  *
11001  * LMC Mode Register Parameters 1 Register
11002  * These parameters are written into the DDR4 MR0, MR1, MR2 and MR3 registers.
11003  */
11004 union bdk_lmcx_modereg_params1
11005 {
11006     uint64_t u;
11007     struct bdk_lmcx_modereg_params1_s
11008     {
11009 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
11010         uint64_t reserved_55_63        : 9;
11011         uint64_t rtt_wr_11_ext         : 1;  /**< [ 54: 54](R/W) Reserved.
11012                                                                  Internal:
11013                                                                  RTT_WR rank 3 extension bit for DDR4. */
11014         uint64_t rtt_wr_10_ext         : 1;  /**< [ 53: 53](R/W) Reserved.
11015                                                                  Internal:
11016                                                                  RTT_WR rank 2 extension bit for DDR4. */
11017         uint64_t rtt_wr_01_ext         : 1;  /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
11018         uint64_t rtt_wr_00_ext         : 1;  /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
11019         uint64_t db_output_impedance   : 3;  /**< [ 50: 48](R/W) Reserved.
11020                                                                  Internal:
11021                                                                  Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
11022                                                                  This is the default value used during Host Interface Write Leveling in LRDIMM
11023                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
11024                                                                  0x0 = RZQ/6 (40 ohm).
11025                                                                  0x1 = RZQ/7 (34 ohm).
11026                                                                  0x2 = RZQ/5 (48 ohm).
11027                                                                  0x3-0x7 = Reserved. */
11028         uint64_t rtt_nom_11            : 3;  /**< [ 47: 45](R/W) Reserved.
11029                                                                  Internal:
11030                                                                  RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
11031                                                                  parts when selected during power-up/init, write-leveling, and, if
11032                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11033                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11034                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11035                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11036                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11037                                                                  allowed. */
11038         uint64_t dic_11                : 2;  /**< [ 44: 43](R/W) Reserved.
11039                                                                  Internal:
11040                                                                  Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
11041                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11042                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11043                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11044                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11045         uint64_t rtt_wr_11             : 2;  /**< [ 42: 41](R/W) Reserved.
11046                                                                  Internal:
11047                                                                  RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
11048                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11049                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11050                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11051                                                                  LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
11052         uint64_t srt_11                : 1;  /**< [ 40: 40](R/W) Reserved.
11053                                                                  Internal:
11054                                                                  Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
11055                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11056                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11057                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11058                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11059         uint64_t asr_11                : 1;  /**< [ 39: 39](R/W) Reserved.
11060                                                                  Internal:
11061                                                                  Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
11062                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11063                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11064                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11065                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11066         uint64_t pasr_11               : 3;  /**< [ 38: 36](R/W) Reserved.
11067                                                                  Internal:
11068                                                                  Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
11069                                                                  DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11070                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11071                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11072                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11073         uint64_t rtt_nom_10            : 3;  /**< [ 35: 33](R/W) Reserved.
11074                                                                  Internal:
11075                                                                  RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
11076                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11077                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11078                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11079                                                                  LMC()_RESET_CTL [DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
11080                                                                  is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
11081                                                                  3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
11082                                                                  also allowed. */
11083         uint64_t dic_10                : 2;  /**< [ 32: 31](R/W) Reserved.
11084                                                                  Internal:
11085                                                                  Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
11086                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11087                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11088                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11089                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11090         uint64_t rtt_wr_10             : 2;  /**< [ 30: 29](R/W) Reserved.
11091                                                                  Internal:
11092                                                                  RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
11093                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11094                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11095                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11096                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11097         uint64_t srt_10                : 1;  /**< [ 28: 28](R/W) Reserved.
11098                                                                  Internal:
11099                                                                  Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
11100                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11101                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11102                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11103                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11104         uint64_t asr_10                : 1;  /**< [ 27: 27](R/W) Reserved.
11105                                                                  Internal:
11106                                                                  Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
11107                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11108                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11109                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11110                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11111         uint64_t pasr_10               : 3;  /**< [ 26: 24](R/W) Reserved.
11112                                                                  Internal:
11113                                                                  Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
11114                                                                  DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11115                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11116                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11117                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11118         uint64_t rtt_nom_01            : 3;  /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
11119                                                                  parts when selected during power-up/init, write-leveling, and, if
11120                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11121                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11122                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11123                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11124                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11125                                                                  allowed. */
11126         uint64_t dic_01                : 2;  /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
11127                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11128                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11129                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11130                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11131         uint64_t rtt_wr_01             : 2;  /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
11132                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11133                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11134                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11135                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11136         uint64_t srt_01                : 1;  /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
11137                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11138                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11139                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11140                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11141         uint64_t asr_01                : 1;  /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
11142                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11143                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11144                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11145                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11146         uint64_t pasr_01               : 3;  /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
11147                                                                  DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11148                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11149                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11150                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11151         uint64_t rtt_nom_00            : 3;  /**< [ 11:  9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
11152                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11153                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11154                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11155                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11156                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
11157                                                                  or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
11158                                                                  are also allowed. */
11159         uint64_t dic_00                : 2;  /**< [  8:  7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
11160                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11161                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11162                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11163                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11164         uint64_t rtt_wr_00             : 2;  /**< [  6:  5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
11165                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11166                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11167                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11168                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11169         uint64_t srt_00                : 1;  /**< [  4:  4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
11170                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11171                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11172                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11173                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11174         uint64_t asr_00                : 1;  /**< [  3:  3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
11175                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11176                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11177                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11178                                                                  LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
11179         uint64_t pasr_00               : 3;  /**< [  2:  0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
11180                                                                  DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11181                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11182                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11183                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11184 #else /* Word 0 - Little Endian */
11185         uint64_t pasr_00               : 3;  /**< [  2:  0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
11186                                                                  DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11187                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11188                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11189                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11190         uint64_t asr_00                : 1;  /**< [  3:  3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
11191                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11192                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11193                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11194                                                                  LMC()_RESET_CTL [DDR3PWARM,DDR3PSOFT]. */
11195         uint64_t srt_00                : 1;  /**< [  4:  4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
11196                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11197                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11198                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11199                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11200         uint64_t rtt_wr_00             : 2;  /**< [  6:  5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
11201                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11202                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11203                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11204                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11205         uint64_t dic_00                : 2;  /**< [  8:  7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
11206                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11207                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11208                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11209                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11210         uint64_t rtt_nom_00            : 3;  /**< [ 11:  9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
11211                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11212                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11213                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11214                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11215                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
11216                                                                  or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
11217                                                                  are also allowed. */
11218         uint64_t pasr_01               : 3;  /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
11219                                                                  DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11220                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11221                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11222                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11223         uint64_t asr_01                : 1;  /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
11224                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11225                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11226                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11227                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11228         uint64_t srt_01                : 1;  /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
11229                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11230                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11231                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11232                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11233         uint64_t rtt_wr_01             : 2;  /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
11234                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11235                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11236                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11237                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11238         uint64_t dic_01                : 2;  /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
11239                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11240                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11241                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11242                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11243         uint64_t rtt_nom_01            : 3;  /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
11244                                                                  parts when selected during power-up/init, write-leveling, and, if
11245                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11246                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11247                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11248                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11249                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11250                                                                  allowed. */
11251         uint64_t pasr_10               : 3;  /**< [ 26: 24](R/W) Reserved.
11252                                                                  Internal:
11253                                                                  Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
11254                                                                  DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11255                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11256                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11257                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11258         uint64_t asr_10                : 1;  /**< [ 27: 27](R/W) Reserved.
11259                                                                  Internal:
11260                                                                  Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
11261                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11262                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11263                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11264                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11265         uint64_t srt_10                : 1;  /**< [ 28: 28](R/W) Reserved.
11266                                                                  Internal:
11267                                                                  Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
11268                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11269                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11270                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11271                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11272         uint64_t rtt_wr_10             : 2;  /**< [ 30: 29](R/W) Reserved.
11273                                                                  Internal:
11274                                                                  RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
11275                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11276                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11277                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11278                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11279         uint64_t dic_10                : 2;  /**< [ 32: 31](R/W) Reserved.
11280                                                                  Internal:
11281                                                                  Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
11282                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11283                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11284                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11285                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11286         uint64_t rtt_nom_10            : 3;  /**< [ 35: 33](R/W) Reserved.
11287                                                                  Internal:
11288                                                                  RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
11289                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11290                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11291                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11292                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
11293                                                                  is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
11294                                                                  3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
11295                                                                  also allowed. */
11296         uint64_t pasr_11               : 3;  /**< [ 38: 36](R/W) Reserved.
11297                                                                  Internal:
11298                                                                  Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
11299                                                                  DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11300                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11301                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11302                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11303         uint64_t asr_11                : 1;  /**< [ 39: 39](R/W) Reserved.
11304                                                                  Internal:
11305                                                                  Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
11306                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11307                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11308                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11309                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11310         uint64_t srt_11                : 1;  /**< [ 40: 40](R/W) Reserved.
11311                                                                  Internal:
11312                                                                  Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
11313                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11314                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11315                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11316                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11317         uint64_t rtt_wr_11             : 2;  /**< [ 42: 41](R/W) Reserved.
11318                                                                  Internal:
11319                                                                  RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
11320                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11321                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11322                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11323                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11324         uint64_t dic_11                : 2;  /**< [ 44: 43](R/W) Reserved.
11325                                                                  Internal:
11326                                                                  Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
11327                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11328                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11329                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11330                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11331         uint64_t rtt_nom_11            : 3;  /**< [ 47: 45](R/W) Reserved.
11332                                                                  Internal:
11333                                                                  RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
11334                                                                  parts when selected during power-up/init, write-leveling, and, if
11335                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11336                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11337                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11338                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11339                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11340                                                                  allowed. */
11341         uint64_t db_output_impedance   : 3;  /**< [ 50: 48](R/W) Reserved.
11342                                                                  Internal:
11343                                                                  Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
11344                                                                  This is the default value used during Host Interface Write Leveling in LRDIMM
11345                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
11346                                                                  0x0 = RZQ/6 (40 ohm).
11347                                                                  0x1 = RZQ/7 (34 ohm).
11348                                                                  0x2 = RZQ/5 (48 ohm).
11349                                                                  0x3-0x7 = Reserved. */
11350         uint64_t rtt_wr_00_ext         : 1;  /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
11351         uint64_t rtt_wr_01_ext         : 1;  /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
11352         uint64_t rtt_wr_10_ext         : 1;  /**< [ 53: 53](R/W) Reserved.
11353                                                                  Internal:
11354                                                                  RTT_WR rank 2 extension bit for DDR4. */
11355         uint64_t rtt_wr_11_ext         : 1;  /**< [ 54: 54](R/W) Reserved.
11356                                                                  Internal:
11357                                                                  RTT_WR rank 3 extension bit for DDR4. */
11358         uint64_t reserved_55_63        : 9;
11359 #endif /* Word 0 - End */
11360     } s;
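    /* Usage sketch (illustrative only, not part of the auto-generated
     * definitions): software normally programs these per-rank fields through
     * this union and then runs one of the instruction sequences referenced
     * above so that LMC actually writes MR1/MR2 in the selected ranks. The
     * snippet assumes the generic BDK_CSR_READ/BDK_CSR_WRITE helpers from
     * bdk-csr.h and this register's BDK_LMCX_MODEREG_PARAMS1(a) address
     * accessor; "node" and "lmc" are placeholder variables and the field
     * values are arbitrary examples, not recommended settings.
     *
     *   bdk_lmcx_modereg_params1_t p1;
     *   p1.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS1(lmc));
     *   p1.s.rtt_nom_01 = 2;    // rank 1 MR1[RTT_NOM] = RZQ/2
     *   p1.s.rtt_wr_01  = 1;    // rank 1 MR2[RTT_WR]  = RZQ/4 (DDR3 encoding)
     *   p1.s.dic_01     = 0;    // rank 1 MR1[D.I.C.]  = RZQ/6
     *   BDK_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS1(lmc), p1.u);
     */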
11361     struct bdk_lmcx_modereg_params1_cn88xxp1
11362     {
11363 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
11364         uint64_t reserved_55_63        : 9;
11365         uint64_t rtt_wr_11_ext         : 1;  /**< [ 54: 54](RO) Reserved. */
11366         uint64_t rtt_wr_10_ext         : 1;  /**< [ 53: 53](RO) Reserved. */
11367         uint64_t rtt_wr_01_ext         : 1;  /**< [ 52: 52](RO) Reserved. */
11368         uint64_t rtt_wr_00_ext         : 1;  /**< [ 51: 51](RO) Reserved. */
11369         uint64_t db_output_impedance   : 3;  /**< [ 50: 48](R/W) Reserved.
11370                                                                  Internal:
11371                                                                  Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
11372                                                                  This is the default value used during Host Interface Write Leveling in LRDIMM
11373                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
11374                                                                  0x0 = RZQ/6 (40 ohm).
11375                                                                  0x1 = RZQ/7 (34 ohm).
11376                                                                  0x2 = RZQ/5 (48 ohm).
11377                                                                  0x3-0x7 = Reserved. */
11378         uint64_t rtt_nom_11            : 3;  /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
11379                                                                  parts when selected during power-up/init, write-leveling, and, if
11380                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11381                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11382                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11383                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11384                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11385                                                                  allowed. */
11386         uint64_t dic_11                : 2;  /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
11387                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11388                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11389                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11390                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11391         uint64_t rtt_wr_11             : 2;  /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
11392                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11393                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11394                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11395                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11396         uint64_t srt_11                : 1;  /**< [ 40: 40](R/W) Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
11397                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11398                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11399                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11400                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11401         uint64_t asr_11                : 1;  /**< [ 39: 39](R/W) Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
11402                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11403                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11404                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11405                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11406         uint64_t pasr_11               : 3;  /**< [ 38: 36](R/W) Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
11407                                                                  DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11408                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11409                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11410                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11411         uint64_t rtt_nom_10            : 3;  /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
11412                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11413                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11414                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11415                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
11416                                                                  is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
11417                                                                  3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
11418                                                                  also allowed. */
11419         uint64_t dic_10                : 2;  /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
11420                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11421                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11422                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11423                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11424         uint64_t rtt_wr_10             : 2;  /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
11425                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11426                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11427                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11428                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11429         uint64_t srt_10                : 1;  /**< [ 28: 28](R/W) Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
11430                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11431                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11432                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11433                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11434         uint64_t asr_10                : 1;  /**< [ 27: 27](R/W) Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
11435                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11436                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11437                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11438                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11439         uint64_t pasr_10               : 3;  /**< [ 26: 24](R/W) Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
11440                                                                  DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11441                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11442                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11443                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11444         uint64_t rtt_nom_01            : 3;  /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
11445                                                                  parts when selected during power-up/init, write-leveling, and, if
11446                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11447                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11448                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11449                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11450                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11451                                                                  allowed. */
11452         uint64_t dic_01                : 2;  /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
11453                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11454                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11455                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11456                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11457         uint64_t rtt_wr_01             : 2;  /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
11458                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11459                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11460                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11461                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11462         uint64_t srt_01                : 1;  /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
11463                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11464                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11465                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11466                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11467         uint64_t asr_01                : 1;  /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
11468                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11469                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11470                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11471                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11472         uint64_t pasr_01               : 3;  /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
11473                                                                  DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11474                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11475                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11476                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11477         uint64_t rtt_nom_00            : 3;  /**< [ 11:  9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
11478                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11479                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11480                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11481                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11482                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
11483                                                                  or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
11484                                                                  are also allowed. */
11485         uint64_t dic_00                : 2;  /**< [  8:  7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
11486                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11487                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11488                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11489                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11490         uint64_t rtt_wr_00             : 2;  /**< [  6:  5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
11491                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11492                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11493                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11494                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11495         uint64_t srt_00                : 1;  /**< [  4:  4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
11496                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11497                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11498                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11499                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11500         uint64_t asr_00                : 1;  /**< [  3:  3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
11501                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11502                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11503                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11504                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11505         uint64_t pasr_00               : 3;  /**< [  2:  0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
11506                                                                  DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11507                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11508                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11509                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11510 #else /* Word 0 - Little Endian */
11511         uint64_t pasr_00               : 3;  /**< [  2:  0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
11512                                                                  DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11513                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11514                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11515                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11516         uint64_t asr_00                : 1;  /**< [  3:  3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
11517                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11518                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11519                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11520                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11521         uint64_t srt_00                : 1;  /**< [  4:  4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
11522                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11523                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11524                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11525                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11526         uint64_t rtt_wr_00             : 2;  /**< [  6:  5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
11527                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11528                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11529                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11530                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11531         uint64_t dic_00                : 2;  /**< [  8:  7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
11532                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11533                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11534                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11535                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11536         uint64_t rtt_nom_00            : 3;  /**< [ 11:  9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
11537                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11538                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11539                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11540                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11541                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
11542                                                                  or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
11543                                                                  are also allowed. */
11544         uint64_t pasr_01               : 3;  /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
11545                                                                  DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11546                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11547                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11548                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11549         uint64_t asr_01                : 1;  /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
11550                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11551                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11552                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11553                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11554         uint64_t srt_01                : 1;  /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
11555                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11556                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11557                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11558                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11559         uint64_t rtt_wr_01             : 2;  /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
11560                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11561                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11562                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11563                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11564         uint64_t dic_01                : 2;  /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
11565                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11566                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11567                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11568                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11569         uint64_t rtt_nom_01            : 3;  /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
11570                                                                  parts when selected during power-up/init, write-leveling, and, if
11571                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11572                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11573                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11574                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11575                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11576                                                                  allowed. */
11577         uint64_t pasr_10               : 3;  /**< [ 26: 24](R/W) Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
11578                                                                  DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11579                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11580                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11581                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11582         uint64_t asr_10                : 1;  /**< [ 27: 27](R/W) Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
11583                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11584                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11585                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11586                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11587         uint64_t srt_10                : 1;  /**< [ 28: 28](R/W) Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
11588                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11589                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11590                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11591                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11592         uint64_t rtt_wr_10             : 2;  /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
11593                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11594                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11595                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11596                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11597         uint64_t dic_10                : 2;  /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
11598                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11599                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11600                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11601                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11602         uint64_t rtt_nom_10            : 3;  /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
11603                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11604                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11605                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11606                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
11607                                                                  is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
11608                                                                  3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
11609                                                                  also allowed. */
11610         uint64_t pasr_11               : 3;  /**< [ 38: 36](R/W) Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
11611                                                                  DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11612                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11613                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11614                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11615         uint64_t asr_11                : 1;  /**< [ 39: 39](R/W) Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
11616                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11617                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11618                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11619                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11620         uint64_t srt_11                : 1;  /**< [ 40: 40](R/W) Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
11621                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11622                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11623                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11624                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11625         uint64_t rtt_wr_11             : 2;  /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
11626                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11627                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11628                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11629                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11630         uint64_t dic_11                : 2;  /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
11631                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11632                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11633                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11634                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11635         uint64_t rtt_nom_11            : 3;  /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
11636                                                                  parts when selected during power-up/init, write-leveling, and, if
11637                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11638                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11639                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11640                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11641                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11642                                                                  allowed. */
11643         uint64_t db_output_impedance   : 3;  /**< [ 50: 48](R/W) Reserved.
11644                                                                  Internal:
11645                                                                  Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
11646                                                                  This is the default value used during Host Interface Write Leveling in LRDIMM
11647                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
11648                                                                  0x0 = RZQ/6 (40 ohm).
11649                                                                  0x1 = RZQ/7 (34 ohm).
11650                                                                  0x2 = RZQ/5 (48 ohm).
11651                                                                  0x3-0x7 = Reserved. */
11652         uint64_t rtt_wr_00_ext         : 1;  /**< [ 51: 51](RO) Reserved. */
11653         uint64_t rtt_wr_01_ext         : 1;  /**< [ 52: 52](RO) Reserved. */
11654         uint64_t rtt_wr_10_ext         : 1;  /**< [ 53: 53](RO) Reserved. */
11655         uint64_t rtt_wr_11_ext         : 1;  /**< [ 54: 54](RO) Reserved. */
11656         uint64_t reserved_55_63        : 9;
11657 #endif /* Word 0 - End */
11658     } cn88xxp1;
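    /* Usage sketch (illustrative only): on DDR4 parts MR2[RTT_WR] is a
     * three-bit field, so the two-bit RTT_WR_* fields are assumed to be
     * combined with the corresponding RTT_WR_*_EXT bits, the extension bit
     * supplying bit 2 of the encoding (e.g. the RZQ/3 setting, encoding 4,
     * is only reachable with the extension bit set). Continuing the "p1"
     * example above:
     *
     *   int rtt_wr = 4;                          // desired MR2[RTT_WR] encoding
     *   p1.s.rtt_wr_00     = rtt_wr & 0x3;       // low two bits, rank 0
     *   p1.s.rtt_wr_00_ext = (rtt_wr >> 2) & 1;  // DDR4-only extension bit
     */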
11659     struct bdk_lmcx_modereg_params1_cn9
11660     {
11661 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
11662         uint64_t reserved_55_63        : 9;
11663         uint64_t rtt_wr_11_ext         : 1;  /**< [ 54: 54](R/W) RTT_WR rank 3 extension bit for DDR4. */
11664         uint64_t rtt_wr_10_ext         : 1;  /**< [ 53: 53](R/W) RTT_WR rank 2 extension bit for DDR4. */
11665         uint64_t rtt_wr_01_ext         : 1;  /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
11666         uint64_t rtt_wr_00_ext         : 1;  /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
11667         uint64_t db_output_impedance   : 3;  /**< [ 50: 48](R/W) Reserved.
11668                                                                  Internal:
11669                                                                  Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
11670                                                                  This is the default value used during Host Interface Write Leveling in LRDIMM
11671                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
11672                                                                  0x0 = RZQ/6 (40 ohm).
11673                                                                  0x1 = RZQ/7 (34 ohm).
11674                                                                  0x2 = RZQ/5 (48 ohm).
11675                                                                  0x3-0x7 = Reserved. */
11676         uint64_t rtt_nom_11            : 3;  /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR4
11677                                                                  parts when selected during power-up/init, write leveling, and, if
11678                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11679                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11680                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4 specifications, if RTT_NOM is
11681                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11682                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11683                                                                  allowed. */
11684         uint64_t dic_11                : 2;  /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
11685                                                                  (i.e. DIMM1_CS1) DDR4 parts when selected during power-up/init, write leveling, and, if
11686                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11687                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11688                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. */
11689         uint64_t rtt_wr_11             : 2;  /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3
11690                                                                  (i.e. DIMM1_CS1) DDR4 parts when selected during power-up/init, write leveling,
11691                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11692                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11693                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
11694         uint64_t srt_11                : 1;  /**< [ 40: 40](R/W) Reserved.
11695                                                                  Internal:
11696                                                                  FIXME, No longer needed. */
11697         uint64_t asr_11                : 1;  /**< [ 39: 39](R/W) Reserved.
11698                                                                  Internal:
11699                                                                  FIXME, No longer needed. */
11700         uint64_t pasr_11               : 3;  /**< [ 38: 36](R/W) Reserved.
11701                                                                  Internal:
11702                                                                  FIXME, No longer needed. */
11703         uint64_t rtt_nom_10            : 3;  /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2
11704                                                                  (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling,
11705                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11706                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11707                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4
11708                                                                  specifications, if RTT_NOM is used during write operations, only values
11709                                                                  MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are allowed. Otherwise, values
11710                                                                  MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also allowed. */
11711         uint64_t dic_10                : 2;  /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
11712                                                                  (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling, and, if
11713                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11714                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11715                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. */
11716         uint64_t rtt_wr_10             : 2;  /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2
11717                                                                  (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling,
11718                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11719                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11720                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
11721         uint64_t srt_10                : 1;  /**< [ 28: 28](R/W) Reserved.
11722                                                                  Internal:
11723                                                                  FIXME, No longer needed. */
11724         uint64_t asr_10                : 1;  /**< [ 27: 27](R/W) Reserved.
11725                                                                  Internal:
11726                                                                  FIXME, no longer needed. */
11727         uint64_t pasr_10               : 3;  /**< [ 26: 24](R/W) Reserved.
11728                                                                  Internal:
11729                                                                  FIXME, no longer needed. */
11730         uint64_t rtt_nom_01            : 3;  /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR4
11731                                                                  parts when selected during power-up/init, write leveling, and, if
11732                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11733                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11734                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4 specifications, if RTT_NOM is
11735                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11736                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11737                                                                  allowed. */
11738         uint64_t dic_01                : 2;  /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
11739                                                                  (i.e. DIMM0_CS1) DDR4 parts when selected during power-up/init, write leveling, and, if
11740                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11741                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11742                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. */
11743         uint64_t rtt_wr_01             : 2;  /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1
11744                                                                  (i.e. DIMM0_CS1) DDR4 parts when selected during power-up/init, write leveling,
11745                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11746                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11747                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
11748         uint64_t srt_01                : 1;  /**< [ 16: 16](R/W) Reserved.
11749                                                                  Internal:
11750                                                                  FIXME, No longer needed. */
11751         uint64_t asr_01                : 1;  /**< [ 15: 15](R/W) Reserved.
11752                                                                  Internal:
11753                                                                  FIXME, No longer needed. */
11754         uint64_t pasr_01               : 3;  /**< [ 14: 12](R/W) Reserved.
11755                                                                  Internal:
11756                                                                  FIXME, No longer needed. */
11757         uint64_t rtt_nom_00            : 3;  /**< [ 11:  9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0
11758                                                                  (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling,
11759                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11760                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11761                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4
11762                                                                  specifications, if RTT_NOM is used during write operations, only values
11763                                                                  MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are allowed. Otherwise, values
11764                                                                  MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also allowed. */
11765         uint64_t dic_00                : 2;  /**< [  8:  7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
11766                                                                  (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling, and, if
11767                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11768                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11769                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. */
11770         uint64_t rtt_wr_00             : 2;  /**< [  6:  5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0
11771                                                                  (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling,
11772                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11773                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11774                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
11775         uint64_t srt_00                : 1;  /**< [  4:  4](R/W) Reserved.
11776                                                                  Internal:
11777                                                                  FIXME, No longer needed. */
11778         uint64_t asr_00                : 1;  /**< [  3:  3](R/W) Reserved.
11779                                                                  Internal:
11780                                                                  FIXME, No longer needed. */
11781         uint64_t pasr_00               : 3;  /**< [  2:  0](R/W) Reserved.
11782                                                                  Internal:
11783                                                                  FIXME, No longer needed. */
11784 #else /* Word 0 - Little Endian */
11785         uint64_t pasr_00               : 3;  /**< [  2:  0](R/W) Reserved.
11786                                                                  Internal:
11787                                                                  FIXME, No longer needed. */
11788         uint64_t asr_00                : 1;  /**< [  3:  3](R/W) Reserved.
11789                                                                  Internal:
11790                                                                  FIXME, No longer needed. */
11791         uint64_t srt_00                : 1;  /**< [  4:  4](R/W) Reserved.
11792                                                                  Internal:
11793                                                                  FIXME, No longer needed. */
11794         uint64_t rtt_wr_00             : 2;  /**< [  6:  5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0
11795                                                                  (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling,
11796                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11797                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11798                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
11799         uint64_t dic_00                : 2;  /**< [  8:  7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
11800                                                                  (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling, and, if
11801                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11802                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11803                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. */
11804         uint64_t rtt_nom_00            : 3;  /**< [ 11:  9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0
11805                                                                  (i.e. DIMM0_CS0) DDR4 parts when selected during power-up/init, write leveling,
11806                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11807                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11808                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4
11809                                                                  specifications, if RTT_NOM is used during write operations, only values
11810                                                                  MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are allowed. Otherwise, values
11811                                                                  MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also allowed. */
11812         uint64_t pasr_01               : 3;  /**< [ 14: 12](R/W) Reserved.
11813                                                                  Internal:
11814                                                                  FIXME, No longer needed. */
11815         uint64_t asr_01                : 1;  /**< [ 15: 15](R/W) Reserved.
11816                                                                  Internal:
11817                                                                  FIXME, No longer needed. */
11818         uint64_t srt_01                : 1;  /**< [ 16: 16](R/W) Reserved.
11819                                                                  Internal:
11820                                                                  FIXME, No longer needed. */
11821         uint64_t rtt_wr_01             : 2;  /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1
11822                                                                  (i.e. DIMM0_CS1) DDR4 parts when selected during power-up/init, write leveling,
11823                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11824                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11825                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
11826         uint64_t dic_01                : 2;  /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
11827                                                                  (i.e. DIMM0_CS1) DDR4 parts when selected during power-up/init, write leveling, and, if
11828                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11829                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11830                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. */
11831         uint64_t rtt_nom_01            : 3;  /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR4
11832                                                                  parts when selected during power-up/init, write leveling, and, if
11833                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11834                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11835                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4 specifications, if RTT_NOM is
11836                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11837                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11838                                                                  allowed. */
11839         uint64_t pasr_10               : 3;  /**< [ 26: 24](R/W) Reserved.
11840                                                                  Internal:
11841                                                                  FIXME, no longer needed. */
11842         uint64_t asr_10                : 1;  /**< [ 27: 27](R/W) Reserved.
11843                                                                  Internal:
11844                                                                  FIXME, no longer needed. */
11845         uint64_t srt_10                : 1;  /**< [ 28: 28](R/W) Reserved.
11846                                                                  Internal:
11847                                                                  FIXME, No longer needed. */
11848         uint64_t rtt_wr_10             : 2;  /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2
11849                                                                  (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling,
11850                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11851                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11852                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
11853         uint64_t dic_10                : 2;  /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
11854                                                                  (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling, and, if
11855                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11856                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11857                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. */
11858         uint64_t rtt_nom_10            : 3;  /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2
11859                                                                  (i.e. DIMM1_CS0) DDR4 parts when selected during power-up/init, write leveling,
11860                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11861                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11862                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4
11863                                                                  specifications, if RTT_NOM is used during write operations, only values
11864                                                                  MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are allowed. Otherwise, values
11865                                                                  MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also allowed. */
11866         uint64_t pasr_11               : 3;  /**< [ 38: 36](R/W) Reserved.
11867                                                                  Internal:
11868                                                                  FIXME, No longer needed. */
11869         uint64_t asr_11                : 1;  /**< [ 39: 39](R/W) Reserved.
11870                                                                  Internal:
11871                                                                  FIXME, No longer needed. */
11872         uint64_t srt_11                : 1;  /**< [ 40: 40](R/W) Reserved.
11873                                                                  Internal:
11874                                                                  FIXME, No longer needed. */
11875         uint64_t rtt_wr_11             : 2;  /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3
11876                                                                  (i.e. DIMM1_CS1) DDR4 parts when selected during power-up/init, write leveling,
11877                                                                  and, if LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit
11878                                                                  instruction sequences. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and
11879                                                                  LMC()_CONFIG[RANKMASK] and LMC()_RESET_CTL[DDR4PDOMAIN]. */
11880         uint64_t dic_11                : 2;  /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
11881                                                                  (i.e. DIMM1_CS1) DDR4 parts when selected during power-up/init, write leveling, and, if
11882                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11883                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11884                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. */
11885         uint64_t rtt_nom_11            : 3;  /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR4
11886                                                                  parts when selected during power-up/init, write leveling, and, if
11887                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11888                                                                  See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] and LMC()_CONFIG[RANKMASK] and
11889                                                                  LMC()_RESET_CTL[DDR4PDOMAIN]. Per JEDEC DDR4 specifications, if RTT_NOM is
11890                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11891                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11892                                                                  allowed. */
11893         uint64_t db_output_impedance   : 3;  /**< [ 50: 48](R/W) Reserved.
11894                                                                  Internal:
11895                                                                  Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
11896                                                                  This is the default value used during Host Interface Write Leveling in an LRDIMM
11897                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
11898                                                                  0x0 = RZQ/6 (40 ohm).
11899                                                                  0x1 = RZQ/7 (34 ohm).
11900                                                                  0x2 = RZQ/5 (48 ohm).
11901                                                                  0x3-0x7 = Reserved. */
11902         uint64_t rtt_wr_00_ext         : 1;  /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
11903         uint64_t rtt_wr_01_ext         : 1;  /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
11904         uint64_t rtt_wr_10_ext         : 1;  /**< [ 53: 53](R/W) RTT_WR rank 2 extension bit for DDR4. */
11905         uint64_t rtt_wr_11_ext         : 1;  /**< [ 54: 54](R/W) RTT_WR rank 3 extension bit for DDR4. */
11906         uint64_t reserved_55_63        : 9;
11907 #endif /* Word 0 - End */
11908     } cn9;
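    /*
     * Note: in the DDR4-capable layout above, each two-bit RTT_WR_* field gains
     * a one-bit RTT_WR_*_EXT companion.  Judging only by the field names, the
     * extension bit presumably supplies the additional MR2[RTT_WR] bit that
     * DDR4 defines beyond DDR3; this header does not say so explicitly, so
     * treat that as an inference.
     *
     * A hedged sketch of how firmware might program the rank 0 values, assuming
     * the usual BDK_CSR_MODIFY helper from libbdk-arch/bdk-csr.h, the generated
     * BDK_LMCX_MODEREG_PARAMS1(a) address macro defined later in this file, a
     * common .s view that exposes these field names, and placeholder 'node' and
     * 'lmc' identifiers:
     *
     *   BDK_CSR_MODIFY(c, node, BDK_LMCX_MODEREG_PARAMS1(lmc),
     *       c.s.rtt_nom_00    = 3;   // MR1[RTT_NOM] = RZQ/6 (40 ohm)
     *       c.s.dic_00        = 0;   // MR1[D.I.C.] output driver impedance
     *       c.s.rtt_wr_00     = 1;   // MR2[RTT_WR] dynamic ODT setting
     *       c.s.rtt_wr_00_ext = 0);  // stay within the two-bit encodings
     *
     * The new values only reach the DRAM mode registers when one of the
     * instruction sequences named in the descriptions above is run; see
     * LMC()_SEQ_CTL[SEQ_SEL] and LMC()_SEQ_CTL[INIT_START].
     */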
11909     /* struct bdk_lmcx_modereg_params1_s cn81xx; */
11910     struct bdk_lmcx_modereg_params1_cn83xx
11911     {
11912 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
11913         uint64_t reserved_55_63        : 9;
11914         uint64_t rtt_wr_11_ext         : 1;  /**< [ 54: 54](R/W) RTT_WR rank 3 extension bit for DDR4. */
11915         uint64_t rtt_wr_10_ext         : 1;  /**< [ 53: 53](R/W) RTT_WR rank 2 extension bit for DDR4. */
11916         uint64_t rtt_wr_01_ext         : 1;  /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
11917         uint64_t rtt_wr_00_ext         : 1;  /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
11918         uint64_t db_output_impedance   : 3;  /**< [ 50: 48](R/W) Reserved.
11919                                                                  Internal:
11920                                                                  Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
11921                                                                  This is the default value used during Host Interface Write Leveling in an LRDIMM
11922                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
11923                                                                  0x0 = RZQ/6 (40 ohm).
11924                                                                  0x1 = RZQ/7 (34 ohm).
11925                                                                  0x2 = RZQ/5 (48 ohm).
11926                                                                  0x3-0x7 = Reserved. */
11927         uint64_t rtt_nom_11            : 3;  /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
11928                                                                  parts when selected during power-up/init, write-leveling, and, if
11929                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11930                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11931                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11932                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11933                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
11934                                                                  allowed. */
11935         uint64_t dic_11                : 2;  /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
11936                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11937                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11938                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11939                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11940         uint64_t rtt_wr_11             : 2;  /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
11941                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11942                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11943                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11944                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11945         uint64_t srt_11                : 1;  /**< [ 40: 40](R/W) Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
11946                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11947                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11948                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11949                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11950         uint64_t asr_11                : 1;  /**< [ 39: 39](R/W) Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
11951                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11952                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11953                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
11954                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11955         uint64_t pasr_11               : 3;  /**< [ 38: 36](R/W) Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
11956                                                                  DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
11957                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11958                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11959                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11960         uint64_t rtt_nom_10            : 3;  /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
11961                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11962                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11963                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11964                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
11965                                                                  is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
11966                                                                  3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
11967                                                                  also allowed. */
11968         uint64_t dic_10                : 2;  /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
11969                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11970                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11971                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11972                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11973         uint64_t rtt_wr_10             : 2;  /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
11974                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11975                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11976                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11977                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11978         uint64_t srt_10                : 1;  /**< [ 28: 28](R/W) Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
11979                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11980                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11981                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11982                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11983         uint64_t asr_10                : 1;  /**< [ 27: 27](R/W) Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
11984                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
11985                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
11986                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11987                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
11988         uint64_t pasr_10               : 3;  /**< [ 26: 24](R/W) Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
11989                                                                  DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
11990                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11991                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11992                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
11993         uint64_t rtt_nom_01            : 3;  /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
11994                                                                  parts when selected during power-up/init, write-leveling, and, if
11995                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
11996                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
11997                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
11998                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
11999                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
12000                                                                  allowed. */
12001         uint64_t dic_01                : 2;  /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
12002                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
12003                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12004                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12005                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12006         uint64_t rtt_wr_01             : 2;  /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
12007                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12008                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12009                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12010                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
12011         uint64_t srt_01                : 1;  /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
12012                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
12013                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12014                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12015                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12016         uint64_t asr_01                : 1;  /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
12017                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12018                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12019                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12020                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
12021         uint64_t pasr_01               : 3;  /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
12022                                                                  DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
12023                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12024                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12025                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12026         uint64_t rtt_nom_00            : 3;  /**< [ 11:  9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
12027                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12028                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12029                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12030                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
12031                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
12032                                                                  or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
12033                                                                  are also allowed. */
12034         uint64_t dic_00                : 2;  /**< [  8:  7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
12035                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
12036                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12037                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12038                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12039         uint64_t rtt_wr_00             : 2;  /**< [  6:  5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
12040                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12041                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12042                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12043                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
12044         uint64_t srt_00                : 1;  /**< [  4:  4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
12045                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
12046                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12047                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12048                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12049         uint64_t asr_00                : 1;  /**< [  3:  3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
12050                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12051                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12052                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12053                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12054         uint64_t pasr_00               : 3;  /**< [  2:  0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
12055                                                                  DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
12056                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12057                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12058                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12059 #else /* Word 0 - Little Endian */
12060         uint64_t pasr_00               : 3;  /**< [  2:  0](R/W) Partial array self-refresh rank 0. LMC writes this value to MR2[PASR] in the rank 0 (i.e.
12061                                                                  DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
12062                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12063                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12064                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12065         uint64_t asr_00                : 1;  /**< [  3:  3](R/W) Auto self-refresh rank 0. LMC writes this value to MR2[ASR] in the rank 0 (i.e. DIMM0_CS0)
12066                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12067                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12068                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12069                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12070         uint64_t srt_00                : 1;  /**< [  4:  4](R/W) Self-refresh temperature range rank 0. LMC writes this value to MR2[SRT] in the rank 0
12071                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
12072                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12073                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12074                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12075         uint64_t rtt_wr_00             : 2;  /**< [  6:  5](R/W) RTT_WR rank 0. LMC writes this value to MR2[RTT_WR] in the rank 0 (i.e. DIMM0_CS0) DDR3
12076                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12077                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12078                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12079                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
12080         uint64_t dic_00                : 2;  /**< [  8:  7](R/W) Output driver impedance control rank 0. LMC writes this value to MR1[D.I.C.] in the rank 0
12081                                                                  (i.e. DIMM0_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
12082                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12083                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12084                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12085         uint64_t rtt_nom_00            : 3;  /**< [ 11:  9](R/W) RTT_NOM rank 0. LMC writes this value to MR1[RTT_NOM] in the rank 0 (i.e. DIMM0_CS0) DDR3
12086                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12087                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12088                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12089                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
12090                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2),
12091                                                                  or 3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8)
12092                                                                  are also allowed. */
12093         uint64_t pasr_01               : 3;  /**< [ 14: 12](R/W) Partial array self-refresh rank 1. LMC writes this value to MR2[PASR] in the rank 1 (i.e.
12094                                                                  DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
12095                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12096                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12097                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12098         uint64_t asr_01                : 1;  /**< [ 15: 15](R/W) Auto self-refresh rank 1. LMC writes this value to MR2[ASR] in the rank 1 (i.e. DIMM0_CS1)
12099                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12100                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12101                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12102                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
12103         uint64_t srt_01                : 1;  /**< [ 16: 16](R/W) Self-refresh temperature range rank 1. LMC writes this value to MR2[SRT] in the rank 1
12104                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
12105                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12106                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12107                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12108         uint64_t rtt_wr_01             : 2;  /**< [ 18: 17](R/W) RTT_WR rank 1. LMC writes this value to MR2[RTT_WR] in the rank 1 (i.e. DIMM0_CS1) DDR3
12109                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12110                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12111                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12112                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
12113         uint64_t dic_01                : 2;  /**< [ 20: 19](R/W) Output driver impedance control rank 1. LMC writes this value to MR1[D.I.C.] in the rank 1
12114                                                                  (i.e. DIMM0_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
12115                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12116                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12117                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12118         uint64_t rtt_nom_01            : 3;  /**< [ 23: 21](R/W) RTT_NOM rank 1. LMC writes this value to MR1[RTT_NOM] in the rank 1 (i.e. DIMM0_CS1) DDR3
12119                                                                  parts when selected during power-up/init, write-leveling, and, if
12120                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12121                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12122                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
12123                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
12124                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
12125                                                                  allowed. */
12126         uint64_t pasr_10               : 3;  /**< [ 26: 24](R/W) Partial array self-refresh rank 2. LMC writes this value to MR2[PASR] in the rank 2 (i.e.
12127                                                                  DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
12128                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12129                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12130                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12131         uint64_t asr_10                : 1;  /**< [ 27: 27](R/W) Auto self-refresh rank 2. LMC writes this value to MR2[ASR] in the rank 2 (i.e. DIMM1_CS0)
12132                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12133                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12134                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12135                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
12136         uint64_t srt_10                : 1;  /**< [ 28: 28](R/W) Self-refresh temperature range rank 2. LMC writes this value to MR2[SRT] in the rank 2
12137                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
12138                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12139                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12140                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12141         uint64_t rtt_wr_10             : 2;  /**< [ 30: 29](R/W) RTT_WR rank 2. LMC writes this value to MR2[Rtt_WR] in the rank 2 (i.e. DIMM1_CS0) DDR3
12142                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12143                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12144                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12145                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
12146         uint64_t dic_10                : 2;  /**< [ 32: 31](R/W) Output driver impedance control rank 2. LMC writes this value to MR1[D.I.C.] in the rank 2
12147                                                                  (i.e. DIMM1_CS0) DDR3 parts when selected during power-up/init, write-leveling, and, if
12148                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12149                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12150                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12151         uint64_t rtt_nom_10            : 3;  /**< [ 35: 33](R/W) RTT_NOM rank 2. LMC writes this value to MR1[Rtt_Nom] in the rank 2 (i.e. DIMM1_CS0) DDR3
12152                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12153                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12154                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12155                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM
12156                                                                  is used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or
12157                                                                  3 (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are
12158                                                                  also allowed. */
12159         uint64_t pasr_11               : 3;  /**< [ 38: 36](R/W) Partial array self-refresh rank 3. LMC writes this value to MR2[PASR] in the rank 3 (i.e.
12160                                                                  DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
12161                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12162                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12163                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12164         uint64_t asr_11                : 1;  /**< [ 39: 39](R/W) Auto self-refresh rank 3. LMC writes this value to MR2[ASR] in the rank 3 (i.e. DIMM1_CS1)
12165                                                                  DDR3 parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12166                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12167                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
12168                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12169         uint64_t srt_11                : 1;  /**< [ 40: 40](R/W) Self-refresh temperature range rank 3. LMC writes this value to MR2[SRT] in the rank 3
12170                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
12171                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12172                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START], LMC()_CONFIG[RANKMASK] and
12173                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12174         uint64_t rtt_wr_11             : 2;  /**< [ 42: 41](R/W) RTT_WR rank 3. LMC writes this value to MR2[Rtt_WR] in the rank 3 (i.e. DIMM1_CS1) DDR3
12175                                                                  parts when selected during power-up/init, write-leveling, and, if LMC()_CONFIG
12176                                                                  [SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences. See
12177                                                                  LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12178                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. */
12179         uint64_t dic_11                : 2;  /**< [ 44: 43](R/W) Output driver impedance control rank 3. LMC writes this value to MR1[D.I.C.] in the rank 3
12180                                                                  (i.e. DIMM1_CS1) DDR3 parts when selected during power-up/init, write-leveling, and, if
12181                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12182                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12183                                                                  LMC()_RESET_CTL[DDR3PWARM, DDR3PSOFT]. */
12184         uint64_t rtt_nom_11            : 3;  /**< [ 47: 45](R/W) RTT_NOM rank 3. LMC writes this value to MR1[RTT_NOM] in the rank 3 (i.e. DIMM1_CS1) DDR3
12185                                                                  parts when selected during power-up/init, write-leveling, and, if
12186                                                                  LMC()_CONFIG[SREF_WITH_DLL] is set, self-refresh entry and exit instruction sequences.
12187                                                                  See LMC()_SEQ_CTL[SEQ_SEL,INIT_START] and LMC()_CONFIG[RANKMASK] and
12188                                                                  LMC()_RESET_CTL[DDR3PWARM,DDR3PSOFT]. Per JEDEC DDR3 specifications, if RTT_NOM is
12189                                                                  used during write operations, only values MR1[RTT_NOM] = 1 (RZQ/4), 2 (RZQ/2), or 3
12190                                                                  (RZQ/6) are allowed. Otherwise, values MR1[RTT_NOM] = 4 (RZQ/12) and 5 (RZQ/8) are also
12191                                                                  allowed. */
12192         uint64_t db_output_impedance   : 3;  /**< [ 50: 48](R/W) Reserved.
12193                                                                  Internal:
12194                                                                  Host Interface DQ/DQS Output Driver Impedance control for DIMM0's Data Buffer.
12195                                                                  This is the default value used during Host Interface Write Leveling in LRDIMM
12196                                                                  environment, i.e., LMC()_CONFIG[LRDIMM_ENA] = 1, LMC()_SEQ_CTL[SEQ_SEL] = 0x6.
12197                                                                  0x0 = RZQ/6 (40 ohm).
12198                                                                  0x1 = RZQ/7 (34 ohm).
12199                                                                  0x2 = RZQ/5 (48 ohm).
12200                                                                  0x3-0x7 = Reserved. */
12201         uint64_t rtt_wr_00_ext         : 1;  /**< [ 51: 51](R/W) RTT_WR rank 0 extension bit for DDR4. */
12202         uint64_t rtt_wr_01_ext         : 1;  /**< [ 52: 52](R/W) RTT_WR rank 1 extension bit for DDR4. */
12203         uint64_t rtt_wr_10_ext         : 1;  /**< [ 53: 53](R/W) RTT_WR rank 2 extension bit for DDR4. */
12204         uint64_t rtt_wr_11_ext         : 1;  /**< [ 54: 54](R/W) RTT_WR rank 3 extension bit for DDR4. */
12205         uint64_t reserved_55_63        : 9;
12206 #endif /* Word 0 - End */
12207     } cn83xx;
12208     /* struct bdk_lmcx_modereg_params1_cn83xx cn88xxp2; */
12209 };
12210 typedef union bdk_lmcx_modereg_params1 bdk_lmcx_modereg_params1_t;
12211 
12212 static inline uint64_t BDK_LMCX_MODEREG_PARAMS1(unsigned long a) __attribute__ ((pure, always_inline));
12213 static inline uint64_t BDK_LMCX_MODEREG_PARAMS1(unsigned long a)
12214 {
12215     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
12216         return 0x87e088000260ll + 0x1000000ll * ((a) & 0x0);
12217     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
12218         return 0x87e088000260ll + 0x1000000ll * ((a) & 0x1);
12219     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
12220         return 0x87e088000260ll + 0x1000000ll * ((a) & 0x3);
12221     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
12222         return 0x87e088000260ll + 0x1000000ll * ((a) & 0x3);
12223     __bdk_csr_fatal("LMCX_MODEREG_PARAMS1", 1, a, 0, 0, 0);
12224 }
12225 
12226 #define typedef_BDK_LMCX_MODEREG_PARAMS1(a) bdk_lmcx_modereg_params1_t
12227 #define bustype_BDK_LMCX_MODEREG_PARAMS1(a) BDK_CSR_TYPE_RSL
12228 #define basename_BDK_LMCX_MODEREG_PARAMS1(a) "LMCX_MODEREG_PARAMS1"
12229 #define device_bar_BDK_LMCX_MODEREG_PARAMS1(a) 0x0 /* PF_BAR0 */
12230 #define busnum_BDK_LMCX_MODEREG_PARAMS1(a) (a)
12231 #define arguments_BDK_LMCX_MODEREG_PARAMS1(a) (a),-1,-1,-1
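
/* Editor's note -- illustrative sketch, not part of the auto-generated CSR
 * definitions: the RTT_NOM field descriptions above note that, per JEDEC DDR3,
 * only MR1[RTT_NOM] encodings 1 (RZQ/4), 2 (RZQ/2) and 3 (RZQ/6) are legal
 * when RTT_NOM is used during write operations. A hypothetical helper (name
 * and signature invented here for illustration) that checks a proposed
 * encoding against that rule might look like this: */
static inline int example_lmcx_rtt_nom_write_legal(uint64_t rtt_nom_encoding)
{
    /* Encodings 4 (RZQ/12) and 5 (RZQ/8) are only permitted when RTT_NOM is
     * not used during write operations. */
    return (rtt_nom_encoding >= 1) && (rtt_nom_encoding <= 3);
}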
12232 
12233 /**
12234  * Register (RSL) lmc#_modereg_params2
12235  *
12236  * LMC Mode Register Parameters Register 2
12237  * These parameters are written into the DDR4 mode registers.
12238  */
12239 union bdk_lmcx_modereg_params2
12240 {
12241     uint64_t u;
12242     struct bdk_lmcx_modereg_params2_s
12243     {
12244 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
12245         uint64_t reserved_41_63        : 23;
12246         uint64_t vrefdq_train_en       : 1;  /**< [ 40: 40](R/W) Vref training mode enable, used for all ranks. */
12247         uint64_t vref_range_11         : 1;  /**< [ 39: 39](R/W) VREF range for rank 3. */
12248         uint64_t vref_value_11         : 6;  /**< [ 38: 33](R/W) VREF value for rank 3. */
12249         uint64_t rtt_park_11           : 3;  /**< [ 32: 30](R/W) RTT park value for rank 3. */
12250         uint64_t vref_range_10         : 1;  /**< [ 29: 29](R/W) VREF range for rank 2. */
12251         uint64_t vref_value_10         : 6;  /**< [ 28: 23](R/W) VREF value for rank 2. */
12252         uint64_t rtt_park_10           : 3;  /**< [ 22: 20](R/W) RTT park value for rank 2. */
12253         uint64_t vref_range_01         : 1;  /**< [ 19: 19](R/W) VREF range for rank 1. */
12254         uint64_t vref_value_01         : 6;  /**< [ 18: 13](R/W) VREF value for rank 1. */
12255         uint64_t rtt_park_01           : 3;  /**< [ 12: 10](R/W) RTT park value for rank 1. */
12256         uint64_t vref_range_00         : 1;  /**< [  9:  9](R/W) VREF range for rank 0. */
12257         uint64_t vref_value_00         : 6;  /**< [  8:  3](R/W) VREF value for rank 0. */
12258         uint64_t rtt_park_00           : 3;  /**< [  2:  0](R/W) RTT park value for rank 0. */
12259 #else /* Word 0 - Little Endian */
12260         uint64_t rtt_park_00           : 3;  /**< [  2:  0](R/W) RTT park value for rank 0. */
12261         uint64_t vref_value_00         : 6;  /**< [  8:  3](R/W) VREF value for rank 0. */
12262         uint64_t vref_range_00         : 1;  /**< [  9:  9](R/W) VREF range for rank 0. */
12263         uint64_t rtt_park_01           : 3;  /**< [ 12: 10](R/W) RTT park value for rank 1. */
12264         uint64_t vref_value_01         : 6;  /**< [ 18: 13](R/W) VREF value for rank 1. */
12265         uint64_t vref_range_01         : 1;  /**< [ 19: 19](R/W) VREF range for rank 1. */
12266         uint64_t rtt_park_10           : 3;  /**< [ 22: 20](R/W) RTT park value for rank 2. */
12267         uint64_t vref_value_10         : 6;  /**< [ 28: 23](R/W) VREF value for rank 2. */
12268         uint64_t vref_range_10         : 1;  /**< [ 29: 29](R/W) VREF range for rank 2. */
12269         uint64_t rtt_park_11           : 3;  /**< [ 32: 30](R/W) RTT park value for rank 3. */
12270         uint64_t vref_value_11         : 6;  /**< [ 38: 33](R/W) VREF value for rank 3. */
12271         uint64_t vref_range_11         : 1;  /**< [ 39: 39](R/W) VREF range for rank 3. */
12272         uint64_t vrefdq_train_en       : 1;  /**< [ 40: 40](R/W) Vref training mode enable, used for all ranks. */
12273         uint64_t reserved_41_63        : 23;
12274 #endif /* Word 0 - End */
12275     } s;
12276     /* struct bdk_lmcx_modereg_params2_s cn; */
12277 };
12278 typedef union bdk_lmcx_modereg_params2 bdk_lmcx_modereg_params2_t;
12279 
12280 static inline uint64_t BDK_LMCX_MODEREG_PARAMS2(unsigned long a) __attribute__ ((pure, always_inline));
12281 static inline uint64_t BDK_LMCX_MODEREG_PARAMS2(unsigned long a)
12282 {
12283     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
12284         return 0x87e088000050ll + 0x1000000ll * ((a) & 0x0);
12285     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
12286         return 0x87e088000050ll + 0x1000000ll * ((a) & 0x1);
12287     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
12288         return 0x87e088000050ll + 0x1000000ll * ((a) & 0x3);
12289     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
12290         return 0x87e088000050ll + 0x1000000ll * ((a) & 0x3);
12291     __bdk_csr_fatal("LMCX_MODEREG_PARAMS2", 1, a, 0, 0, 0);
12292 }
12293 
12294 #define typedef_BDK_LMCX_MODEREG_PARAMS2(a) bdk_lmcx_modereg_params2_t
12295 #define bustype_BDK_LMCX_MODEREG_PARAMS2(a) BDK_CSR_TYPE_RSL
12296 #define basename_BDK_LMCX_MODEREG_PARAMS2(a) "LMCX_MODEREG_PARAMS2"
12297 #define device_bar_BDK_LMCX_MODEREG_PARAMS2(a) 0x0 /* PF_BAR0 */
12298 #define busnum_BDK_LMCX_MODEREG_PARAMS2(a) (a)
12299 #define arguments_BDK_LMCX_MODEREG_PARAMS2(a) (a),-1,-1,-1
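
/* Editor's note -- illustrative sketch, not part of the auto-generated CSR
 * definitions: a hypothetical decode (names invented for illustration) of a
 * raw LMC()_MODEREG_PARAMS2 value using the union above. How the 64-bit value
 * is read from the CSR is left to the BDK's generic CSR accessors and is out
 * of scope here; the raw value is simply passed in. */
static inline void example_lmcx_modereg_params2_rank0(uint64_t raw,
                                                      uint64_t *vref_value,
                                                      uint64_t *vref_range,
                                                      uint64_t *rtt_park)
{
    bdk_lmcx_modereg_params2_t p;
    p.u = raw;                       /* overlay the raw 64-bit CSR value */
    *vref_value = p.s.vref_value_00; /* VREF value for rank 0 */
    *vref_range = p.s.vref_range_00; /* VREF range for rank 0 */
    *rtt_park   = p.s.rtt_park_00;   /* RTT park value for rank 0 */
    /* The per-LMC physical address, if needed, comes from
     * BDK_LMCX_MODEREG_PARAMS2(a), e.g. BDK_LMCX_MODEREG_PARAMS2(0) for LMC0. */
}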
12300 
12301 /**
12302  * Register (RSL) lmc#_modereg_params3
12303  *
12304  * LMC Mode Register Parameters Register 3
12305  * These parameters are written into the DDR4 mode registers.
12306  */
12307 union bdk_lmcx_modereg_params3
12308 {
12309     uint64_t u;
12310     struct bdk_lmcx_modereg_params3_s
12311     {
12312 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
12313         uint64_t reserved_41_63        : 23;
12314         uint64_t tc_ref_range          : 1;  /**< [ 40: 40](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
12315         uint64_t reserved_39           : 1;
12316         uint64_t xrank_add_tccd_l      : 3;  /**< [ 38: 36](R/W) Reserved.
12317                                                                  Internal:
12318                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_L
12319                                                                  when crossing logical rank (to the same bank group) of a 3DS DRAM. */
12320         uint64_t xrank_add_tccd_s      : 3;  /**< [ 35: 33](R/W) Reserved.
12321                                                                  Internal:
12322                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_S
12323                                                                  when crossing logical rank (to a different bank group) of a 3DS DRAM. */
12324         uint64_t mpr_fmt               : 2;  /**< [ 32: 31](R/W) MPR format. */
12325         uint64_t wr_cmd_lat            : 2;  /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
12326         uint64_t fgrm                  : 3;  /**< [ 28: 26](R/W) Fine granularity refresh mode. */
12327         uint64_t temp_sense            : 1;  /**< [ 25: 25](R/W) Temperature sensor readout enable. */
12328         uint64_t pda                   : 1;  /**< [ 24: 24](R/W) Per DRAM addressability. */
12329         uint64_t gd                    : 1;  /**< [ 23: 23](R/W) Gear-down mode. */
12330         uint64_t crc                   : 1;  /**< [ 22: 22](R/W) CRC mode. */
12331         uint64_t lpasr                 : 2;  /**< [ 21: 20](R/W) LP auto self refresh. */
12332         uint64_t tccd_l                : 3;  /**< [ 19: 17](R/W) tCCD_L timing parameter:
12333                                                                  0x0 = 4.
12334                                                                  0x1 = 5.
12335                                                                  0x2 = 6.
12336                                                                  0x3 = 7.
12337                                                                  0x4 = 8.
12338                                                                  0x5-0x7 = reserved. */
12339         uint64_t rd_dbi                : 1;  /**< [ 16: 16](R/W) Read DBI. */
12340         uint64_t wr_dbi                : 1;  /**< [ 15: 15](R/W) Write DBI. */
12341         uint64_t dm                    : 1;  /**< [ 14: 14](R/W) Data mask enable. */
12342         uint64_t ca_par_pers           : 1;  /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
12343         uint64_t odt_pd                : 1;  /**< [ 12: 12](R/W) ODT in PD mode. */
12344         uint64_t par_lat_mode          : 3;  /**< [ 11:  9](R/W) Parity latency mode. */
12345         uint64_t wr_preamble           : 1;  /**< [  8:  8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
12346         uint64_t rd_preamble           : 1;  /**< [  7:  7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
12347         uint64_t sre_abort             : 1;  /**< [  6:  6](R/W) Self refresh abort. */
12348         uint64_t cal                   : 3;  /**< [  5:  3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
12349         uint64_t vref_mon              : 1;  /**< [  2:  2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
12350         uint64_t reserved_1            : 1;
12351         uint64_t max_pd                : 1;  /**< [  0:  0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
12352 #else /* Word 0 - Little Endian */
12353         uint64_t max_pd                : 1;  /**< [  0:  0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
12354         uint64_t reserved_1            : 1;
12355         uint64_t vref_mon              : 1;  /**< [  2:  2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
12356         uint64_t cal                   : 3;  /**< [  5:  3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
12357         uint64_t sre_abort             : 1;  /**< [  6:  6](R/W) Self refresh abort. */
12358         uint64_t rd_preamble           : 1;  /**< [  7:  7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
12359         uint64_t wr_preamble           : 1;  /**< [  8:  8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
12360         uint64_t par_lat_mode          : 3;  /**< [ 11:  9](R/W) Parity latency mode. */
12361         uint64_t odt_pd                : 1;  /**< [ 12: 12](R/W) ODT in PD mode. */
12362         uint64_t ca_par_pers           : 1;  /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
12363         uint64_t dm                    : 1;  /**< [ 14: 14](R/W) Data mask enable. */
12364         uint64_t wr_dbi                : 1;  /**< [ 15: 15](R/W) Write DBI. */
12365         uint64_t rd_dbi                : 1;  /**< [ 16: 16](R/W) Read DBI. */
12366         uint64_t tccd_l                : 3;  /**< [ 19: 17](R/W) tCCD_L timing parameter:
12367                                                                  0x0 = 4.
12368                                                                  0x1 = 5.
12369                                                                  0x2 = 6.
12370                                                                  0x3 = 7.
12371                                                                  0x4 = 8.
12372                                                                  0x5-0x7 = reserved. */
12373         uint64_t lpasr                 : 2;  /**< [ 21: 20](R/W) LP auto self refresh. */
12374         uint64_t crc                   : 1;  /**< [ 22: 22](R/W) CRC mode. */
12375         uint64_t gd                    : 1;  /**< [ 23: 23](R/W) Gear-down mode. */
12376         uint64_t pda                   : 1;  /**< [ 24: 24](R/W) Per DRAM addressability. */
12377         uint64_t temp_sense            : 1;  /**< [ 25: 25](R/W) Temperature sensor readout enable. */
12378         uint64_t fgrm                  : 3;  /**< [ 28: 26](R/W) Fine granularity refresh mode. */
12379         uint64_t wr_cmd_lat            : 2;  /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
12380         uint64_t mpr_fmt               : 2;  /**< [ 32: 31](R/W) MPR format. */
12381         uint64_t xrank_add_tccd_s      : 3;  /**< [ 35: 33](R/W) Reserved.
12382                                                                  Internal:
12383                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_S
12384                                                                  when crossing logical rank (to a different bank group) of a 3DS DRAM. */
12385         uint64_t xrank_add_tccd_l      : 3;  /**< [ 38: 36](R/W) Reserved.
12386                                                                  Internal:
12387                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_L
12388                                                                  when crossing logical rank (to the same bank group) of a 3DS DRAM. */
12389         uint64_t reserved_39           : 1;
12390         uint64_t tc_ref_range          : 1;  /**< [ 40: 40](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
12391         uint64_t reserved_41_63        : 23;
12392 #endif /* Word 0 - End */
12393     } s;
12394     struct bdk_lmcx_modereg_params3_cn8
12395     {
12396 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
12397         uint64_t reserved_39_63        : 25;
12398         uint64_t xrank_add_tccd_l      : 3;  /**< [ 38: 36](R/W) Reserved.
12399                                                                  Internal:
12400                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_L
12401                                                                  when crossing logical rank (to the same bank group) of a 3DS DRAM. */
12402         uint64_t xrank_add_tccd_s      : 3;  /**< [ 35: 33](R/W) Reserved.
12403                                                                  Internal:
12404                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_S
12405                                                                  when crossing logical rank (to a different bank group) of a 3DS DRAM. */
12406         uint64_t mpr_fmt               : 2;  /**< [ 32: 31](R/W) MPR format. */
12407         uint64_t wr_cmd_lat            : 2;  /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
12408         uint64_t fgrm                  : 3;  /**< [ 28: 26](R/W) Fine granularity refresh mode. */
12409         uint64_t temp_sense            : 1;  /**< [ 25: 25](R/W) Temperature sensor readout enable. */
12410         uint64_t pda                   : 1;  /**< [ 24: 24](R/W) Per DRAM addressability. */
12411         uint64_t gd                    : 1;  /**< [ 23: 23](R/W) Gear-down mode. */
12412         uint64_t crc                   : 1;  /**< [ 22: 22](R/W) CRC mode. */
12413         uint64_t lpasr                 : 2;  /**< [ 21: 20](R/W) LP auto self refresh. */
12414         uint64_t tccd_l                : 3;  /**< [ 19: 17](R/W) tCCD_L timing parameter:
12415                                                                  0x0 = 4.
12416                                                                  0x1 = 5.
12417                                                                  0x2 = 6.
12418                                                                  0x3 = 7.
12419                                                                  0x4 = 8.
12420                                                                  0x5-0x7 = reserved. */
12421         uint64_t rd_dbi                : 1;  /**< [ 16: 16](R/W) Read DBI. */
12422         uint64_t wr_dbi                : 1;  /**< [ 15: 15](R/W) Write DBI. */
12423         uint64_t dm                    : 1;  /**< [ 14: 14](R/W) Data mask enable. */
12424         uint64_t ca_par_pers           : 1;  /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
12425         uint64_t odt_pd                : 1;  /**< [ 12: 12](R/W) ODT in PD mode. */
12426         uint64_t par_lat_mode          : 3;  /**< [ 11:  9](R/W) Parity latency mode. */
12427         uint64_t wr_preamble           : 1;  /**< [  8:  8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
12428         uint64_t rd_preamble           : 1;  /**< [  7:  7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
12429         uint64_t sre_abort             : 1;  /**< [  6:  6](R/W) Self refresh abort. */
12430         uint64_t cal                   : 3;  /**< [  5:  3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
12431         uint64_t vref_mon              : 1;  /**< [  2:  2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
12432         uint64_t tc_ref                : 1;  /**< [  1:  1](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
12433         uint64_t max_pd                : 1;  /**< [  0:  0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
12434 #else /* Word 0 - Little Endian */
12435         uint64_t max_pd                : 1;  /**< [  0:  0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
12436         uint64_t tc_ref                : 1;  /**< [  1:  1](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
12437         uint64_t vref_mon              : 1;  /**< [  2:  2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
12438         uint64_t cal                   : 3;  /**< [  5:  3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
12439         uint64_t sre_abort             : 1;  /**< [  6:  6](R/W) Self refresh abort. */
12440         uint64_t rd_preamble           : 1;  /**< [  7:  7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
12441         uint64_t wr_preamble           : 1;  /**< [  8:  8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
12442         uint64_t par_lat_mode          : 3;  /**< [ 11:  9](R/W) Parity latency mode. */
12443         uint64_t odt_pd                : 1;  /**< [ 12: 12](R/W) ODT in PD mode. */
12444         uint64_t ca_par_pers           : 1;  /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
12445         uint64_t dm                    : 1;  /**< [ 14: 14](R/W) Data mask enable. */
12446         uint64_t wr_dbi                : 1;  /**< [ 15: 15](R/W) Write DBI. */
12447         uint64_t rd_dbi                : 1;  /**< [ 16: 16](R/W) Read DBI. */
12448         uint64_t tccd_l                : 3;  /**< [ 19: 17](R/W) tCCD_L timing parameter:
12449                                                                  0x0 = 4.
12450                                                                  0x1 = 5.
12451                                                                  0x2 = 6.
12452                                                                  0x3 = 7.
12453                                                                  0x4 = 8.
12454                                                                  0x5-0x7 = reserved. */
12455         uint64_t lpasr                 : 2;  /**< [ 21: 20](R/W) LP auto self refresh. */
12456         uint64_t crc                   : 1;  /**< [ 22: 22](R/W) CRC mode. */
12457         uint64_t gd                    : 1;  /**< [ 23: 23](R/W) Gear-down mode. */
12458         uint64_t pda                   : 1;  /**< [ 24: 24](R/W) Per DRAM addressability. */
12459         uint64_t temp_sense            : 1;  /**< [ 25: 25](R/W) Temperature sensor readout enable. */
12460         uint64_t fgrm                  : 3;  /**< [ 28: 26](R/W) Fine granularity refresh mode. */
12461         uint64_t wr_cmd_lat            : 2;  /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
12462         uint64_t mpr_fmt               : 2;  /**< [ 32: 31](R/W) MPR format. */
12463         uint64_t xrank_add_tccd_s      : 3;  /**< [ 35: 33](R/W) Reserved.
12464                                                                  Internal:
12465                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_S
12466                                                                  when crossing logical rank (to a different bank group) of a 3DS DRAM. */
12467         uint64_t xrank_add_tccd_l      : 3;  /**< [ 38: 36](R/W) Reserved.
12468                                                                  Internal:
12469                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_L
12470                                                                  when crossing logical rank (to the same bank group) of a 3DS DRAM. */
12471         uint64_t reserved_39_63        : 25;
12472 #endif /* Word 0 - End */
12473     } cn8;
12474     struct bdk_lmcx_modereg_params3_cn9
12475     {
12476 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
12477         uint64_t reserved_41_63        : 23;
12478         uint64_t tc_ref_range          : 1;  /**< [ 40: 40](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
12479         uint64_t tc_ref                : 1;  /**< [ 39: 39](R/W) Temperature controlled refresh mode: 0 = disable, 1 = enable. */
12480         uint64_t xrank_add_tccd_l      : 3;  /**< [ 38: 36](R/W) Reserved.
12481                                                                  Internal:
12482                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_L
12483                                                                  when crossing logical rank (to the same bank group) of a 3DS DRAM. */
12484         uint64_t xrank_add_tccd_s      : 3;  /**< [ 35: 33](R/W) Reserved.
12485                                                                  Internal:
12486                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_S
12487                                                                  when crossing logical rank (to a different bank group) of a 3DS DRAM. */
12488         uint64_t mpr_fmt               : 2;  /**< [ 32: 31](R/W) MPR format. */
12489         uint64_t wr_cmd_lat            : 2;  /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
12490         uint64_t fgrm                  : 3;  /**< [ 28: 26](R/W) Fine granularity refresh mode. */
12491         uint64_t temp_sense            : 1;  /**< [ 25: 25](R/W) Temperature sensor readout enable. */
12492         uint64_t pda                   : 1;  /**< [ 24: 24](R/W) Per DRAM addressability. */
12493         uint64_t gd                    : 1;  /**< [ 23: 23](R/W) Gear-down mode enable. Software must first ensure that LMC()_CONTROL[DDR2T] is
12494                                                                  cleared, prior to setting this field.
12495                                                                  Setting of this field must happen prior to running the INIT sequence at the start of DRAM
12496                                                                  bringup. */
12497         uint64_t crc                   : 1;  /**< [ 22: 22](R/W) CRC mode. */
12498         uint64_t lpasr                 : 2;  /**< [ 21: 20](R/W) LP auto self refresh. */
12499         uint64_t tccd_l                : 3;  /**< [ 19: 17](R/W) tCCD_L timing parameter:
12500                                                                  0x0 = 4.
12501                                                                  0x1 = 5.
12502                                                                  0x2 = 6.
12503                                                                  0x3 = 7.
12504                                                                  0x4 = 8.
12505                                                                  0x5-0x7 = reserved. */
12506         uint64_t rd_dbi                : 1;  /**< [ 16: 16](R/W) Read DBI. */
12507         uint64_t wr_dbi                : 1;  /**< [ 15: 15](R/W) Write DBI. */
12508         uint64_t dm                    : 1;  /**< [ 14: 14](R/W) Data mask enable. */
12509         uint64_t ca_par_pers           : 1;  /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
12510         uint64_t odt_pd                : 1;  /**< [ 12: 12](R/W) ODT in PD mode. */
12511         uint64_t par_lat_mode          : 3;  /**< [ 11:  9](R/W) Parity latency mode. */
12512         uint64_t wr_preamble           : 1;  /**< [  8:  8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
12513         uint64_t rd_preamble           : 1;  /**< [  7:  7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
12514         uint64_t sre_abort             : 1;  /**< [  6:  6](R/W) Self refresh abort. */
12515         uint64_t cal                   : 3;  /**< [  5:  3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
12516         uint64_t vref_mon              : 1;  /**< [  2:  2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
12517         uint64_t reserved_1            : 1;
12518         uint64_t max_pd                : 1;  /**< [  0:  0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
12519 #else /* Word 0 - Little Endian */
12520         uint64_t max_pd                : 1;  /**< [  0:  0](R/W) Maximum power-down mode: 0 = disable, 1 = enable. */
12521         uint64_t reserved_1            : 1;
12522         uint64_t vref_mon              : 1;  /**< [  2:  2](R/W) Internal VREF monitor: 0 = disable, 1 = enable. */
12523         uint64_t cal                   : 3;  /**< [  5:  3](R/W) CS-to-CMD/ADDR latency mode (cycles). */
12524         uint64_t sre_abort             : 1;  /**< [  6:  6](R/W) Self refresh abort. */
12525         uint64_t rd_preamble           : 1;  /**< [  7:  7](R/W) Read preamble, 0 = one nCK, 1 = two nCK. */
12526         uint64_t wr_preamble           : 1;  /**< [  8:  8](R/W) Write preamble, 0 = one nCK, 1 = two nCK. */
12527         uint64_t par_lat_mode          : 3;  /**< [ 11:  9](R/W) Parity latency mode. */
12528         uint64_t odt_pd                : 1;  /**< [ 12: 12](R/W) ODT in PD mode. */
12529         uint64_t ca_par_pers           : 1;  /**< [ 13: 13](R/W) Command/address persistent parity error mode. */
12530         uint64_t dm                    : 1;  /**< [ 14: 14](R/W) Data mask enable. */
12531         uint64_t wr_dbi                : 1;  /**< [ 15: 15](R/W) Write DBI. */
12532         uint64_t rd_dbi                : 1;  /**< [ 16: 16](R/W) Read DBI. */
12533         uint64_t tccd_l                : 3;  /**< [ 19: 17](R/W) tCCD_L timing parameter:
12534                                                                  0x0 = 4.
12535                                                                  0x1 = 5.
12536                                                                  0x2 = 6.
12537                                                                  0x3 = 7.
12538                                                                  0x4 = 8.
12539                                                                  0x5-0x7 = reserved. */
12540         uint64_t lpasr                 : 2;  /**< [ 21: 20](R/W) LP auto self refresh. */
12541         uint64_t crc                   : 1;  /**< [ 22: 22](R/W) CRC mode. */
12542         uint64_t gd                    : 1;  /**< [ 23: 23](R/W) Gear-down mode enable. Software must first ensure that LMC()_CONTROL[DDR2T] is
12543                                                                  cleared, prior to setting this field.
12544                                                                  Setting of this field must happen prior to running the INIT sequence at the start of DRAM
12545                                                                  bringup. */
12546         uint64_t pda                   : 1;  /**< [ 24: 24](R/W) Per DRAM addressability. */
12547         uint64_t temp_sense            : 1;  /**< [ 25: 25](R/W) Temperature sensor readout enable. */
12548         uint64_t fgrm                  : 3;  /**< [ 28: 26](R/W) Fine granularity refresh mode. */
12549         uint64_t wr_cmd_lat            : 2;  /**< [ 30: 29](R/W) Write command latency when CRC and DM are both enabled. */
12550         uint64_t mpr_fmt               : 2;  /**< [ 32: 31](R/W) MPR format. */
12551         uint64_t xrank_add_tccd_s      : 3;  /**< [ 35: 33](R/W) Reserved.
12552                                                                  Internal:
12553                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_S
12554                                                                  when crossing logical rank (to a different bank group) of a 3DS DRAM. */
12555         uint64_t xrank_add_tccd_l      : 3;  /**< [ 38: 36](R/W) Reserved.
12556                                                                  Internal:
12557                                                                  Add additional cycles on top of the 4 cycles applied to tCCD_L
12558                                                                  when crossing logical rank (to the same bank group) of a 3DS DRAM. */
12559         uint64_t tc_ref                : 1;  /**< [ 39: 39](R/W) Temperature controlled refresh mode: 0 = disable, 1 = enable. */
12560         uint64_t tc_ref_range          : 1;  /**< [ 40: 40](R/W) Temperature controlled refresh range: 0 = normal, 1 = extended. */
12561         uint64_t reserved_41_63        : 23;
12562 #endif /* Word 0 - End */
12563     } cn9;
12564 };
12565 typedef union bdk_lmcx_modereg_params3 bdk_lmcx_modereg_params3_t;
12566 
12567 static inline uint64_t BDK_LMCX_MODEREG_PARAMS3(unsigned long a) __attribute__ ((pure, always_inline));
12568 static inline uint64_t BDK_LMCX_MODEREG_PARAMS3(unsigned long a)
12569 {
12570     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
12571         return 0x87e088000058ll + 0x1000000ll * ((a) & 0x0);
12572     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
12573         return 0x87e088000058ll + 0x1000000ll * ((a) & 0x1);
12574     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
12575         return 0x87e088000058ll + 0x1000000ll * ((a) & 0x3);
12576     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
12577         return 0x87e088000058ll + 0x1000000ll * ((a) & 0x3);
12578     __bdk_csr_fatal("LMCX_MODEREG_PARAMS3", 1, a, 0, 0, 0);
12579 }
12580 
12581 #define typedef_BDK_LMCX_MODEREG_PARAMS3(a) bdk_lmcx_modereg_params3_t
12582 #define bustype_BDK_LMCX_MODEREG_PARAMS3(a) BDK_CSR_TYPE_RSL
12583 #define basename_BDK_LMCX_MODEREG_PARAMS3(a) "LMCX_MODEREG_PARAMS3"
12584 #define device_bar_BDK_LMCX_MODEREG_PARAMS3(a) 0x0 /* PF_BAR0 */
12585 #define busnum_BDK_LMCX_MODEREG_PARAMS3(a) (a)
12586 #define arguments_BDK_LMCX_MODEREG_PARAMS3(a) (a),-1,-1,-1
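
/* Editor's note -- illustrative sketch, not part of the auto-generated CSR
 * definitions: the [TCCD_L] field above encodes a tCCD_L of 4..8 nCK as
 * 0x0..0x4, with 0x5-0x7 reserved. A hypothetical helper (name invented for
 * illustration) mapping a cycle count to that encoding could be: */
static inline int example_lmcx_tccd_l_encode(unsigned int tccd_l_nck)
{
    if (tccd_l_nck < 4 || tccd_l_nck > 8)
        return -1;                 /* not representable; 0x5-0x7 are reserved */
    return (int)(tccd_l_nck - 4);  /* 4 -> 0x0, 5 -> 0x1, ..., 8 -> 0x4 */
}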
12587 
12588 /**
12589  * Register (RSL) lmc#_mpr_data0
12590  *
12591  * LMC MR Data Register 0
12592  * This register provides bits \<63:0\> of MPR data register.
12593  */
12594 union bdk_lmcx_mpr_data0
12595 {
12596     uint64_t u;
12597     struct bdk_lmcx_mpr_data0_s
12598     {
12599 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
12600         uint64_t mpr_data              : 64; /**< [ 63:  0](RO/H) MPR data bits\<63:0\>. Bits\<7:0\> represent the MPR data for the lowest-order x4 device (x4
12601                                                                  device 0); bits\<15:8\> represent x4 device 1; ..., bits\<63:56\> are for x4 device 7.
12602 
12603                                                                  This field is also used to store the results after running the general R/W training
12604                                                                  sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
12605                                                                  The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
12606                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field stores the R/W comparison output
12607                                                                  from all DQ63 - DQ0.
12608                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, this field stores the positive edge read data
12609                                                                  on a particular cycle coming from DQ63 - DQ0. */
12610 #else /* Word 0 - Little Endian */
12611         uint64_t mpr_data              : 64; /**< [ 63:  0](RO/H) MPR data bits\<63:0\>. Bits\<7:0\> represent the MPR data for the lowest-order x4 device (x4
12612                                                                  device 0); bits\<15:8\> represent x4 device 1; ..., bits\<63:56\> are for x4 device 7.
12613 
12614                                                                  This field is also used to store the results after running the general R/W training
12615                                                                  sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
12616                                                                  The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
12617                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field stores the R/W comparison output
12618                                                                  from all DQ63 - DQ0.
12619                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, this field stores the positive edge read data
12620                                                                  on a particular cycle coming from DQ63 - DQ0. */
12621 #endif /* Word 0 - End */
12622     } s;
12623     /* struct bdk_lmcx_mpr_data0_s cn; */
12624 };
12625 typedef union bdk_lmcx_mpr_data0 bdk_lmcx_mpr_data0_t;
12626 
12627 static inline uint64_t BDK_LMCX_MPR_DATA0(unsigned long a) __attribute__ ((pure, always_inline));
12628 static inline uint64_t BDK_LMCX_MPR_DATA0(unsigned long a)
12629 {
12630     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
12631         return 0x87e088000070ll + 0x1000000ll * ((a) & 0x0);
12632     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
12633         return 0x87e088000070ll + 0x1000000ll * ((a) & 0x1);
12634     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
12635         return 0x87e088000070ll + 0x1000000ll * ((a) & 0x3);
12636     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
12637         return 0x87e088000070ll + 0x1000000ll * ((a) & 0x3);
12638     __bdk_csr_fatal("LMCX_MPR_DATA0", 1, a, 0, 0, 0);
12639 }
12640 
12641 #define typedef_BDK_LMCX_MPR_DATA0(a) bdk_lmcx_mpr_data0_t
12642 #define bustype_BDK_LMCX_MPR_DATA0(a) BDK_CSR_TYPE_RSL
12643 #define basename_BDK_LMCX_MPR_DATA0(a) "LMCX_MPR_DATA0"
12644 #define device_bar_BDK_LMCX_MPR_DATA0(a) 0x0 /* PF_BAR0 */
12645 #define busnum_BDK_LMCX_MPR_DATA0(a) (a)
12646 #define arguments_BDK_LMCX_MPR_DATA0(a) (a),-1,-1,-1
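
/* Editor's note -- illustrative sketch, not part of the auto-generated CSR
 * definitions: per the [MPR_DATA] description above, LMC()_MPR_DATA0 packs one
 * byte of MPR data per x4 device, device 0 in bits <7:0> through device 7 in
 * bits <63:56>. A hypothetical extraction helper (name invented for
 * illustration): */
static inline uint8_t example_lmcx_mpr_data0_device_byte(uint64_t mpr_data0,
                                                         unsigned int x4_device)
{
    /* Only x4 devices 0..7 live in this register; devices 8..17 are in
     * LMC()_MPR_DATA1 and LMC()_MPR_DATA2. */
    return (uint8_t)((mpr_data0 >> (8 * (x4_device & 0x7))) & 0xff);
}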
12647 
12648 /**
12649  * Register (RSL) lmc#_mpr_data1
12650  *
12651  * LMC MR Data Register 1
12652  * This register provides bits \<127:64\> of MPR data register.
12653  */
12654 union bdk_lmcx_mpr_data1
12655 {
12656     uint64_t u;
12657     struct bdk_lmcx_mpr_data1_s
12658     {
12659 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
12660         uint64_t mpr_data              : 64; /**< [ 63:  0](RO/H) MPR data bits\<127:64\>. Bits\<7:0\> represent the MPR data for x4 device 8; bits\<15:8\>
12661                                                                  represent x4 device 9; ...; bits\<63:56\> are for x4 device 15.
12662 
12663                                                                  This field is also used to store the results after running the general R/W training
12664                                                                  sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
12665                                                                  The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
12666                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field stores the R/W comparison output
12667                                                                  from the ECC byte (DQ71 - DQ64).
12668                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, [MPR_DATA]\<7:0\> stores the positive edge read data
12669                                                                  on a particular cycle coming from the ECC byte (DQ71 - DQ64), while
12670                                                                  [MPR_DATA]\<63:8\> stores the negative edge read data coming from DQ55 - DQ0. */
12671 #else /* Word 0 - Little Endian */
12672         uint64_t mpr_data              : 64; /**< [ 63:  0](RO/H) MPR data bits\<127:64\>. Bits\<7:0\> represent the MPR data for x4 device 8; bits\<15:8\>
12673                                                                  represent x4 device 9; ...; bits\<63:56\> are for x4 device 15.
12674 
12675                                                                  This field is also used to store the results after running the general R/W training
12676                                                                  sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
12677                                                                  The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
12678                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field stores the R/W comparison output
12679                                                                  from the ECC byte (DQ71 - DQ64).
12680                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, [MPR_DATA]\<7:0\> stores the positive edge read data
12681                                                                  on a particular cycle coming from the ECC byte (DQ71 - DQ64), while
12682                                                                  [MPR_DATA]\<63:8\> stores the negative edge read data coming from DQ55 - DQ0. */
12683 #endif /* Word 0 - End */
12684     } s;
12685     /* struct bdk_lmcx_mpr_data1_s cn; */
12686 };
12687 typedef union bdk_lmcx_mpr_data1 bdk_lmcx_mpr_data1_t;
12688 
12689 static inline uint64_t BDK_LMCX_MPR_DATA1(unsigned long a) __attribute__ ((pure, always_inline));
12690 static inline uint64_t BDK_LMCX_MPR_DATA1(unsigned long a)
12691 {
12692     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
12693         return 0x87e088000078ll + 0x1000000ll * ((a) & 0x0);
12694     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
12695         return 0x87e088000078ll + 0x1000000ll * ((a) & 0x1);
12696     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
12697         return 0x87e088000078ll + 0x1000000ll * ((a) & 0x3);
12698     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
12699         return 0x87e088000078ll + 0x1000000ll * ((a) & 0x3);
12700     __bdk_csr_fatal("LMCX_MPR_DATA1", 1, a, 0, 0, 0);
12701 }
12702 
12703 #define typedef_BDK_LMCX_MPR_DATA1(a) bdk_lmcx_mpr_data1_t
12704 #define bustype_BDK_LMCX_MPR_DATA1(a) BDK_CSR_TYPE_RSL
12705 #define basename_BDK_LMCX_MPR_DATA1(a) "LMCX_MPR_DATA1"
12706 #define device_bar_BDK_LMCX_MPR_DATA1(a) 0x0 /* PF_BAR0 */
12707 #define busnum_BDK_LMCX_MPR_DATA1(a) (a)
12708 #define arguments_BDK_LMCX_MPR_DATA1(a) (a),-1,-1,-1
12709 
12710 /**
12711  * Register (RSL) lmc#_mpr_data2
12712  *
12713  * LMC MR Data Register 2
12714  * This register provides bits \<143:128\> of MPR data register.
12715  */
12716 union bdk_lmcx_mpr_data2
12717 {
12718     uint64_t u;
12719     struct bdk_lmcx_mpr_data2_s
12720     {
12721 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
12722         uint64_t reserved_16_63        : 48;
12723         uint64_t mpr_data              : 16; /**< [ 15:  0](RO/H) MPR data bits\<143:128\>. Bits\<7:0\> represent the MPR data for x4 device 16; bits\<15:8\>
12724                                                                  represent x4 device 17.
12725 
12726                                                                  This field is also used to store the results after running the general R/W training
12727                                                                  sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
12728                                                                  The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
12729                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field is not used.
12730                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, [MPR_DATA]\<15:0\> stores the negative edge read data
12731                                                                  on a particular cycle coming from DQ71 - DQ56. */
12732 #else /* Word 0 - Little Endian */
12733         uint64_t mpr_data              : 16; /**< [ 15:  0](RO/H) MPR data bits\<143:128\>. Bits\<7:0\> represent the MPR data for x4 device 16; bits\<15:8\>
12734                                                                  represent x4 device 17.
12735 
12736                                                                  This field is also used to store the results after running the general R/W training
12737                                                                  sequence (LMC()_SEQ_CTL[SEQ_SEL] = 0xE).
12738                                                                  The format of the stored results is controlled by LMC()_DBTRAIN_CTL[RW_TRAIN].
12739                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 1, this field is not used.
12740                                                                  When LMC()_DBTRAIN_CTL[RW_TRAIN] = 0, [MPR_DATA]\<15:0\> stores the negative edge read data
12741                                                                  on a particular cycle coming from DQ71 - DQ56. */
12742         uint64_t reserved_16_63        : 48;
12743 #endif /* Word 0 - End */
12744     } s;
12745     /* struct bdk_lmcx_mpr_data2_s cn; */
12746 };
12747 typedef union bdk_lmcx_mpr_data2 bdk_lmcx_mpr_data2_t;
12748 
12749 static inline uint64_t BDK_LMCX_MPR_DATA2(unsigned long a) __attribute__ ((pure, always_inline));
12750 static inline uint64_t BDK_LMCX_MPR_DATA2(unsigned long a)
12751 {
12752     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
12753         return 0x87e088000080ll + 0x1000000ll * ((a) & 0x0);
12754     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
12755         return 0x87e088000080ll + 0x1000000ll * ((a) & 0x1);
12756     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
12757         return 0x87e088000080ll + 0x1000000ll * ((a) & 0x3);
12758     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
12759         return 0x87e088000080ll + 0x1000000ll * ((a) & 0x3);
12760     __bdk_csr_fatal("LMCX_MPR_DATA2", 1, a, 0, 0, 0);
12761 }
12762 
12763 #define typedef_BDK_LMCX_MPR_DATA2(a) bdk_lmcx_mpr_data2_t
12764 #define bustype_BDK_LMCX_MPR_DATA2(a) BDK_CSR_TYPE_RSL
12765 #define basename_BDK_LMCX_MPR_DATA2(a) "LMCX_MPR_DATA2"
12766 #define device_bar_BDK_LMCX_MPR_DATA2(a) 0x0 /* PF_BAR0 */
12767 #define busnum_BDK_LMCX_MPR_DATA2(a) (a)
12768 #define arguments_BDK_LMCX_MPR_DATA2(a) (a),-1,-1,-1
12769 
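/*
 * Illustrative sketch (not part of the auto-generated definitions above):
 * given a raw 64-bit value already read from LMC(x)_MPR_DATA2 through the
 * BDK's usual CSR access layer (not shown here), this helper splits out the
 * MPR data for x4 devices 16 and 17 as described in the field comment. The
 * helper name is hypothetical.
 */
static inline void example_decode_lmcx_mpr_data2(uint64_t raw,
                                                 uint8_t *dev16, uint8_t *dev17)
{
    bdk_lmcx_mpr_data2_t r;
    r.u = raw;
    *dev16 = (uint8_t)(r.s.mpr_data & 0xff);        /* MPR data bits<135:128> */
    *dev17 = (uint8_t)((r.s.mpr_data >> 8) & 0xff); /* MPR data bits<143:136> */
}
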
12770 /**
12771  * Register (RSL) lmc#_mr_mpr_ctl
12772  *
12773  * LMC MR Write and MPR Control Register
12774  * This register provides the control functions when programming the MPR of DDR4 DRAMs.
12775  */
12776 union bdk_lmcx_mr_mpr_ctl
12777 {
12778     uint64_t u;
12779     struct bdk_lmcx_mr_mpr_ctl_s
12780     {
12781 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
12782         uint64_t reserved_61_63        : 3;
12783         uint64_t mr_wr_secure_key_ena  : 1;  /**< [ 60: 60](R/W) When set, this enables the issuing of security key with the
12784                                                                  unique address field A[17:0] set by LMC()_MR_MPR_CTL[MR_WR_ADDR]
12785                                                                  during the MRW sequence.
12786                                                                  Set this to one when executing DRAM post package repair manually
12787                                                                  by using MRW operation. */
12788         uint64_t pba_func_space        : 3;  /**< [ 59: 57](R/W) Set the function space selector during PBA mode of the MRW
12789                                                                  sequence. */
12790         uint64_t mr_wr_bg1             : 1;  /**< [ 56: 56](R/W) BG1 part of the address select for MRS in DDR4 mode. */
12791         uint64_t mpr_sample_dq_enable  : 1;  /**< [ 55: 55](R/W) Reserved.
12792                                                                  Internal:
12793                                                                  No longer used due to logic change from
12794                                                                  initial design. */
12795         uint64_t pda_early_dqx         : 1;  /**< [ 54: 54](R/W) When set, it enables lmc_dqx early for PDA/PBA operation. */
12796         uint64_t mr_wr_pba_enable      : 1;  /**< [ 53: 53](R/W) Reserved.
12797                                                                  Internal:
12798                                                                  Per buffer addressability write enable. When set, MRW operations use PBA,
12799                                                                  enabled by [MR_WR_PDA_MASK] per buffer. Only available for DDR4 LRDIMM. */
12800         uint64_t mr_wr_use_default_value : 1;/**< [ 52: 52](R/W) When set, write the value to the MR that is computed from the value set in various CSR
12801                                                                  fields that would be used during initialization, rather than using the value in the
12802                                                                  LMC()_MR_MPR_CTL[MR_WR_ADDR]. Useful to rewrite the same value or to change single
12803                                                                  bits without having to compute a whole new value for the MR. */
12804         uint64_t mpr_whole_byte_enable : 1;  /**< [ 51: 51](R/W) Reserved.
12805                                                                  Internal:
12806                                                                  Select a whole byte of DRAM data to read when whole-byte mode enabled. */
12807         uint64_t mpr_byte_select       : 4;  /**< [ 50: 47](R/W) Reserved.
12808                                                                  Internal:
12809                                                                  Select a whole byte of DRAM data to read when whole-byte mode enabled. */
12810         uint64_t mpr_bit_select        : 2;  /**< [ 46: 45](R/W) Select which of four bits to read for each nibble of DRAM data. Typically all four bits
12811                                                                  from a x4 device, or all eight bits from a x8 device, or all 16 bits from a x16 device
12812                                                                  carry the same data, but this field allows selection of which device bit will be used to
12813                                                                  read the MPR data. */
12814         uint64_t mpr_wr                : 1;  /**< [ 44: 44](R/W) MPR sequence will perform a write operation when set. */
12815         uint64_t mpr_loc               : 2;  /**< [ 43: 42](R/W) MPR location select for MPR sequence. Only makes a difference for DDR4. */
12816         uint64_t mr_wr_pda_enable      : 1;  /**< [ 41: 41](R/W) PDA write enable. When set, MRW operations use PDA, enabled by [MR_WR_PDA_MASK] per device.
12817                                                                  Only available for DDR4 devices. */
12818         uint64_t mr_wr_pda_mask        : 18; /**< [ 40: 23](R/W) PDA mask. If [MR_WR_PDA_ENABLE] = 1 and there is a one in the bit for this mask value, then
12819                                                                  the corresponding DRAM device is enabled for the PDA MR write operation.
12820                                                                  Bit\<23\> corresponds to the lowest order, x4 device, and bit\<40\> corresponds to the highest
12821                                                                  order x4 device, for a total of up to 18 devices. */
12822         uint64_t mr_wr_rank            : 2;  /**< [ 22: 21](R/W) Selects the DRAM rank for either MRW or MPR sequences. */
12823         uint64_t mr_wr_sel             : 3;  /**< [ 20: 18](R/W) Selects which MR to write with the MR write sequence.
12824                                                                  Which pins to drive and how to drive them is automatically controlled through the DDR3/4
12825                                                                  mode setting. Bits\<19:18\> are also used to select the MPR page for an MPR sequence.
12826                                                                  A value of 0x7 selects an RCW write for both DDR4 and DDR3 MRW operations. */
12827         uint64_t mr_wr_addr            : 18; /**< [ 17:  0](R/W) Sets a value for A\<17:0\> for MR write operations. Note that many of these bits
12828                                                                  must be zero for various MRs. Bits\<7:0\> are also used for write data on an MPR
12829                                                                  sequence write operation. */
12830 #else /* Word 0 - Little Endian */
12831         uint64_t mr_wr_addr            : 18; /**< [ 17:  0](R/W) Sets a value for A\<17:0\> for MR write operations. Note that many of these bits
12832                                                                  must be zero for various MRs. Bits\<7:0\> are also used for write data on an MPR
12833                                                                  sequence write operation. */
12834         uint64_t mr_wr_sel             : 3;  /**< [ 20: 18](R/W) Selects which MR to write with the MR write sequence.
12835                                                                  Which pins to drive and how to drive them is automatically controlled through the DDR3/4
12836                                                                  mode setting. Bits\<19:18\> are also used to select the MPR page for an MPR sequence.
12837                                                                  A value of 0x7 selects an RCW write for both DDR4 and DDR3 MRW operations. */
12838         uint64_t mr_wr_rank            : 2;  /**< [ 22: 21](R/W) Selects the DRAM rank for either MRW or MPR sequences. */
12839         uint64_t mr_wr_pda_mask        : 18; /**< [ 40: 23](R/W) PDA mask. If [MR_WR_PDA_ENABLE] = 1 and there is a one in the bit for this mask value, then
12840                                                                  the corresponding DRAM device is enabled for the PDA MR write operation.
12841                                                                  Bit\<23\> corresponds to the lowest order, x4 device, and bit\<40\> corresponds to the highest
12842                                                                  order x4 device, for a total of up to 18 devices. */
12843         uint64_t mr_wr_pda_enable      : 1;  /**< [ 41: 41](R/W) PDA write enable. When set, MRW operations use PDA, enabled by [MR_WR_PDA_MASK] per device.
12844                                                                  Only available for DDR4 devices. */
12845         uint64_t mpr_loc               : 2;  /**< [ 43: 42](R/W) MPR location select for MPR sequence. Only makes a difference for DDR4. */
12846         uint64_t mpr_wr                : 1;  /**< [ 44: 44](R/W) MPR sequence will perform a write operation when set. */
12847         uint64_t mpr_bit_select        : 2;  /**< [ 46: 45](R/W) Select which of four bits to read for each nibble of DRAM data. Typically all four bits
12848                                                                  from a x4 device, or all eight bits from a x8 device, or all 16 bits from a x16 device
12849                                                                  carry the same data, but this field allows selection of which device bit will be used to
12850                                                                  read the MPR data. */
12851         uint64_t mpr_byte_select       : 4;  /**< [ 50: 47](R/W) Reserved.
12852                                                                  Internal:
12853                                                                  Select a whole byte of DRAM data to read when whole-byte mode enabled. */
12854         uint64_t mpr_whole_byte_enable : 1;  /**< [ 51: 51](R/W) Reserved.
12855                                                                  Internal:
12856                                                                  Select a whole byte of DRAM data to read when whole-byte mode enabled. */
12857         uint64_t mr_wr_use_default_value : 1;/**< [ 52: 52](R/W) When set, write the value to the MR that is computed from the value set in various CSR
12858                                                                  fields that would be used during initialization, rather than using the value in the
12859                                                                  LMC()_MR_MPR_CTL[MR_WR_ADDR]. Useful to rewrite the same value or to change single
12860                                                                  bits without having to compute a whole new value for the MR. */
12861         uint64_t mr_wr_pba_enable      : 1;  /**< [ 53: 53](R/W) Reserved.
12862                                                                  Internal:
12863                                                                  Per buffer addressability write enable. When set, MRW operations use PBA,
12864                                                                  enabled by [MR_WR_PDA_MASK] per buffer. Only available for DDR4 LRDIMM. */
12865         uint64_t pda_early_dqx         : 1;  /**< [ 54: 54](R/W) When set, it enables lmc_dqx early for PDA/PBA operation. */
12866         uint64_t mpr_sample_dq_enable  : 1;  /**< [ 55: 55](R/W) Reserved.
12867                                                                  Internal:
12868                                                                  No longer used due to logic change from
12869                                                                  initial design. */
12870         uint64_t mr_wr_bg1             : 1;  /**< [ 56: 56](R/W) BG1 part of the address select for MRS in DDR4 mode. */
12871         uint64_t pba_func_space        : 3;  /**< [ 59: 57](R/W) Set the function space selector during PBA mode of the MRW
12872                                                                  sequence. */
12873         uint64_t mr_wr_secure_key_ena  : 1;  /**< [ 60: 60](R/W) When set, this enables the issuing of security key with the
12874                                                                  unique address field A[17:0] set by LMC()_MR_MPR_CTL[MR_WR_ADDR]
12875                                                                  during the MRW sequence.
12876                                                                  Set this to one when executing DRAM post package repair manually
12877                                                                  by using MRW operation. */
12878         uint64_t reserved_61_63        : 3;
12879 #endif /* Word 0 - End */
12880     } s;
12881     /* struct bdk_lmcx_mr_mpr_ctl_s cn8; */
12882     struct bdk_lmcx_mr_mpr_ctl_cn9
12883     {
12884 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
12885         uint64_t reserved_61_63        : 3;
12886         uint64_t mr_wr_secure_key_ena  : 1;  /**< [ 60: 60](R/W) When set, this enables the issuing of security key with the
12887                                                                  unique address field A[17:0] set by LMC()_MR_MPR_CTL[MR_WR_ADDR]
12888                                                                  during the MRW sequence.
12889                                                                  Set this to one when executing DRAM post package repair manually
12890                                                                  by using MRW operation. */
12891         uint64_t pba_func_space        : 3;  /**< [ 59: 57](R/W) Set the function space selector during PBA mode of the MRW
12892                                                                  sequence. */
12893         uint64_t mr_wr_bg1             : 1;  /**< [ 56: 56](R/W) BG1 part of the address select for MRS in DDR4 mode. */
12894         uint64_t mpr_sample_dq_enable  : 1;  /**< [ 55: 55](R/W) Reserved.
12895                                                                  Internal:
12896                                                                  No longer used due to logic change from
12897                                                                  initial design. */
12898         uint64_t pda_early_dqx         : 1;  /**< [ 54: 54](R/W) When set, it enables lmc_dqx early for PDA/PBA operation. */
12899         uint64_t mr_wr_pba_enable      : 1;  /**< [ 53: 53](R/W) Reserved.
12900                                                                  Internal:
12901                                                                  Per buffer addressability write enable. When set, MRW operations use PBA,
12902                                                                  enabled by [MR_WR_PDA_MASK] per buffer. Only available for DDR4 LRDIMM. */
12903         uint64_t mr_wr_use_default_value : 1;/**< [ 52: 52](R/W) When set, write the value to the MR that is computed from the value set in various CSR
12904                                                                  fields that would be used during initialization, rather that using the value in the
12905                                                                  fields that would be used during initialization, rather than using the value in the
12906                                                                  bits without having to compute a whole new value for the MR. */
12907         uint64_t mpr_whole_byte_enable : 1;  /**< [ 51: 51](R/W) Reserved.
12908                                                                  Internal:
12909                                                                  Select a whole byte of DRAM data to read when whole-byte mode enabled. */
12910         uint64_t mpr_byte_select       : 4;  /**< [ 50: 47](R/W) Reserved.
12911                                                                  Internal:
12912                                                                  Select a whole byte of DRAM data to read when whole-byte mode enabled. */
12913         uint64_t mpr_bit_select        : 2;  /**< [ 46: 45](R/W) Select which of four bits to read for each nibble of DRAM data. Typically all four bits
12914                                                                  from a x4 device, or all eight bits from a x8 device, or all 16 bits from a x16 device
12915                                                                  carry the same data, but this field allows selection of which device bit will be used to
12916                                                                  read the MPR data. */
12917         uint64_t mpr_wr                : 1;  /**< [ 44: 44](R/W) MPR sequence will perform a write operation when set. */
12918         uint64_t mpr_loc               : 2;  /**< [ 43: 42](R/W) MPR location select for MPR sequence. Only makes a difference for DDR4. */
12919         uint64_t mr_wr_pda_enable      : 1;  /**< [ 41: 41](R/W) PDA write enable. When set, MRW operations use PDA, enabled by [MR_WR_PDA_MASK] per device.
12920                                                                  Only available for DDR4 devices. */
12921         uint64_t mr_wr_pda_mask        : 18; /**< [ 40: 23](R/W) PDA mask. If [MR_WR_PDA_ENABLE] = 1 and there is a one in the bit for this mask value, then
12922                                                                  the corresponding DRAM device is enabled for the PDA MR write operation.
12923                                                                  Bit\<23\> corresponds to the lowest order, x4 device, and bit\<40\> corresponds to the highest
12924                                                                  order x4 device, for a total of up to 18 devices. */
12925         uint64_t mr_wr_rank            : 2;  /**< [ 22: 21](R/W) Selects the DRAM rank for either MRW or MPR sequences. This field also selects the DRAM
12926                                                                  rank when running LMC_SEQ_SEL_E::VREF_INT sequence. */
12927         uint64_t mr_wr_sel             : 3;  /**< [ 20: 18](R/W) Selects which Mode Register to write with the MR Write sequence.
12928                                                                  Which pins to drive and how to drive them is automatically controlled through the DDR4
12929                                                                  mode setting.
12930                                                                  Bits\<19:18\> are also used to select the MPR page for an MPR sequence.
12931                                                                  A value of 0x7 selects a DDR4RCD control word (RCW) write. */
12932         uint64_t mr_wr_addr            : 18; /**< [ 17:  0](R/W) Sets a value for A\<17:0\> for MR write operations. Note that many of these bits
12933                                                                  must be zero for various MRs. Bits\<7:0\> are also used for write data on an MPR
12934                                                                  sequence write operation. */
12935 #else /* Word 0 - Little Endian */
12936         uint64_t mr_wr_addr            : 18; /**< [ 17:  0](R/W) Sets a value for A\<17:0\> for MR write operations. Note that many of these bits
12937                                                                  must be zero for various MRs. Bits\<7:0\> are also used for write data on an MPR
12938                                                                  sequence write operation. */
12939         uint64_t mr_wr_sel             : 3;  /**< [ 20: 18](R/W) Selects which Mode Register to write with the MR Write sequence.
12940                                                                  Which pins to drive and how to drive them is automatically controlled through the DDR4
12941                                                                  mode setting.
12942                                                                  Bits\<19:18\> are also used to select the MPR page for an MPR sequence.
12943                                                                  A value of 0x7 selects a DDR4RCD control word (RCW) write. */
12944         uint64_t mr_wr_rank            : 2;  /**< [ 22: 21](R/W) Selects the DRAM rank for either MRW or MPR sequences. This field also selects the DRAM
12945                                                                  rank when running LMC_SEQ_SEL_E::VREF_INT sequence. */
12946         uint64_t mr_wr_pda_mask        : 18; /**< [ 40: 23](R/W) PDA mask. If [MR_WR_PDA_ENABLE] = 1 and there is a one in the bit for this mask value, then
12947                                                                  the corresponding DRAM device is enabled for the PDA MR write operation.
12948                                                                  Bit\<23\> corresponds to the lowest order, x4 device, and bit\<40\> corresponds to the highest
12949                                                                  order x4 device, for a total of up to 18 devices. */
12950         uint64_t mr_wr_pda_enable      : 1;  /**< [ 41: 41](R/W) PDA write enable. When set, MRW operations use PDA, enabled by [MR_WR_PDA_MASK] per device.
12951                                                                  Only available for DDR4 devices. */
12952         uint64_t mpr_loc               : 2;  /**< [ 43: 42](R/W) MPR location select for MPR sequence. Only makes a difference for DDR4. */
12953         uint64_t mpr_wr                : 1;  /**< [ 44: 44](R/W) MPR sequence will perform a write operation when set. */
12954         uint64_t mpr_bit_select        : 2;  /**< [ 46: 45](R/W) Select which of four bits to read for each nibble of DRAM data. Typically all four bits
12955                                                                  from a x4 device, or all eight bits from a x8 device, or all 16 bits from a x16 device
12956                                                                  carry the same data, but this field allows selection of which device bit will be used to
12957                                                                  read the MPR data. */
12958         uint64_t mpr_byte_select       : 4;  /**< [ 50: 47](R/W) Reserved.
12959                                                                  Internal:
12960                                                                  Select a whole byte of DRAM data to read when whole-byte mode enabled. */
12961         uint64_t mpr_whole_byte_enable : 1;  /**< [ 51: 51](R/W) Reserved.
12962                                                                  Internal:
12963                                                                  Select a whole byte of DRAM data to read when whole-byte mode enabled. */
12964         uint64_t mr_wr_use_default_value : 1;/**< [ 52: 52](R/W) When set, write the value to the MR that is computed from the value set in various CSR
12965                                                                  fields that would be used during initialization, rather than using the value in the
12966                                                                  LMC()_MR_MPR_CTL[MR_WR_ADDR]. Useful to rewrite the same value or to change single
12967                                                                  bits without having to compute a whole new value for the MR. */
12968         uint64_t mr_wr_pba_enable      : 1;  /**< [ 53: 53](R/W) Reserved.
12969                                                                  Internal:
12970                                                                  Per buffer addressability write enable. When set, MRW operations use PBA,
12971                                                                  enabled by [MR_WR_PDA_MASK] per buffer. Only available for DDR4 LRDIMM. */
12972         uint64_t pda_early_dqx         : 1;  /**< [ 54: 54](R/W) When set, it enables lmc_dqx early for PDA/PBA operation. */
12973         uint64_t mpr_sample_dq_enable  : 1;  /**< [ 55: 55](R/W) Reserved.
12974                                                                  Internal:
12975                                                                  No longer used due to logic change from
12976                                                                  initial design. */
12977         uint64_t mr_wr_bg1             : 1;  /**< [ 56: 56](R/W) BG1 part of the address select for MRS in DDR4 mode. */
12978         uint64_t pba_func_space        : 3;  /**< [ 59: 57](R/W) Set the function space selector during PBA mode of the MRW
12979                                                                  sequence. */
12980         uint64_t mr_wr_secure_key_ena  : 1;  /**< [ 60: 60](R/W) When set, this enables the issuing of security key with the
12981                                                                  unique address field A[17:0] set by LMC()_MR_MPR_CTL[MR_WR_ADDR]
12982                                                                  during the MRW sequence.
12983                                                                  Set this to one when executing DRAM post package repair manually
12984                                                                  by using MRW operation. */
12985         uint64_t reserved_61_63        : 3;
12986 #endif /* Word 0 - End */
12987     } cn9;
12988 };
12989 typedef union bdk_lmcx_mr_mpr_ctl bdk_lmcx_mr_mpr_ctl_t;
12990 
12991 static inline uint64_t BDK_LMCX_MR_MPR_CTL(unsigned long a) __attribute__ ((pure, always_inline));
12992 static inline uint64_t BDK_LMCX_MR_MPR_CTL(unsigned long a)
12993 {
12994     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
12995         return 0x87e088000068ll + 0x1000000ll * ((a) & 0x0);
12996     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
12997         return 0x87e088000068ll + 0x1000000ll * ((a) & 0x1);
12998     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
12999         return 0x87e088000068ll + 0x1000000ll * ((a) & 0x3);
13000     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
13001         return 0x87e088000068ll + 0x1000000ll * ((a) & 0x3);
13002     __bdk_csr_fatal("LMCX_MR_MPR_CTL", 1, a, 0, 0, 0);
13003 }
13004 
13005 #define typedef_BDK_LMCX_MR_MPR_CTL(a) bdk_lmcx_mr_mpr_ctl_t
13006 #define bustype_BDK_LMCX_MR_MPR_CTL(a) BDK_CSR_TYPE_RSL
13007 #define basename_BDK_LMCX_MR_MPR_CTL(a) "LMCX_MR_MPR_CTL"
13008 #define device_bar_BDK_LMCX_MR_MPR_CTL(a) 0x0 /* PF_BAR0 */
13009 #define busnum_BDK_LMCX_MR_MPR_CTL(a) (a)
13010 #define arguments_BDK_LMCX_MR_MPR_CTL(a) (a),-1,-1,-1
13011 
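/*
 * Illustrative sketch (not part of the auto-generated definitions above):
 * builds an LMC(x)_MR_MPR_CTL value for a per-DRAM-addressability (PDA) mode
 * register write using only the field layout documented above. Writing the
 * value to hardware and starting the MRW sequence (via LMC()_SEQ_CTL) go
 * through the BDK's CSR access layer and are not shown. The helper name and
 * the argument choices are hypothetical.
 */
static inline uint64_t example_lmcx_mr_mpr_ctl_pda_write(unsigned mr_sel,
                                                         unsigned rank,
                                                         unsigned addr,
                                                         unsigned pda_mask)
{
    bdk_lmcx_mr_mpr_ctl_t c;
    c.u = 0;
    c.s.mr_wr_sel        = mr_sel & 0x7;       /* which MR to write (0x7 = RCW) */
    c.s.mr_wr_rank       = rank & 0x3;         /* target DRAM rank */
    c.s.mr_wr_addr       = addr & 0x3ffff;     /* A<17:0> for the MR write */
    c.s.mr_wr_pda_enable = 1;                  /* per-device addressability */
    c.s.mr_wr_pda_mask   = pda_mask & 0x3ffff; /* one bit per x4 device */
    return c.u;
}
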
13012 /**
13013  * Register (RSL) lmc#_msix_pba#
13014  *
13015  * LMC MSI-X Pending Bit Array Registers
13016  * This register is the LMC-X PBA table; the bit number is indexed by the LMC_INT_VEC_E enumeration.
13017  */
13018 union bdk_lmcx_msix_pbax
13019 {
13020     uint64_t u;
13021     struct bdk_lmcx_msix_pbax_s
13022     {
13023 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13024         uint64_t pend                  : 64; /**< [ 63:  0](RO/H) Pending message for the associated LMC()_MSIX_VEC()_CTL, enumerated by LMC_INT_VEC_E. Bits
13025                                                                  that have no associated LMC_INT_VEC_E are zero. */
13026 #else /* Word 0 - Little Endian */
13027         uint64_t pend                  : 64; /**< [ 63:  0](RO/H) Pending message for the associated LMC()_MSIX_VEC()_CTL, enumerated by LMC_INT_VEC_E. Bits
13028                                                                  that have no associated LMC_INT_VEC_E are zero. */
13029 #endif /* Word 0 - End */
13030     } s;
13031     /* struct bdk_lmcx_msix_pbax_s cn; */
13032 };
13033 typedef union bdk_lmcx_msix_pbax bdk_lmcx_msix_pbax_t;
13034 
13035 static inline uint64_t BDK_LMCX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
13036 static inline uint64_t BDK_LMCX_MSIX_PBAX(unsigned long a, unsigned long b)
13037 {
13038     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
13039         return 0x87e088ff0000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x0);
13040     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
13041         return 0x87e088ff0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
13042     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
13043         return 0x87e088ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
13044     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b==0)))
13045         return 0x87e088ff0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
13046     __bdk_csr_fatal("LMCX_MSIX_PBAX", 2, a, b, 0, 0);
13047 }
13048 
13049 #define typedef_BDK_LMCX_MSIX_PBAX(a,b) bdk_lmcx_msix_pbax_t
13050 #define bustype_BDK_LMCX_MSIX_PBAX(a,b) BDK_CSR_TYPE_RSL
13051 #define basename_BDK_LMCX_MSIX_PBAX(a,b) "LMCX_MSIX_PBAX"
13052 #define device_bar_BDK_LMCX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
13053 #define busnum_BDK_LMCX_MSIX_PBAX(a,b) (a)
13054 #define arguments_BDK_LMCX_MSIX_PBAX(a,b) (a),(b),-1,-1
13055 
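/*
 * Illustrative sketch (not part of the auto-generated definitions above):
 * tests the pending bit for a given MSI-X vector in a raw value read from
 * LMC(x)_MSIX_PBA(0). The helper name is hypothetical; the register read
 * itself goes through the BDK's CSR access layer and is not shown.
 */
static inline int example_lmcx_msix_vec_pending(uint64_t pba_raw, unsigned vec)
{
    bdk_lmcx_msix_pbax_t p;
    p.u = pba_raw;
    return (int)((p.s.pend >> (vec & 0x3f)) & 1); /* e.g. vec = BDK_LMC_INT_VEC_E_INTS */
}
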
13056 /**
13057  * Register (RSL) lmc#_msix_vec#_addr
13058  *
13059  * LMC MSI-X Vector-Table Address Register
13060  * This register is the MSI-X vector table, indexed by the LMC_INT_VEC_E enumeration.
13061  */
13062 union bdk_lmcx_msix_vecx_addr
13063 {
13064     uint64_t u;
13065     struct bdk_lmcx_msix_vecx_addr_s
13066     {
13067 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13068         uint64_t reserved_53_63        : 11;
13069         uint64_t addr                  : 51; /**< [ 52:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
13070         uint64_t reserved_1            : 1;
13071         uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
13072                                                                  0 = This vector may be read or written by either secure or nonsecure states.
13073                                                                  1 = This vector's LMC()_MSIX_VEC()_ADDR, LMC()_MSIX_VEC()_CTL, and corresponding
13074                                                                  bit of LMC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
13075                                                                  by the nonsecure world. */
13076 #else /* Word 0 - Little Endian */
13077         uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
13078                                                                  0 = This vector may be read or written by either secure or nonsecure states.
13079                                                                  1 = This vector's LMC()_MSIX_VEC()_ADDR, LMC()_MSIX_VEC()_CTL, and corresponding
13080                                                                  bit of LMC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
13081                                                                  by the nonsecure world. */
13082         uint64_t reserved_1            : 1;
13083         uint64_t addr                  : 51; /**< [ 52:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
13084         uint64_t reserved_53_63        : 11;
13085 #endif /* Word 0 - End */
13086     } s;
13087     struct bdk_lmcx_msix_vecx_addr_cn8
13088     {
13089 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13090         uint64_t reserved_49_63        : 15;
13091         uint64_t addr                  : 47; /**< [ 48:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
13092         uint64_t reserved_1            : 1;
13093         uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
13094                                                                  0 = This vector may be read or written by either secure or nonsecure states.
13095                                                                  1 = This vector's LMC()_MSIX_VEC()_ADDR, LMC()_MSIX_VEC()_CTL, and corresponding
13096                                                                  bit of LMC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
13097                                                                  by the nonsecure world. */
13098 #else /* Word 0 - Little Endian */
13099         uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
13100                                                                  0 = This vector may be read or written by either secure or nonsecure states.
13101                                                                  1 = This vector's LMC()_MSIX_VEC()_ADDR, LMC()_MSIX_VEC()_CTL, and corresponding
13102                                                                  bit of LMC()_MSIX_PBA() are RAZ/WI and do not cause a fault when accessed
13103                                                                  by the nonsecure world. */
13104         uint64_t reserved_1            : 1;
13105         uint64_t addr                  : 47; /**< [ 48:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
13106         uint64_t reserved_49_63        : 15;
13107 #endif /* Word 0 - End */
13108     } cn8;
13109     /* struct bdk_lmcx_msix_vecx_addr_s cn9; */
13110 };
13111 typedef union bdk_lmcx_msix_vecx_addr bdk_lmcx_msix_vecx_addr_t;
13112 
13113 static inline uint64_t BDK_LMCX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
13114 static inline uint64_t BDK_LMCX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
13115 {
13116     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
13117         return 0x87e088f00000ll + 0x1000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x0);
13118     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
13119         return 0x87e088f00000ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
13120     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
13121         return 0x87e088f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
13122     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b==0)))
13123         return 0x87e088f00000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
13124     __bdk_csr_fatal("LMCX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
13125 }
13126 
13127 #define typedef_BDK_LMCX_MSIX_VECX_ADDR(a,b) bdk_lmcx_msix_vecx_addr_t
13128 #define bustype_BDK_LMCX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_RSL
13129 #define basename_BDK_LMCX_MSIX_VECX_ADDR(a,b) "LMCX_MSIX_VECX_ADDR"
13130 #define device_bar_BDK_LMCX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
13131 #define busnum_BDK_LMCX_MSIX_VECX_ADDR(a,b) (a)
13132 #define arguments_BDK_LMCX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
13133 
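/*
 * Illustrative sketch (not part of the auto-generated definitions above):
 * packs an MSI-X delivery IOVA and the secure-vector flag into the
 * LMC(x)_MSIX_VEC(x)_ADDR layout documented above. [ADDR] holds IOVA bits
 * \<52:2\> (\<48:2\> in the cn8 layout), so the low two address bits are
 * dropped. The helper name is hypothetical.
 */
static inline uint64_t example_lmcx_msix_vec_addr(uint64_t iova, int secure)
{
    bdk_lmcx_msix_vecx_addr_t a;
    a.u = 0;
    a.s.addr   = iova >> 2;       /* IOVA<52:2>; bits <1:0> must be zero */
    a.s.secvec = secure ? 1 : 0;  /* restrict the vector to the secure world */
    return a.u;
}
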
13134 /**
13135  * Register (RSL) lmc#_msix_vec#_ctl
13136  *
13137  * LMC MSI-X Vector-Table Control and Data Register
13138  * This register is the MSI-X vector table, indexed by the LMC_INT_VEC_E enumeration.
13139  */
13140 union bdk_lmcx_msix_vecx_ctl
13141 {
13142     uint64_t u;
13143     struct bdk_lmcx_msix_vecx_ctl_s
13144     {
13145 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13146         uint64_t reserved_33_63        : 31;
13147         uint64_t mask                  : 1;  /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
13148         uint64_t data                  : 32; /**< [ 31:  0](R/W) Data to use for MSI-X delivery of this vector. */
13149 #else /* Word 0 - Little Endian */
13150         uint64_t data                  : 32; /**< [ 31:  0](R/W) Data to use for MSI-X delivery of this vector. */
13151         uint64_t mask                  : 1;  /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
13152         uint64_t reserved_33_63        : 31;
13153 #endif /* Word 0 - End */
13154     } s;
13155     struct bdk_lmcx_msix_vecx_ctl_cn8
13156     {
13157 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13158         uint64_t reserved_33_63        : 31;
13159         uint64_t mask                  : 1;  /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
13160         uint64_t reserved_20_31        : 12;
13161         uint64_t data                  : 20; /**< [ 19:  0](R/W) Data to use for MSI-X delivery of this vector. */
13162 #else /* Word 0 - Little Endian */
13163         uint64_t data                  : 20; /**< [ 19:  0](R/W) Data to use for MSI-X delivery of this vector. */
13164         uint64_t reserved_20_31        : 12;
13165         uint64_t mask                  : 1;  /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
13166         uint64_t reserved_33_63        : 31;
13167 #endif /* Word 0 - End */
13168     } cn8;
13169     /* struct bdk_lmcx_msix_vecx_ctl_s cn9; */
13170 };
13171 typedef union bdk_lmcx_msix_vecx_ctl bdk_lmcx_msix_vecx_ctl_t;
13172 
13173 static inline uint64_t BDK_LMCX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
13174 static inline uint64_t BDK_LMCX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
13175 {
13176     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
13177         return 0x87e088f00008ll + 0x1000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x0);
13178     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
13179         return 0x87e088f00008ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
13180     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b==0)))
13181         return 0x87e088f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
13182     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b==0)))
13183         return 0x87e088f00008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x0);
13184     __bdk_csr_fatal("LMCX_MSIX_VECX_CTL", 2, a, b, 0, 0);
13185 }
13186 
13187 #define typedef_BDK_LMCX_MSIX_VECX_CTL(a,b) bdk_lmcx_msix_vecx_ctl_t
13188 #define bustype_BDK_LMCX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_RSL
13189 #define basename_BDK_LMCX_MSIX_VECX_CTL(a,b) "LMCX_MSIX_VECX_CTL"
13190 #define device_bar_BDK_LMCX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
13191 #define busnum_BDK_LMCX_MSIX_VECX_CTL(a,b) (a)
13192 #define arguments_BDK_LMCX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
13193 
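/*
 * Illustrative sketch (not part of the auto-generated definitions above):
 * sets the per-vector mask bit in an LMC(x)_MSIX_VEC(x)_CTL value while
 * preserving the message data, using the field layout documented above. The
 * helper name is hypothetical.
 */
static inline uint64_t example_lmcx_msix_vec_mask(uint64_t ctl_raw)
{
    bdk_lmcx_msix_vecx_ctl_t c;
    c.u = ctl_raw;
    c.s.mask = 1; /* suppress MSI-X message delivery for this vector */
    return c.u;
}
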
13194 /**
13195  * Register (RSL) lmc#_ns_ctl
13196  *
13197  * LMC Non Secure Control Register
13198  * This register contains control parameters for handling nonsecure accesses.
13199  */
13200 union bdk_lmcx_ns_ctl
13201 {
13202     uint64_t u;
13203     struct bdk_lmcx_ns_ctl_s
13204     {
13205 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13206         uint64_t reserved_26_63        : 38;
13207         uint64_t ns_scramble_dis       : 1;  /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
13208                                                                  When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
13209                                                                  field needs to be cleared to zero in order to enable data scrambling on
13210                                                                  nonsecure mode. */
13211         uint64_t ns_dynamic_dis        : 1;  /**< [ 24: 24](RO) Reserved. */
13212         uint64_t reserved_22_23        : 2;
13213         uint64_t adr_offset            : 22; /**< [ 21:  0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
13214 
13215                                                                  In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transaction.
13216 
13217                                                                  In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transaction. */
13218 #else /* Word 0 - Little Endian */
13219         uint64_t adr_offset            : 22; /**< [ 21:  0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
13220 
13221                                                                  In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transaction.
13222 
13223                                                                  In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transaction. */
13224         uint64_t reserved_22_23        : 2;
13225         uint64_t ns_dynamic_dis        : 1;  /**< [ 24: 24](RO) Reserved. */
13226         uint64_t ns_scramble_dis       : 1;  /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
13227                                                                  When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
13228                                                                  field needs to be cleared to zero in order to enable data scrambling on
13229                                                                  nonsecure mode. */
13230         uint64_t reserved_26_63        : 38;
13231 #endif /* Word 0 - End */
13232     } s;
13233     struct bdk_lmcx_ns_ctl_cn88xxp1
13234     {
13235 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13236         uint64_t reserved_26_63        : 38;
13237         uint64_t ns_scramble_dis       : 1;  /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
13238                                                                  When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
13239                                                                  field needs to be cleared to zero in order to enable data scrambling on
13240                                                                  nonsecure mode. */
13241         uint64_t ns_dynamic_dis        : 1;  /**< [ 24: 24](RO) Reserved. */
13242         uint64_t reserved_18_23        : 6;
13243         uint64_t adr_offset            : 18; /**< [ 17:  0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
13244 
13245                                                                  In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transaction.
13246 
13247                                                                  In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transaction. */
13248 #else /* Word 0 - Little Endian */
13249         uint64_t adr_offset            : 18; /**< [ 17:  0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
13250 
13251                                                                  In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transaction.
13252 
13253                                                                  In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transaction. */
13254         uint64_t reserved_18_23        : 6;
13255         uint64_t ns_dynamic_dis        : 1;  /**< [ 24: 24](RO) Reserved. */
13256         uint64_t ns_scramble_dis       : 1;  /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
13257                                                                  When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
13258                                                                  field needs to be cleared to zero in order to enable data scrambling on
13259                                                                  nonsecure mode. */
13260         uint64_t reserved_26_63        : 38;
13261 #endif /* Word 0 - End */
13262     } cn88xxp1;
13263     struct bdk_lmcx_ns_ctl_cn9
13264     {
13265 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13266         uint64_t reserved_26_63        : 38;
13267         uint64_t ns_scramble_dis       : 1;  /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
13268                                                                  When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
13269                                                                  field needs to be cleared to zero in order to enable data scrambling on
13270                                                                  nonsecure mode. */
13271         uint64_t ns_dynamic_dis        : 1;  /**< [ 24: 24](R/W) Disable optimization that dynamically reduces read latency when there are no
13272                                                                  longer any secure operations in flight.  For diagnostic use only. */
13273         uint64_t reserved_22_23        : 2;
13274         uint64_t adr_offset            : 22; /**< [ 21:  0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
13275 
13276                                                                  In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transaction.
13277 
13278                                                                  In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transaction. */
13279 #else /* Word 0 - Little Endian */
13280         uint64_t adr_offset            : 22; /**< [ 21:  0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
13281 
13282                                                                  In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transaction.
13283 
13284                                                                  In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transaction. */
13285         uint64_t reserved_22_23        : 2;
13286         uint64_t ns_dynamic_dis        : 1;  /**< [ 24: 24](R/W) Disable optimization that dynamically reduces read latency when there are no
13287                                                                  longer any secure operations in flight.  For diagnostic use only. */
13288         uint64_t ns_scramble_dis       : 1;  /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
13289                                                                  When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
13290                                                                  field needs to be cleared to zero in order to enable data scrambling on
13291                                                                  nonsecure mode. */
13292         uint64_t reserved_26_63        : 38;
13293 #endif /* Word 0 - End */
13294     } cn9;
13295     struct bdk_lmcx_ns_ctl_cn81xx
13296     {
13297 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13298         uint64_t reserved_26_63        : 38;
13299         uint64_t ns_scramble_dis       : 1;  /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
13300                                                                  When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
13301                                                                  field needs to be cleared to zero in order to enable data scrambling on
13302                                                                  nonsecure mode. */
13303         uint64_t ns_dynamic_dis        : 1;  /**< [ 24: 24](R/W) Disable optimization that dynamically reduces read latency when there are no
13304                                                                  longer any secure operations in flight.  For diagnostic use only. */
13305         uint64_t reserved_18_23        : 6;
13306         uint64_t adr_offset            : 18; /**< [ 17:  0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
13307 
13308                                                                  In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transaction.
13309 
13310                                                                  In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transaction. */
13311 #else /* Word 0 - Little Endian */
13312         uint64_t adr_offset            : 18; /**< [ 17:  0](R/W) Sets the offset to the nonsecure region of the DRAM/L2 address space.
13313 
13314                                                                  In 4 LMC mode, this specifies the address offset \<39:22\> for nonsecure transaction.
13315 
13316                                                                  In 2 LMC mode, this specifies the address offset \<38:21\> for nonsecure transaction. */
13317         uint64_t reserved_18_23        : 6;
13318         uint64_t ns_dynamic_dis        : 1;  /**< [ 24: 24](R/W) Disable optimization that dynamically reduces read latency when there are no
13319                                                                  longer any secure operations in flight.  For diagnostic use only. */
13320         uint64_t ns_scramble_dis       : 1;  /**< [ 25: 25](R/W) When set, this field disables data scrambling on nonsecure accesses only.
13321                                                                  When data scrambling is enabled by setting CONTROL[SCRAMBLE_ENA] to one, this
13322                                                                  field needs to be cleared to zero in order to enable data scrambling on
13323                                                                  nonsecure mode. */
13324         uint64_t reserved_26_63        : 38;
13325 #endif /* Word 0 - End */
13326     } cn81xx;
13327     /* struct bdk_lmcx_ns_ctl_cn81xx cn83xx; */
13328     /* struct bdk_lmcx_ns_ctl_cn81xx cn88xxp2; */
13329 };
13330 typedef union bdk_lmcx_ns_ctl bdk_lmcx_ns_ctl_t;
13331 
13332 static inline uint64_t BDK_LMCX_NS_CTL(unsigned long a) __attribute__ ((pure, always_inline));
13333 static inline uint64_t BDK_LMCX_NS_CTL(unsigned long a)
13334 {
13335     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
13336         return 0x87e088000178ll + 0x1000000ll * ((a) & 0x0);
13337     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
13338         return 0x87e088000178ll + 0x1000000ll * ((a) & 0x1);
13339     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
13340         return 0x87e088000178ll + 0x1000000ll * ((a) & 0x3);
13341     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
13342         return 0x87e088000178ll + 0x1000000ll * ((a) & 0x3);
13343     __bdk_csr_fatal("LMCX_NS_CTL", 1, a, 0, 0, 0);
13344 }
13345 
13346 #define typedef_BDK_LMCX_NS_CTL(a) bdk_lmcx_ns_ctl_t
13347 #define bustype_BDK_LMCX_NS_CTL(a) BDK_CSR_TYPE_RSL
13348 #define basename_BDK_LMCX_NS_CTL(a) "LMCX_NS_CTL"
13349 #define device_bar_BDK_LMCX_NS_CTL(a) 0x0 /* PF_BAR0 */
13350 #define busnum_BDK_LMCX_NS_CTL(a) (a)
13351 #define arguments_BDK_LMCX_NS_CTL(a) (a),-1,-1,-1
13352 
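/*
 * Illustrative sketch (not part of the auto-generated definitions above):
 * builds an LMC(x)_NS_CTL value that places the nonsecure region at a given
 * offset and leaves scrambling enabled for nonsecure accesses, per the field
 * descriptions above. Note that [ADR_OFFSET] is only 18 bits wide in the
 * cn81xx/cn88xxp1 layouts rather than the 22 bits masked here. The helper
 * name is hypothetical.
 */
static inline uint64_t example_lmcx_ns_ctl(uint32_t adr_offset)
{
    bdk_lmcx_ns_ctl_t n;
    n.u = 0;
    n.s.adr_offset      = adr_offset & 0x3fffff; /* nonsecure DRAM/L2 region offset */
    n.s.ns_scramble_dis = 0;                     /* keep scrambling on nonsecure accesses */
    return n.u;
}
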
13353 /**
13354  * Register (RSL) lmc#_nxm
13355  *
13356  * LMC Nonexistent Memory Register
13357  * Following is the decoding for mem_msb/rank:
13358  * 0x0: mem_msb = mem_adr[25].
13359  * 0x1: mem_msb = mem_adr[26].
13360  * 0x2: mem_msb = mem_adr[27].
13361  * 0x3: mem_msb = mem_adr[28].
13362  * 0x4: mem_msb = mem_adr[29].
13363  * 0x5: mem_msb = mem_adr[30].
13364  * 0x6: mem_msb = mem_adr[31].
13365  * 0x7: mem_msb = mem_adr[32].
13366  * 0x8: mem_msb = mem_adr[33].
13367  * 0x9: mem_msb = mem_adr[34].
13368  * 0xA: mem_msb = mem_adr[35].
13369  * 0xB: mem_msb = mem_adr[36].
13370  * 0xC-0xF = Reserved.
13371  *
13372  * For example, for a DIMM made of Samsung's K4B1G0846C-ZCF7 1Gb (16M * 8 bit * 8 bank)
13373  * parts, the column address width = 10; so with 10b of col, 3b of bus, 3b of bank, row_lsb = 16.
13374  * Therefore, row = mem_adr[29:16] and mem_msb = 4.
13375  *
13376  * Note also that addresses greater than the max defined space (pbank_msb) are also treated as
13377  * NXM accesses.
13378  */
13379 union bdk_lmcx_nxm
13380 {
13381     uint64_t u;
13382     struct bdk_lmcx_nxm_s
13383     {
13384 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13385         uint64_t reserved_24_63        : 40;
13386         uint64_t mem_msb_d1_r1         : 4;  /**< [ 23: 20](R/W) Reserved.
13387                                                                  Internal:
13388                                                                  Maximum row MSB for DIMM1, RANK1/DIMM1 in single ranked.
13389                                                                  If DIMM1 is dual-sided, this should be set to
13390                                                                  NXM[MEM_MSB_D1_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
13391         uint64_t mem_msb_d1_r0         : 4;  /**< [ 19: 16](R/W) Reserved.
13392                                                                  Internal:
13393                                                                  Maximum row MSB for DIMM1, RANK0.
13394                                                                  If DIMM1 contains 3DS DRAMs, this would point to
13395                                                                  the logical rank's most significant bit. */
13396         uint64_t mem_msb_d0_r1         : 4;  /**< [ 15: 12](R/W) Maximum row MSB for DIMM0, RANK1/DIMM0 in single ranked.
13397                                                                  If DIMM0 is dual-sided, this should be set to
13398                                                                  [MEM_MSB_D0_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
13399         uint64_t mem_msb_d0_r0         : 4;  /**< [ 11:  8](R/W) Maximum row MSB for DIMM0, RANK0.
13400                                                                  Internal:
13401                                                                  If DIMM0 contains 3DS DRAMs, this would point to
13402                                                                  the logical rank's most significant bit. */
13403         uint64_t reserved_4_7          : 4;
13404         uint64_t cs_mask               : 4;  /**< [  3:  0](R/W) Chip select mask. This mask corresponds to the four chip selects for a memory
13405                                                                  configuration. If LMC()_CONFIG[RANK_ENA]=0 then this mask must be set in pairs because
13406                                                                  each reference address will assert a pair of chip selects. If the chip select(s) have a
13407                                                                  corresponding [CS_MASK] bit set, then the reference is to nonexistent memory (NXM). LMC will
13408                                                                  alias a NXM read reference to use the lowest, legal chip select(s) and return zeros. LMC
13409                                                                  normally discards NXM write operations, but will also alias them when
13410                                                                  LMC()_CONTROL[NXM_WRITE_EN]=1. */
13411 #else /* Word 0 - Little Endian */
13412         uint64_t cs_mask               : 4;  /**< [  3:  0](R/W) Chip select mask. This mask corresponds to the four chip selects for a memory
13413                                                                  configuration. If LMC()_CONFIG[RANK_ENA]=0 then this mask must be set in pairs because
13414                                                                  each reference address will assert a pair of chip selects. If the chip select(s) have a
13415                                                                  corresponding [CS_MASK] bit set, then the reference is to nonexistent memory (NXM). LMC will
13416                                                                  alias a NXM read reference to use the lowest, legal chip select(s) and return zeros. LMC
13417                                                                  normally discards NXM write operations, but will also alias them when
13418                                                                  LMC()_CONTROL[NXM_WRITE_EN]=1. */
13419         uint64_t reserved_4_7          : 4;
13420         uint64_t mem_msb_d0_r0         : 4;  /**< [ 11:  8](R/W) Maximum row MSB for DIMM0, RANK0.
13421                                                                  Internal:
13422                                                                  If DIMM0 contains 3DS DRAMs, this would point to
13423                                                                  the logical rank's most significant bit. */
13424         uint64_t mem_msb_d0_r1         : 4;  /**< [ 15: 12](R/W) Maximum row MSB for DIMM0, RANK1/DIMM0 in single ranked.
13425                                                                  If DIMM0 is dual-sided, this should be set to
13426                                                                  [MEM_MSB_D0_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
13427         uint64_t mem_msb_d1_r0         : 4;  /**< [ 19: 16](R/W) Reserved.
13428                                                                  Internal:
13429                                                                  Maximum row MSB for DIMM1, RANK0.
13430                                                                  If DIMM1 contains 3DS DRAMs, this would point to
13431                                                                  the logical rank's most significant bit. */
13432         uint64_t mem_msb_d1_r1         : 4;  /**< [ 23: 20](R/W) Reserved.
13433                                                                  Internal:
13434                                                                  Maximum row MSB for DIMM1, RANK1/DIMM1 in single ranked.
13435                                                                  If DIMM1 is dual-sided, this should be set to
13436                                                                  NXM[MEM_MSB_D1_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
13437         uint64_t reserved_24_63        : 40;
13438 #endif /* Word 0 - End */
13439     } s;
13440     struct bdk_lmcx_nxm_cn9
13441     {
13442 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13443         uint64_t reserved_24_63        : 40;
13444         uint64_t mem_msb_d1_r1         : 4;  /**< [ 23: 20](R/W) Maximum row MSB for DIMM1, RANK1/DIMM1 in single ranked.
13445                                                                  If DIMM1 is dual-sided, this should be set to
13446                                                                  NXM[MEM_MSB_D1_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
13447         uint64_t mem_msb_d1_r0         : 4;  /**< [ 19: 16](R/W) Maximum row MSB for DIMM1, RANK0.
13448                                                                  Internal:
13449                                                                  If DIMM1 contains 3DS DRAMs, this would point to
13450                                                                  the logical rank's most significant bit. */
13451         uint64_t mem_msb_d0_r1         : 4;  /**< [ 15: 12](R/W) Maximum row MSB for DIMM0, RANK1/DIMM0 in single ranked.
13452                                                                  If DIMM0 is dual-sided, this should be set to
13453                                                                  [MEM_MSB_D0_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
13454         uint64_t mem_msb_d0_r0         : 4;  /**< [ 11:  8](R/W) Maximum row MSB for DIMM0, RANK0.
13455                                                                  Internal:
13456                                                                  If DIMM0 contains 3DS DRAMs, this would point to
13457                                                                  the logical rank's most significant bit. */
13458         uint64_t reserved_4_7          : 4;
13459         uint64_t cs_mask               : 4;  /**< [  3:  0](R/W) Chip select mask. This mask corresponds to the four chip selects for a memory
13460                                                                  configuration. If LMC()_CONFIG[RANK_ENA]=0 then this mask must be set in pairs because
13461                                                                  each reference address will assert a pair of chip selects. If the chip select(s) have a
13462                                                                  corresponding [CS_MASK] bit set, then the reference is to nonexistent memory (NXM). LMC will
13463                                                                  alias a NXM read reference to use the lowest, legal chip select(s) and return zeros. LMC
13464                                                                  normally discards NXM write operations, but will also alias them when
13465                                                                  LMC()_CONTROL[NXM_WRITE_EN]=1. */
13466 #else /* Word 0 - Little Endian */
13467         uint64_t cs_mask               : 4;  /**< [  3:  0](R/W) Chip select mask. This mask corresponds to the four chip selects for a memory
13468                                                                  configuration. If LMC()_CONFIG[RANK_ENA]=0 then this mask must be set in pairs because
13469                                                                  each reference address will assert a pair of chip selects. If the chip select(s) have a
13470                                                                  corresponding [CS_MASK] bit set, then the reference is to nonexistent memory (NXM). LMC will
13471                                                                  alias a NXM read reference to use the lowest, legal chip select(s) and return zeros. LMC
13472                                                                  normally discards NXM write operations, but will also alias them when
13473                                                                  LMC()_CONTROL[NXM_WRITE_EN]=1. */
13474         uint64_t reserved_4_7          : 4;
13475         uint64_t mem_msb_d0_r0         : 4;  /**< [ 11:  8](R/W) Maximum row MSB for DIMM0, RANK0.
13476                                                                  Internal:
13477                                                                  If DIMM0 contains 3DS DRAMs, this would point to
13478                                                                  the logical rank's most significant bit. */
13479         uint64_t mem_msb_d0_r1         : 4;  /**< [ 15: 12](R/W) Maximum row MSB for DIMM0, RANK1/DIMM0 in single ranked.
13480                                                                  If DIMM0 is dual-sided, this should be set to
13481                                                                  [MEM_MSB_D0_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
13482         uint64_t mem_msb_d1_r0         : 4;  /**< [ 19: 16](R/W) Maximum row MSB for DIMM1, RANK0.
13483                                                                  Internal:
13484                                                                  If DIMM1 contains 3DS DRAMs, this would point to
13485                                                                  the logical rank's most significant bit. */
13486         uint64_t mem_msb_d1_r1         : 4;  /**< [ 23: 20](R/W) Maximum row MSB for DIMM1, RANK1/DIMM1 in single ranked.
13487                                                                  If DIMM1 is dual-sided, this should be set to
13488                                                                  NXM[MEM_MSB_D1_R0]. If LMC()_CONFIG[RANK_ENA] is cleared, this field is ignored. */
13489         uint64_t reserved_24_63        : 40;
13490 #endif /* Word 0 - End */
13491     } cn9;
13492     /* struct bdk_lmcx_nxm_s cn81xx; */
13493     /* struct bdk_lmcx_nxm_cn9 cn88xx; */
13494     /* struct bdk_lmcx_nxm_cn9 cn83xx; */
13495 };
13496 typedef union bdk_lmcx_nxm bdk_lmcx_nxm_t;
13497 
13498 static inline uint64_t BDK_LMCX_NXM(unsigned long a) __attribute__ ((pure, always_inline));
13499 static inline uint64_t BDK_LMCX_NXM(unsigned long a)
13500 {
13501     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
13502         return 0x87e0880000c8ll + 0x1000000ll * ((a) & 0x0);
13503     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
13504         return 0x87e0880000c8ll + 0x1000000ll * ((a) & 0x1);
13505     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
13506         return 0x87e0880000c8ll + 0x1000000ll * ((a) & 0x3);
13507     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
13508         return 0x87e0880000c8ll + 0x1000000ll * ((a) & 0x3);
13509     __bdk_csr_fatal("LMCX_NXM", 1, a, 0, 0, 0);
13510 }
13511 
13512 #define typedef_BDK_LMCX_NXM(a) bdk_lmcx_nxm_t
13513 #define bustype_BDK_LMCX_NXM(a) BDK_CSR_TYPE_RSL
13514 #define basename_BDK_LMCX_NXM(a) "LMCX_NXM"
13515 #define device_bar_BDK_LMCX_NXM(a) 0x0 /* PF_BAR0 */
13516 #define busnum_BDK_LMCX_NXM(a) (a)
13517 #define arguments_BDK_LMCX_NXM(a) (a),-1,-1,-1
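
/* Illustrative usage sketch (not part of the auto-generated definitions).
 * Given a raw LMC(a)_NXM value -- e.g. read by the caller's CSR-access routine
 * from the address returned by BDK_LMCX_NXM(a) -- overlay it on the union above
 * and test whether a given chip select is flagged as nonexistent memory.
 * The helper name is hypothetical. */
static inline int bdk_lmcx_nxm_cs_is_nxm_example(uint64_t raw_nxm, unsigned cs)
{
    bdk_lmcx_nxm_t nxm;
    nxm.u = raw_nxm;                          /* overlay the raw CSR value on the bit fields */
    return (nxm.s.cs_mask >> (cs & 0x3)) & 1; /* 1 = references to this chip select are NXM */
}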
13518 
13519 /**
13520  * Register (RSL) lmc#_nxm_fadr
13521  *
13522  * LMC NXM Failing Address Register
13523  * This register captures only the first transaction with a NXM error while an
13524  * interrupt is pending, and only captures a subsequent event once the interrupt is
13525  * cleared by writing a one to LMC()_INT[NXM_ERR]. It captures the actual L2C-LMC
13526  * address provided to the LMC that caused the NXM error. A read or write NXM error is
13527  * captured only if enabled using the NXM event enables.
13528  */
13529 union bdk_lmcx_nxm_fadr
13530 {
13531     uint64_t u;
13532     struct bdk_lmcx_nxm_fadr_s
13533     {
13534 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13535         uint64_t reserved_0_63         : 64;
13536 #else /* Word 0 - Little Endian */
13537         uint64_t reserved_0_63         : 64;
13538 #endif /* Word 0 - End */
13539     } s;
13540     struct bdk_lmcx_nxm_fadr_cn8
13541     {
13542 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13543         uint64_t reserved_40_63        : 24;
13544         uint64_t nxm_faddr_ext         : 1;  /**< [ 39: 39](RO/H) Extended bit for the failing L2C-LMC address (bit 37). */
13545         uint64_t nxm_src               : 1;  /**< [ 38: 38](RO/H) Indicates the source of the operation that caused a NXM error:
13546                                                                  0 = L2C, 1 = HFA. */
13547         uint64_t nxm_type              : 1;  /**< [ 37: 37](RO/H) Indicates the type of operation that caused NXM error:
13548                                                                  0 = Read, 1 = Write. */
13549         uint64_t nxm_faddr             : 37; /**< [ 36:  0](RO/H) Failing L2C-LMC address. Bits\<3:0\> are always zeros for an HFA access, and bits\<4:0\> are
13550                                                                  always 0s for an L2C access. Bits\<5:4\> represent the fill order for an L2C read operation,
13551                                                                  and the start point within a cache line for a write operation. */
13552 #else /* Word 0 - Little Endian */
13553         uint64_t nxm_faddr             : 37; /**< [ 36:  0](RO/H) Failing L2C-LMC address. Bits\<3:0\> are always zeros for an HFA access, and bits\<4:0\> are
13554                                                                  always 0s for an L2C access. Bits\<5:4\> represent the fill order for an L2C read operation,
13555                                                                  and the start point within a cache line for a write operation. */
13556         uint64_t nxm_type              : 1;  /**< [ 37: 37](RO/H) Indicates the type of operation that caused NXM error:
13557                                                                  0 = Read, 1 = Write. */
13558         uint64_t nxm_src               : 1;  /**< [ 38: 38](RO/H) Indicates the source of the operation that caused a NXM error:
13559                                                                  0 = L2C, 1 = HFA. */
13560         uint64_t nxm_faddr_ext         : 1;  /**< [ 39: 39](RO/H) Extended bit for the failing L2C-LMC address (bit 37). */
13561         uint64_t reserved_40_63        : 24;
13562 #endif /* Word 0 - End */
13563     } cn8;
13564     struct bdk_lmcx_nxm_fadr_cn9
13565     {
13566 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13567         uint64_t reserved_46_63        : 18;
13568         uint64_t nxm_src               : 1;  /**< [ 45: 45](RO/H) Indicates the source of the operation that caused a NXM error:
13569                                                                  0 = L2C, 1 = HFA. */
13570         uint64_t nxm_type              : 1;  /**< [ 44: 44](RO/H) Indicates the type of operation that caused NXM error:
13571                                                                  0 = Read, 1 = Write. */
13572         uint64_t reserved_42_43        : 2;
13573         uint64_t nxm_faddr             : 42; /**< [ 41:  0](RO/H) Failing L2C-LMC address. Bits\<3:0\> are always zeros for an HFA access, and bits\<4:0\> are
13574                                                                  always 0s for an L2C access. Bits\<5:4\> represent the fill order for an L2C read operation,
13575                                                                  and the start point within a cache line for a write operation. */
13576 #else /* Word 0 - Little Endian */
13577         uint64_t nxm_faddr             : 42; /**< [ 41:  0](RO/H) Failing L2C-LMC address. Bits\<3:0\> are always zeros for an HFA access, and bits\<4:0\> are
13578                                                                  always 0s for an L2C access. Bits\<5:4\> represent the fill order for an L2C read operation,
13579                                                                  and the start point within a cache line for a write operation. */
13580         uint64_t reserved_42_43        : 2;
13581         uint64_t nxm_type              : 1;  /**< [ 44: 44](RO/H) Indicates the type of operation that caused NXM error:
13582                                                                  0 = Read, 1 = Write. */
13583         uint64_t nxm_src               : 1;  /**< [ 45: 45](RO/H) Indicates the source of the operation that caused a NXM error:
13584                                                                  0 = L2C, 1 = HFA. */
13585         uint64_t reserved_46_63        : 18;
13586 #endif /* Word 0 - End */
13587     } cn9;
13588 };
13589 typedef union bdk_lmcx_nxm_fadr bdk_lmcx_nxm_fadr_t;
13590 
13591 static inline uint64_t BDK_LMCX_NXM_FADR(unsigned long a) __attribute__ ((pure, always_inline));
13592 static inline uint64_t BDK_LMCX_NXM_FADR(unsigned long a)
13593 {
13594     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
13595         return 0x87e088000028ll + 0x1000000ll * ((a) & 0x0);
13596     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
13597         return 0x87e088000028ll + 0x1000000ll * ((a) & 0x1);
13598     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
13599         return 0x87e088000028ll + 0x1000000ll * ((a) & 0x3);
13600     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
13601         return 0x87e088000028ll + 0x1000000ll * ((a) & 0x3);
13602     __bdk_csr_fatal("LMCX_NXM_FADR", 1, a, 0, 0, 0);
13603 }
13604 
13605 #define typedef_BDK_LMCX_NXM_FADR(a) bdk_lmcx_nxm_fadr_t
13606 #define bustype_BDK_LMCX_NXM_FADR(a) BDK_CSR_TYPE_RSL
13607 #define basename_BDK_LMCX_NXM_FADR(a) "LMCX_NXM_FADR"
13608 #define device_bar_BDK_LMCX_NXM_FADR(a) 0x0 /* PF_BAR0 */
13609 #define busnum_BDK_LMCX_NXM_FADR(a) (a)
13610 #define arguments_BDK_LMCX_NXM_FADR(a) (a),-1,-1,-1
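
/* Illustrative usage sketch (not part of the auto-generated definitions).
 * Decode a raw LMC(a)_NXM_FADR value using the CN8XXX (cn8) layout above; the
 * CN9XXX layout places [NXM_TYPE] and [NXM_SRC] at different bit positions and
 * would use the cn9 view instead. The raw value is assumed to have been read
 * from the address returned by BDK_LMCX_NXM_FADR(a). The helper name is
 * hypothetical. */
static inline void bdk_lmcx_nxm_fadr_decode_example(uint64_t raw_fadr,
                                                    uint64_t *faddr,
                                                    int *is_write,
                                                    int *is_hfa)
{
    bdk_lmcx_nxm_fadr_t fadr;
    fadr.u = raw_fadr;              /* overlay the raw CSR value on the bit fields */
    *faddr    = fadr.cn8.nxm_faddr; /* failing L2C-LMC address */
    *is_write = fadr.cn8.nxm_type;  /* 0 = read, 1 = write */
    *is_hfa   = fadr.cn8.nxm_src;   /* 0 = L2C, 1 = HFA */
}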
13611 
13612 /**
13613  * Register (RSL) lmc#_ops_cnt
13614  *
13615  * LMC OPS Performance Counter Register
13616  */
13617 union bdk_lmcx_ops_cnt
13618 {
13619     uint64_t u;
13620     struct bdk_lmcx_ops_cnt_s
13621     {
13622 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13623         uint64_t opscnt                : 64; /**< [ 63:  0](RO/H) Performance counter. A 64-bit counter that increments when the DDR3 data bus is being
13624                                                                  used.
13625                                                                  DDR bus utilization = OPSCNT / LMC()_DCLK_CNT. */
13626 #else /* Word 0 - Little Endian */
13627         uint64_t opscnt                : 64; /**< [ 63:  0](RO/H) Performance counter. A 64-bit counter that increments when the DDR3 data bus is being
13628                                                                  used.
13629                                                                  DDR bus utilization = OPSCNT / LMC()_DCLK_CNT. */
13630 #endif /* Word 0 - End */
13631     } s;
13632     /* struct bdk_lmcx_ops_cnt_s cn8; */
13633     struct bdk_lmcx_ops_cnt_cn9
13634     {
13635 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13636         uint64_t opscnt                : 64; /**< [ 63:  0](RO/H) Performance counter. A 64-bit counter that increments when the DDR4 data bus is being
13637                                                                  used.
13638                                                                  DDR bus utilization = OPSCNT / LMC()_DCLK_CNT. */
13639 #else /* Word 0 - Little Endian */
13640         uint64_t opscnt                : 64; /**< [ 63:  0](RO/H) Performance counter. A 64-bit counter that increments when the DDR4 data bus is being
13641                                                                  used.
13642                                                                  DDR bus utilization = OPSCNT / LMC()_DCLK_CNT. */
13643 #endif /* Word 0 - End */
13644     } cn9;
13645 };
13646 typedef union bdk_lmcx_ops_cnt bdk_lmcx_ops_cnt_t;
13647 
13648 static inline uint64_t BDK_LMCX_OPS_CNT(unsigned long a) __attribute__ ((pure, always_inline));
13649 static inline uint64_t BDK_LMCX_OPS_CNT(unsigned long a)
13650 {
13651     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
13652         return 0x87e0880001d8ll + 0x1000000ll * ((a) & 0x0);
13653     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
13654         return 0x87e0880001d8ll + 0x1000000ll * ((a) & 0x1);
13655     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
13656         return 0x87e0880001d8ll + 0x1000000ll * ((a) & 0x3);
13657     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
13658         return 0x87e0880001d8ll + 0x1000000ll * ((a) & 0x3);
13659     __bdk_csr_fatal("LMCX_OPS_CNT", 1, a, 0, 0, 0);
13660 }
13661 
13662 #define typedef_BDK_LMCX_OPS_CNT(a) bdk_lmcx_ops_cnt_t
13663 #define bustype_BDK_LMCX_OPS_CNT(a) BDK_CSR_TYPE_RSL
13664 #define basename_BDK_LMCX_OPS_CNT(a) "LMCX_OPS_CNT"
13665 #define device_bar_BDK_LMCX_OPS_CNT(a) 0x0 /* PF_BAR0 */
13666 #define busnum_BDK_LMCX_OPS_CNT(a) (a)
13667 #define arguments_BDK_LMCX_OPS_CNT(a) (a),-1,-1,-1
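
/* Illustrative usage sketch (not part of the auto-generated definitions).
 * The OPSCNT description above defines DDR bus utilization as
 * OPSCNT / LMC()_DCLK_CNT; this expresses that ratio in percent, assuming the
 * caller sampled both raw counters over the same interval. The helper name is
 * hypothetical. */
static inline uint64_t bdk_lmcx_ddr_util_percent_example(uint64_t opscnt,
                                                         uint64_t dclk_cnt)
{
    if (dclk_cnt == 0)
        return 0;                     /* no DCLK ticks sampled; avoid divide-by-zero */
    return (opscnt * 100) / dclk_cnt; /* DDR bus utilization, in percent */
}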
13668 
13669 /**
13670  * Register (RSL) lmc#_phy_ctl
13671  *
13672  * LMC PHY Control Register
13673  */
13674 union bdk_lmcx_phy_ctl
13675 {
13676     uint64_t u;
13677     struct bdk_lmcx_phy_ctl_s
13678     {
13679 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13680         uint64_t rx_vref_sel           : 1;  /**< [ 63: 63](R/W) Run Vref training twice, sampling from DQ3 and DQ4 sequentially, then use the
13681                                                                  average of the two as the final Vref training result. */
13682         uint64_t double_vref_training  : 1;  /**< [ 62: 62](R/W) Vref longer training.
13683                                                                  0 = Normal training period.
13684                                                                  1 = Double training period. */
13685         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](RO) Reserved. */
13686         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](RO) Reserved. */
13687         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](RO) Reserved. */
13688         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](RO) Reserved. */
13689         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
13690                                                                  Internal:
13691                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
13692                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is loop
13693                                                                  backed out through odd DQ at the same rate.
13694                                                                  When de-asserted, LOOPBACK assertion is working along with [LOOPBACK_POS] to check on even
13695                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
13696         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
13697                                                                  Internal:
13698                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
13699                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
13700                                                                  without being flop'd by DQS. Need to make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
13701                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
13702         uint64_t dm_disable            : 1;  /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
13703                                                                  the
13704                                                                  DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in DDR3
13705                                                                  and drives a constant 1 in DDR4.
13706                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
13707                                                                  on
13708                                                                  (MODEREG_PARAMS3[WR_DBI]=1). */
13709         uint64_t reserved_51_54        : 4;
13710         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
13711                                                                  Internal:
13712                                                                  Write to one to reset the PHY, one-shot operation, will automatically
13713                                                                  clear to value of zero. */
13714         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
13715                                                                  Internal:
13716                                                                  Indicates completion of a read operation, will clear to zero when a read
13717                                                                  operation is started, then set to one when operation is complete. */
13718         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
13719                                                                  Internal:
13720                                                                  Data from a deskew read operation. Only valid when the
13721                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
13722         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
13723                                                                  Internal:
13724                                                                  Write one to start deskew data read operation, will automatically clear
13725                                                                  to zero. Write to one will also clear the complete bit. */
13726         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
13727                                                                  Internal:
13728                                                                  Adjust clock toggle rate for reading deskew debug information:
13729                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
13730                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
13731                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
13732                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
13733         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
13734                                                                  Internal:
13735                                                                  Offset to change delay of deskew debug data return time to LMC from
13736                                                                  DDR PHY. */
13737         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
13738                                                                  Internal:
13739                                                                  Deskew debug, select number of bits per byte lane.
13740                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
13741                                                                  1 = 10 bits per byte lane, including DBI and DAC. CN81XX needs to be set to this value. */
13742         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
13743                                                                  Internal:
13744                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
13745                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
13746         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
13747                                                                  Internal:
13748                                                                  Deskew debug bit select for dsk read operation.
13749                                                                  0x0 = DQ0.
13750                                                                  0x1 = DQ1.
13751                                                                  0x2 = DQ2.
13752                                                                  0x3 = DQ3.
13753                                                                  0x4 = DAC.
13754                                                                  0x5 = DBI.
13755                                                                  0x6 = DQ4.
13756                                                                  0x7 = DQ5.
13757                                                                  0x8 = DQ6.
13758                                                                  0x9 = DQ7. */
13759         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
13760         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
13761         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
13762                                                                  Internal:
13763                                                                  Voltage reference pin enabled. */
13764         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
13765                                                                  Internal:
13766                                                                  PHY DAC on. */
13767         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
13768                                                                  Internal:
13769                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
13770                                                                  when loopback is enabled. */
13771         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
13772                                                                  Internal:
13773                                                                  PHY loopback enable. */
13774         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
13775                                                                  training sequence is in the idle state. */
13776         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
13777         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
13778         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
13779         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
13780                                                                  Internal:
13781                                                                  Set to force read_enable to PHY active all the time.
13782                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
13783                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
13784         uint64_t reserved_13           : 1;
13785         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
13786                                                                  Internal:
13787                                                                  Clock tune. */
13788         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
13789                                                                  Internal:
13790                                                                  Clock delay out. */
13791         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
13792                                                                  Internal:
13793                                                                  Clock tune. */
13794         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
13795                                                                  Internal:
13796                                                                  Clock delay out. */
13797         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
13798                                                                  Internal:
13799                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
13800                                                                  bits
13801                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
13802                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
13803                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
13804                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
13805                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
13806                                                                  loop-backed out after being flop'd by incoming DQS. */
13807         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
13808                                                                  Internal:
13809                                                                  Loopback pos mode. This works in conjunction with
13810                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
13811         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
13812                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
13813                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
13814                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
13815                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
13816                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
13817 #else /* Word 0 - Little Endian */
13818         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
13819                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
13820                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
13821                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
13822                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
13823                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
13824         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
13825                                                                  Internal:
13826                                                                  Loopback pos mode. This works in conjunction with
13827                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
13828         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
13829                                                                  Internal:
13830                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
13831                                                                  bits
13832                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
13833                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
13834                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
13835                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
13836                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
13837                                                                  loop-backed out after being flop'd by incoming DQS. */
13838         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
13839                                                                  Internal:
13840                                                                  Clock delay out. */
13841         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
13842                                                                  Internal:
13843                                                                  Clock tune. */
13844         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
13845                                                                  Internal:
13846                                                                  Clock delay out. */
13847         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
13848                                                                  Internal:
13849                                                                  Clock tune. */
13850         uint64_t reserved_13           : 1;
13851         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
13852                                                                  Internal:
13853                                                                  Set to force read_enable to PHY active all the time.
13854                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
13855                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
13856         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
13857         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
13858         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
13859         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
13860                                                                  training sequence is in the idle state. */
13861         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
13862                                                                  Internal:
13863                                                                  PHY loopback enable. */
13864         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
13865                                                                  Internal:
13866                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
13867                                                                  when loopback is enabled. */
13868         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
13869                                                                  Internal:
13870                                                                  PHY DAC on. */
13871         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
13872                                                                  Internal:
13873                                                                  Voltage reference pin enabled. */
13874         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
13875         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
13876         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
13877                                                                  Internal:
13878                                                                  Deskew debug bit select for dsk read operation.
13879                                                                  0x0 = DQ0.
13880                                                                  0x1 = DQ1.
13881                                                                  0x2 = DQ2.
13882                                                                  0x3 = DQ3.
13883                                                                  0x4 = DAC.
13884                                                                  0x5 = DBI.
13885                                                                  0x6 = DQ4.
13886                                                                  0x7 = DQ5.
13887                                                                  0x8 = DQ6.
13888                                                                  0x9 = DQ7. */
13889         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
13890                                                                  Internal:
13891                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
13892                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
13893         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
13894                                                                  Internal:
13895                                                                  Deskew debug, select number of bits per byte lane.
13896                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
13897                                                                  1 = 10 bits per byte lane, including DBI and DAC. CN81XX needs to be set to this value. */
13898         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
13899                                                                  Internal:
13900                                                                  Offset to change delay of deskew debug data return time to LMC from
13901                                                                  DDR PHY. */
13902         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
13903                                                                  Internal:
13904                                                                  Adjust clock toggle rate for reading deskew debug information:
13905                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
13906                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
13907                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
13908                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
13909         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
13910                                                                  Internal:
13911                                                                  Write one to start deskew data read operation, will automatically clear
13912                                                                  to zero. Write to one will also clear the complete bit. */
13913         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
13914                                                                  Internal:
13915                                                                  Data from a deskew read operation. Only valid when the
13916                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
13917         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
13918                                                                  Internal:
13919                                                                  Indicates completion of a read operation, will clear to zero when a read
13920                                                                  operation is started, then set to one when operation is complete. */
13921         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
13922                                                                  Internal:
13923                                                                  Write to one to reset the PHY, one-shot operation, will automatically
13924                                                                  clear to value of zero. */
13925         uint64_t reserved_51_54        : 4;
13926         uint64_t dm_disable            : 1;  /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
13927                                                                  the
13928                                                                  DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in DDR3
13929                                                                  and drives a constant 1 in DDR4.
13930                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
13931                                                                  on
13932                                                                  (MODEREG_PARAMS3[WR_DBI]=1). */
13933         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
13934                                                                  Internal:
13935                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
13936                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
13937                                                                  without being flop'd by DQS. Need to make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
13938                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
13939         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
13940                                                                  Internal:
13941                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
13942                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is loop
13943                                                                  backed out through odd DQ at the same rate.
13944                                                                  When de-asserted, LOOPBACK assertion is working along with [LOOPBACK_POS] to check on even
13945                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
13946         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](RO) Reserved. */
13947         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](RO) Reserved. */
13948         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](RO) Reserved. */
13949         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](RO) Reserved. */
13950         uint64_t double_vref_training  : 1;  /**< [ 62: 62](R/W) Vref longer training.
13951                                                                  0 = Normal training period.
13952                                                                  1 = Double training period. */
13953         uint64_t rx_vref_sel           : 1;  /**< [ 63: 63](R/W) Run Vref training twice, sampling from DQ3 and DQ4 sequentially, then use the
13954                                                                  average of the two as the final Vref training result. */
13955 #endif /* Word 0 - End */
13956     } s;
13957     struct bdk_lmcx_phy_ctl_cn88xxp1
13958     {
13959 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
13960         uint64_t reserved_62_63        : 2;
13961         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](RO) Reserved. */
13962         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](RO) Reserved. */
13963         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](RO) Reserved. */
13964         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](RO) Reserved. */
13965         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](RO) Reserved. */
13966         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](RO) Reserved. */
13967         uint64_t dm_disable            : 1;  /**< [ 55: 55](RO) Reserved. */
13968         uint64_t c1_sel                : 2;  /**< [ 54: 53](R/W) Reserved.
13969                                                                  Internal:
13970                                                                  0x0 = C1 is not routed to any output pin.
13971                                                                  0x1 = C1 is routed to CS3.
13972                                                                  0x2 = C1 is routed to A17 address pin.
13973                                                                  0x3 = C1 is not routed to any output pin.
13974 
13975                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
13976         uint64_t c0_sel                : 2;  /**< [ 52: 51](R/W) Reserved.
13977                                                                  Internal:
13978                                                                  0x0 = C0 is not routed to any output pin.
13979                                                                  0x1 = C0 is routed to CS2.
13980                                                                  0x2 = C0 is routed to TEN output pin.
13981                                                                  0x3 = C0 is not routed to any output pin.
13982 
13983                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
13984         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
13985                                                                  Internal:
13986                                                                  Write to one to reset the PHY, one-shot operation, will automatically
13987                                                                  clear to value of zero. */
13988         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
13989                                                                  Internal:
13990                                                                  Indicates completion of a read operation, will clear to zero when a read
13991                                                                  operation is started, then set to one when operation is complete. */
13992         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
13993                                                                  Internal:
13994                                                                  Data from a deskew read operation. Only valid when the
13995                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
13996         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
13997                                                                  Internal:
13998                                                                  Write one to start deskew data read operation, will automatically clear
13999                                                                  to zero. Write to one will also clear the complete bit. */
14000         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
14001                                                                  Internal:
14002                                                                  Adjust clock toggle rate for reading deskew debug information:
14003                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
14004                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
14005                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
14006                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
14007         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
14008                                                                  Internal:
14009                                                                  Offset to change delay of deskew debug data return time to LMC from
14010                                                                  DDR PHY. */
14011         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
14012                                                                  Internal:
14013                                                                  Deskew debug, select number of bits per byte lane.
14014                                                                  0 = 8 bits per byte lane, no DBI.
14015                                                                  1 = 9 bits per byte lane, including DBI. CN88XX needs to be set to this value. */
14016         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
14017                                                                  Internal:
14018                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
14019                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
14020         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
14021                                                                  Internal:
14022                                                                  Deskew debug bit select for dsk read operation.
14023                                                                  0x0 = DQ0.
14024                                                                  0x1 = DQ1.
14025                                                                  0x2 = DQ2.
14026                                                                  0x3 = DQ3.
14027                                                                  0x4 = DBI.
14028                                                                  0x5 = DQ4.
14029                                                                  0x6 = DQ5.
14030                                                                  0x7 = DQ6.
14031                                                                  0x8 = DQ7. */
14032         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
14033         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
14034         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
14035                                                                  Internal:
14036                                                                  Voltage reference pin enabled. */
14037         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
14038                                                                  Internal:
14039                                                                  PHY DAC on. */
14040         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
14041                                                                  Internal:
14042                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
14043                                                                  when loopback is enabled. */
14044         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
14045                                                                  Internal:
14046                                                                  PHY loopback enable. */
14047         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
14048                                                                  training sequence is in the idle state. */
14049         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
14050         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
14051         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
14052         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
14053                                                                  Internal:
14054                                                                  Set to force read_enable to PHY active all the time.
14055                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
14056                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
14057         uint64_t lv_mode               : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
14058                                                                  Internal:
14059                                                                  Low Voltage Mode (1.35V). */
14060         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
14061                                                                  Internal:
14062                                                                  Clock tune. */
14063         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
14064                                                                  Internal:
14065                                                                  Clock delay out. */
14066         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
14067                                                                  Internal:
14068                                                                  Clock tune. */
14069         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
14070                                                                  Internal:
14071                                                                  Clock delay out. */
14072         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
14073                                                                  Internal:
14074                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
14075                                                                  bits
14076                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
14077                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
14078                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
14079                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
14080                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
14081                                                                  loop-backed out after being flop'd by incoming DQS. */
14082         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
14083                                                                  Internal:
14084                                                                  Loopback pos mode. This works in conjunction with
14085                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
14086         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
14087                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
14088                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
14089                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
14090                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
14091                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
14092 #else /* Word 0 - Little Endian */
14093         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
14094                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
14095                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
14096                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
14097                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
14098                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
14099         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
14100                                                                  Internal:
14101                                                                  Loopback pos mode. This works in conjunction with
14102                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
14103         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
14104                                                                  Internal:
14105                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
14106                                                                  bits
14107                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
14108                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
14109                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
14110                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
14111                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
14112                                                                  loop-backed out after being flop'd by incoming DQS. */
14113         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
14114                                                                  Internal:
14115                                                                  Clock delay out. */
14116         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
14117                                                                  Internal:
14118                                                                  Clock tune. */
14119         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
14120                                                                  Internal:
14121                                                                  Clock delay out. */
14122         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
14123                                                                  Internal:
14124                                                                  Clock tune. */
14125         uint64_t lv_mode               : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
14126                                                                  Internal:
14127                                                                  Low Voltage Mode (1.35V). */
14128         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
14129                                                                  Internal:
14130                                                                  Set to force read_enable to PHY active all the time.
14131                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
14132                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
14133         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
14134         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
14135         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
14136         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
14137                                                                  training sequence is in the idle state. */
14138         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
14139                                                                  Internal:
14140                                                                  PHY loopback enable. */
14141         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
14142                                                                  Internal:
14143                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
14144                                                                  when loopback is enabled. */
14145         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
14146                                                                  Internal:
14147                                                                  PHY DAC on. */
14148         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
14149                                                                  Internal:
14150                                                                  Voltage reference pin enabled. */
14151         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
14152         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
14153         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
14154                                                                  Internal:
14155                                                                  Deskew debug bit select for dsk read operation.
14156                                                                  0x0 = DQ0.
14157                                                                  0x1 = DQ1.
14158                                                                  0x2 = DQ2.
14159                                                                  0x3 = DQ3.
14160                                                                  0x4 = DBI.
14161                                                                  0x5 = DQ4.
14162                                                                  0x6 = DQ5.
14163                                                                  0x7 = DQ6.
14164                                                                  0x8 = DQ7. */
14165         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
14166                                                                  Internal:
14167                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
14168                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
14169         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
14170                                                                  Internal:
14171                                                                  Deskew debug, select number of bits per byte lane.
14172                                                                  0 = 8 bits per byte lane, no DBI.
14173                                                                  1 = 9 bits per byte lane, including DBI. CN88XX needs to be set to this value. */
14174         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
14175                                                                  Internal:
14176                                                                  Offset to change delay of deskew debug data return time to LMC from
14177                                                                  DDR PHY. */
14178         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
14179                                                                  Internal:
14180                                                                  Adjust clock toggle rate for reading deskew debug information:
14181                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
14182                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
14183                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
14184                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
14185         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
14186                                                                  Internal:
14187                                                                  Write one to start a deskew data read operation; the bit automatically clears
14188                                                                  to zero. Writing one also clears the complete bit. */
14189         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
14190                                                                  Internal:
14191                                                                  Data from a deskew read operation. Only valid when the
14192                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
14193         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
14194                                                                  Internal:
14195                                                                  Indicates completion of a read operation; cleared to zero when a read
14196                                                                  operation is started, then set to one when the operation is complete. */
14197         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
14198                                                                  Internal:
14199                                                                  Write one to reset the PHY. This is a one-shot operation; the bit automatically
14200                                                                  clears to zero. */
14201         uint64_t c0_sel                : 2;  /**< [ 52: 51](R/W) Reserved.
14202                                                                  Internal:
14203                                                                  0x0 = C0 is not routed to any output pin.
14204                                                                  0x1 = C0 is routed to CS2.
14205                                                                  0x2 = C0 is routed to TEN output pin.
14206                                                                  0x3 = C0 is not routed to any output pin.
14207 
14208                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
14209         uint64_t c1_sel                : 2;  /**< [ 54: 53](R/W) Reserved.
14210                                                                  Internal:
14211                                                                  0x0 = C1 is not routed to any output pin.
14212                                                                  0x1 = C1 is routed to CS3.
14213                                                                  0x2 = C1 is routed to A17 address pin.
14214                                                                  0x3 = C1 is not routed to any output pin.
14215 
14216                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
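        /*
         * Illustrative usage sketch for the [C0_SEL]/[C1_SEL] routing described above,
         * assuming the usual BDK_CSR_READ()/BDK_CSR_WRITE() helpers and the
         * BDK_LMCX_PHY_CTL() address macro; node, lmc and using_3ds_dram are
         * placeholder variables.
         *
         *   bdk_lmcx_phy_ctl_t phy_ctl;
         *   phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(lmc));
         *   if (using_3ds_dram) {
         *       phy_ctl.cn88xxp1.c0_sel = 0x1;   // route C0 to the CS2 output pin
         *       phy_ctl.cn88xxp1.c1_sel = 0x1;   // route C1 to the CS3 output pin
         *   } else {
         *       phy_ctl.cn88xxp1.c0_sel = 0x0;   // C0 not routed to any output pin
         *       phy_ctl.cn88xxp1.c1_sel = 0x0;   // C1 not routed to any output pin
         *   }
         *   BDK_CSR_WRITE(node, BDK_LMCX_PHY_CTL(lmc), phy_ctl.u);
         */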
14217         uint64_t dm_disable            : 1;  /**< [ 55: 55](RO) Reserved. */
14218         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](RO) Reserved. */
14219         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](RO) Reserved. */
14220         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](RO) Reserved. */
14221         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](RO) Reserved. */
14222         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](RO) Reserved. */
14223         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](RO) Reserved. */
14224         uint64_t reserved_62_63        : 2;
14225 #endif /* Word 0 - End */
14226     } cn88xxp1;
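    /*
     * Illustrative usage sketch for the DSK_DBG_* debug fields described above:
     * reading one deskew setting back from the DDR PHY.  It assumes the usual
     * BDK_CSR_READ()/BDK_CSR_WRITE() helpers and the BDK_LMCX_PHY_CTL() address
     * macro; node, lmc, byte and bit are placeholder variables.
     *
     *   bdk_lmcx_phy_ctl_t phy_ctl;
     *   phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(lmc));
     *   phy_ctl.cn88xxp1.dsk_dbg_num_bits_sel = 1;    // CN88XX: 9 bits per lane (with DBI)
     *   phy_ctl.cn88xxp1.dsk_dbg_byte_sel     = byte; // 0-3, 4 = ECC, 5-8 = lanes 4-7
     *   phy_ctl.cn88xxp1.dsk_dbg_bit_sel      = bit;  // 0x0-0x8 per the encoding above
     *   phy_ctl.cn88xxp1.dsk_dbg_clk_scaler   = 0x3;  // read clock toggles every 16 DCLKs
     *   BDK_CSR_WRITE(node, BDK_LMCX_PHY_CTL(lmc), phy_ctl.u);
     *
     *   phy_ctl.cn88xxp1.dsk_dbg_rd_start = 1;        // one-shot; self-clearing
     *   BDK_CSR_WRITE(node, BDK_LMCX_PHY_CTL(lmc), phy_ctl.u);
     *
     *   do {
     *       phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(lmc));
     *   } while (!phy_ctl.cn88xxp1.dsk_dbg_rd_complete);
     *
     *   uint64_t deskew = phy_ctl.cn88xxp1.dsk_dbg_rd_data;  // 10-bit deskew value
     */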
14227     struct bdk_lmcx_phy_ctl_cn9
14228     {
14229 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
14230         uint64_t rx_vref_sel           : 1;  /**< [ 63: 63](R/W) Run Vref training twice, sampling from DQ3 and DQ4 sequentially, then use the
14231                                                                  average of the two as the final Vref training result. */
14232         uint64_t double_vref_training  : 1;  /**< [ 62: 62](R/W) Vref longer training.
14233                                                                  0 = Normal training period.
14234                                                                  1 = Double training period. */
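        /*
         * Illustrative sketch for [RX_VREF_SEL] and [DOUBLE_VREF_TRAINING] above,
         * assuming the usual BDK_CSR_READ()/BDK_CSR_WRITE() helpers; node and lmc
         * are placeholder variables.  The bits would typically be programmed before
         * the Vref training sequence is run.
         *
         *   bdk_lmcx_phy_ctl_t phy_ctl;
         *   phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(lmc));
         *   phy_ctl.cn9.rx_vref_sel          = 1;  // average the DQ3 and DQ4 results
         *   phy_ctl.cn9.double_vref_training = 1;  // double the training period
         *   BDK_CSR_WRITE(node, BDK_LMCX_PHY_CTL(lmc), phy_ctl.u);
         */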
14235         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](R/W) When set, the PHY attempts to lock all DQ/DBI bit deskew settings once alignment is
14236                                                                  achieved.
14237 
14238                                                                  When clear, LMC disengages the PHY bit deskew lock control mechanism. This
14239                                                                  causes the PHY to continuously perform and/or adjust the read deskew training on
14240                                                                  all DQ/DBI bits during any read operations. */
14241         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](R/W) Reserved.
14242                                                                  Internal:
14243                                                                  When set, LMC prevents PHY from loading the deskew shift
14244                                                                  registers with its internal settings. When Read Deskew sequence is kicked off
14245                                                                  by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
14246                                                                  whether or not to load the shift register with PHY's internal settings before
14247                                                                  the shifting process. */
14248         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](R/W) Reserved.
14249                                                                  Internal:
14250                                                                  When set high, PHY selects all of the preloaded data
14251                                                                  when configuring the read deskew settings. */
14252         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](R/W) Reserved.
14253                                                                  Internal:
14254                                                                  When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
14255                                                                  Overwrite sequence to shift out a 10-bit setting for a single DQ.
14256                                                                  Note that there are a total of 9 bytes and the chain structure is split into two
14257                                                                  halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
14258                                                                  ECC,3,2,1,0.
14259                                                                  Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
14260                                                                  DQ has a 10-bit deskew setting. */
14261         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
14262                                                                  Internal:
14263                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
14264                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is loop
14265                                                                  backed out through odd DQ at the same rate.
14266                                                                  When deasserted, LOOPBACK works along with [LOOPBACK_POS] to check even
14267                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
14268         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
14269                                                                  Internal:
14270                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
14271                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
14272                                                                  without being flop'd by DQS. Need to make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
14273                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
14274                                                                  Write to one to disable the DRAM data mask feature by having LMC drive a constant value
14275                                                                  on
14276                                                                  the DDR_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant one.
14277                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
14278                                                                  on (LMC()_MODEREG_PARAMS3[WR_DBI]=1). */
14279         uint64_t reserved_54           : 1;
14280         uint64_t c1_cs3_switch         : 1;  /**< [ 53: 53](R/W) 0 = Routes C1 data to the C1 output pin, and
14281                                                                      routes CS3 data to the CS3 output pin.
14282 
14283                                                                  1 = Routes C1 data to the CS3 output pin, and
14284                                                                      routes CS3 data to the C1 output pin. */
14285         uint64_t c0_cs2_switch         : 1;  /**< [ 52: 52](R/W) 0 = Routes C0/TEN data to the C0/TEN output pin, and
14286                                                                      routes CS2 data to the CS2 output pin.
14287 
14288                                                                  1 = Routes C0/TEN data to the CS2 output pin, and
14289                                                                      routes CS2 data to the C0/TEN output pin. */
14290         uint64_t ten_sel               : 1;  /**< [ 51: 51](R/W) DDR PHY test enable select signal. When asserted, LMC drives C0/TEN pin with the value set
14291                                                                  in LMC()_PHY_CTL[TEN] as part of any commands being sent out. */
14292         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
14293                                                                  Internal:
14294                                                                  Write one to reset the PHY. This is a one-shot operation; the bit automatically
14295                                                                  clears to zero. */
14296         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
14297                                                                  Internal:
14298                                                                  Indicates completion of a read operation; cleared to zero when a read
14299                                                                  operation is started, then set to one when the operation is complete. */
14300         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
14301                                                                  Internal:
14302                                                                  Data from a deskew read operation. Only valid when the
14303                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
14304         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
14305                                                                  Internal:
14306                                                                  Write one to start a deskew data read operation; the bit automatically clears
14307                                                                  to zero. Writing one also clears the complete bit. */
14308         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
14309                                                                  Internal:
14310                                                                  Adjust clock toggle rate for reading deskew debug information:
14311                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
14312                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
14313                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
14314                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
14315         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
14316                                                                  Internal:
14317                                                                  Offset to change delay of deskew debug data return time to LMC from
14318                                                                  DDR PHY. */
14319         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
14320                                                                  Internal:
14321                                                                  Deskew debug, select number of bits per byte lane.
14322                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
14323                                                                  1 = 10 bits per byte lane, including DBI and DAC. CN83XX needs to be set to this value. */
14324         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
14325                                                                  Internal:
14326                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
14327                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
14328         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
14329                                                                  Internal:
14330                                                                  Deskew debug bit select for dsk read operation.
14331                                                                  0x0 = DQ0.
14332                                                                  0x1 = DQ1.
14333                                                                  0x2 = DQ2.
14334                                                                  0x3 = DQ3.
14335                                                                  0x4 = DAC.
14336                                                                  0x5 = DBI/DQS1.
14337                                                                  0x6 = DQ4.
14338                                                                  0x7 = DQ5.
14339                                                                  0x8 = DQ6.
14340                                                                  0x9 = DQ7. */
14341         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
14342         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
14343         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
14344                                                                  Internal:
14345                                                                  Voltage reference pin enabled. */
14346         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
14347                                                                  Internal:
14348                                                                  PHY DAC on. */
14349         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
14350                                                                  Internal:
14351                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
14352                                                                  when loopback is enabled. */
14353         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
14354                                                                  Internal:
14355                                                                  PHY loopback enable. */
14356         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
14357                                                                  training sequence is in the idle state. */
14358         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
14359         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
14360         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
14361         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
14362                                                                  Internal:
14363                                                                  Set to force read_enable to PHY active all the time.
14364                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
14365                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
14366         uint64_t dqs1_loopback         : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
14367                                                                  Internal:
14368                                                                  The same as [LOOPBACK] except DQS1 is looped back through DQS0. */
14369         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
14370                                                                  Internal:
14371                                                                  Clock tune. */
14372         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
14373                                                                  Internal:
14374                                                                  Clock delay out. */
14375         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
14376                                                                  Internal:
14377                                                                  Clock tune. */
14378         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
14379                                                                  Internal:
14380                                                                  Clock delay out. */
14381         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
14382                                                                  Internal:
14383                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
14384                                                                  bits
14385                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
14386                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
14387                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
14388                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
14389                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
14390                                                                  loop-backed out after being flop'd by incoming DQS. */
14391         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
14392                                                                  Internal:
14393                                                                  Loopback pos mode. This works in conjunction with
14394                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
14395         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
14396                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
14397                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
14398                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
14399                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
14400                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
14401 #else /* Word 0 - Little Endian */
14402         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
14403                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
14404                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
14405                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
14406                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
14407                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
14408         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
14409                                                                  Internal:
14410                                                                  Loopback pos mode. This works in conjunction with
14411                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
14412         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
14413                                                                  Internal:
14414                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
14415                                                                  bits
14416                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
14417                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
14418                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
14419                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
14420                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
14421                                                                  loop-backed out after being flop'd by incoming DQS. */
14422         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
14423                                                                  Internal:
14424                                                                  Clock delay out. */
14425         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
14426                                                                  Internal:
14427                                                                  Clock tune. */
14428         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
14429                                                                  Internal:
14430                                                                  Clock delay out. */
14431         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
14432                                                                  Internal:
14433                                                                  Clock tune. */
14434         uint64_t dqs1_loopback         : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
14435                                                                  Internal:
14436                                                                  The same as [LOOPBACK] except DQS1 is looped back through DQS0. */
14437         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
14438                                                                  Internal:
14439                                                                  Set to force read_enable to PHY active all the time.
14440                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
14441                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
14442         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
14443         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
14444         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
14445         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
14446                                                                  training sequence is in the idle state. */
14447         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
14448                                                                  Internal:
14449                                                                  PHY loopback enable. */
14450         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
14451                                                                  Internal:
14452                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
14453                                                                  when loopback is enabled. */
14454         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
14455                                                                  Internal:
14456                                                                  PHY DAC on. */
14457         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
14458                                                                  Internal:
14459                                                                  Voltage reference pin enabled. */
14460         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
14461         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
14462         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
14463                                                                  Internal:
14464                                                                  Deskew debug bit select for dsk read operation.
14465                                                                  0x0 = DQ0.
14466                                                                  0x1 = DQ1.
14467                                                                  0x2 = DQ2.
14468                                                                  0x3 = DQ3.
14469                                                                  0x4 = DAC.
14470                                                                  0x5 = DBI/DQS1.
14471                                                                  0x6 = DQ4.
14472                                                                  0x7 = DQ5.
14473                                                                  0x8 = DQ6.
14474                                                                  0x9 = DQ7. */
14475         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
14476                                                                  Internal:
14477                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
14478                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
14479         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
14480                                                                  Internal:
14481                                                                  Deskew debug, select number of bits per byte lane.
14482                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
14483                                                                  1 = 10 bits per byte lane, including DBI and DAC. CN83XX needs to be set to this value. */
14484         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
14485                                                                  Internal:
14486                                                                  Offset to change delay of deskew debug data return time to LMC from
14487                                                                  DDR PHY. */
14488         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
14489                                                                  Internal:
14490                                                                  Adjust clock toggle rate for reading deskew debug information:
14491                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
14492                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
14493                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
14494                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
14495         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
14496                                                                  Internal:
14497                                                                  Write one to start a deskew data read operation; the bit automatically clears
14498                                                                  to zero. Writing one also clears the complete bit. */
14499         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
14500                                                                  Internal:
14501                                                                  Data from a deskew read operation. Only valid when the
14502                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
14503         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
14504                                                                  Internal:
14505                                                                  Indicates completion of a read operation; cleared to zero when a read
14506                                                                  operation is started, then set to one when the operation is complete. */
14507         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
14508                                                                  Internal:
14509                                                                  Write one to reset the PHY. This is a one-shot operation; the bit automatically
14510                                                                  clears to zero. */
14511         uint64_t ten_sel               : 1;  /**< [ 51: 51](R/W) DDR PHY test enable select signal. When asserted, LMC drives C0/TEN pin with the value set
14512                                                                  in LMC()_PHY_CTL[TEN] as part of any commands being sent out. */
14513         uint64_t c0_cs2_switch         : 1;  /**< [ 52: 52](R/W) 0 = Routes C0/TEN data to the C0/TEN output pin, and
14514                                                                      routes CS2 data to the CS2 output pin.
14515 
14516                                                                  1 = Routes C0/TEN data to the CS2 output pin, and
14517                                                                      routes CS2 data to the C0/TEN output pin. */
14518         uint64_t c1_cs3_switch         : 1;  /**< [ 53: 53](R/W) 0 = Routes C1 data to the C1 output pin, and
14519                                                                      routes CS3 data to the CS3 output pin.
14520 
14521                                                                  1 = Routes C1 data to the CS3 output pin, and
14522                                                                      routes CS3 data to the C1 output pin. */
14523         uint64_t reserved_54           : 1;
14524                                                                  Write to one to disable the DRAM data mask feature by having LMC drive a constant value
14525                                                                  on
14526                                                                  the DDR_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant one.
14527                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
14528                                                                  on (LMC()_MODEREG_PARAMS3[WR_DBI]=1). */
14529         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
14530                                                                  Internal:
14531                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
14532                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
14533                                                                  without being flop'd by DQS. Need to make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
14534                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
14535         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
14536                                                                  Internal:
14537                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
14538                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is loop
14539                                                                  backed out through odd DQ at the same rate.
14540                                                                  When deasserted, LOOPBACK works along with [LOOPBACK_POS] to check even
14541                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
14542         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](R/W) Reserved.
14543                                                                  Internal:
14544                                                                  When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
14545                                                                  Overwrite sequence to shift out a 10-bit setting for a single DQ.
14546                                                                  Note that there are a total of 9 bytes and the chain structure is split into two
14547                                                                  halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
14548                                                                  ECC,3,2,1,0.
14549                                                                  Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
14550                                                                  DQ has a 10-bit deskew setting. */
14551         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](R/W) Reserved.
14552                                                                  Internal:
14553                                                                  When set high, PHY selects all of the preloaded data
14554                                                                  when configuring the read deskew settings. */
14555         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](R/W) Reserved.
14556                                                                  Internal:
14557                                                                  When set, LMC prevents PHY from loading the deskew shift
14558                                                                  registers with its internal settings. When Read Deskew sequence is kicked off
14559                                                                  by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
14560                                                                  whether or not to load the shift register with PHY's internal settings before
14561                                                                  the shifting process. */
14562         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](R/W) When set, the PHY attempts to lock all DQ/DBI bit deskew settings once alignment is
14563                                                                  achieved.
14564 
14565                                                                  When clear, LMC disengages the PHY bit deskew lock control mechanism. This
14566                                                                  causes the PHY to continuously perform and/or adjust the read deskew training on
14567                                                                  all DQ/DBI bits during any read operations. */
14568         uint64_t double_vref_training  : 1;  /**< [ 62: 62](R/W) Vref longer training.
14569                                                                  0 = Normal training period.
14570                                                                  1 = Double training period. */
14571         uint64_t rx_vref_sel           : 1;  /**< [ 63: 63](R/W) Run Vref training twice, sampling from DQ3 and DQ4 sequentially, then use the
14572                                                                  average of the two as the final Vref training result. */
14573 #endif /* Word 0 - End */
14574     } cn9;
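    /*
     * Illustrative sketch of the [DM_DISABLE] restriction documented above: data-mask
     * disable must not be combined with write DBI (LMC()_MODEREG_PARAMS3[WR_DBI] = 1).
     * The BDK_CSR_READ()/BDK_CSR_WRITE() helpers, the BDK_LMCX_MODEREG_PARAMS3()
     * macro and the wr_dbi field path are assumptions based on the usual BDK naming;
     * node and lmc are placeholder variables.
     *
     *   bdk_lmcx_modereg_params3_t params3;
     *   params3.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS3(lmc));
     *
     *   if (!params3.s.wr_dbi) {               // DM_DISABLE is not allowed with write DBI on
     *       bdk_lmcx_phy_ctl_t phy_ctl;
     *       phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(lmc));
     *       phy_ctl.cn9.dm_disable = 1;        // LMC drives a constant one on the DQS pins
     *       BDK_CSR_WRITE(node, BDK_LMCX_PHY_CTL(lmc), phy_ctl.u);
     *   }
     */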
14575     struct bdk_lmcx_phy_ctl_cn81xx
14576     {
14577 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
14578         uint64_t reserved_62_63        : 2;
14579         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](RO) Reserved. */
14580         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](R/W) Reserved.
14581                                                                  Internal:
14582                                                                  When set, LMC prevents PHY from loading the deskew shift
14583                                                                  registers with its internal settings. When Read Deskew sequence is kicked off
14584                                                                  by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
14585                                                                  whether or not to load the shift register with PHY's internal settings before
14586                                                                  the shifting process. */
14587         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](R/W) Reserved.
14588                                                                  Internal:
14589                                                                  When set high, PHY selects all of the preloaded data
14590                                                                  when configuring the read deskew settings. */
14591         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](R/W) Reserved.
14592                                                                  Internal:
14593                                                                  When set high along with [DSK_DBG_RD_START], LMC kicks off the Deskew
14594                                                                  Overwrite sequence to shift out a 10-bit setting for a single DQ.
14595                                                                  Note that there are a total of 9 bytes and the chain structure is split into two
14596                                                                  halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
14597                                                                  ECC,3,2,1,0.
14598                                                                  Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
14599                                                                  DQ has a 10-bit deskew setting. */
14600         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
14601                                                                  Internal:
14602                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
14603                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is looped
14604                                                                  back out through odd DQ at the same rate.
14605                                                                  When de-asserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
14606                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
14607         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
14608                                                                  Internal:
14609                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
14610                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
14611                                                                  without being flop'd by DQS. Ensure that LMC()_PHY_CTL[PHY_DSK_BYP] is set and
14612                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
14613         uint64_t dm_disable            : 1;  /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
14614                                                                  the
14615                                                                  DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in DDR3
14616                                                                  and drives a constant 1 in DDR4.
14617                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
14618                                                                  on
14619                                                                  (MODEREG_PARAMS3[WR_DBI]=1). */
14620         uint64_t c1_sel                : 2;  /**< [ 54: 53](R/W) Reserved.
14621                                                                  Internal:
14622                                                                  0x0 = C1 is not routed to any output pin.
14623                                                                  0x1 = C1 is routed to CS3.
14624                                                                  0x2 = C1 is routed to A17 address pin.
14625                                                                  0x3 = C1 is not routed to any output pin.
14626 
14627                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
14628         uint64_t c0_sel                : 2;  /**< [ 52: 51](R/W) Reserved.
14629                                                                  Internal:
14630                                                                  0x0 = C0 is not routed to any output pin.
14631                                                                  0x1 = C0 is routed to CS2.
14632                                                                  0x2 = C0 is routed to TEN output pin.
14633                                                                  0x3 = C0 is not routed to any output pin.
14634 
14635                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
14636         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
14637                                                                  Internal:
14638                                                                  Write one to reset the PHY. This is a one-shot operation and will automatically
14639                                                                  clear to zero. */
14640         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
14641                                                                  Internal:
14642                                                                  Indicates completion of a read operation. Clears to zero when a read
14643                                                                  operation is started, then sets to one when the operation is complete. */
14644         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
14645                                                                  Internal:
14646                                                                  Data from a deskew read operation. Only valid when the
14647                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
14648         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
14649                                                                  Internal:
14650                                                                  Write one to start a deskew data read operation; the bit automatically clears
14651                                                                  to zero. Writing one also clears the complete bit. */
14652         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
14653                                                                  Internal:
14654                                                                  Adjust clock toggle rate for reading deskew debug information:
14655                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
14656                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
14657                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
14658                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
14659         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
14660                                                                  Internal:
14661                                                                  Offset to change delay of deskew debug data return time to LMC from
14662                                                                  DDR PHY. */
14663         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
14664                                                                  Internal:
14665                                                                  Deskew debug, select number of bits per byte lane.
14666                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
14667                                                                  1 = 10 bits per byte lane, including DBI and DAC. This field must be set to 1 on CN81XX. */
14668         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
14669                                                                  Internal:
14670                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
14671                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
14672         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
14673                                                                  Internal:
14674                                                                  Deskew debug bit select for dsk read operation.
14675                                                                  0x0 = DQ0.
14676                                                                  0x1 = DQ1.
14677                                                                  0x2 = DQ2.
14678                                                                  0x3 = DQ3.
14679                                                                  0x4 = DAC.
14680                                                                  0x5 = DBI.
14681                                                                  0x6 = DQ4.
14682                                                                  0x7 = DQ5.
14683                                                                  0x8 = DQ6.
14684                                                                  0x9 = DQ7. */
14685         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
14686         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
14687         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
14688                                                                  Internal:
14689                                                                  Voltage reference pin enabled. */
14690         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
14691                                                                  Internal:
14692                                                                  PHY DAC on. */
14693         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
14694                                                                  Internal:
14695                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
14696                                                                  when loopback is enabled. */
14697         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
14698                                                                  Internal:
14699                                                                  PHY loopback enable. */
14700         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
14701                                                                  training sequence is in the idle state. */
14702         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
14703         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
14704         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
14705         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
14706                                                                  Internal:
14707                                                                  Set to force read_enable to PHY active all the time.
14708                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
14709                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
14710         uint64_t lv_mode               : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
14711                                                                  Internal:
14712                                                                  Low Voltage Mode (1.35V.) */
14713         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
14714                                                                  Internal:
14715                                                                  Clock tune. */
14716         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
14717                                                                  Internal:
14718                                                                  Clock delay out. */
14719         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
14720                                                                  Internal:
14721                                                                  Clock tune. */
14722         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
14723                                                                  Internal:
14724                                                                  Clock delay out. */
14725         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
14726                                                                  Internal:
14727                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
14728                                                                  bits
14729                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
14730                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
14731                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
14732                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
14733                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
14734                                                                  loop-backed out after being flop'd by incoming DQS. */
14735         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
14736                                                                  Internal:
14737                                                                  Loopback pos mode. This works in conjunction with
14738                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
14739         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
14740                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
14741                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
14742                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
14743                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
14744                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
14745 #else /* Word 0 - Little Endian */
14746         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
14747                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
14748                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
14749                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
14750                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
14751                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
14752         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
14753                                                                  Internal:
14754                                                                  Loopback pos mode. This works in conjunction with
14755                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
14756         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
14757                                                                  Internal:
14758                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
14759                                                                  bits
14760                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
14761                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
14762                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
14763                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
14764                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
14765                                                                  loop-backed out after being flop'd by incoming DQS. */
14766         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
14767                                                                  Internal:
14768                                                                  Clock delay out. */
14769         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
14770                                                                  Internal:
14771                                                                  Clock tune. */
14772         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
14773                                                                  Internal:
14774                                                                  Clock delay out. */
14775         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
14776                                                                  Internal:
14777                                                                  Clock tune. */
14778         uint64_t lv_mode               : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
14779                                                                  Internal:
14780                                                                  Low Voltage Mode (1.35V.) */
14781         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
14782                                                                  Internal:
14783                                                                  Set to force read_enable to PHY active all the time.
14784                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
14785                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
14786         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
14787         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
14788         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
14789         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
14790                                                                  training sequence is in the idle state. */
14791         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
14792                                                                  Internal:
14793                                                                  PHY loopback enable. */
14794         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
14795                                                                  Internal:
14796                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
14797                                                                  when loopback is enabled. */
14798         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
14799                                                                  Internal:
14800                                                                  PHY DAC on. */
14801         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
14802                                                                  Internal:
14803                                                                  Voltage reference pin enabled. */
14804         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
14805         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
14806         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
14807                                                                  Internal:
14808                                                                  Deskew debug bit select for dsk read operation.
14809                                                                  0x0 = DQ0.
14810                                                                  0x1 = DQ1.
14811                                                                  0x2 = DQ2.
14812                                                                  0x3 = DQ3.
14813                                                                  0x4 = DAC.
14814                                                                  0x5 = DBI.
14815                                                                  0x6 = DQ4.
14816                                                                  0x7 = DQ5.
14817                                                                  0x8 = DQ6.
14818                                                                  0x9 = DQ7. */
14819         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
14820                                                                  Internal:
14821                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
14822                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
14823         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
14824                                                                  Internal:
14825                                                                  Deskew debug, select number of bits per byte lane.
14826                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
14827                                                                  1 = 10 bits per byte lane, including DBI and DAC. This field must be set to 1 on CN81XX. */
14828         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
14829                                                                  Internal:
14830                                                                  Offset to change delay of deskew debug data return time to LMC from
14831                                                                  DDR PHY. */
14832         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
14833                                                                  Internal:
14834                                                                  Adjust clock toggle rate for reading deskew debug information:
14835                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
14836                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
14837                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
14838                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
14839         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
14840                                                                  Internal:
14841                                                                  Write one to start a deskew data read operation; the bit automatically clears
14842                                                                  to zero. Writing one also clears the complete bit. */
14843         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
14844                                                                  Internal:
14845                                                                  Data from a deskew read operation. Only valid when the
14846                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
14847         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
14848                                                                  Internal:
14849                                                                  Indicates completion of a read operation. Clears to zero when a read
14850                                                                  operation is started, then sets to one when the operation is complete. */
14851         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
14852                                                                  Internal:
14853                                                                  Write one to reset the PHY. This is a one-shot operation and will automatically
14854                                                                  clear to zero. */
14855         uint64_t c0_sel                : 2;  /**< [ 52: 51](R/W) Reserved.
14856                                                                  Internal:
14857                                                                  0x0 = C0 is not routed to any output pin.
14858                                                                  0x1 = C0 is routed to CS2.
14859                                                                  0x2 = C0 is routed to TEN output pin.
14860                                                                  0x3 = C0 is not routed to any output pin.
14861 
14862                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
14863         uint64_t c1_sel                : 2;  /**< [ 54: 53](R/W) Reserved.
14864                                                                  Internal:
14865                                                                  0x0 = C1 is not routed to any output pin.
14866                                                                  0x1 = C1 is routed to CS3.
14867                                                                  0x2 = C1 is routed to A17 address pin.
14868                                                                  0x3 = C1 is not routed to any output pin.
14869 
14870                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
14871         uint64_t dm_disable            : 1;  /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
14872                                                                  the
14873                                                                  DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in DDR3
14874                                                                  and drives a constant 1 in DDR4.
14875                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
14876                                                                  on
14877                                                                  (MODEREG_PARAMS3[WR_DBI]=1). */
14878         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
14879                                                                  Internal:
14880                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
14881                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
14882                                                                  without being flop'd by DQS. Ensure that LMC()_PHY_CTL[PHY_DSK_BYP] is set and
14883                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
14884         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
14885                                                                  Internal:
14886                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
14887                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is looped
14888                                                                  back out through odd DQ at the same rate.
14889                                                                  When de-asserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
14890                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
14891         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](R/W) Reserved.
14892                                                                  Internal:
14893                                                                  When set high along with [DSK_DBG_RD_START], LMC kicks off the Deskew
14894                                                                  Overwrite sequence to shift out a 10-bit setting for a single DQ.
14895                                                                  Note that there are a total of 9 bytes and the chain structure is split into two
14896                                                                  halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
14897                                                                  ECC,3,2,1,0.
14898                                                                  Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
14899                                                                  DQ has a 10-bit deskew setting. */
14900         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](R/W) Reserved.
14901                                                                  Internal:
14902                                                                  When set high, PHY selects all of the preloaded data
14903                                                                  when configuring the read deskew settings. */
14904         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](R/W) Reserved.
14905                                                                  Internal:
14906                                                                  When set, LMC prevents PHY from loading the deskew shift
14907                                                                  registers with its internal settings. When Read Deskew sequence is kicked off
14908                                                                  by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
14909                                                                  whether or not to load the shift register with PHY's internal settings before
14910                                                                  the shifting process. */
14911         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](RO) Reserved. */
14912         uint64_t reserved_62_63        : 2;
14913 #endif /* Word 0 - End */
14914     } cn81xx;
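
    /* Usage sketch (illustrative only): the deskew debug read sequence implied
     * by the CN81XX DSK_DBG_* fields above -- select a byte lane and bit, pulse
     * [DSK_DBG_RD_START], poll [DSK_DBG_RD_COMPLETE], then sample
     * [DSK_DBG_RD_DATA].  The accessors lmc_rd64()/lmc_wr64() and the variable
     * lmc are hypothetical placeholders for the caller's 64-bit CSR helpers and
     * LMC index; bdk_lmcx_phy_ctl_t and BDK_LMCX_PHY_CTL() come from this header.
     *
     *   bdk_lmcx_phy_ctl_t phy_ctl;
     *   phy_ctl.u = lmc_rd64(lmc, BDK_LMCX_PHY_CTL(lmc));
     *   phy_ctl.cn81xx.dsk_dbg_num_bits_sel = 1; // 10 bits per lane (required on CN81XX)
     *   phy_ctl.cn81xx.dsk_dbg_byte_sel = 4;     // byte lane select: 4 = ECC lane
     *   phy_ctl.cn81xx.dsk_dbg_bit_sel = 0x5;    // bit select: 0x5 = DBI
     *   lmc_wr64(lmc, BDK_LMCX_PHY_CTL(lmc), phy_ctl.u);
     *
     *   phy_ctl.cn81xx.dsk_dbg_rd_start = 1;     // one-shot; also clears RD_COMPLETE
     *   lmc_wr64(lmc, BDK_LMCX_PHY_CTL(lmc), phy_ctl.u);
     *   do {                                     // poll until the read completes
     *       phy_ctl.u = lmc_rd64(lmc, BDK_LMCX_PHY_CTL(lmc));
     *   } while (!phy_ctl.cn81xx.dsk_dbg_rd_complete);
     *
     *   uint64_t deskew = phy_ctl.cn81xx.dsk_dbg_rd_data; // 10-bit deskew setting
     */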
14915     struct bdk_lmcx_phy_ctl_cn83xx
14916     {
14917 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
14918         uint64_t reserved_62_63        : 2;
14919         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](R/W) When set, the PHY attempts to lock all DQ/DBI bit deskew settings once alignment is
14920                                                                  achieved.
14921 
14922                                                                  When clear, LMC disengages the PHY bit deskew lock control mechanism. This
14923                                                                  causes the PHY to continuously perform and/or adjust the read deskew training on
14924                                                                  all DQ/DBI bits during any read operations. */
14925         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](R/W) Reserved.
14926                                                                  Internal:
14927                                                                  When set, LMC prevents PHY from loading the deskew shift
14928                                                                  registers with its internal settings. When Read Deskew sequence is kicked off
14929                                                                  by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
14930                                                                  whether or not to load the shift register with PHY's internal settings before
14931                                                                  the shifting process. */
14932         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](R/W) Reserved.
14933                                                                  Internal:
14934                                                                  When set high, PHY selects all of the preloaded data
14935                                                                  when configuring the read deskew settings. */
14936         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](R/W) Reserved.
14937                                                                  Internal:
14938                                                                  When set high along with [DSK_DBG_RD_START], LMC kicks off the Deskew
14939                                                                  Overwrite sequence to shift out a 10-bit setting for a single DQ.
14940                                                                  Note that there are a total of 9 bytes and the chain structure is split into two
14941                                                                  halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
14942                                                                  ECC,3,2,1,0.
14943                                                                  Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
14944                                                                  DQ has a 10-bit deskew setting. */
14945         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
14946                                                                  Internal:
14947                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
14948                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is looped
14949                                                                  back out through odd DQ at the same rate.
14950                                                                  When de-asserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
14951                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
14952         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
14953                                                                  Internal:
14954                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
14955                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
14956                                                                  without being flop'd by DQS. Ensure that LMC()_PHY_CTL[PHY_DSK_BYP] is set and
14957                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
14958         uint64_t dm_disable            : 1;  /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
14959                                                                  the
14960                                                                  DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in DDR3
14961                                                                  and drives a constant 1 in DDR4.
14962                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
14963                                                                  on
14964                                                                  (MODEREG_PARAMS3[WR_DBI]=1). */
14965         uint64_t c1_sel                : 2;  /**< [ 54: 53](R/W) Reserved.
14966                                                                  Internal:
14967                                                                  0x0 = C1 is not routed to any output pin.
14968                                                                  0x1 = C1 is routed to CS3.
14969                                                                  0x2 = C1 is routed to A17 address pin.
14970                                                                  0x3 = C1 is not routed to any output pin.
14971 
14972                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
14973         uint64_t c0_sel                : 2;  /**< [ 52: 51](R/W) Reserved.
14974                                                                  Internal:
14975                                                                  0x0 = C0 is not routed to any output pin.
14976                                                                  0x1 = C0 is routed to CS2.
14977                                                                  0x2 = C0 is routed to TEN output pin.
14978                                                                  0x3 = C0 is not routed to any output pin.
14979 
14980                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
14981         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
14982                                                                  Internal:
14983                                                                  Write one to reset the PHY. This is a one-shot operation and will automatically
14984                                                                  clear to zero. */
14985         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
14986                                                                  Internal:
14987                                                                  Indicates completion of a read operation. Clears to zero when a read
14988                                                                  operation is started, then sets to one when the operation is complete. */
14989         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
14990                                                                  Internal:
14991                                                                  Data from a deskew read operation. Only valid when the
14992                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
14993         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
14994                                                                  Internal:
14995                                                                  Write one to start a deskew data read operation; the bit automatically clears
14996                                                                  to zero. Writing one also clears the complete bit. */
14997         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
14998                                                                  Internal:
14999                                                                  Adjust clock toggle rate for reading deskew debug information:
15000                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
15001                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
15002                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
15003                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
15004         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
15005                                                                  Internal:
15006                                                                  Offset to change delay of deskew debug data return time to LMC from
15007                                                                  DDR PHY. */
15008         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
15009                                                                  Internal:
15010                                                                  Deskew debug, select number of bits per byte lane.
15011                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
15012                                                                  1 = 10 bits per byte lane, including DBI and DAC. This field must be set to 1 on CN83XX. */
15013         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
15014                                                                  Internal:
15015                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
15016                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
15017         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
15018                                                                  Internal:
15019                                                                  Deskew debug bit select for dsk read operation.
15020                                                                  0x0 = DQ0.
15021                                                                  0x1 = DQ1.
15022                                                                  0x2 = DQ2.
15023                                                                  0x3 = DQ3.
15024                                                                  0x4 = DAC.
15025                                                                  0x5 = DBI.
15026                                                                  0x6 = DQ4.
15027                                                                  0x7 = DQ5.
15028                                                                  0x8 = DQ6.
15029                                                                  0x9 = DQ7. */
15030         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
15031         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
15032         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
15033                                                                  Internal:
15034                                                                  Voltage reference pin enabled. */
15035         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
15036                                                                  Internal:
15037                                                                  PHY DAC on. */
15038         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
15039                                                                  Internal:
15040                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
15041                                                                  when loopback is enabled. */
15042         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
15043                                                                  Internal:
15044                                                                  PHY loopback enable. */
15045         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
15046                                                                  training sequence is in the idle state. */
15047         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
15048         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
15049         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
15050         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
15051                                                                  Internal:
15052                                                                  Set to force read_enable to PHY active all the time.
15053                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
15054                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
15055         uint64_t lv_mode               : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
15056                                                                  Internal:
15057                                                                  Low Voltage Mode (1.35V.) */
15058         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
15059                                                                  Internal:
15060                                                                  Clock tune. */
15061         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
15062                                                                  Internal:
15063                                                                  Clock delay out. */
15064         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
15065                                                                  Internal:
15066                                                                  Clock tune. */
15067         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
15068                                                                  Internal:
15069                                                                  Clock delay out. */
15070         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
15071                                                                  Internal:
15072                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
15073                                                                  bits
15074                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
15075                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
15076                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
15077                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
15078                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
15079                                                                  loop-backed out after being flop'd by incoming DQS. */
15080         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
15081                                                                  Internal:
15082                                                                  Loopback pos mode. This works in conjunction with
15083                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
15084         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
15085                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
15086                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
15087                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
15088                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
15089                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
15090 #else /* Word 0 - Little Endian */
15091         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
15092                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
15093                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
15094                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
15095                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
15096                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
15097         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
15098                                                                  Internal:
15099                                                                  Loopback pos mode. This works in conjunction with
15100                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
15101         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
15102                                                                  Internal:
15103                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
15104                                                                  bits
15105                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
15106                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
15107                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
15108                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
15109                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
15110                                                                  loop-backed out after being flop'd by incoming DQS. */
15111         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
15112                                                                  Internal:
15113                                                                  Clock delay out. */
15114         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
15115                                                                  Internal:
15116                                                                  Clock tune. */
15117         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
15118                                                                  Internal:
15119                                                                  Clock delay out. */
15120         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
15121                                                                  Internal:
15122                                                                  Clock tune. */
15123         uint64_t lv_mode               : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
15124                                                                  Internal:
15125                                                                  Low Voltage Mode (1.35V.) */
15126         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
15127                                                                  Internal:
15128                                                                  Set to force read_enable to PHY active all the time.
15129                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
15130                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
15131         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
15132         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
15133         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
15134         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
15135                                                                  training sequence is in the idle state. */
15136         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
15137                                                                  Internal:
15138                                                                  PHY loopback enable. */
15139         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
15140                                                                  Internal:
15141                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
15142                                                                  when loopback is enabled. */
15143         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
15144                                                                  Internal:
15145                                                                  PHY DAC on. */
15146         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
15147                                                                  Internal:
15148                                                                  Voltage reference pin enabled. */
15149         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
15150         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
15151         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
15152                                                                  Internal:
15153                                                                  Deskew debug bit select for dsk read operation.
15154                                                                  0x0 = DQ0.
15155                                                                  0x1 = DQ1.
15156                                                                  0x2 = DQ2.
15157                                                                  0x3 = DQ3.
15158                                                                  0x4 = DAC.
15159                                                                  0x5 = DBI.
15160                                                                  0x6 = DQ4.
15161                                                                  0x7 = DQ5.
15162                                                                  0x8 = DQ6.
15163                                                                  0x9 = DQ7. */
15164         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
15165                                                                  Internal:
15166                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
15167                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
15168         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
15169                                                                  Internal:
15170                                                                  Deskew debug, select number of bits per byte lane.
15171                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
15172                                                                  1 = 10 bits per byte lane, including DBI and DAC. CN83XX needs to be set to this value. */
15173         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
15174                                                                  Internal:
15175                                                                  Offset to change delay of deskew debug data return time to LMC from
15176                                                                  DDR PHY. */
15177         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
15178                                                                  Internal:
15179                                                                  Adjust clock toggle rate for reading deskew debug information:
15180                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
15181                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
15182                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
15183                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
15184         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
15185                                                                  Internal:
15186                                                                  Write one to start a deskew data read operation; the bit automatically clears
15187                                                                  to zero. Writing one also clears the complete bit. */
15188         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
15189                                                                  Internal:
15190                                                                  Data from a deskew read operation. Only valid when the
15191                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
15192         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
15193                                                                  Internal:
15194                                                                  Indicates completion of a read operation. Clears to zero when a read
15195                                                                  operation is started, then sets to one when the operation is complete. */
15196         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
15197                                                                  Internal:
15198                                                                  Write one to reset the PHY. This is a one-shot operation; the bit automatically
15199                                                                  clears to zero. */
15200         uint64_t c0_sel                : 2;  /**< [ 52: 51](R/W) Reserved.
15201                                                                  Internal:
15202                                                                  0x0 = C0 is not routed to any output pin.
15203                                                                  0x1 = C0 is routed to CS2.
15204                                                                  0x2 = C0 is routed to TEN output pin.
15205                                                                  0x3 = C0 is not routed to any output pin.
15206 
15207                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
15208         uint64_t c1_sel                : 2;  /**< [ 54: 53](R/W) Reserved.
15209                                                                  Internal:
15210                                                                  0x0 = C1 is not routed to any output pin.
15211                                                                  0x1 = C1 is routed to CS3.
15212                                                                  0x2 = C1 is routed to A17 address pin.
15213                                                                  0x3 = C1 is not routed to any output pin.
15214 
15215                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
15216         uint64_t dm_disable            : 1;  /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
15217                                                                  the
15218                                                                  DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in DDR3
15219                                                                  and drives a constant 1 in DDR4.
15220                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
15221                                                                  on
15222                                                                  (MODEREG_PARAMS3[WR_DBI]=1). */
15223         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
15224                                                                  Internal:
15225                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
15226                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
15227                                                                  without being flop'd by DQS. Need to make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
15228                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
15229         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
15230                                                                  Internal:
15231                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
15232                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is loop
15233                                                                  backed out through odd DQ at the same rate.
15234                                                                  When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
15235                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
15236         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](R/W) Reserved.
15237                                                                  Internal:
15238                                                                  When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
15239                                                                  Overwrite sequence to shift out a 10-bit setting for a single DQ.
15240                                                                  Note that there are a total of 9 bytes and the chain structure is split into two
15241                                                                  halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
15242                                                                  ECC,3,2,1,0.
15243                                                                  Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
15244                                                                  DQ has a 10-bit deskew setting. */
15245         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](R/W) Reserved.
15246                                                                  Internal:
15247                                                                  When set high, PHY selects all of the preloaded data
15248                                                                  when configuring the read deskew settings. */
15249         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](R/W) Reserved.
15250                                                                  Internal:
15251                                                                  When set, LMC prevents PHY from loading the deskew shift
15252                                                                  registers with its internal settings. When the Read Deskew sequence is kicked off
15253                                                                  by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
15254                                                                  whether or not to load the shift register with the PHY's internal settings before
15255                                                                  the shifting process. */
15256         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](R/W) When set, the PHY attempts to lock all DQ/DBI bit deskew settings once alignment is
15257                                                                  achieved.
15258 
15259                                                                  When clear, LMC disengages the PHY bit deskew lock control mechanism. This
15260                                                                  causes the PHY to continuously perform and/or adjust the read deskew training on
15261                                                                  all DQ/DBI bits during any read operations. */
15262         uint64_t reserved_62_63        : 2;
15263 #endif /* Word 0 - End */
15264     } cn83xx;
15265     struct bdk_lmcx_phy_ctl_cn88xxp2
15266     {
15267 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
15268         uint64_t reserved_62_63        : 2;
15269         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](RO) Reserved. */
15270         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](R/W) Reserved.
15271                                                                  Internal:
15272                                                                  When set, LMC prevents PHY from loading the deskew shift
15273                                                                  registers with its internal settings. When the Read Deskew sequence is kicked off
15274                                                                  by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
15275                                                                  whether or not to load the shift register with the PHY's internal settings before
15276                                                                  the shifting process. */
15277         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](R/W) Reserved.
15278                                                                  Internal:
15279                                                                  When set high, PHY selects all of the preloaded data
15280                                                                  when configuring the read deskew settings. */
15281         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](R/W) Reserved.
15282                                                                  Internal:
15283                                                                  When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
15284                                                                  Overwrite sequence to shift out a 10-bit setting for a single DQ.
15285                                                                  Note that there are a total of 9 bytes and the chain structure is split into two
15286                                                                  halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
15287                                                                  ECC,3,2,1,0.
15288                                                                  Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
15289                                                                  DQ has a 10-bit deskew setting. */
15290         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
15291                                                                  Internal:
15292                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
15293                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is loop
15294                                                                  backed out through odd DQ at the same rate.
15295                                                                  When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
15296                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
15297         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
15298                                                                  Internal:
15299                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
15300                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
15301                                                                  without being flop'd by DQS. Need to make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
15302                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
15303         uint64_t dm_disable            : 1;  /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
15304                                                                  the
15305                                                                  DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in DDR3
15306                                                                  and drives a constant 1 in DDR4.
15307                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
15308                                                                  on
15309                                                                  (MODEREG_PARAMS3[WR_DBI]=1). */
15310         uint64_t c1_sel                : 2;  /**< [ 54: 53](R/W) Reserved.
15311                                                                  Internal:
15312                                                                  0x0 = C1 is not routed to any output pin.
15313                                                                  0x1 = C1 is routed to CS3.
15314                                                                  0x2 = C1 is routed to A17 address pin.
15315                                                                  0x3 = C1 is not routed to any output pin.
15316 
15317                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
15318         uint64_t c0_sel                : 2;  /**< [ 52: 51](R/W) Reserved.
15319                                                                  Internal:
15320                                                                  0x0 = C0 is not routed to any output pin.
15321                                                                  0x1 = C0 is routed to CS2.
15322                                                                  0x2 = C0 is routed to TEN output pin.
15323                                                                  0x3 = C0 is not routed to any output pin.
15324 
15325                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
15326         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
15327                                                                  Internal:
15328                                                                  Write one to reset the PHY. This is a one-shot operation; the bit automatically
15329                                                                  clears to zero. */
15330         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
15331                                                                  Internal:
15332                                                                  Indicates completion of a read operation. Clears to zero when a read
15333                                                                  operation is started, then sets to one when the operation is complete. */
15334         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
15335                                                                  Internal:
15336                                                                  Data from a deskew read operation. Only valid when the
15337                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
15338         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
15339                                                                  Internal:
15340                                                                  Write one to start a deskew data read operation; the bit automatically clears
15341                                                                  to zero. Writing one also clears the complete bit. */
15342         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
15343                                                                  Internal:
15344                                                                  Adjust clock toggle rate for reading deskew debug information:
15345                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
15346                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
15347                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
15348                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
15349         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
15350                                                                  Internal:
15351                                                                  Offset to change delay of deskew debug data return time to LMC from
15352                                                                  DDR PHY. */
15353         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
15354                                                                  Internal:
15355                                                                  Deskew debug, select number of bits per byte lane.
15356                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
15357                                                                  1 = 10 bits per byte lane, including DBI and DAC. CN88XX needs to be set to this value. */
15358         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
15359                                                                  Internal:
15360                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
15361                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
15362         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
15363                                                                  Internal:
15364                                                                  Deskew debug bit select for dsk read operation.
15365                                                                  0x0 = DQ0.
15366                                                                  0x1 = DQ1.
15367                                                                  0x2 = DQ2.
15368                                                                  0x3 = DQ3.
15369                                                                  0x4 = DAC.
15370                                                                  0x5 = DBI.
15371                                                                  0x6 = DQ4.
15372                                                                  0x7 = DQ5.
15373                                                                  0x8 = DQ6.
15374                                                                  0x9 = DQ7. */
15375         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
15376         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
15377         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
15378                                                                  Internal:
15379                                                                  Voltage reference pin enabled. */
15380         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
15381                                                                  Internal:
15382                                                                  PHY DAC on. */
15383         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
15384                                                                  Internal:
15385                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
15386                                                                  when loopback is enabled. */
15387         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
15388                                                                  Internal:
15389                                                                  PHY loopback enable. */
15390         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
15391                                                                  training sequence is in the idle state. */
15392         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
15393         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
15394         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
15395         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
15396                                                                  Internal:
15397                                                                  Set to force read_enable to PHY active all the time.
15398                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
15399                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
15400         uint64_t lv_mode               : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
15401                                                                  Internal:
15402                                                                  Low Voltage Mode (1.35V.) */
15403         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
15404                                                                  Internal:
15405                                                                  Clock tune. */
15406         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
15407                                                                  Internal:
15408                                                                  Clock delay out. */
15409         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
15410                                                                  Internal:
15411                                                                  Clock tune. */
15412         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
15413                                                                  Internal:
15414                                                                  Clock delay out. */
15415         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
15416                                                                  Internal:
15417                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
15418                                                                  bits
15419                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
15420                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
15421                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
15422                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
15423                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
15424                                                                  loop-backed out after being flop'd by incoming DQS. */
15425         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
15426                                                                  Internal:
15427                                                                  Loopback pos mode. This works in conjunction with
15428                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
15429         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
15430                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
15431                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
15432                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
15433                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
15434                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
15435 #else /* Word 0 - Little Endian */
15436         uint64_t ts_stagger            : 1;  /**< [  0:  0](R/W) TS stagger mode. This mode configures output drivers with two-stage drive strength to
15437                                                                  avoid undershoot issues on the bus when strong drivers are suddenly turned on. When this
15438                                                                  mode is asserted, CNXXXX will configure output drivers to be weak drivers (60 ohm output
15439                                                                  impedance) at the first CK cycle, and change drivers to the designated drive strengths
15440                                                                  specified in LMC()_COMP_CTL2[CMD_CTL], LMC()_COMP_CTL2[CK_CTL],
15441                                                                  LMC()_COMP_CTL2[DQX_CTL] starting at the following cycle. */
15442         uint64_t loopback_pos          : 1;  /**< [  1:  1](R/W) Reserved; must be zero.
15443                                                                  Internal:
15444                                                                  Loopback pos mode. This works in conjunction with
15445                                                                  LMC()_PHY_CTL[LOOPBACK] mentioned above. */
15446         uint64_t loopback              : 1;  /**< [  2:  2](R/W) Reserved; must be zero.
15447                                                                  External loopback enable. When asserted, Rx is on at DQS0 and data at even DQ
15448                                                                  external loopback enable. when asserted, Rx is on at DQS0 and data at even DQ
15449                                                                  bits
15450                                                                  are loop-backed out through odd DQ bits. For DQS, when LMC()_PHY_CTL[PHY_DSK_BYP] and
15451                                                                  LMC()_CONFIG[MODE_X4DEV] are asserted along with LOOPBACK, DQS0 input of a given byte
15452                                                                  can be loop-backed out through DQS1 of the same byte. For DQ, when
15453                                                                  LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is asserted, DQ bits are loop-backed out without being
15454                                                                  flop'd by incoming DQS. When LMC()_PHY_CTL[DQ_SHALLOW_LOOPBACK] is deasserted, DQ bits are
15455                                                                  loop-backed out after being flop'd by incoming DQS. */
15456         uint64_t ck_dlyout0            : 4;  /**< [  6:  3](R/W) Reserved; must be zero.
15457                                                                  Internal:
15458                                                                  Clock delay out. */
15459         uint64_t ck_tune0              : 1;  /**< [  7:  7](R/W) Reserved; must be zero.
15460                                                                  Internal:
15461                                                                  Clock tune. */
15462         uint64_t ck_dlyout1            : 4;  /**< [ 11:  8](R/W) Reserved; must be zero.
15463                                                                  Internal:
15464                                                                  Clock delay out. */
15465         uint64_t ck_tune1              : 1;  /**< [ 12: 12](R/W) Reserved; must be zero.
15466                                                                  Internal:
15467                                                                  Clock tune. */
15468         uint64_t lv_mode               : 1;  /**< [ 13: 13](R/W) Reserved; must be zero.
15469                                                                  Internal:
15470                                                                  Low Voltage Mode (1.35V.) */
15471         uint64_t rx_always_on          : 1;  /**< [ 14: 14](R/W) Reserved; must be zero.
15472                                                                  Internal:
15473                                                                  Set to force read_enable to PHY active all the time.
15474                                                                  This bit MUST not be set when LMC initialization is in progress. Internal VREF and
15475                                                                  Deskew training requires normal operation on the dqx/s read_enable signals. */
15476         uint64_t ten                   : 1;  /**< [ 15: 15](R/W) DDR PHY test enable pin. */
15477         uint64_t phy_pwr_save_disable  : 1;  /**< [ 16: 16](R/W) DDR PHY power save disable. */
15478         uint64_t phy_dsk_byp           : 1;  /**< [ 17: 17](R/W) PHY deskew bypass. */
15479         uint64_t phy_dsk_reset         : 1;  /**< [ 18: 18](R/W) PHY deskew reset. When set, the deskew reset signal goes active if the Vrefint/deskew
15480                                                                  training sequence is in the idle state. */
15481         uint64_t int_phy_loopback_ena  : 1;  /**< [ 19: 19](R/W) Reserved.
15482                                                                  Internal:
15483                                                                  PHY loopback enable. */
15484         uint64_t int_pad_loopback_ena  : 1;  /**< [ 20: 20](R/W) Reserved.
15485                                                                  Internal:
15486                                                                  DDR pad loopback enable.  Also must set LMC()_PHY_CTL[PHY_DSK_BYP]
15487                                                                  when loopback is enabled. */
15488         uint64_t dac_on                : 1;  /**< [ 21: 21](R/W) Reserved.
15489                                                                  Internal:
15490                                                                  PHY DAC on. */
15491         uint64_t ref_pin_on            : 1;  /**< [ 22: 22](R/W) Reserved.
15492                                                                  Internal:
15493                                                                  Voltage reference pin enabled. */
15494         uint64_t ddr_error_n_ena       : 1;  /**< [ 23: 23](R/W) Enable error_alert_n signal for PHY. */
15495         uint64_t dbi_mode_ena          : 1;  /**< [ 24: 24](R/W) Enable DBI mode for PHY. */
15496         uint64_t dsk_dbg_bit_sel       : 4;  /**< [ 28: 25](R/W) Reserved.
15497                                                                  Internal:
15498                                                                  Deskew debug bit select for dsk read operation.
15499                                                                  0x0 = DQ0.
15500                                                                  0x1 = DQ1.
15501                                                                  0x2 = DQ2.
15502                                                                  0x3 = DQ3.
15503                                                                  0x4 = DAC.
15504                                                                  0x5 = DBI.
15505                                                                  0x6 = DQ4.
15506                                                                  0x7 = DQ5.
15507                                                                  0x8 = DQ6.
15508                                                                  0x9 = DQ7. */
15509         uint64_t dsk_dbg_byte_sel      : 4;  /**< [ 32: 29](R/W) Reserved.
15510                                                                  Internal:
15511                                                                  Deskew debug byte select for read operation. Values 0-3 correspond to
15512                                                                  byte lanes 0-3, 4 is for ECC, 5-8 are byte lanes 4-7. */
15513         uint64_t dsk_dbg_num_bits_sel  : 1;  /**< [ 33: 33](R/W) Reserved.
15514                                                                  Internal:
15515                                                                  Deskew debug, select number of bits per byte lane.
15516                                                                  0 = 8 bits per byte lane, no DBI, no DAC debug.
15517                                                                  1 = 10 bits per byte lane, including DBI and DAC. CN88XX needs to be set to this value. */
15518         uint64_t dsk_dbg_offset        : 2;  /**< [ 35: 34](R/W) Reserved.
15519                                                                  Internal:
15520                                                                  Offset to change delay of deskew debug data return time to LMC from
15521                                                                  DDR PHY. */
15522         uint64_t dsk_dbg_clk_scaler    : 2;  /**< [ 37: 36](R/W) Reserved.
15523                                                                  Internal:
15524                                                                  Adjust clock toggle rate for reading deskew debug information:
15525                                                                  0x0 = Deskew read clock toggles every 4 DCLKs.
15526                                                                  0x1 = Deskew read clock toggles every 8 DCLKs.
15527                                                                  0x2 = Deskew read clock toggles every 12 DCLKs.
15528                                                                  0x3 = Deskew read clock toggles every 16 DCLKs. */
15529         uint64_t dsk_dbg_rd_start      : 1;  /**< [ 38: 38](WO/H) Reserved.
15530                                                                  Internal:
15531                                                                  Write one to start a deskew data read operation; the bit automatically clears
15532                                                                  to zero. Writing one also clears the complete bit. */
15533         uint64_t dsk_dbg_rd_data       : 10; /**< [ 48: 39](RO/H) Reserved.
15534                                                                  Internal:
15535                                                                  Data from a deskew read operation. Only valid when the
15536                                                                  LMC()_PHY_CTL[DSK_DBG_RD_COMPLETE] bit is set. */
15537         uint64_t dsk_dbg_rd_complete   : 1;  /**< [ 49: 49](RO/H) Reserved.
15538                                                                  Internal:
15539                                                                  Indicates completion of a read operation. Clears to zero when a read
15540                                                                  operation is started, then sets to one when the operation is complete. */
15541         uint64_t phy_reset             : 1;  /**< [ 50: 50](WO) Reserved.
15542                                                                  Internal:
15543                                                                  Write one to reset the PHY. This is a one-shot operation; the bit automatically
15544                                                                  clears to zero. */
15545         uint64_t c0_sel                : 2;  /**< [ 52: 51](R/W) Reserved.
15546                                                                  Internal:
15547                                                                  0x0 = C0 is not routed to any output pin.
15548                                                                  0x1 = C0 is routed to CS2.
15549                                                                  0x2 = C0 is routed to TEN output pin.
15550                                                                  0x3 = C0 is not routed to any output pin.
15551 
15552                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
15553         uint64_t c1_sel                : 2;  /**< [ 54: 53](R/W) Reserved.
15554                                                                  Internal:
15555                                                                  0x0 = C1 is not routed to any output pin.
15556                                                                  0x1 = C1 is routed to CS3.
15557                                                                  0x2 = C1 is routed to A17 address pin.
15558                                                                  0x3 = C1 is not routed to any output pin.
15559 
15560                                                                  Set to 0x0 if not interfacing with 3DS DRAM. */
15561         uint64_t dm_disable            : 1;  /**< [ 55: 55](R/W) Write to 1 to disable the DRAM data mask feature by having LMC drive a constant value on
15562                                                                  the
15563                                                                  DDRX_DQS\<17:9\>_P pins of the chip during write operations. LMC drives a constant 0 in DDR3
15564                                                                  and drives a constant 1 in DDR4.
15565                                                                  Note that setting this field high is NOT allowed when LMC has the write DBI feature turned
15566                                                                  on
15567                                                                  (MODEREG_PARAMS3[WR_DBI]=1). */
15568         uint64_t dq_shallow_loopback   : 1;  /**< [ 56: 56](R/W) Reserved.
15569                                                                  Internal:
15570                                                                  DQ shallow loopback, working in conjunction with LOOPBACK assertion.
15571                                                                  When asserted, even DQ inputs can be loop-backed out through their adjacent odd DQ outputs
15572                                                                  without being flop'd by DQS. Need to make sure LMC()_PHY_CTL[PHY_DSK_BYP] is set and
15573                                                                  LMC()_PHY_CTL[INT_PHY_LOOPBACK_ENA] is unset. */
15574         uint64_t data_rate_loopback    : 1;  /**< [ 57: 57](R/W) Reserved.
15575                                                                  Internal:
15576                                                                  DQ data rate loopback, working in conjunction with LOOPBACK assertion.
15577                                                                  When asserted, incoming PRBS at even DQ can be set at data rate, and the data is loop
15578                                                                  backed out through odd DQ at the same rate.
15579                                                                  When deasserted, LOOPBACK assertion works along with [LOOPBACK_POS] to check even
15580                                                                  DQ against each DQS edge separately. This is done at the clock rate. */
15581         uint64_t dsk_dbg_wr_mode       : 1;  /**< [ 58: 58](R/W) Reserved.
15582                                                                  Internal:
15583                                                                  When set high along with [DSK_DBG_RD_START], LMC kicks off Deskew
15584                                                                  Overwrite sequence to shift out a 10-bit setting for a single DQ.
15585                                                                  Note that there are a total of 9 bytes and the chain structure is split into two
15586                                                                  halves such that the top chain covers bytes 7,6,5,4 and the bottom chain covers bytes
15587                                                                  ECC,3,2,1,0.
15588                                                                  Each byte has 10 DQs (DQ7,DQ6,DQ5,DQ4,DBI,DAC,DQ3,DQ2,DQ1,DQ0) and each
15589                                                                  DQ has a 10-bit deskew setting. */
15590         uint64_t dsk_dbg_overwrt_ena   : 1;  /**< [ 59: 59](R/W) Reserved.
15591                                                                  Internal:
15592                                                                  When set high, PHY selects all of the preloaded data
15593                                                                  when configuring the read deskew settings. */
15594         uint64_t dsk_dbg_load_dis      : 1;  /**< [ 60: 60](R/W) Reserved.
15595                                                                  Internal:
15596                                                                  When set, LMC prevents PHY from loading the deskew shift
15597                                                                  registers with its internal settings. When the Read Deskew sequence is kicked off
15598                                                                  by setting [DSK_DBG_RD_START] = 1 and [DSK_DBG_WR_MODE] = 0, this field determines
15599                                                                  whether or not to load the shift register with the PHY's internal settings before
15600                                                                  the shifting process. */
15601         uint64_t phy_dsk_lock_en       : 1;  /**< [ 61: 61](RO) Reserved. */
15602         uint64_t reserved_62_63        : 2;
15603 #endif /* Word 0 - End */
15604     } cn88xxp2;
15605 };
15606 typedef union bdk_lmcx_phy_ctl bdk_lmcx_phy_ctl_t;
15607 
15608 static inline uint64_t BDK_LMCX_PHY_CTL(unsigned long a) __attribute__ ((pure, always_inline));
15609 static inline uint64_t BDK_LMCX_PHY_CTL(unsigned long a)
15610 {
15611     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
15612         return 0x87e088000210ll + 0x1000000ll * ((a) & 0x0);
15613     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
15614         return 0x87e088000210ll + 0x1000000ll * ((a) & 0x1);
15615     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
15616         return 0x87e088000210ll + 0x1000000ll * ((a) & 0x3);
15617     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
15618         return 0x87e088000210ll + 0x1000000ll * ((a) & 0x3);
15619     __bdk_csr_fatal("LMCX_PHY_CTL", 1, a, 0, 0, 0);
15620 }
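
/* Illustrative note (not part of the generated definitions): as a worked
 * example of the accessor above, on a CN83XX part
 *     BDK_LMCX_PHY_CTL(1) = 0x87e088000210 + 1 * 0x1000000 = 0x87e089000210,
 * i.e. each LMC instance's copy of this register is spaced 0x1000000 bytes
 * apart. */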
15621 
15622 #define typedef_BDK_LMCX_PHY_CTL(a) bdk_lmcx_phy_ctl_t
15623 #define bustype_BDK_LMCX_PHY_CTL(a) BDK_CSR_TYPE_RSL
15624 #define basename_BDK_LMCX_PHY_CTL(a) "LMCX_PHY_CTL"
15625 #define device_bar_BDK_LMCX_PHY_CTL(a) 0x0 /* PF_BAR0 */
15626 #define busnum_BDK_LMCX_PHY_CTL(a) (a)
15627 #define arguments_BDK_LMCX_PHY_CTL(a) (a),-1,-1,-1
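
/* Illustrative sketch (not part of the generated definitions): a minimal
 * example of the deskew debug read handshake described by the
 * DSK_DBG_BYTE_SEL, DSK_DBG_BIT_SEL, DSK_DBG_RD_START, DSK_DBG_RD_COMPLETE
 * and DSK_DBG_RD_DATA field descriptions above, using the cn83xx field
 * layout. The csr_read64/csr_write64 function pointers are hypothetical
 * stand-ins for whatever 64-bit CSR accessors the caller already has; only
 * the field-level sequence is taken from the descriptions in this file. */
static inline uint64_t example_lmcx_deskew_read(unsigned long lmc,
                                                unsigned byte_sel,
                                                unsigned bit_sel,
                                                uint64_t (*csr_read64)(uint64_t addr),
                                                void (*csr_write64)(uint64_t addr, uint64_t val))
{
    uint64_t addr = BDK_LMCX_PHY_CTL(lmc);
    bdk_lmcx_phy_ctl_t phy_ctl;

    /* Select which deskew setting to read back. */
    phy_ctl.u = csr_read64(addr);
    phy_ctl.cn83xx.dsk_dbg_byte_sel = byte_sel; /* 0-3 = bytes 0-3, 4 = ECC, 5-8 = bytes 4-7 */
    phy_ctl.cn83xx.dsk_dbg_bit_sel  = bit_sel;  /* see the DSK_DBG_BIT_SEL encoding above */
    csr_write64(addr, phy_ctl.u);

    /* Start the read; DSK_DBG_RD_START self-clears and also clears DSK_DBG_RD_COMPLETE. */
    phy_ctl.cn83xx.dsk_dbg_rd_start = 1;
    csr_write64(addr, phy_ctl.u);

    /* Poll until the PHY reports completion, then return the 10-bit setting. */
    do {
        phy_ctl.u = csr_read64(addr);
    } while (!phy_ctl.cn83xx.dsk_dbg_rd_complete);

    return phy_ctl.cn83xx.dsk_dbg_rd_data;
}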
15628 
15629 /**
15630  * Register (RSL) lmc#_phy_ctl2
15631  *
15632  * LMC PHY Control Register
15633  */
15634 union bdk_lmcx_phy_ctl2
15635 {
15636     uint64_t u;
15637     struct bdk_lmcx_phy_ctl2_s
15638     {
15639 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
15640         uint64_t reserved_54_63        : 10;
15641         uint64_t dqs1_dsk_adj8         : 3;  /**< [ 53: 51](R/W) Provides adjustable deskew settings for DQS1 signal of the ECC byte. */
15642         uint64_t dqs1_dsk_adj7         : 3;  /**< [ 50: 48](R/W) Provides adjustable deskew settings for DQS1 signal of byte 7. */
15643         uint64_t dqs1_dsk_adj6         : 3;  /**< [ 47: 45](R/W) Provides adjustable deskew settings for DQS1 signal of byte 6. */
15644         uint64_t dqs1_dsk_adj5         : 3;  /**< [ 44: 42](R/W) Provides adjustable deskew settings for DQS1 signal of byte 5. */
15645         uint64_t dqs1_dsk_adj4         : 3;  /**< [ 41: 39](R/W) Provides adjustable deskew settings for DQS1 signal of byte 4. */
15646         uint64_t dqs1_dsk_adj3         : 3;  /**< [ 38: 36](R/W) Provides adjustable deskew settings for DQS1 signal of byte 3. */
15647         uint64_t dqs1_dsk_adj2         : 3;  /**< [ 35: 33](R/W) Provides adjustable deskew settings for DQS1 signal of byte 2. */
15648         uint64_t dqs1_dsk_adj1         : 3;  /**< [ 32: 30](R/W) Provides adjustable deskew settings for DQS1 signal of byte 1. */
15649         uint64_t dqs1_dsk_adj0         : 3;  /**< [ 29: 27](R/W) Provides adjustable deskew settings for DQS1 signal of byte 0. */
15650         uint64_t reserved_0_26         : 27;
15651 #else /* Word 0 - Little Endian */
15652         uint64_t reserved_0_26         : 27;
15653         uint64_t dqs1_dsk_adj0         : 3;  /**< [ 29: 27](R/W) Provides adjustable deskew settings for DQS1 signal of byte 0. */
15654         uint64_t dqs1_dsk_adj1         : 3;  /**< [ 32: 30](R/W) Provides adjustable deskew settings for DQS1 signal of byte 1. */
15655         uint64_t dqs1_dsk_adj2         : 3;  /**< [ 35: 33](R/W) Provides adjustable deskew settings for DQS1 signal of byte 2. */
15656         uint64_t dqs1_dsk_adj3         : 3;  /**< [ 38: 36](R/W) Provides adjustable deskew settings for DQS1 signal of byte 3. */
15657         uint64_t dqs1_dsk_adj4         : 3;  /**< [ 41: 39](R/W) Provides adjustable deskew settings for DQS1 signal of byte 4. */
15658         uint64_t dqs1_dsk_adj5         : 3;  /**< [ 44: 42](R/W) Provides adjustable deskew settings for DQS1 signal of byte 5. */
15659         uint64_t dqs1_dsk_adj6         : 3;  /**< [ 47: 45](R/W) Provides adjustable deskew settings for DQS1 signal of byte 6. */
15660         uint64_t dqs1_dsk_adj7         : 3;  /**< [ 50: 48](R/W) Provides adjustable deskew settings for DQS1 signal of byte 7. */
15661         uint64_t dqs1_dsk_adj8         : 3;  /**< [ 53: 51](R/W) Provides adjustable deskew settings for DQS1 signal of the ECC byte. */
15662         uint64_t reserved_54_63        : 10;
15663 #endif /* Word 0 - End */
15664     } s;
15665     struct bdk_lmcx_phy_ctl2_cn8
15666     {
15667 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
15668         uint64_t reserved_27_63        : 37;
15669         uint64_t dqs8_dsk_adj          : 3;  /**< [ 26: 24](R/W) Provides adjustable deskew settings for DQS signal of the ECC byte. */
15670         uint64_t dqs7_dsk_adj          : 3;  /**< [ 23: 21](R/W) Provides adjustable deskew settings for DQS signal of byte 7. */
15671         uint64_t dqs6_dsk_adj          : 3;  /**< [ 20: 18](R/W) Provides adjustable deskew settings for DQS signal of byte 6. */
15672         uint64_t dqs5_dsk_adj          : 3;  /**< [ 17: 15](R/W) Provides adjustable deskew settings for DQS signal of byte 5. */
15673         uint64_t dqs4_dsk_adj          : 3;  /**< [ 14: 12](R/W) Provides adjustable deskew settings for DQS signal of byte 4. */
15674         uint64_t dqs3_dsk_adj          : 3;  /**< [ 11:  9](R/W) Provides adjustable deskew settings for DQS signal of byte 3. */
15675         uint64_t dqs2_dsk_adj          : 3;  /**< [  8:  6](R/W) Provides adjustable deskew settings for DQS signal of byte 2. */
15676         uint64_t dqs1_dsk_adj          : 3;  /**< [  5:  3](R/W) Provides adjustable deskew settings for DQS signal of byte 1. */
15677         uint64_t dqs0_dsk_adj          : 3;  /**< [  2:  0](R/W) Provides adjustable deskew settings for DQS signal of byte 0. */
15678 #else /* Word 0 - Little Endian */
15679         uint64_t dqs0_dsk_adj          : 3;  /**< [  2:  0](R/W) Provides adjustable deskew settings for DQS signal of byte 0. */
15680         uint64_t dqs1_dsk_adj          : 3;  /**< [  5:  3](R/W) Provides adjustable deskew settings for DQS signal of byte 1. */
15681         uint64_t dqs2_dsk_adj          : 3;  /**< [  8:  6](R/W) Provides adjustable deskew settings for DQS signal of byte 2. */
15682         uint64_t dqs3_dsk_adj          : 3;  /**< [ 11:  9](R/W) Provides adjustable deskew settings for DQS signal of byte 3. */
15683         uint64_t dqs4_dsk_adj          : 3;  /**< [ 14: 12](R/W) Provides adjustable deskew settings for DQS signal of byte 4. */
15684         uint64_t dqs5_dsk_adj          : 3;  /**< [ 17: 15](R/W) Provides adjustable deskew settings for DQS signal of byte 5. */
15685         uint64_t dqs6_dsk_adj          : 3;  /**< [ 20: 18](R/W) Provides adjustable deskew settings for DQS signal of byte 6. */
15686         uint64_t dqs7_dsk_adj          : 3;  /**< [ 23: 21](R/W) Provides adjustable deskew settings for DQS signal of byte 7. */
15687         uint64_t dqs8_dsk_adj          : 3;  /**< [ 26: 24](R/W) Provides adjustable deskew settings for DQS signal of the ECC byte. */
15688         uint64_t reserved_27_63        : 37;
15689 #endif /* Word 0 - End */
15690     } cn8;
15691     struct bdk_lmcx_phy_ctl2_cn9
15692     {
15693 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
15694         uint64_t reserved_54_63        : 10;
15695         uint64_t dqs1_dsk_adj8         : 3;  /**< [ 53: 51](R/W) Provides adjustable deskew settings for DQS1 signal of the ECC byte. */
15696         uint64_t dqs1_dsk_adj7         : 3;  /**< [ 50: 48](R/W) Provides adjustable deskew settings for DQS1 signal of byte 7. */
15697         uint64_t dqs1_dsk_adj6         : 3;  /**< [ 47: 45](R/W) Provides adjustable deskew settings for DQS1 signal of byte 6. */
15698         uint64_t dqs1_dsk_adj5         : 3;  /**< [ 44: 42](R/W) Provides adjustable deskew settings for DQS1 signal of byte 5. */
15699         uint64_t dqs1_dsk_adj4         : 3;  /**< [ 41: 39](R/W) Provides adjustable deskew settings for DQS1 signal of byte 4. */
15700         uint64_t dqs1_dsk_adj3         : 3;  /**< [ 38: 36](R/W) Provides adjustable deskew settings for DQS1 signal of byte 3. */
15701         uint64_t dqs1_dsk_adj2         : 3;  /**< [ 35: 33](R/W) Provides adjustable deskew settings for DQS1 signal of byte 2. */
15702         uint64_t dqs1_dsk_adj1         : 3;  /**< [ 32: 30](R/W) Provides adjustable deskew settings for DQS1 signal of byte 1. */
15703         uint64_t dqs1_dsk_adj0         : 3;  /**< [ 29: 27](R/W) Provides adjustable deskew settings for DQS1 signal of byte 0. */
15704         uint64_t dqs0_dsk_adj8         : 3;  /**< [ 26: 24](R/W) Provides adjustable deskew settings for DQS0 signal of the ECC byte. */
15705         uint64_t dqs0_dsk_adj7         : 3;  /**< [ 23: 21](R/W) Provides adjustable deskew settings for DQS0 signal of byte 7. */
15706         uint64_t dqs0_dsk_adj6         : 3;  /**< [ 20: 18](R/W) Provides adjustable deskew settings for DQS0 signal of byte 6. */
15707         uint64_t dqs0_dsk_adj5         : 3;  /**< [ 17: 15](R/W) Provides adjustable deskew settings for DQS0 signal of byte 5. */
15708         uint64_t dqs0_dsk_adj4         : 3;  /**< [ 14: 12](R/W) Provides adjustable deskew settings for DQS0 signal of byte 4. */
15709         uint64_t dqs0_dsk_adj3         : 3;  /**< [ 11:  9](R/W) Provides adjustable deskew settings for DQS0 signal of byte 3. */
15710         uint64_t dqs0_dsk_adj2         : 3;  /**< [  8:  6](R/W) Provides adjustable deskew settings for DQS0 signal of byte 2. */
15711         uint64_t dqs0_dsk_adj1         : 3;  /**< [  5:  3](R/W) Provides adjustable deskew settings for DQS0 signal of byte 1. */
15712         uint64_t dqs0_dsk_adj0         : 3;  /**< [  2:  0](R/W) Provides adjustable deskew settings for DQS0 signal of byte 0. */
15713 #else /* Word 0 - Little Endian */
15714         uint64_t dqs0_dsk_adj0         : 3;  /**< [  2:  0](R/W) Provides adjustable deskew settings for DQS0 signal of byte 0. */
15715         uint64_t dqs0_dsk_adj1         : 3;  /**< [  5:  3](R/W) Provides adjustable deskew settings for DQS0 signal of byte 1. */
15716         uint64_t dqs0_dsk_adj2         : 3;  /**< [  8:  6](R/W) Provides adjustable deskew settings for DQS0 signal of byte 2. */
15717         uint64_t dqs0_dsk_adj3         : 3;  /**< [ 11:  9](R/W) Provides adjustable deskew settings for DQS0 signal of byte 3. */
15718         uint64_t dqs0_dsk_adj4         : 3;  /**< [ 14: 12](R/W) Provides adjustable deskew settings for DQS0 signal of byte 4. */
15719         uint64_t dqs0_dsk_adj5         : 3;  /**< [ 17: 15](R/W) Provides adjustable deskew settings for DQS0 signal of byte 5. */
15720         uint64_t dqs0_dsk_adj6         : 3;  /**< [ 20: 18](R/W) Provides adjustable deskew settings for DQS0 signal of byte 6. */
15721         uint64_t dqs0_dsk_adj7         : 3;  /**< [ 23: 21](R/W) Provides adjustable deskew settings for DQS0 signal of byte 7. */
15722         uint64_t dqs0_dsk_adj8         : 3;  /**< [ 26: 24](R/W) Provides adjustable deskew settings for DQS0 signal of the ECC byte. */
15723         uint64_t dqs1_dsk_adj0         : 3;  /**< [ 29: 27](R/W) Provides adjustable deskew settings for DQS1 signal of byte 0. */
15724         uint64_t dqs1_dsk_adj1         : 3;  /**< [ 32: 30](R/W) Provides adjustable deskew settings for DQS1 signal of byte 1. */
15725         uint64_t dqs1_dsk_adj2         : 3;  /**< [ 35: 33](R/W) Provides adjustable deskew settings for DQS1 signal of byte 2. */
15726         uint64_t dqs1_dsk_adj3         : 3;  /**< [ 38: 36](R/W) Provides adjustable deskew settings for DQS1 signal of byte 3. */
15727         uint64_t dqs1_dsk_adj4         : 3;  /**< [ 41: 39](R/W) Provides adjustable deskew settings for DQS1 signal of byte 4. */
15728         uint64_t dqs1_dsk_adj5         : 3;  /**< [ 44: 42](R/W) Provides adjustable deskew settings for DQS1 signal of byte 5. */
15729         uint64_t dqs1_dsk_adj6         : 3;  /**< [ 47: 45](R/W) Provides adjustable deskew settings for DQS1 signal of byte 6. */
15730         uint64_t dqs1_dsk_adj7         : 3;  /**< [ 50: 48](R/W) Provides adjustable deskew settings for DQS1 signal of byte 7. */
15731         uint64_t dqs1_dsk_adj8         : 3;  /**< [ 53: 51](R/W) Provides adjustable deskew settings for DQS1 signal of the ECC byte. */
15732         uint64_t reserved_54_63        : 10;
15733 #endif /* Word 0 - End */
15734     } cn9;
15735 };
15736 typedef union bdk_lmcx_phy_ctl2 bdk_lmcx_phy_ctl2_t;
15737 
15738 static inline uint64_t BDK_LMCX_PHY_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
15739 static inline uint64_t BDK_LMCX_PHY_CTL2(unsigned long a)
15740 {
15741     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
15742         return 0x87e088000250ll + 0x1000000ll * ((a) & 0x0);
15743     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
15744         return 0x87e088000250ll + 0x1000000ll * ((a) & 0x1);
15745     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=3))
15746         return 0x87e088000250ll + 0x1000000ll * ((a) & 0x3);
15747     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
15748         return 0x87e088000250ll + 0x1000000ll * ((a) & 0x3);
15749     __bdk_csr_fatal("LMCX_PHY_CTL2", 1, a, 0, 0, 0);
15750 }
15751 
15752 #define typedef_BDK_LMCX_PHY_CTL2(a) bdk_lmcx_phy_ctl2_t
15753 #define bustype_BDK_LMCX_PHY_CTL2(a) BDK_CSR_TYPE_RSL
15754 #define basename_BDK_LMCX_PHY_CTL2(a) "LMCX_PHY_CTL2"
15755 #define device_bar_BDK_LMCX_PHY_CTL2(a) 0x0 /* PF_BAR0 */
15756 #define busnum_BDK_LMCX_PHY_CTL2(a) (a)
15757 #define arguments_BDK_LMCX_PHY_CTL2(a) (a),-1,-1,-1
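
/* Usage sketch (not part of the auto-generated register definitions): reading the
 * per-byte DQS deskew settings back out of a raw LMC(x)_PHY_CTL2 value with the
 * bdk_lmcx_phy_ctl2_t union defined above. The raw value is assumed to come from a
 * CSR read performed elsewhere; this hypothetical helper only reinterprets the bits
 * for the CN8XXX field layout. */
static inline unsigned example_lmcx_phy_ctl2_cn8_dqs_adj(uint64_t raw, unsigned byte_lane)
{
    bdk_lmcx_phy_ctl2_t c;
    c.u = raw;
    switch (byte_lane) {
    case 0: return c.cn8.dqs0_dsk_adj;
    case 1: return c.cn8.dqs1_dsk_adj;
    case 2: return c.cn8.dqs2_dsk_adj;
    case 3: return c.cn8.dqs3_dsk_adj;
    case 4: return c.cn8.dqs4_dsk_adj;
    case 5: return c.cn8.dqs5_dsk_adj;
    case 6: return c.cn8.dqs6_dsk_adj;
    case 7: return c.cn8.dqs7_dsk_adj;
    default: return c.cn8.dqs8_dsk_adj; /* ECC byte */
    }
}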
15758 
15759 /**
15760  * Register (RSL) lmc#_phy_ctl3
15761  *
15762  * LMC PHY Control Register
15763  */
15764 union bdk_lmcx_phy_ctl3
15765 {
15766     uint64_t u;
15767     struct bdk_lmcx_phy_ctl3_s
15768     {
15769 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
15770         uint64_t reserved_18_63        : 46;
15771         uint64_t ddr_dimm1_ck1_en_clear : 1; /**< [ 17: 17](R/W1/H) Write one to clear DDR_DIMM1_CK_EN[1]. One shot operation. */
15772         uint64_t ddr_dimm1_ck0_en_clear : 1; /**< [ 16: 16](R/W1/H) Write one to clear DDR_DIMM1_CK_EN[0]. One shot operation. */
15773         uint64_t ddr_dimm0_ck1_en_clear : 1; /**< [ 15: 15](R/W1/H) Write one to clear DDR_DIMM0_CK_EN[1]. One shot operation. */
15774         uint64_t ddr_dimm0_ck0_en_clear : 1; /**< [ 14: 14](R/W1/H) Write one to clear DDR_DIMM0_CK_EN[0]. One shot operation. */
15775         uint64_t ddr_dimm1_ck1_en_set  : 1;  /**< [ 13: 13](R/W1/H) Write one to set DDR_DIMM1_CK_EN[1]. One shot operation. */
15776         uint64_t ddr_dimm1_ck0_en_set  : 1;  /**< [ 12: 12](R/W1/H) Write one to set DDR_DIMM1_CK_EN[0]. One shot operation. */
15777         uint64_t ddr_dimm0_ck1_en_set  : 1;  /**< [ 11: 11](R/W1/H) Write one to set DDR_DIMM0_CK_EN[1]. One shot operation. */
15778         uint64_t ddr_dimm0_ck0_en_set  : 1;  /**< [ 10: 10](R/W1/H) Write one to set DDR_DIMM0_CK_EN[0]. One shot operation. */
15779         uint64_t x4_clk_select_overwrite : 1;/**< [  9:  9](R/W) Overwrite mode for the PHY's x4 clock select.
15780                                                                  0 = Hardware automatically asserts the PHY's x4 clk select signal
15781                                                                  while running deskew training of an x4 DIMM (i.e., running LMC_SEQ_SEL_E::VREF_INT sequence
15782                                                                  with both LMC()_EXT_CONFIG[VREFINT_SEQ_DESKEW] and LMC()_CONFIG[MODE_X4DEV] set
15783                                                                  to 1).
15784                                                                  1 = Enable overwrite mode for the PHY's x4 clock select. PHY's x4 clk select
15785                                                                  signal is determined by the state of [X4_CLK_SELECT]. */
15786         uint64_t x4_clk_select         : 1;  /**< [  8:  8](R/W/H) Manually enable/disable the PHY's x4 clk select. Only valid when
15787                                                                  [X4_CLK_SELECT_OVERWRITE] is one, otherwise hardware determines the value. */
15788         uint64_t io_dcc_n              : 2;  /**< [  7:  6](R/W) Duty cycle trim for IO. */
15789         uint64_t io_dcc_p              : 2;  /**< [  5:  4](R/W) Duty cycle trim for IO. */
15790         uint64_t phy_dcc_n             : 2;  /**< [  3:  2](R/W) Duty cycle corrector for PHY. This will be consumed by both DQS and CK bits. */
15791         uint64_t phy_dcc_p             : 2;  /**< [  1:  0](R/W) Duty cycle corrector for PHY. This will be consumed by both DQS and CK bits. */
15792 #else /* Word 0 - Little Endian */
15793         uint64_t phy_dcc_p             : 2;  /**< [  1:  0](R/W) Duty cycle corrector for PHY. This will be consumed by both DQS and CK bits. */
15794         uint64_t phy_dcc_n             : 2;  /**< [  3:  2](R/W) Duty cycle corrector for PHY. This will be consumed by both DQS and CK bits. */
15795         uint64_t io_dcc_p              : 2;  /**< [  5:  4](R/W) Duty cycle trim for IO. */
15796         uint64_t io_dcc_n              : 2;  /**< [  7:  6](R/W) Duty cycle trim for IO. */
15797         uint64_t x4_clk_select         : 1;  /**< [  8:  8](R/W/H) Manually enable/disable the PHY's x4 clk select. Only valid when
15798                                                                  [X4_CLK_SELECT_OVERWRITE] is one, otherwise hardware determines the value. */
15799         uint64_t x4_clk_select_overwrite : 1;/**< [  9:  9](R/W) Overwrite mode for the PHY's x4 clock select.
15800                                                                  0 = Hardware automatically asserts the PHY's x4 clk select signal
15801                                                                  while running deskew training of an x4 DIMM (i.e., running LMC_SEQ_SEL_E::VREF_INT sequence
15802                                                                  with both LMC()_EXT_CONFIG[VREFINT_SEQ_DESKEW] and LMC()_CONFIG[MODE_X4DEV] set
15803                                                                  to 1).
15804                                                                  1 = Enable overwrite mode for the PHY's x4 clock select. PHY's x4 clk select
15805                                                                  signal is determined by the state of [X4_CLK_SELECT]. */
15806         uint64_t ddr_dimm0_ck0_en_set  : 1;  /**< [ 10: 10](R/W1/H) Write one to set DDR_DIMM0_CK_EN[0]. One shot operation. */
15807         uint64_t ddr_dimm0_ck1_en_set  : 1;  /**< [ 11: 11](R/W1/H) Write one to set DDR_DIMM0_CK_EN[1]. One shot operation. */
15808         uint64_t ddr_dimm1_ck0_en_set  : 1;  /**< [ 12: 12](R/W1/H) Write one to set DDR_DIMM1_CK_EN[0]. One shot operation. */
15809         uint64_t ddr_dimm1_ck1_en_set  : 1;  /**< [ 13: 13](R/W1/H) Write one to set DDR_DIMM1_CK_EN[1]. One shot operation. */
15810         uint64_t ddr_dimm0_ck0_en_clear : 1; /**< [ 14: 14](R/W1/H) Write one to clear DDR_DIMM0_CK_EN[0]. One shot operation. */
15811         uint64_t ddr_dimm0_ck1_en_clear : 1; /**< [ 15: 15](R/W1/H) Write one to clear DDR_DIMM0_CK_EN[1]. One shot operation. */
15812         uint64_t ddr_dimm1_ck0_en_clear : 1; /**< [ 16: 16](R/W1/H) Write one to clear DDR_DIMM1_CK_EN[0]. One shot operation. */
15813         uint64_t ddr_dimm1_ck1_en_clear : 1; /**< [ 17: 17](R/W1/H) Write one to clear DDR_DIMM1_CK_EN[1]. One shot operation. */
15814         uint64_t reserved_18_63        : 46;
15815 #endif /* Word 0 - End */
15816     } s;
15817     /* struct bdk_lmcx_phy_ctl3_s cn; */
15818 };
15819 typedef union bdk_lmcx_phy_ctl3 bdk_lmcx_phy_ctl3_t;
15820 
15821 static inline uint64_t BDK_LMCX_PHY_CTL3(unsigned long a) __attribute__ ((pure, always_inline));
15822 static inline uint64_t BDK_LMCX_PHY_CTL3(unsigned long a)
15823 {
15824     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
15825         return 0x87e0880002f8ll + 0x1000000ll * ((a) & 0x3);
15826     __bdk_csr_fatal("LMCX_PHY_CTL3", 1, a, 0, 0, 0);
15827 }
15828 
15829 #define typedef_BDK_LMCX_PHY_CTL3(a) bdk_lmcx_phy_ctl3_t
15830 #define bustype_BDK_LMCX_PHY_CTL3(a) BDK_CSR_TYPE_RSL
15831 #define basename_BDK_LMCX_PHY_CTL3(a) "LMCX_PHY_CTL3"
15832 #define device_bar_BDK_LMCX_PHY_CTL3(a) 0x0 /* PF_BAR0 */
15833 #define busnum_BDK_LMCX_PHY_CTL3(a) (a)
15834 #define arguments_BDK_LMCX_PHY_CTL3(a) (a),-1,-1,-1
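
/* Usage sketch (not part of the auto-generated register definitions): composing an
 * LMC(x)_PHY_CTL3 write value that pulses the one-shot DDR_DIMM0_CK_EN[0] set bit
 * and programs the 2-bit IO duty-cycle trim fields. Field names come from the
 * bdk_lmcx_phy_ctl3_s layout above; this hypothetical helper only builds the value,
 * the actual CSR write happens elsewhere. */
static inline uint64_t example_lmcx_phy_ctl3_ck0_set(unsigned io_dcc_p, unsigned io_dcc_n)
{
    bdk_lmcx_phy_ctl3_t c;
    c.u = 0;
    c.s.ddr_dimm0_ck0_en_set = 1;   /* R/W1 one-shot: sets DDR_DIMM0_CK_EN[0] */
    c.s.io_dcc_p = io_dcc_p & 0x3;  /* duty cycle trim for IO (2 bits) */
    c.s.io_dcc_n = io_dcc_n & 0x3;
    return c.u;
}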
15835 
15836 /**
15837  * Register (RSL) lmc#_ppr_ctl
15838  *
15839  * LMC PPR Timing Register
15840  * This register contains programmable timing and control parameters used
15841  * when running the post package repair sequence. The timing fields
15842  * LMC()_PPR_CTL[TPGMPST], LMC()_PPR_CTL[TPGM_EXIT] and LMC()_PPR_CTL[TPGM] need to be set as
15843  * to satisfy the minimum values mentioned in the JEDEC DDR4 spec before
15844  * running the PPR sequence. See LMC()_SEQ_CTL[SEQ_SEL], LMC()_SEQ_CTL[INIT_START] to run
15845  * the PPR sequence.
15846  *
15847  * Running hard PPR may require LMC to issue security key as four consecutive
15848  * MR0 commands, each with a unique address field A[17:0]. Set the security
15849  * key in the general purpose CSRs as follows:
15850  *
15851  * _ Security key 0 = LMC()_GENERAL_PURPOSE0[DATA]\<17:0\>.
15852  * _ Security key 1 = LMC()_GENERAL_PURPOSE0[DATA]\<35:18\>.
15853  * _ Security key 2 = LMC()_GENERAL_PURPOSE1[DATA]\<17:0\>.
15854  * _ Security key 3 = LMC()_GENERAL_PURPOSE1[DATA]\<35:18\>.
15855  */
15856 union bdk_lmcx_ppr_ctl
15857 {
15858     uint64_t u;
15859     struct bdk_lmcx_ppr_ctl_s
15860     {
15861 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
15862         uint64_t reserved_27_63        : 37;
15863         uint64_t lrank_sel             : 3;  /**< [ 26: 24](RO) Reserved. */
15864         uint64_t skip_issue_security   : 1;  /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
15865                                                                  issuing four consecutive MR0 commands that supply the security key. */
15866         uint64_t sppr                  : 1;  /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
15867                                                                  the soft PPR mode. */
15868         uint64_t tpgm                  : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running PPR sequence.
15869 
15870                                                                  For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
15871                                                                  RNDUP[TPGM(ns) / (1048576 * TCYC(ns))].
15872 
15873                                                                  For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
15874                                                                  RNDUP[TPGM(ns) / TCYC(ns)].
15875 
15876                                                                  [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
15877                                                                  rate). */
15878         uint64_t tpgm_exit             : 5;  /**< [ 11:  7](R/W) Indicates PPR exit time (tPGM_Exit) constraint used when running PPR sequence.
15879                                                                  Set this field as follows:
15880                                                                  _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
15881 
15882                                                                  where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency
15883                                                                  (not
15884                                                                  data rate). */
15885         uint64_t tpgmpst               : 7;  /**< [  6:  0](R/W) Indicates new address setting time (tPGMPST) constraint used when running PPR sequence.
15886                                                                  Set this field as follows:
15887 
15888                                                                  _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
15889 
15890                                                                  where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not
15891                                                                  data rate). */
15892 #else /* Word 0 - Little Endian */
15893         uint64_t tpgmpst               : 7;  /**< [  6:  0](R/W) Indicates new address setting time (tPGMPST) constraint used when running PPR sequence.
15894                                                                  Set this field as follows:
15895 
15896                                                                  _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
15897 
15898                                                                  where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not
15899                                                                  data rate). */
15900         uint64_t tpgm_exit             : 5;  /**< [ 11:  7](R/W) Indicates PPR exit time (tPGM_Exit) constraint used when running PPR sequence.
15901                                                                  Set this field as follows:
15902                                                                  _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
15903 
15904                                                                  where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency
15905                                                                  (not
15906                                                                  data rate). */
15907         uint64_t tpgm                  : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running PPR sequence.
15908 
15909                                                                  For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
15910                                                                  RNDUP[TPGM(ns) / (1048576 * TCYC(ns))].
15911 
15912                                                                  For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
15913                                                                  RNDUP[TPGM(ns) / TCYC(ns)].
15914 
15915                                                                  [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
15916                                                                  rate). */
15917         uint64_t sppr                  : 1;  /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
15918                                                                  the soft PPR mode. */
15919         uint64_t skip_issue_security   : 1;  /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
15920                                                                  issuing four consecutive MR0 commands that supply the security key. */
15921         uint64_t lrank_sel             : 3;  /**< [ 26: 24](RO) Reserved. */
15922         uint64_t reserved_27_63        : 37;
15923 #endif /* Word 0 - End */
15924     } s;
15925     struct bdk_lmcx_ppr_ctl_cn9
15926     {
15927 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
15928         uint64_t reserved_27_63        : 37;
15929         uint64_t lrank_sel             : 3;  /**< [ 26: 24](R/W) Selects which logical rank to perform the post package repair sequence.
15930                                                                  Package ranks are selected by LMC()_MR_MPR_CTL[MR_WR_RANK]. */
15931         uint64_t skip_issue_security   : 1;  /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
15932                                                                  issuing four consecutive MR0 commands that supply the security key. */
15933         uint64_t sppr                  : 1;  /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
15934                                                                  the soft PPR mode. */
15935         uint64_t tpgm                  : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running PPR sequence.
15936 
15937                                                                  For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
15938                                                                  RNDUP[TPGM(ns) / (1048576 * TCYC(ns))].
15939 
15940                                                                  For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
15941                                                                  RNDUP[TPGM(ns) / TCYC(ns)].
15942 
15943                                                                  [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
15944                                                                  rate). */
15945         uint64_t tpgm_exit             : 5;  /**< [ 11:  7](R/W) Indicates PPR exit time (tPGM_Exit) constraint used when running PPR sequence.
15946                                                                  Set this field as follows:
15947                                                                  _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
15948 
15949                                                                  where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency
15950                                                                  (not
15951                                                                  data rate). */
15952         uint64_t tpgmpst               : 7;  /**< [  6:  0](R/W) Indicates new address setting time (tPGMPST) constraint used when running PPR sequence.
15953                                                                  Set this field as follows:
15954 
15955                                                                  _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
15956 
15957                                                                  where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not
15958                                                                  data rate). */
15959 #else /* Word 0 - Little Endian */
15960         uint64_t tpgmpst               : 7;  /**< [  6:  0](R/W) Indicates new address setting time (tPGMPST) constraint used when running PPR sequence.
15961                                                                  Set this field as follows:
15962 
15963                                                                  _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
15964 
15965                                                                  where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not
15966                                                                  data rate). */
15967         uint64_t tpgm_exit             : 5;  /**< [ 11:  7](R/W) Indicates PPR exit time (tPGM_Exit) constraint used when running PPR sequence.
15968                                                                  Set this field as follows:
15969                                                                  _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
15970 
15971                                                                  where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency
15972                                                                  (not
15973                                                                  data rate). */
15974         uint64_t tpgm                  : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running PPR sequence.
15975 
15976                                                                  For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
15977                                                                  RNDUP[TPGM(ns) / (1048576 * TCYC(ns))].
15978 
15979                                                                  For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
15980                                                                  RNDUP[TPGM(ns) / TCYC(ns)].
15981 
15982                                                                  [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
15983                                                                  rate). */
15984         uint64_t sppr                  : 1;  /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
15985                                                                  the soft PPR mode. */
15986         uint64_t skip_issue_security   : 1;  /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
15987                                                                  issuing four consecutive MR0 commands that supply the security key. */
15988         uint64_t lrank_sel             : 3;  /**< [ 26: 24](R/W) Selects which logical rank to perform the post package repair sequence.
15989                                                                  Package ranks are selected by LMC()_MR_MPR_CTL[MR_WR_RANK]. */
15990         uint64_t reserved_27_63        : 37;
15991 #endif /* Word 0 - End */
15992     } cn9;
15993     /* struct bdk_lmcx_ppr_ctl_cn9 cn81xx; */
15994     /* struct bdk_lmcx_ppr_ctl_s cn88xx; */
15995     struct bdk_lmcx_ppr_ctl_cn83xx
15996     {
15997 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
15998         uint64_t reserved_27_63        : 37;
15999         uint64_t lrank_sel             : 3;  /**< [ 26: 24](R/W) Selects which logical rank to perform the post package repair sequence.
16000                                                                  Package ranks are selected by LMC()_MR_MPR_CTL[MR_WR_RANK]. */
16001         uint64_t skip_issue_security   : 1;  /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
16002                                                                  issuing four consecutive MR0 commands that supply the security key. */
16003         uint64_t sppr                  : 1;  /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
16004                                                                  the soft PPR mode. */
16005         uint64_t tpgm                  : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running PPR sequence.
16006 
16007                                                                  For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
16008                                                                  RNDUP[TPGM(ns) / (1048576 * TCYC(ns))].
16009 
16010                                                                  For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
16011                                                                  RNDUP[TPGM(ns) / TCYC(ns)].
16012 
16013                                                                  [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
16014                                                                  rate). */
16015         uint64_t tpgm_exit             : 5;  /**< [ 11:  7](R/W) Indicates PPR exit time (tPGM_Exit) constraint used when running PPR sequence.
16016                                                                  Set this field as follows:
16017                                                                  _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
16018 
16019                                                                  where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency
16020                                                                  (not
16021                                                                  data rate). */
16022         uint64_t tpgmpst               : 7;  /**< [  6:  0](R/W) Indicates new address setting time (tPGMPST) constraint used when running PPR sequence.
16023                                                                  Set this field as follows:
16024 
16025                                                                  _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
16026 
16027                                                                  where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not
16028                                                                  data rate). */
16029 #else /* Word 0 - Little Endian */
16030         uint64_t tpgmpst               : 7;  /**< [  6:  0](R/W) Indicates new address setting time (tPGMPST) constraint used when running PPR sequence.
16031                                                                  Set this field as follows:
16032 
16033                                                                  _ RNDUP[TPGMPST(ns) / (1024 * TCYC(ns))]
16034 
16035                                                                  where [TPGMPST] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not
16036                                                                  data rate). */
16037         uint64_t tpgm_exit             : 5;  /**< [ 11:  7](R/W) Indicates PPR exit time (tPGM_Exit) constraint used when running PPR sequence.
16038                                                                  Set this field as follows:
16039                                                                  _ RNDUP[TPGM_EXIT(ns) / TCYC(ns)]
16040 
16041                                                                  where [TPGM_EXIT] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency
16042                                                                  (not
16043                                                                  data rate). */
16044         uint64_t tpgm                  : 10; /**< [ 21: 12](R/W) Indicates the programming time (tPGM) constraint used when running PPR sequence.
16045 
16046                                                                  For hard PPR (LMC()_PPR_CTL[SPPR] = 0), set this field as follows:
16047                                                                  RNDUP[TPGM(ns) / (1048576 * TCYC(ns))].
16048 
16049                                                                  For soft PPR (LMC()_PPR_CTL[SPPR] = 1), set this field as follows:
16050                                                                  RNDUP[TPGM(ns) / TCYC(ns)].
16051 
16052                                                                  [TPGM] is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
16053                                                                  rate). */
16054         uint64_t sppr                  : 1;  /**< [ 22: 22](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to run
16055                                                                  the soft PPR mode. */
16056         uint64_t skip_issue_security   : 1;  /**< [ 23: 23](R/W) Personality bit for the PPR sequence. When set, this field forces the sequence to skip
16057                                                                  issuing four consecutive MR0 commands that supply the security key. */
16058         uint64_t lrank_sel             : 3;  /**< [ 26: 24](R/W) Selects which logical rank to perform the post package repair sequence.
16059                                                                  Package ranks are selected by LMC()_MR_MPR_CTL[MR_WR_RANK]. */
16060         uint64_t reserved_27_63        : 37;
16061 #endif /* Word 0 - End */
16062     } cn83xx;
16063 };
16064 typedef union bdk_lmcx_ppr_ctl bdk_lmcx_ppr_ctl_t;
16065 
16066 static inline uint64_t BDK_LMCX_PPR_CTL(unsigned long a) __attribute__ ((pure, always_inline));
16067 static inline uint64_t BDK_LMCX_PPR_CTL(unsigned long a)
16068 {
16069     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
16070         return 0x87e0880003e0ll + 0x1000000ll * ((a) & 0x0);
16071     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
16072         return 0x87e0880003e0ll + 0x1000000ll * ((a) & 0x1);
16073     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
16074         return 0x87e0880003e0ll + 0x1000000ll * ((a) & 0x3);
16075     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
16076         return 0x87e0880003e0ll + 0x1000000ll * ((a) & 0x3);
16077     __bdk_csr_fatal("LMCX_PPR_CTL", 1, a, 0, 0, 0);
16078 }
16079 
16080 #define typedef_BDK_LMCX_PPR_CTL(a) bdk_lmcx_ppr_ctl_t
16081 #define bustype_BDK_LMCX_PPR_CTL(a) BDK_CSR_TYPE_RSL
16082 #define basename_BDK_LMCX_PPR_CTL(a) "LMCX_PPR_CTL"
16083 #define device_bar_BDK_LMCX_PPR_CTL(a) 0x0 /* PF_BAR0 */
16084 #define busnum_BDK_LMCX_PPR_CTL(a) (a)
16085 #define arguments_BDK_LMCX_PPR_CTL(a) (a),-1,-1,-1
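
/* Usage sketch (not part of the auto-generated register definitions): encoding the
 * JEDEC tPGM/tPGM_Exit/tPGMPST timings into an LMC(x)_PPR_CTL value using the RNDUP
 * formulas quoted in the field descriptions above. The DDR clock period is passed in
 * picoseconds (tcyc_ps, a caller-supplied assumption) so the round-up can be done in
 * integer math. For hard PPR the security key in LMC()_GENERAL_PURPOSE0/1 must still
 * be programmed separately, as described in the register comment. */
static inline uint64_t example_lmcx_ppr_ctl_encode(uint64_t tpgm_ns, uint64_t tpgm_exit_ns,
                                                   uint64_t tpgmpst_ns, uint64_t tcyc_ps,
                                                   int soft_ppr)
{
    bdk_lmcx_ppr_ctl_t c;
    /* Hard PPR divides tPGM by 1048576 DDR clocks, soft PPR by a single clock. */
    uint64_t tpgm_div = soft_ppr ? tcyc_ps : 1048576ull * tcyc_ps;

    c.u = 0;
    c.s.sppr      = soft_ppr ? 1 : 0;
    c.s.tpgm      = (tpgm_ns * 1000 + tpgm_div - 1) / tpgm_div;                  /* RNDUP */
    c.s.tpgm_exit = (tpgm_exit_ns * 1000 + tcyc_ps - 1) / tcyc_ps;               /* RNDUP */
    c.s.tpgmpst   = (tpgmpst_ns * 1000 + 1024 * tcyc_ps - 1) / (1024 * tcyc_ps); /* RNDUP */
    return c.u;
}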
16086 
16087 /**
16088  * Register (RSL) lmc#_ref_status
16089  *
16090  * LMC Refresh Pending Status Register
16091  * This register contains the status of the refresh pending counters.
16092  */
16093 union bdk_lmcx_ref_status
16094 {
16095     uint64_t u;
16096     struct bdk_lmcx_ref_status_s
16097     {
16098 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16099         uint64_t reserved_0_63         : 64;
16100 #else /* Word 0 - Little Endian */
16101         uint64_t reserved_0_63         : 64;
16102 #endif /* Word 0 - End */
16103     } s;
16104     struct bdk_lmcx_ref_status_cn8
16105     {
16106 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16107         uint64_t reserved_4_63         : 60;
16108         uint64_t ref_pend_max_clr      : 1;  /**< [  3:  3](R/W1C/H) Indicates that the number of pending refreshes has reached 7, requiring
16109                                                                  software to clear the flag by setting this field to 1.
16110                                                                  This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
16111         uint64_t ref_count             : 3;  /**< [  2:  0](RO/H) Reads back the number of pending refreshes that LMC has yet to execute. */
16112 #else /* Word 0 - Little Endian */
16113         uint64_t ref_count             : 3;  /**< [  2:  0](RO/H) Reads back the number of pending refreshes that LMC has yet to execute. */
16114         uint64_t ref_pend_max_clr      : 1;  /**< [  3:  3](R/W1C/H) Indicates that the number of pending refreshes has reached 7, requiring
16115                                                                  software to clear the flag by setting this field to 1.
16116                                                                  This is only useful when LMC()_EXT_CONFIG[REF_BLOCK] mode is engaged. */
16117         uint64_t reserved_4_63         : 60;
16118 #endif /* Word 0 - End */
16119     } cn8;
16120     struct bdk_lmcx_ref_status_cn9
16121     {
16122 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16123         uint64_t reserved_6_63         : 58;
16124         uint64_t ref_count1            : 3;  /**< [  5:  3](RO/H) Reads back the number of pending refreshes that LMC has yet to execute.
16125                                                                  This counter updates every TREFI window at TREFI/2. Only active if
16126                                                                  LMC()_EXT_CONFIG[REF_MODE] is using a pair refresh mode. This register
16127                                                                  is only reset on cold reset. */
16128         uint64_t ref_count0            : 3;  /**< [  2:  0](RO/H) Reads back the number of pending refreshes that LMC has yet to execute.
16129                                                                  This counter updates every TREFI. This register is only reset on cold reset. */
16130 #else /* Word 0 - Little Endian */
16131         uint64_t ref_count0            : 3;  /**< [  2:  0](RO/H) Reads back the number of pending refreshes that LMC has yet to execute.
16132                                                                  This counter updates every TREFI. This register is only reset on cold reset. */
16133         uint64_t ref_count1            : 3;  /**< [  5:  3](RO/H) Reads back the number of pending refreshes that LMC has yet to execute.
16134                                                                  This counter updates every TREFI window at TREFI/2. Only active if
16135                                                                  LMC()_EXT_CONFIG[REF_MODE] is using a pair refresh mode. This register
16136                                                                  is only reset on cold reset. */
16137         uint64_t reserved_6_63         : 58;
16138 #endif /* Word 0 - End */
16139     } cn9;
16140 };
16141 typedef union bdk_lmcx_ref_status bdk_lmcx_ref_status_t;
16142 
16143 static inline uint64_t BDK_LMCX_REF_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
16144 static inline uint64_t BDK_LMCX_REF_STATUS(unsigned long a)
16145 {
16146     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
16147         return 0x87e0880000a0ll + 0x1000000ll * ((a) & 0x0);
16148     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
16149         return 0x87e0880000a0ll + 0x1000000ll * ((a) & 0x1);
16150     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
16151         return 0x87e0880000a0ll + 0x1000000ll * ((a) & 0x3);
16152     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
16153         return 0x87e0880000a0ll + 0x1000000ll * ((a) & 0x3);
16154     __bdk_csr_fatal("LMCX_REF_STATUS", 1, a, 0, 0, 0);
16155 }
16156 
16157 #define typedef_BDK_LMCX_REF_STATUS(a) bdk_lmcx_ref_status_t
16158 #define bustype_BDK_LMCX_REF_STATUS(a) BDK_CSR_TYPE_RSL
16159 #define basename_BDK_LMCX_REF_STATUS(a) "LMCX_REF_STATUS"
16160 #define device_bar_BDK_LMCX_REF_STATUS(a) 0x0 /* PF_BAR0 */
16161 #define busnum_BDK_LMCX_REF_STATUS(a) (a)
16162 #define arguments_BDK_LMCX_REF_STATUS(a) (a),-1,-1,-1
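
/* Usage sketch (not part of the auto-generated register definitions): decoding the
 * pending-refresh counters from a raw LMC(x)_REF_STATUS value. On CN9XXX there are
 * two 3-bit counters (REF_COUNT1 is only meaningful when a pair refresh mode is
 * selected via LMC()_EXT_CONFIG[REF_MODE]); CN8XXX exposes a single REF_COUNT field
 * in the cn8 view instead. */
static inline unsigned example_lmcx_ref_status_pending_cn9(uint64_t raw)
{
    bdk_lmcx_ref_status_t r;
    r.u = raw;
    return r.cn9.ref_count0 + r.cn9.ref_count1;
}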
16163 
16164 /**
16165  * Register (RSL) lmc#_reset_ctl
16166  *
16167  * LMC Reset Control Register
16168  * Specify the RSL base addresses for the block.
16169  * Internal:
16170  * "DDR4RST DDR4 DRAM parts have a RESET# pin. The DDR4RST CSR field controls the assertion of
16171  * the 9xxx pin that attaches to RESET#. When DDR4RST is set, 9xxx asserts RESET#. When DDR4RST
16172  * is clear, 9xxx de-asserts RESET#. DDR4RST is set on a cold reset. Domain chip resets do not
16173  * affect the DDR4RST value. Outside of cold reset, only software CSR writes change the DDR4RST
16174  * value. DDR4PDOMAIN enables preservation  mode during a domain reset. When set, the LMC
16175  * automatically puts the attached DDR4 DRAM parts into self refresh (see LMC()_SEQ_CTL[SEQ_SEL])
16176  * at the beginning of a domain reset sequence, provided that LMC is up. When cleared, LMC does
16177  * not put the attached DDR4 DRAM parts into self-refresh during a
16178  * domain reset sequence. DDR4PDOMAIN is cleared on a cold reset. Domain chip resets do not
16179  * affect the DDR4PDOMAIN value. Outside of cold reset, only software CSR writes change the
16180  * DDR4PDOMAIN value. DDR4PSV may be useful for system software to determine when the DDR4
16181  * contents have been preserved. Cleared by hardware during a cold reset. Never cleared by
16182  * hardware during a domain reset. Set by hardware during a domain reset if the hardware
16183  * automatically put the DDR4 DRAM into self-refresh during the reset sequence. Can also be
16184  * written by software (to any value)."
16185  */
16186 union bdk_lmcx_reset_ctl
16187 {
16188     uint64_t u;
16189     struct bdk_lmcx_reset_ctl_s
16190     {
16191 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16192         uint64_t reserved_4_63         : 60;
16193         uint64_t ddr3psv               : 1;  /**< [  3:  3](R/W/H) Memory reset. 1 = DDR contents preserved.
16194 
16195                                                                  May be useful for system software to determine when the DDR3/DDR4 contents have been
16196                                                                  preserved.
16197                                                                  Cleared by hardware during a cold reset. Never cleared by hardware during a warm/soft
16198                                                                  reset. Set by hardware during a warm/soft reset if the hardware automatically put the
16199                                                                  DDR3/DDR4
16200                                                                  DRAM into self-refresh during the reset sequence.
16201                                                                  Can also be written by software (to any value). */
16202         uint64_t reserved_0_2          : 3;
16203 #else /* Word 0 - Little Endian */
16204         uint64_t reserved_0_2          : 3;
16205         uint64_t ddr3psv               : 1;  /**< [  3:  3](R/W/H) Memory reset. 1 = DDR contents preserved.
16206 
16207                                                                  May be useful for system software to determine when the DDR3/DDR4 contents have been
16208                                                                  preserved.
16209                                                                  Cleared by hardware during a cold reset. Never cleared by hardware during a warm/soft
16210                                                                  reset. Set by hardware during a warm/soft reset if the hardware automatically put the
16211                                                                  DDR3/DDR4
16212                                                                  DRAM into self-refresh during the reset sequence.
16213                                                                  Can also be written by software (to any value). */
16214         uint64_t reserved_4_63         : 60;
16215 #endif /* Word 0 - End */
16216     } s;
16217     struct bdk_lmcx_reset_ctl_cn8
16218     {
16219 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16220         uint64_t reserved_4_63         : 60;
16221         uint64_t ddr3psv               : 1;  /**< [  3:  3](R/W/H) Memory reset. 1 = DDR contents preserved.
16222 
16223                                                                  May be useful for system software to determine when the DDR3/DDR4 contents have been
16224                                                                  preserved.
16225                                                                  Cleared by hardware during a cold reset. Never cleared by hardware during a warm/soft
16226                                                                  reset. Set by hardware during a warm/soft reset if the hardware automatically put the
16227                                                                  DDR3/DDR4
16228                                                                  DRAM into self-refresh during the reset sequence.
16229                                                                  Can also be written by software (to any value). */
16230         uint64_t ddr3psoft             : 1;  /**< [  2:  2](R/W/H) Memory reset. 1 = Enable preserve mode during soft reset.
16231 
16232                                                                  Enables preserve mode during a soft reset. When set, the DDR3/DDR4 controller hardware
16233                                                                  automatically puts the attached DDR3/DDR4 DRAM parts into self-refresh at the beginning of
16234                                                                  a
16235                                                                  soft reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the DDR3/DDR4 controller
16236                                                                  is up. When clear, the DDR3/DDR4 controller hardware does not put the attached DDR3/DDR4
16237                                                                  DRAM
16238                                                                  parts into self-refresh during a soft reset sequence.
16239                                                                  DDR3PSOFT is cleared on a cold reset. Warm and soft chip resets do not affect the
16240                                                                  DDR3PSOFT value. Outside of cold reset, only software CSR write operations change the
16241                                                                  DDR3PSOFT value. */
16242         uint64_t ddr3pwarm             : 1;  /**< [  1:  1](R/W/H) Memory reset. 1 = Enable preserve mode during warm reset.
16243 
16244                                                                  Enables preserve mode during a warm reset. When set, the DDR3/DDR4 controller hardware
16245                                                                  automatically puts the attached DDR3/DDR4 DRAM parts into self-refresh at the beginning of
16246                                                                  a
16247                                                                  warm reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the DDR3/DDR4 controller
16248                                                                  is up. When clear, the DDR3/DDR4 controller hardware does not put the attached DDR3/DDR4
16249                                                                  DRAM
16250                                                                  parts into self-refresh during a warm reset sequence.
16251                                                                  DDR3PWARM is cleared on a cold reset. Warm and soft chip resets do not affect the
16252                                                                  DDR3PWARM value. Outside of cold reset, only software CSR write operations change the
16253                                                                  DDR3PWARM value.
16254 
16255                                                                  Note that if a warm reset follows a soft reset, DDR3PWARM has no effect, as the DDR3/DDR4
16256                                                                  controller is no longer up after any cold/warm/soft reset sequence. */
16257         uint64_t ddr3rst               : 1;  /**< [  0:  0](R/W/H) "Memory reset. 0 = Reset asserted; 1 = Reset deasserted.
16258 
16259                                                                  DDR3/DDR4 DRAM parts have a RESET# pin. The DDR3RST CSR field controls the assertion of
16260                                                                  the new CNXXXX pin that attaches to RESET#.
16261                                                                  When DDR3RST is set, CNXXXX deasserts RESET#.
16262                                                                  When DDR3RST is clear, CNXXXX asserts RESET#.
16263                                                                  DDR3RST is cleared on a cold reset. Warm and soft chip resets do not affect the DDR3RST
16264                                                                  value.
16265                                                                  Outside of cold reset, only software CSR write operations change the DDR3RST value." */
16266 #else /* Word 0 - Little Endian */
16267         uint64_t ddr3rst               : 1;  /**< [  0:  0](R/W/H) "Memory reset. 0 = Reset asserted; 1 = Reset deasserted.
16268 
16269                                                                  DDR3/DDR4 DRAM parts have a RESET# pin. The DDR3RST CSR field controls the assertion of
16270                                                                  the new CNXXXX pin that attaches to RESET#.
16271                                                                  When DDR3RST is set, CNXXXX deasserts RESET#.
16272                                                                  When DDR3RST is clear, CNXXXX asserts RESET#.
16273                                                                  DDR3RST is cleared on a cold reset. Warm and soft chip resets do not affect the DDR3RST
16274                                                                  value.
16275                                                                  Outside of cold reset, only software CSR write operations change the DDR3RST value." */
16276         uint64_t ddr3pwarm             : 1;  /**< [  1:  1](R/W/H) Memory reset. 1 = Enable preserve mode during warm reset.
16277 
16278                                                                  Enables preserve mode during a warm reset. When set, the DDR3/DDR4 controller hardware
16279                                                                  automatically puts the attached DDR3/DDR4 DRAM parts into self-refresh at the beginning of
16280                                                                  a
16281                                                                  warm reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the DDR3/DDR4 controller
16282                                                                  is up. When clear, the DDR3/DDR4 controller hardware does not put the attached DDR3/DDR4
16283                                                                  DRAM
16284                                                                  parts into self-refresh during a warm reset sequence.
16285                                                                  DDR3PWARM is cleared on a cold reset. Warm and soft chip resets do not affect the
16286                                                                  DDR3PWARM value. Outside of cold reset, only software CSR write operations change the
16287                                                                  DDR3PWARM value.
16288 
16289                                                                  Note that if a warm reset follows a soft reset, DDR3PWARM has no effect, as the DDR3/DDR4
16290                                                                  controller is no longer up after any cold/warm/soft reset sequence. */
16291         uint64_t ddr3psoft             : 1;  /**< [  2:  2](R/W/H) Memory reset. 1 = Enable preserve mode during soft reset.
16292 
16293                                                                  Enables preserve mode during a soft reset. When set, the DDR3/DDR4 controller hardware
16294                                                                  automatically puts the attached DDR3/DDR4 DRAM parts into self-refresh at the beginning of
16295                                                                  a
16296                                                                  soft reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the DDR3/DDR4 controller
16297                                                                  is up. When clear, the DDR3/DDR4 controller hardware does not put the attached DDR3/DDR4
16298                                                                  DRAM
16299                                                                  parts into self-refresh during a soft reset sequence.
16300                                                                  DDR3PSOFT is cleared on a cold reset. Warm and soft chip resets do not affect the
16301                                                                  DDR3PSOFT value. Outside of cold reset, only software CSR write operations change the
16302                                                                  DDR3PSOFT value. */
16303         uint64_t ddr3psv               : 1;  /**< [  3:  3](R/W/H) Memory reset. 1 = DDR contents preserved.
16304 
16305                                                                  May be useful for system software to determine when the DDR3/DDR4 contents have been
16306                                                                  preserved.
16307                                                                  Cleared by hardware during a cold reset. Never cleared by hardware during a warm/soft
16308                                                                  reset. Set by hardware during a warm/soft reset if the hardware automatically put the
16309                                                                  DDR3/DDR4
16310                                                                  DRAM into self-refresh during the reset sequence.
16311                                                                  Can also be written by software (to any value). */
16312         uint64_t reserved_4_63         : 60;
16313 #endif /* Word 0 - End */
16314     } cn8;
16315     struct bdk_lmcx_reset_ctl_cn9
16316     {
16317 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16318         uint64_t reserved_3_63         : 61;
16319         uint64_t ddr4psv               : 1;  /**< [  2:  2](R/W/H) Memory reset. 1 = DDR contents preserved.
16320 
16321                                                                  May be useful for system software to determine when the DDR4 contents have
16322                                                                  been preserved.  Cleared by hardware during a cold reset. Never cleared by
16323                                                                  hardware during a core domain reset. Set by hardware during a core domain reset
16324                                                                  if the hardware automatically put the DDR4 DRAM into self-refresh during the
16325                                                                  reset sequence.  Can also be written by software (to any value). */
16326         uint64_t ddr4pdomain           : 1;  /**< [  1:  1](R/W/H) Memory reset. 1 = Enable preserve mode during core domain reset.
16327 
16328                                                                  Enables preserve mode during a core domain reset. When set, the memory controller hardware
16329                                                                  automatically puts the attached DDR4 DRAM parts into self-refresh at the beginning of a
16330                                                                  core domain reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the controller is
16331                                                                  up. When clear, the controller hardware does not put the attached DDR4 DRAM parts into
16332                                                                  self-refresh during a core domain reset sequence.
16333 
16334                                                                  DDR4PDOMAIN is cleared on a cold reset. Core domain resets do not affect the
16335                                                                  DDR4PDOMAIN value. Outside of cold reset, only software CSR write operations change the
16336                                                                  DDR4PDOMAIN value. */
16337         uint64_t ddr4rst               : 1;  /**< [  0:  0](R/W/H) "Memory reset. 0 = Reset asserted; 1 = Reset deasserted.
16338 
16339                                                                  DDR4 DRAM parts have a RESET# pin. The DDR4RST CSR field controls the assertion of
16340                                                                  the new CNXXXX pin that attaches to RESET#.
16341                                                                  When DDR4RST is set, CNXXXX deasserts RESET#.
16342                                                                  When DDR4RST is clear, CNXXXX asserts RESET#.
16343                                                                  DDR4RST is cleared on a cold reset. Core domain resets do not affect the DDR4RST
16344                                                                  value.
16345                                                                  Outside of cold reset, only software CSR write operations change the DDR4RST value." */
16346 #else /* Word 0 - Little Endian */
16347         uint64_t ddr4rst               : 1;  /**< [  0:  0](R/W/H) "Memory reset. 0 = Reset asserted; 1 = Reset deasserted.
16348 
16349                                                                  DDR4 DRAM parts have a RESET# pin. The DDR4RST CSR field controls the assertion of
16350                                                                  the new CNXXXX pin that attaches to RESET#.
16351                                                                  When DDR4RST is set, CNXXXX deasserts RESET#.
16352                                                                  When DDR4RST is clear, CNXXXX asserts RESET#.
16353                                                                  DDR4RST is cleared on a cold reset. Core domain resets do not affect the DDR4RST
16354                                                                  value.
16355                                                                  Outside of cold reset, only software CSR write operations change the DDR4RST value." */
16356         uint64_t ddr4pdomain           : 1;  /**< [  1:  1](R/W/H) Memory reset. 1 = Enable preserve mode during core domain reset.
16357 
16358                                                                  Enables preserve mode during a core domain reset. When set, the memory controller hardware
16359                                                                  automatically puts the attached DDR4 DRAM parts into self-refresh at the beginning of a
16360                                                                  core domain reset sequence (see LMC()_SEQ_CTL[SEQ_SEL]), provided that the controller is
16361                                                                  up. When clear, the controller hardware does not put the attached DDR4 DRAM parts into
16362                                                                  self-refresh during a core domain reset sequence.
16363 
16364                                                                  DDR4PDOMAIN is cleared on a cold reset. Core domain resets do not affect the
16365                                                                  DDR4PDOMAIN value. Outside of cold reset, only software CSR write operations change the
16366                                                                  DDR4PDOMAIN value. */
16367         uint64_t ddr4psv               : 1;  /**< [  2:  2](R/W/H) Memory reset. 1 = DDR contents preserved.
16368 
16369                                                                  May be useful for system software to determine when the DDR4 contents have
16370                                                                  been preserved.  Cleared by hardware during a cold reset. Never cleared by
16371                                                                  hardware during a core domain reset. Set by hardware during a core domain reset
16372                                                                  if the hardware automatically put the DDR4 DRAM into self-refresh during the
16373                                                                  reset sequence.  Can also be written by software (to any value). */
16374         uint64_t reserved_3_63         : 61;
16375 #endif /* Word 0 - End */
16376     } cn9;
16377 };
16378 typedef union bdk_lmcx_reset_ctl bdk_lmcx_reset_ctl_t;
16379 
16380 static inline uint64_t BDK_LMCX_RESET_CTL(unsigned long a) __attribute__ ((pure, always_inline));
16381 static inline uint64_t BDK_LMCX_RESET_CTL(unsigned long a)
16382 {
16383     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
16384         return 0x87e088000180ll + 0x1000000ll * ((a) & 0x0);
16385     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
16386         return 0x87e088000180ll + 0x1000000ll * ((a) & 0x1);
16387     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
16388         return 0x87e088000180ll + 0x1000000ll * ((a) & 0x3);
16389     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
16390         return 0x87e088000180ll + 0x1000000ll * ((a) & 0x3);
16391     __bdk_csr_fatal("LMCX_RESET_CTL", 1, a, 0, 0, 0);
16392 }
16393 
16394 #define typedef_BDK_LMCX_RESET_CTL(a) bdk_lmcx_reset_ctl_t
16395 #define bustype_BDK_LMCX_RESET_CTL(a) BDK_CSR_TYPE_RSL
16396 #define basename_BDK_LMCX_RESET_CTL(a) "LMCX_RESET_CTL"
16397 #define device_bar_BDK_LMCX_RESET_CTL(a) 0x0 /* PF_BAR0 */
16398 #define busnum_BDK_LMCX_RESET_CTL(a) (a)
16399 #define arguments_BDK_LMCX_RESET_CTL(a) (a),-1,-1,-1
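
/* Usage sketch (editorial addition, not part of the auto-generated
 * definitions): on CN9XXX parts, software can request that DRAM contents be
 * kept across a core domain reset by setting [DDR4PDOMAIN], then read
 * [DDR4PSV] afterwards to confirm the parts were actually placed in
 * self-refresh.  The sketch assumes the BDK_CSR_MODIFY/BDK_CSR_READ helpers
 * from bdk-csr.h and hypothetical 'node'/'lmc' variables.
 *
 *     // Request preserve mode before initiating the core domain reset.
 *     BDK_CSR_MODIFY(c, node, BDK_LMCX_RESET_CTL(lmc),
 *         c.cn9.ddr4pdomain = 1);
 *
 *     // After the reset, check whether the DRAM contents were preserved.
 *     bdk_lmcx_reset_ctl_t ctl;
 *     ctl.u = BDK_CSR_READ(node, BDK_LMCX_RESET_CTL(lmc));
 *     int contents_preserved = (ctl.cn9.ddr4psv != 0);
 */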
16400 
16401 /**
16402  * Register (RSL) lmc#_retry_config
16403  *
16404  * LMC Automatic Retry Configuration Registers
16405  * This register configures automatic retry operation.
16406  */
16407 union bdk_lmcx_retry_config
16408 {
16409     uint64_t u;
16410     struct bdk_lmcx_retry_config_s
16411     {
16412 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16413         uint64_t reserved_56_63        : 8;
16414         uint64_t max_errors            : 24; /**< [ 55: 32](R/W) Maximum number of errors before errors are ignored. */
16415         uint64_t reserved_13_31        : 19;
16416         uint64_t error_continue        : 1;  /**< [ 12: 12](WO) If LMC()_RETRY_CONFIG[AUTO_ERROR_CONTINUE] is cleared, LMC will wait
16417                                                                  for a one to be written to LMC()_RETRY_CONFIG[ERROR_CONTINUE] before
16418                                                                  continuing operations after an error. */
16419         uint64_t reserved_9_11         : 3;
16420         uint64_t auto_error_continue   : 1;  /**< [  8:  8](R/W) When set, LMC will automatically proceed with error handling and normal
16421                                                                  operation after an error occurs.  If clear, LMC will cease all operations
16422                                                                  except for refresh as soon as possible, and will not continue with error
16423                                                                  handling or normal operation until LMC()_RETRY_CONFIG[ERROR_CONTINUE]
16424                                                                  is written with a one. */
16425         uint64_t reserved_5_7          : 3;
16426         uint64_t pulse_count_auto_clr  : 1;  /**< [  4:  4](R/W) When set, LMC()_RETRY_STATUS[ERROR_PULSE_COUNT_VALID] will clear
16427                                                                  whenever the error interrupt is cleared. */
16428         uint64_t reserved_1_3          : 3;
16429         uint64_t retry_enable          : 1;  /**< [  0:  0](R/W) Enable retry on errors. */
16430 #else /* Word 0 - Little Endian */
16431         uint64_t retry_enable          : 1;  /**< [  0:  0](R/W) Enable retry on errors. */
16432         uint64_t reserved_1_3          : 3;
16433         uint64_t pulse_count_auto_clr  : 1;  /**< [  4:  4](R/W) When set, LMC()_RETRY_STATUS[ERROR_PULSE_COUNT_VALID] will clear
16434                                                                  whenever the error interrupt is cleared. */
16435         uint64_t reserved_5_7          : 3;
16436         uint64_t auto_error_continue   : 1;  /**< [  8:  8](R/W) When set, LMC will automatically proceed with error handling and normal
16437                                                                  operation after an error occurs.  If clear, LMC will cease all operations
16438                                                                  except for refresh as soon as possible, and will not continue with error
16439                                                                  handling or normal operation until LMC()_RETRY_CONFIG[ERROR_CONTINUE]
16440                                                                  is written with a one. */
16441         uint64_t reserved_9_11         : 3;
16442         uint64_t error_continue        : 1;  /**< [ 12: 12](WO) If LMC()_RETRY_CONFIG[AUTO_ERROR_CONTINUE] is cleared, LMC will wait
16443                                                                  for a one to be written to LMC()_RETRY_CONFIG[ERROR_CONTINUE] before
16444                                                                  continuing operations after an error. */
16445         uint64_t reserved_13_31        : 19;
16446         uint64_t max_errors            : 24; /**< [ 55: 32](R/W) Maximum number of errors before errors are ignored. */
16447         uint64_t reserved_56_63        : 8;
16448 #endif /* Word 0 - End */
16449     } s;
16450     /* struct bdk_lmcx_retry_config_s cn; */
16451 };
16452 typedef union bdk_lmcx_retry_config bdk_lmcx_retry_config_t;
16453 
16454 static inline uint64_t BDK_LMCX_RETRY_CONFIG(unsigned long a) __attribute__ ((pure, always_inline));
16455 static inline uint64_t BDK_LMCX_RETRY_CONFIG(unsigned long a)
16456 {
16457     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
16458         return 0x87e088000110ll + 0x1000000ll * ((a) & 0x0);
16459     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
16460         return 0x87e088000110ll + 0x1000000ll * ((a) & 0x1);
16461     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
16462         return 0x87e088000110ll + 0x1000000ll * ((a) & 0x3);
16463     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
16464         return 0x87e088000110ll + 0x1000000ll * ((a) & 0x3);
16465     __bdk_csr_fatal("LMCX_RETRY_CONFIG", 1, a, 0, 0, 0);
16466 }
16467 
16468 #define typedef_BDK_LMCX_RETRY_CONFIG(a) bdk_lmcx_retry_config_t
16469 #define bustype_BDK_LMCX_RETRY_CONFIG(a) BDK_CSR_TYPE_RSL
16470 #define basename_BDK_LMCX_RETRY_CONFIG(a) "LMCX_RETRY_CONFIG"
16471 #define device_bar_BDK_LMCX_RETRY_CONFIG(a) 0x0 /* PF_BAR0 */
16472 #define busnum_BDK_LMCX_RETRY_CONFIG(a) (a)
16473 #define arguments_BDK_LMCX_RETRY_CONFIG(a) (a),-1,-1,-1
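
/* Usage sketch (editorial addition, not part of the auto-generated
 * definitions): enabling automatic retry so LMC resumes operation after an
 * error without waiting for a software write to [ERROR_CONTINUE], with an
 * arbitrary [MAX_ERRORS] threshold.  Assumes the BDK_CSR_MODIFY helper from
 * bdk-csr.h and hypothetical 'node'/'lmc' variables.
 *
 *     BDK_CSR_MODIFY(c, node, BDK_LMCX_RETRY_CONFIG(lmc),
 *         c.s.retry_enable = 1;            // retry on errors
 *         c.s.auto_error_continue = 1;     // resume without an ERROR_CONTINUE write
 *         c.s.pulse_count_auto_clr = 1;    // clear pulse count with the interrupt
 *         c.s.max_errors = 64);            // ignore errors beyond this count
 */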
16474 
16475 /**
16476  * Register (RSL) lmc#_retry_status
16477  *
16478  * LMC Automatic Retry Status Registers
16479  * This register provides status on automatic retry operation.
16480  */
16481 union bdk_lmcx_retry_status
16482 {
16483     uint64_t u;
16484     struct bdk_lmcx_retry_status_s
16485     {
16486 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16487         uint64_t clear_error_count     : 1;  /**< [ 63: 63](WO) Clear the error count, one shot operation. */
16488         uint64_t clear_error_pulse_count : 1;/**< [ 62: 62](WO) Clear the error pulse count, one shot operation. */
16489         uint64_t reserved_57_61        : 5;
16490         uint64_t error_pulse_count_valid : 1;/**< [ 56: 56](RO/H) When set, indicates that [ERROR_PULSE_COUNT] is valid, i.e. an error
16491                                                                  pulse has been captured since the count was last cleared (see
16492                                                                  LMC()_RETRY_CONFIG[PULSE_COUNT_AUTO_CLR] and [CLEAR_ERROR_PULSE_COUNT]). */
16493         uint64_t error_pulse_count_sat : 1;  /**< [ 55: 55](RO/H) When set and the count is valid, indicates that the counter has saturated,
16494                                                                  which effectively indicates that a command error has occurred and not a CRC
16495                                                                  error. */
16496         uint64_t reserved_52_54        : 3;
16497         uint64_t error_pulse_count     : 4;  /**< [ 51: 48](RO/H) Count of cycles in last error pulse since clear.  This count will be cleared
16498                                                                  either by clearing the interrupt or writing a one to the pulse count clear bit. */
16499         uint64_t reserved_45_47        : 3;
16500         uint64_t error_sequence        : 5;  /**< [ 44: 40](RO/H) Sequence number for sequence that was running when error occurred. */
16501         uint64_t reserved_33_39        : 7;
16502         uint64_t error_type            : 1;  /**< [ 32: 32](RO/H) Error type:
16503                                                                  0 = Error during a sequence run.
16504                                                                  1 = Error during normal operation, which means a read or write operation. Effectively this
16505                                                                  means a command error for a read or write operation, or a CRC error for a write data
16506                                                                  operation. */
16507         uint64_t reserved_24_31        : 8;
16508         uint64_t error_count           : 24; /**< [ 23:  0](RO/H) Number of errors encountered since last cleared. */
16509 #else /* Word 0 - Little Endian */
16510         uint64_t error_count           : 24; /**< [ 23:  0](RO/H) Number of errors encountered since last cleared. */
16511         uint64_t reserved_24_31        : 8;
16512         uint64_t error_type            : 1;  /**< [ 32: 32](RO/H) Error type:
16513                                                                  0 = Error during a sequence run.
16514                                                                  1 = Error during normal operation, which means a read or write operation. Effectively this
16515                                                                  means a command error for a read or write operation, or a CRC error for a write data
16516                                                                  operation. */
16517         uint64_t reserved_33_39        : 7;
16518         uint64_t error_sequence        : 5;  /**< [ 44: 40](RO/H) Sequence number for sequence that was running when error occurred. */
16519         uint64_t reserved_45_47        : 3;
16520         uint64_t error_pulse_count     : 4;  /**< [ 51: 48](RO/H) Count of cycles in last error pulse since clear.  This count will be cleared
16521                                                                  either by clearing the interrupt or writing a one to the pulse count clear bit. */
16522         uint64_t reserved_52_54        : 3;
16523         uint64_t error_pulse_count_sat : 1;  /**< [ 55: 55](RO/H) When set and the count is valid, indicates that the counter has saturated,
16524                                                                  which effectively indicates that a command error has occurred and not a CRC
16525                                                                  error. */
16526         uint64_t error_pulse_count_valid : 1;/**< [ 56: 56](RO/H) When set, indicates that [ERROR_PULSE_COUNT] is valid, i.e. an error
16527                                                                  pulse has been captured since the count was last cleared (see
16528                                                                  LMC()_RETRY_CONFIG[PULSE_COUNT_AUTO_CLR] and [CLEAR_ERROR_PULSE_COUNT]). */
16529         uint64_t reserved_57_61        : 5;
16530         uint64_t clear_error_pulse_count : 1;/**< [ 62: 62](WO) Clear the error pulse count, one shot operation. */
16531         uint64_t clear_error_count     : 1;  /**< [ 63: 63](WO) Clear the error count, one shot operation. */
16532 #endif /* Word 0 - End */
16533     } s;
16534     /* struct bdk_lmcx_retry_status_s cn; */
16535 };
16536 typedef union bdk_lmcx_retry_status bdk_lmcx_retry_status_t;
16537 
16538 static inline uint64_t BDK_LMCX_RETRY_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
16539 static inline uint64_t BDK_LMCX_RETRY_STATUS(unsigned long a)
16540 {
16541     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
16542         return 0x87e088000118ll + 0x1000000ll * ((a) & 0x0);
16543     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
16544         return 0x87e088000118ll + 0x1000000ll * ((a) & 0x1);
16545     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
16546         return 0x87e088000118ll + 0x1000000ll * ((a) & 0x3);
16547     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
16548         return 0x87e088000118ll + 0x1000000ll * ((a) & 0x3);
16549     __bdk_csr_fatal("LMCX_RETRY_STATUS", 1, a, 0, 0, 0);
16550 }
16551 
16552 #define typedef_BDK_LMCX_RETRY_STATUS(a) bdk_lmcx_retry_status_t
16553 #define bustype_BDK_LMCX_RETRY_STATUS(a) BDK_CSR_TYPE_RSL
16554 #define basename_BDK_LMCX_RETRY_STATUS(a) "LMCX_RETRY_STATUS"
16555 #define device_bar_BDK_LMCX_RETRY_STATUS(a) 0x0 /* PF_BAR0 */
16556 #define busnum_BDK_LMCX_RETRY_STATUS(a) (a)
16557 #define arguments_BDK_LMCX_RETRY_STATUS(a) (a),-1,-1,-1
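
/* Usage sketch (editorial addition, not part of the auto-generated
 * definitions): reading the retry error information and then clearing the
 * write-only, one-shot clear bits.  Assumes the BDK_CSR_READ/BDK_CSR_WRITE
 * helpers from bdk-csr.h; 'node', 'lmc' and report() are placeholders.
 *
 *     bdk_lmcx_retry_status_t st;
 *     st.u = BDK_CSR_READ(node, BDK_LMCX_RETRY_STATUS(lmc));
 *     if (st.s.error_pulse_count_valid)
 *         report(st.s.error_type, st.s.error_sequence, st.s.error_count);
 *
 *     // Reset the counters once the error has been handled.
 *     st.u = 0;
 *     st.s.clear_error_count = 1;
 *     st.s.clear_error_pulse_count = 1;
 *     BDK_CSR_WRITE(node, BDK_LMCX_RETRY_STATUS(lmc), st.u);
 */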
16558 
16559 /**
16560  * Register (RSL) lmc#_rlevel_ctl
16561  *
16562  * LMC Read Level Control Register
16563  */
16564 union bdk_lmcx_rlevel_ctl
16565 {
16566     uint64_t u;
16567     struct bdk_lmcx_rlevel_ctl_s
16568     {
16569 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16570         uint64_t reserved_48_63        : 16;
16571         uint64_t rank3_status          : 2;  /**< [ 47: 46](RO/H) Indicates status of the rank3 read-leveling and where the BYTEn programmings came from:
16572                                                                  0x0 = LMC()_RLEVEL_RANK(3) values are their reset value.
16573                                                                  0x1 = LMC()_RLEVEL_RANK(3) values were set via a CSR write.
16574                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(3) values are
16575                                                                  unpredictable).
16576                                                                  0x3 = LMC()_RLEVEL_RANK(3) values came from a complete read-leveling sequence. */
16577         uint64_t rank2_status          : 2;  /**< [ 45: 44](RO/H) Indicates status of the rank2 read-leveling and where the BYTEn programmings came from:
16578                                                                  0x0 = LMC()_RLEVEL_RANK(2) values are their reset value.
16579                                                                  0x1 = LMC()_RLEVEL_RANK(2) values were set via a CSR write.
16580                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(2) values are
16581                                                                  unpredictable).
16582                                                                  0x3 = LMC()_RLEVEL_RANK(2) values came from a complete read-leveling sequence. */
16583         uint64_t rank1_status          : 2;  /**< [ 43: 42](RO/H) Indicates status of the rank1 read-leveling and where the BYTEn programmings came from:
16584                                                                  0x0 = LMC()_RLEVEL_RANK(1) values are their reset value.
16585                                                                  0x1 = LMC()_RLEVEL_RANK(1) values were set via a CSR write.
16586                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(1) values are
16587                                                                  unpredictable).
16588                                                                  0x3 = LMC()_RLEVEL_RANK(1) values came from a complete read-leveling sequence. */
16589         uint64_t rank0_status          : 2;  /**< [ 41: 40](RO/H) Indicates status of the rank0 read-leveling and where the BYTEn programmings came from:
16590                                                                  0x0 = LMC()_RLEVEL_RANK(0) values are their reset value.
16591                                                                  0x1 = LMC()_RLEVEL_RANK(0) values were set via a CSR write.
16592                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(0) values are
16593                                                                  unpredictable).
16594                                                                  0x3 = LMC()_RLEVEL_RANK(0) values came from a complete read-leveling sequence. */
16595         uint64_t reserved_33_39        : 7;
16596         uint64_t tccd_sel              : 1;  /**< [ 32: 32](RO) Reserved. */
16597         uint64_t pattern               : 8;  /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
16598         uint64_t reserved_22_23        : 2;
16599         uint64_t delay_unload_3        : 1;  /**< [ 21: 21](R/W) Reserved, must be set.
16600                                                                  Internal:
16601                                                                  When set, unload the PHY silo one cycle later during
16602                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
16603                                                                  normally be set, particularly at higher speeds. */
16604         uint64_t delay_unload_2        : 1;  /**< [ 20: 20](R/W) Reserved, must be set.
16605                                                                  Internal:
16606                                                                  When set, unload the PHY silo one cycle later during
16607                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
16608                                                                  normally be set. */
16609         uint64_t delay_unload_1        : 1;  /**< [ 19: 19](R/W) Reserved, must be set.
16610                                                                  Internal:
16611                                                                  When set, unload the PHY silo one cycle later during
16612                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
16613                                                                  normally be set. */
16614         uint64_t delay_unload_0        : 1;  /**< [ 18: 18](R/W) Reserved, must be set.
16615                                                                  Internal:
16616                                                                  When set, unload the PHY silo one cycle later during
16617                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
16618                                                                  normally be set. */
16619         uint64_t bitmask               : 8;  /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
16620         uint64_t or_dis                : 1;  /**< [  9:  9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
16621                                                                  should normally not be set. */
16622         uint64_t offset_en             : 1;  /**< [  8:  8](R/W) When set, LMC attempts to select the read leveling setting that is
16623                                                                  LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
16624                                                                  in the largest contiguous sequence of passing settings. When clear, or if the setting
16625                                                                  selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
16626                                                                  the largest contiguous sequence of passing settings, rounding earlier when necessary. */
16627         uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
16628         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
16629 #else /* Word 0 - Little Endian */
16630         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
16631         uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
16632         uint64_t offset_en             : 1;  /**< [  8:  8](R/W) When set, LMC attempts to select the read leveling setting that is
16633                                                                  LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
16634                                                                  in the largest contiguous sequence of passing settings. When clear, or if the setting
16635                                                                  selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
16636                                                                  the largest contiguous sequence of passing settings, rounding earlier when necessary. */
16637         uint64_t or_dis                : 1;  /**< [  9:  9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
16638                                                                  should normally not be set. */
16639         uint64_t bitmask               : 8;  /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
16640         uint64_t delay_unload_0        : 1;  /**< [ 18: 18](R/W) Reserved, must be set.
16641                                                                  Internal:
16642                                                                  When set, unload the PHY silo one cycle later during
16643                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
16644                                                                  normally be set. */
16645         uint64_t delay_unload_1        : 1;  /**< [ 19: 19](R/W) Reserved, must be set.
16646                                                                  Internal:
16647                                                                  When set, unload the PHY silo one cycle later during
16648                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
16649                                                                  normally be set. */
16650         uint64_t delay_unload_2        : 1;  /**< [ 20: 20](R/W) Reserved, must be set.
16651                                                                  Internal:
16652                                                                  When set, unload the PHY silo one cycle later during
16653                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
16654                                                                  normally be set. */
16655         uint64_t delay_unload_3        : 1;  /**< [ 21: 21](R/W) Reserved, must be set.
16656                                                                  Internal:
16657                                                                  When set, unload the PHY silo one cycle later during
16658                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
16659                                                                  normally be set, particularly at higher speeds. */
16660         uint64_t reserved_22_23        : 2;
16661         uint64_t pattern               : 8;  /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
16662         uint64_t tccd_sel              : 1;  /**< [ 32: 32](RO) Reserved. */
16663         uint64_t reserved_33_39        : 7;
16664         uint64_t rank0_status          : 2;  /**< [ 41: 40](RO/H) Indicates status of the rank0 read-leveling and where the BYTEn programmings came from:
16665                                                                  0x0 = LMC()_RLEVEL_RANK(0) values are their reset value.
16666                                                                  0x1 = LMC()_RLEVEL_RANK(0) values were set via a CSR write.
16667                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(0) values are
16668                                                                  unpredictable).
16669                                                                  0x3 = LMC()_RLEVEL_RANK(0) values came from a complete read-leveling sequence. */
16670         uint64_t rank1_status          : 2;  /**< [ 43: 42](RO/H) Indicates status of the rank1 read-leveling and where the BYTEn programmings came from:
16671                                                                  0x0 = LMC()_RLEVEL_RANK(1) values are their reset value.
16672                                                                  0x1 = LMC()_RLEVEL_RANK(1) values were set via a CSR write.
16673                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(1) values are
16674                                                                  unpredictable).
16675                                                                  0x3 = LMC()_RLEVEL_RANK(1) values came from a complete read-leveling sequence. */
16676         uint64_t rank2_status          : 2;  /**< [ 45: 44](RO/H) Indicates status of the rank2 read-leveling and where the BYTEn programmings came from:
16677                                                                  0x0 = LMC()_RLEVEL_RANK(2) values are their reset value.
16678                                                                  0x1 = LMC()_RLEVEL_RANK(2) values were set via a CSR write.
16679                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(2) values are
16680                                                                  unpredictable).
16681                                                                  0x3 = LMC()_RLEVEL_RANK(2) values came from a complete read-leveling sequence. */
16682         uint64_t rank3_status          : 2;  /**< [ 47: 46](RO/H) Indicates status of the rank3 read-leveling and where the BYTEn programmings came from:
16683                                                                  0x0 = LMC()_RLEVEL_RANK(3) values are their reset value.
16684                                                                  0x1 = LMC()_RLEVEL_RANK(3) values were set via a CSR write.
16685                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(3) values are
16686                                                                  unpredictable).
16687                                                                  0x3 = LMC()_RLEVEL_RANK(3) values came from a complete read-leveling sequence. */
16688         uint64_t reserved_48_63        : 16;
16689 #endif /* Word 0 - End */
16690     } s;
16691     struct bdk_lmcx_rlevel_ctl_cn88xxp1
16692     {
16693 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16694         uint64_t reserved_33_63        : 31;
16695         uint64_t tccd_sel              : 1;  /**< [ 32: 32](RO) Reserved. */
16696         uint64_t pattern               : 8;  /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
16697         uint64_t reserved_22_23        : 2;
16698         uint64_t delay_unload_3        : 1;  /**< [ 21: 21](R/W) Reserved, must be set.
16699                                                                  Internal:
16700                                                                  When set, unload the PHY silo one cycle later during
16701                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
16702                                                                  normally be set, particularly at higher speeds. */
16703         uint64_t delay_unload_2        : 1;  /**< [ 20: 20](R/W) Reserved, must be set.
16704                                                                  Internal:
16705                                                                  When set, unload the PHY silo one cycle later during
16706                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
16707                                                                  normally be set. */
16708         uint64_t delay_unload_1        : 1;  /**< [ 19: 19](R/W) Reserved, must be set.
16709                                                                  Internal:
16710                                                                  When set, unload the PHY silo one cycle later during
16711                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
16712                                                                  normally be set. */
16713         uint64_t delay_unload_0        : 1;  /**< [ 18: 18](R/W) Reserved, must be set.
16714                                                                  Internal:
16715                                                                  When set, unload the PHY silo one cycle later during
16716                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
16717                                                                  normally be set. */
16718         uint64_t bitmask               : 8;  /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
16719         uint64_t or_dis                : 1;  /**< [  9:  9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
16720                                                                  should normally not be set. */
16721         uint64_t offset_en             : 1;  /**< [  8:  8](R/W) When set, LMC attempts to select the read leveling setting that is
16722                                                                  LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
16723                                                                  in the largest contiguous sequence of passing settings. When clear, or if the setting
16724                                                                  selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
16725                                                                  the largest contiguous sequence of passing settings, rounding earlier when necessary. */
16726         uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
16727         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
16728 #else /* Word 0 - Little Endian */
16729         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
16730         uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
16731         uint64_t offset_en             : 1;  /**< [  8:  8](R/W) When set, LMC attempts to select the read leveling setting that is
16732                                                                  LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
16733                                                                  in the largest contiguous sequence of passing settings. When clear, or if the setting
16734                                                                  selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
16735                                                                  the largest contiguous sequence of passing settings, rounding earlier when necessary. */
16736         uint64_t or_dis                : 1;  /**< [  9:  9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
16737                                                                  should normally not be set. */
16738         uint64_t bitmask               : 8;  /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
16739         uint64_t delay_unload_0        : 1;  /**< [ 18: 18](R/W) Reserved, must be set.
16740                                                                  Internal:
16741                                                                  When set, unload the PHY silo one cycle later during
16742                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
16743                                                                  normally be set. */
16744         uint64_t delay_unload_1        : 1;  /**< [ 19: 19](R/W) Reserved, must be set.
16745                                                                  Internal:
16746                                                                  When set, unload the PHY silo one cycle later during
16747                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
16748                                                                  normally be set. */
16749         uint64_t delay_unload_2        : 1;  /**< [ 20: 20](R/W) Reserved, must be set.
16750                                                                  Internal:
16751                                                                  When set, unload the PHY silo one cycle later during
16752                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
16753                                                                  normally be set. */
16754         uint64_t delay_unload_3        : 1;  /**< [ 21: 21](R/W) Reserved, must be set.
16755                                                                  Internal:
16756                                                                  When set, unload the PHY silo one cycle later during
16757                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
16758                                                                  normally be set, particularly at higher speeds. */
16759         uint64_t reserved_22_23        : 2;
16760         uint64_t pattern               : 8;  /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
16761         uint64_t tccd_sel              : 1;  /**< [ 32: 32](RO) Reserved. */
16762         uint64_t reserved_33_63        : 31;
16763 #endif /* Word 0 - End */
16764     } cn88xxp1;
16765     struct bdk_lmcx_rlevel_ctl_cn9
16766     {
16767 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16768         uint64_t reserved_48_63        : 16;
16769         uint64_t rank3_status          : 2;  /**< [ 47: 46](RO/H) Indicates status of the rank3 read-leveling and where the BYTEn programmings came from:
16770                                                                  0x0 = LMC()_RLEVEL_RANK(3) values are their reset value.
16771                                                                  0x1 = LMC()_RLEVEL_RANK(3) values were set via a CSR write.
16772                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(3) values are
16773                                                                  unpredictable).
16774                                                                  0x3 = LMC()_RLEVEL_RANK(3) values came from a complete read-leveling sequence. */
16775         uint64_t rank2_status          : 2;  /**< [ 45: 44](RO/H) Indicates status of the rank2 read-leveling and where the BYTEn programmings came from:
16776                                                                  0x0 = LMC()_RLEVEL_RANK(2) values are their reset value.
16777                                                                  0x1 = LMC()_RLEVEL_RANK(2) values were set via a CSR write.
16778                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(2) values are
16779                                                                  unpredictable).
16780                                                                  0x3 = LMC()_RLEVEL_RANK(2) values came from a complete read-leveling sequence. */
16781         uint64_t rank1_status          : 2;  /**< [ 43: 42](RO/H) Indicates status of the rank1 read-leveling and where the BYTEn programmings came from:
16782                                                                  0x0 = LMC()_RLEVEL_RANK(1) values are their reset value.
16783                                                                  0x1 = LMC()_RLEVEL_RANK(1) values were set via a CSR write.
16784                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(1) values are
16785                                                                  unpredictable).
16786                                                                  0x3 = LMC()_RLEVEL_RANK(1) values came from a complete read-leveling sequence. */
16787         uint64_t rank0_status          : 2;  /**< [ 41: 40](RO/H) Indicates status of the rank0 read-leveling and where the BYTEn programmings came from:
16788                                                                  0x0 = LMC()_RLEVEL_RANK(0) values are their reset value.
16789                                                                  0x1 = LMC()_RLEVEL_RANK(0) values were set via a CSR write.
16790                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(0) values are
16791                                                                  unpredictable).
16792                                                                  0x3 = LMC()_RLEVEL_RANK(0) values came from a complete read-leveling sequence. */
16793         uint64_t reserved_33_39        : 7;
16794         uint64_t tccd_sel              : 1;  /**< [ 32: 32](R/W) When set, the read leveling sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to
16795                                                                  space out back-to-back read commands. Otherwise the back-to-back
16796                                                                  read commands are spaced out by a default of 4 cycles. */
16797         uint64_t pattern               : 8;  /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
16798         uint64_t reserved_22_23        : 2;
16799         uint64_t delay_unload_3        : 1;  /**< [ 21: 21](R/W) Reserved, must be set.
16800                                                                  Internal:
16801                                                                  When set, unload the PHY silo one cycle later during
16802                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
16803                                                                  normally be set, particularly at higher speeds. */
16804         uint64_t delay_unload_2        : 1;  /**< [ 20: 20](R/W) Reserved, must be set.
16805                                                                  Internal:
16806                                                                  When set, unload the PHY silo one cycle later during
16807                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
16808                                                                  normally be set. */
16809         uint64_t delay_unload_1        : 1;  /**< [ 19: 19](R/W) Reserved, must be set.
16810                                                                  Internal:
16811                                                                  When set, unload the PHY silo one cycle later during
16812                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
16813                                                                  normally be set. */
16814         uint64_t delay_unload_0        : 1;  /**< [ 18: 18](R/W) Reserved, must be set.
16815                                                                  Internal:
16816                                                                  When set, unload the PHY silo one cycle later during
16817                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
16818                                                                  normally be set. */
16819         uint64_t bitmask               : 8;  /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
16820         uint64_t or_dis                : 1;  /**< [  9:  9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
16821                                                                  should normally not be set. */
16822         uint64_t offset_en             : 1;  /**< [  8:  8](R/W) When set, LMC attempts to select the read leveling setting that is
16823                                                                  LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
16824                                                                  in the largest contiguous sequence of passing settings. When clear, or if the setting
16825                                                                  selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
16826                                                                  the largest contiguous sequence of passing settings, rounding earlier when necessary. */
16827         uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
16828         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
16829 #else /* Word 0 - Little Endian */
16830         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
16831         uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
16832         uint64_t offset_en             : 1;  /**< [  8:  8](R/W) When set, LMC attempts to select the read leveling setting that is
16833                                                                  LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
16834                                                                  in the largest contiguous sequence of passing settings. When clear, or if the setting
16835                                                                  selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
16836                                                                  the largest contiguous sequence of passing settings, rounding earlier when necessary. */
16837         uint64_t or_dis                : 1;  /**< [  9:  9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
16838                                                                  should normally not be set. */
16839         uint64_t bitmask               : 8;  /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
16840         uint64_t delay_unload_0        : 1;  /**< [ 18: 18](R/W) Reserved, must be set.
16841                                                                  Internal:
16842                                                                  When set, unload the PHY silo one cycle later during
16843                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
16844                                                                  normally be set. */
16845         uint64_t delay_unload_1        : 1;  /**< [ 19: 19](R/W) Reserved, must be set.
16846                                                                  Internal:
16847                                                                  When set, unload the PHY silo one cycle later during
16848                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
16849                                                                  normally be set. */
16850         uint64_t delay_unload_2        : 1;  /**< [ 20: 20](R/W) Reserved, must be set.
16851                                                                  Internal:
16852                                                                  When set, unload the PHY silo one cycle later during
16853                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
16854                                                                  normally be set. */
16855         uint64_t delay_unload_3        : 1;  /**< [ 21: 21](R/W) Reserved, must be set.
16856                                                                  Internal:
16857                                                                  When set, unload the PHY silo one cycle later during
16858                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
16859                                                                  normally be set, particularly at higher speeds. */
16860         uint64_t reserved_22_23        : 2;
16861         uint64_t pattern               : 8;  /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
16862         uint64_t tccd_sel              : 1;  /**< [ 32: 32](R/W) When set, the read leveling sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to
16863                                                                  space out back-to-back read commands. Otherwise the back-to-back
16864                                                                  read commands are spaced out by a default of 4 cycles. */
16865         uint64_t reserved_33_39        : 7;
16866         uint64_t rank0_status          : 2;  /**< [ 41: 40](RO/H) Indicates status of the rank0 read-leveling and where the BYTEn programmings came from:
16867                                                                  0x0 = LMC()_RLEVEL_RANK(0) values are their reset value.
16868                                                                  0x1 = LMC()_RLEVEL_RANK(0) values were set via a CSR write.
16869                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(0) values are
16870                                                                  unpredictable).
16871                                                                  0x3 = LMC()_RLEVEL_RANK(0) values came from a complete read-leveling sequence. */
16872         uint64_t rank1_status          : 2;  /**< [ 43: 42](RO/H) Indicates status of the rank1 read-leveling and where the BYTEn programmings came from:
16873                                                                  0x0 = LMC()_RLEVEL_RANK(1) values are their reset value.
16874                                                                  0x1 = LMC()_RLEVEL_RANK(1) values were set via a CSR write.
16875                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(1) values are
16876                                                                  unpredictable).
16877                                                                  0x3 = LMC()_RLEVEL_RANK(1) values came from a complete read-leveling sequence. */
16878         uint64_t rank2_status          : 2;  /**< [ 45: 44](RO/H) Indicates status of the rank2 read-leveling and where the BYTEn programmings came from:
16879                                                                  0x0 = LMC()_RLEVEL_RANK(2) values are their reset value.
16880                                                                  0x1 = LMC()_RLEVEL_RANK(2) values were set via a CSR write.
16881                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(2) values are
16882                                                                  unpredictable).
16883                                                                  0x3 = LMC()_RLEVEL_RANK(2) values came from a complete read-leveling sequence. */
16884         uint64_t rank3_status          : 2;  /**< [ 47: 46](RO/H) Indicates status of the rank3 read-leveling and where the BYTEn programmings came from:
16885                                                                  0x0 = LMC()_RLEVEL_RANK(3) values are their reset value.
16886                                                                  0x1 = LMC()_RLEVEL_RANK(3) values were set via a CSR write.
16887                                                                  0x2 = Read-leveling sequence currently in progress (LMC()_RLEVEL_RANK(3) values are
16888                                                                  unpredictable).
16889                                                                  0x3 = LMC()_RLEVEL_RANK(3) values came from a complete read-leveling sequence. */
16890         uint64_t reserved_48_63        : 16;
16891 #endif /* Word 0 - End */
16892     } cn9;
16893     struct bdk_lmcx_rlevel_ctl_cn81xx
16894     {
16895 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
16896         uint64_t reserved_33_63        : 31;
16897         uint64_t tccd_sel              : 1;  /**< [ 32: 32](R/W) When set, the read leveling sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to
16898                                                                  space out back-to-back read commands. Otherwise the back-to-back
16899                                                                  read commands are spaced out by a default of 4 cycles. */
16900         uint64_t pattern               : 8;  /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
16901         uint64_t reserved_22_23        : 2;
16902         uint64_t delay_unload_3        : 1;  /**< [ 21: 21](R/W) Reserved, must be set.
16903                                                                  Internal:
16904                                                                  When set, unload the PHY silo one cycle later during
16905                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
16906                                                                  normally be set, particularly at higher speeds. */
16907         uint64_t delay_unload_2        : 1;  /**< [ 20: 20](R/W) Reserved, must be set.
16908                                                                  Internal:
16909                                                                  When set, unload the PHY silo one cycle later during
16910                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
16911                                                                  normally be set. */
16912         uint64_t delay_unload_1        : 1;  /**< [ 19: 19](R/W) Reserved, must be set.
16913                                                                  Internal:
16914                                                                  When set, unload the PHY silo one cycle later during
16915                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
16916                                                                  normally be set. */
16917         uint64_t delay_unload_0        : 1;  /**< [ 18: 18](R/W) Reserved, must be set.
16918                                                                  Internal:
16919                                                                  When set, unload the PHY silo one cycle later during
16920                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
16921                                                                  normally be set. */
16922         uint64_t bitmask               : 8;  /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
16923         uint64_t or_dis                : 1;  /**< [  9:  9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
16924                                                                  should normally not be set. */
16925         uint64_t offset_en             : 1;  /**< [  8:  8](R/W) When set, LMC attempts to select the read leveling setting that is
16926                                                                  LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
16927                                                                  in the largest contiguous sequence of passing settings. When clear, or if the setting
16928                                                                  selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
16929                                                                  the largest contiguous sequence of passing settings, rounding earlier when necessary. */
16930         uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
16931         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
16932 #else /* Word 0 - Little Endian */
16933         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. Byte index for which bitmask results are saved in LMC()_RLEVEL_DBG. */
16934         uint64_t offset                : 4;  /**< [  7:  4](R/W) The offset used when LMC()_RLEVEL_CTL[OFFSET_EN] is set. */
16935         uint64_t offset_en             : 1;  /**< [  8:  8](R/W) When set, LMC attempts to select the read leveling setting that is
16936                                                                  LMC()_RLEVEL_CTL[OFFSET] settings earlier than the last passing read leveling setting
16937                                                                  in the largest contiguous sequence of passing settings. When clear, or if the setting
16938                                                                  selected by LMC()_RLEVEL_CTL[OFFSET] did not pass, LMC selects the middle setting in
16939                                                                  the largest contiguous sequence of passing settings, rounding earlier when necessary. */
16940         uint64_t or_dis                : 1;  /**< [  9:  9](R/W) Disable ORing of bits in a byte lane when computing the read leveling bitmask. [OR_DIS]
16941                                                                  should normally not be set. */
16942         uint64_t bitmask               : 8;  /**< [ 17: 10](R/W) Mask to select bit lanes on which read leveling feedback is returned when [OR_DIS] is set to 1. */
16943         uint64_t delay_unload_0        : 1;  /**< [ 18: 18](R/W) Reserved, must be set.
16944                                                                  Internal:
16945                                                                  When set, unload the PHY silo one cycle later during
16946                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 0. [DELAY_UNLOAD_0] should
16947                                                                  normally be set. */
16948         uint64_t delay_unload_1        : 1;  /**< [ 19: 19](R/W) Reserved, must be set.
16949                                                                  Internal:
16950                                                                  When set, unload the PHY silo one cycle later during
16951                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 1. [DELAY_UNLOAD_1] should
16952                                                                  normally be set. */
16953         uint64_t delay_unload_2        : 1;  /**< [ 20: 20](R/W) Reserved, must be set.
16954                                                                  Internal:
16955                                                                  When set, unload the PHY silo one cycle later during
16956                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 2. [DELAY_UNLOAD_2] should
16957                                                                  normally be set. */
16958         uint64_t delay_unload_3        : 1;  /**< [ 21: 21](R/W) Reserved, must be set.
16959                                                                  Internal:
16960                                                                  When set, unload the PHY silo one cycle later during
16961                                                                  read leveling if LMC()_RLEVEL_RANK()[BYTE*\<1:0\>] = 3. [DELAY_UNLOAD_3] should
16962                                                                  normally be set, particularly at higher speeds. */
16963         uint64_t reserved_22_23        : 2;
16964         uint64_t pattern               : 8;  /**< [ 31: 24](R/W) Sets the data pattern used to match in read leveling operations. */
16965         uint64_t tccd_sel              : 1;  /**< [ 32: 32](R/W) When set, the read leveling sequence uses LMC()_MODEREG_PARAMS3[TCCD_L] to
16966                                                                  space out back-to-back read commands. Otherwise the back-to-back
16967                                                                  read commands are spaced out by a default of 4 cycles. */
16968         uint64_t reserved_33_63        : 31;
16969 #endif /* Word 0 - End */
16970     } cn81xx;
16971     /* struct bdk_lmcx_rlevel_ctl_cn81xx cn83xx; */
16972     /* struct bdk_lmcx_rlevel_ctl_cn81xx cn88xxp2; */
16973 };
16974 typedef union bdk_lmcx_rlevel_ctl bdk_lmcx_rlevel_ctl_t;
16975 
16976 static inline uint64_t BDK_LMCX_RLEVEL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
16977 static inline uint64_t BDK_LMCX_RLEVEL_CTL(unsigned long a)
16978 {
16979     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
16980         return 0x87e0880002a0ll + 0x1000000ll * ((a) & 0x0);
16981     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
16982         return 0x87e0880002a0ll + 0x1000000ll * ((a) & 0x1);
16983     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
16984         return 0x87e0880002a0ll + 0x1000000ll * ((a) & 0x3);
16985     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
16986         return 0x87e0880002a0ll + 0x1000000ll * ((a) & 0x3);
16987     __bdk_csr_fatal("LMCX_RLEVEL_CTL", 1, a, 0, 0, 0);
16988 }
16989 
16990 #define typedef_BDK_LMCX_RLEVEL_CTL(a) bdk_lmcx_rlevel_ctl_t
16991 #define bustype_BDK_LMCX_RLEVEL_CTL(a) BDK_CSR_TYPE_RSL
16992 #define basename_BDK_LMCX_RLEVEL_CTL(a) "LMCX_RLEVEL_CTL"
16993 #define device_bar_BDK_LMCX_RLEVEL_CTL(a) 0x0 /* PF_BAR0 */
16994 #define busnum_BDK_LMCX_RLEVEL_CTL(a) (a)
16995 #define arguments_BDK_LMCX_RLEVEL_CTL(a) (a),-1,-1,-1
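
/* Example (illustrative, not part of the auto-generated register definitions):
 * a minimal sketch, using the union above, of selecting the byte lane whose
 * pass/fail bitmask is captured in LMC()_RLEVEL_DBG. Only the raw 64-bit CSR
 * value is manipulated here; the function name is hypothetical and the CSR
 * read/write primitives are assumed to be provided elsewhere in the BDK. */
#if 0
static inline uint64_t example_rlevel_ctl_select_byte(uint64_t current, unsigned byte_lane)
{
    bdk_lmcx_rlevel_ctl_t ctl;
    ctl.u = current;                   /* start from the current LMC()_RLEVEL_CTL contents */
    ctl.cn81xx.byte = byte_lane & 0xf; /* 0 <= BYTE <= 8 selects the lane dumped in LMC()_RLEVEL_DBG */
    return ctl.u;                      /* value to write back to LMC()_RLEVEL_CTL */
}
#endif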
16996 
16997 /**
16998  * Register (RSL) lmc#_rlevel_dbg
16999  *
17000  * LMC Read Level Debug Register
17001  * A given read of LMC()_RLEVEL_DBG returns the read leveling pass/fail results for all
17002  * possible delay settings (i.e. the BITMASK) for only one byte in the last rank that
17003  * the hardware ran read leveling on. LMC()_RLEVEL_CTL[BYTE] selects the particular
17004  * byte. To get these pass/fail results for a different rank, you must run the hardware
17005  * read leveling again. For example, it is possible to get the [BITMASK] results for
17006  * every byte of every rank if you run read leveling separately for each rank, probing
17007  * LMC()_RLEVEL_DBG between each read leveling.
17008  */
17009 union bdk_lmcx_rlevel_dbg
17010 {
17011     uint64_t u;
17012     struct bdk_lmcx_rlevel_dbg_s
17013     {
17014 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17015         uint64_t bitmask               : 64; /**< [ 63:  0](RO/H) Bitmask generated during read level settings sweep. BITMASK[n] = 0 means read level
17016                                                                  setting n failed; BITMASK[n] = 1 means read level setting n passed for 0 \<= n \<= 63. */
17017 #else /* Word 0 - Little Endian */
17018         uint64_t bitmask               : 64; /**< [ 63:  0](RO/H) Bitmask generated during read level settings sweep. BITMASK[n] = 0 means read level
17019                                                                  setting n failed; BITMASK[n] = 1 means read level setting n passed for 0 \<= n \<= 63. */
17020 #endif /* Word 0 - End */
17021     } s;
17022     /* struct bdk_lmcx_rlevel_dbg_s cn8; */
17023     struct bdk_lmcx_rlevel_dbg_cn9
17024     {
17025 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17026         uint64_t bitmask               : 64; /**< [ 63:  0](RO/H) Bitmask generated during read level settings sweep. BITMASK\<{a}\> = 0 means read level
17027                                                                  setting {a} failed; BITMASK\<{a}\> = 1 means read level setting {a} passed for 0 \<= {a} \<= 63. */
17028 #else /* Word 0 - Little Endian */
17029         uint64_t bitmask               : 64; /**< [ 63:  0](RO/H) Bitmask generated during read level settings sweep. BITMASK\<{a}\> = 0 means read level
17030                                                                  setting {a} failed; BITMASK\<{a}\> = 1 means read level setting {a} passed for 0 \<= {a} \<= 63. */
17031 #endif /* Word 0 - End */
17032     } cn9;
17033 };
17034 typedef union bdk_lmcx_rlevel_dbg bdk_lmcx_rlevel_dbg_t;
17035 
17036 static inline uint64_t BDK_LMCX_RLEVEL_DBG(unsigned long a) __attribute__ ((pure, always_inline));
17037 static inline uint64_t BDK_LMCX_RLEVEL_DBG(unsigned long a)
17038 {
17039     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
17040         return 0x87e0880002a8ll + 0x1000000ll * ((a) & 0x0);
17041     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
17042         return 0x87e0880002a8ll + 0x1000000ll * ((a) & 0x1);
17043     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
17044         return 0x87e0880002a8ll + 0x1000000ll * ((a) & 0x3);
17045     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
17046         return 0x87e0880002a8ll + 0x1000000ll * ((a) & 0x3);
17047     __bdk_csr_fatal("LMCX_RLEVEL_DBG", 1, a, 0, 0, 0);
17048 }
17049 
17050 #define typedef_BDK_LMCX_RLEVEL_DBG(a) bdk_lmcx_rlevel_dbg_t
17051 #define bustype_BDK_LMCX_RLEVEL_DBG(a) BDK_CSR_TYPE_RSL
17052 #define basename_BDK_LMCX_RLEVEL_DBG(a) "LMCX_RLEVEL_DBG"
17053 #define device_bar_BDK_LMCX_RLEVEL_DBG(a) 0x0 /* PF_BAR0 */
17054 #define busnum_BDK_LMCX_RLEVEL_DBG(a) (a)
17055 #define arguments_BDK_LMCX_RLEVEL_DBG(a) (a),-1,-1,-1
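
/* Example (illustrative, not part of the auto-generated register definitions):
 * a minimal sketch, mirroring the LMC()_RLEVEL_CTL[OFFSET_EN] description,
 * that scans a raw LMC()_RLEVEL_DBG bitmask for the largest contiguous run of
 * passing settings and returns the middle of that run, rounding earlier. The
 * function name is hypothetical; how the bitmask is read is not shown. */
#if 0
static inline int example_rlevel_middle_setting(uint64_t bitmask)
{
    int best_start = -1, best_len = 0;
    int run_start = -1, run_len = 0;
    for (int n = 0; n < 64; n++)
    {
        if (bitmask & (1ull << n))
        {
            if (run_len == 0)
                run_start = n;          /* a new run of passing settings begins here */
            run_len++;
            if (run_len > best_len)
            {
                best_len = run_len;
                best_start = run_start;
            }
        }
        else
        {
            run_len = 0;                /* setting n failed; the run is broken */
        }
    }
    if (best_len == 0)
        return -1;                          /* no passing setting found */
    return best_start + (best_len - 1) / 2; /* middle of the run, rounded earlier */
}
#endif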
17056 
17057 /**
17058  * Register (RSL) lmc#_rlevel_dbg2
17059  *
17060  * LMC Read Level Debug Register
17061  * A given read of LMC()_RLEVEL_DBG2 returns the read-leveling pass/fail results for
17062  * delay settings 64 through 127 (i.e. the upper half of the BITMASK) for only one byte
17063  * in the last rank that the hardware ran read-leveling on. LMC()_RLEVEL_CTL[BYTE]
17064  * selects the particular byte. To get these pass/fail results for a different rank, you
17065  * must run the hardware read-leveling again. For example, it is possible to get the
17066  * [BITMASK] results for every byte of every rank if you run read-leveling separately
17067  * for each rank, probing LMC()_RLEVEL_DBG2 between each read-leveling.
17068  */
17069 union bdk_lmcx_rlevel_dbg2
17070 {
17071     uint64_t u;
17072     struct bdk_lmcx_rlevel_dbg2_s
17073     {
17074 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17075         uint64_t bitmask               : 64; /**< [ 63:  0](RO/H) Bitmask generated during read level settings sweep. BITMASK\<{a}\> = 0 means read level
17076                                                                  setting {a} failed; BITMASK\<{a}\> = 1 means read level setting {a} passed for 64 \<= {a} \<= 127. */
17077 #else /* Word 0 - Little Endian */
17078         uint64_t bitmask               : 64; /**< [ 63:  0](RO/H) Bitmask generated during read level settings sweep. BITMASK\<{a}\> = 0 means read level
17079                                                                  setting {a} failed; BITMASK\<{a}\> = 1 means read level setting {a} passed for 64 \<= {a} \<= 127. */
17080 #endif /* Word 0 - End */
17081     } s;
17082     /* struct bdk_lmcx_rlevel_dbg2_s cn; */
17083 };
17084 typedef union bdk_lmcx_rlevel_dbg2 bdk_lmcx_rlevel_dbg2_t;
17085 
17086 static inline uint64_t BDK_LMCX_RLEVEL_DBG2(unsigned long a) __attribute__ ((pure, always_inline));
17087 static inline uint64_t BDK_LMCX_RLEVEL_DBG2(unsigned long a)
17088 {
17089     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
17090         return 0x87e0880002b0ll + 0x1000000ll * ((a) & 0x3);
17091     __bdk_csr_fatal("LMCX_RLEVEL_DBG2", 1, a, 0, 0, 0);
17092 }
17093 
17094 #define typedef_BDK_LMCX_RLEVEL_DBG2(a) bdk_lmcx_rlevel_dbg2_t
17095 #define bustype_BDK_LMCX_RLEVEL_DBG2(a) BDK_CSR_TYPE_RSL
17096 #define basename_BDK_LMCX_RLEVEL_DBG2(a) "LMCX_RLEVEL_DBG2"
17097 #define device_bar_BDK_LMCX_RLEVEL_DBG2(a) 0x0 /* PF_BAR0 */
17098 #define busnum_BDK_LMCX_RLEVEL_DBG2(a) (a)
17099 #define arguments_BDK_LMCX_RLEVEL_DBG2(a) (a),-1,-1,-1
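
/* Example (illustrative, not part of the auto-generated register definitions):
 * a hypothetical helper showing how a full 128-setting pass/fail view could be
 * assembled from LMC()_RLEVEL_DBG (settings 0-63) and LMC()_RLEVEL_DBG2
 * (settings 64-127). dbg_lo and dbg_hi are assumed to be the two raw CSR
 * values, read by primitives not shown here. */
#if 0
static inline int example_rlevel_setting_passed(uint64_t dbg_lo, uint64_t dbg_hi, unsigned setting)
{
    if (setting < 64)
        return (dbg_lo >> setting) & 1;        /* BITMASK<setting> from LMC()_RLEVEL_DBG */
    if (setting < 128)
        return (dbg_hi >> (setting - 64)) & 1; /* BITMASK<setting-64> from LMC()_RLEVEL_DBG2 */
    return 0;                                  /* out of range */
}
#endif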
17100 
17101 /**
17102  * Register (RSL) lmc#_rlevel_rank#
17103  *
17104  * LMC Read Level Rank Register
17105  * Four of these CSRs exist per LMC, one for each rank. Read level setting is measured
17106  * in units of 1/4 CK, so the BYTEn values can range over 16 CK cycles. Each CSR is
17107  * written by hardware during a read leveling sequence for the rank. (Hardware sets
17108  * [STATUS] to 3 after hardware read leveling completes for the rank.)
17109  *
17110  * If hardware is unable to find a match per LMC()_RLEVEL_CTL[OFFSET_EN] and
17111  * LMC()_RLEVEL_CTL[OFFSET], then hardware sets LMC()_RLEVEL_RANK()[BYTEn\<5:0\>] to
17112  * 0x0.
17113  *
17114  * Each CSR may also be written by software, but not while a read leveling sequence is
17115  * in progress. (Hardware sets [STATUS] to 1 after a CSR write.) Software initiates a
17116  * hardware read leveling sequence by programming LMC()_RLEVEL_CTL and writing
17117  * LMC()_SEQ_CTL[INIT_START] = 1 with LMC()_SEQ_CTL[SEQ_SEL]=1. See LMC()_RLEVEL_CTL.
17118  *
17119  * LMC()_RLEVEL_RANKi values for ranks i without attached DRAM should be set such that
17120  * they do not increase the range of possible BYTE values for any byte lane. The
17121  * easiest way to do this is to set LMC()_RLEVEL_RANK(i) = LMC()_RLEVEL_RANK(j), where j is
17122  * some rank with attached DRAM whose LMC()_RLEVEL_RANK(j) is already fully initialized.
17123  */
17124 union bdk_lmcx_rlevel_rankx
17125 {
17126     uint64_t u;
17127     struct bdk_lmcx_rlevel_rankx_s
17128     {
17129 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17130         uint64_t reserved_56_63        : 8;
17131         uint64_t status                : 2;  /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
17132                                                                  from:
17133                                                                  0x0 = BYTEn values are their reset value.
17134                                                                  0x1 = BYTEn values were set via a CSR write to this register.
17135                                                                  0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
17136                                                                  0x3 = BYTEn values came from a complete read leveling sequence. */
17137         uint64_t reserved_0_53         : 54;
17138 #else /* Word 0 - Little Endian */
17139         uint64_t reserved_0_53         : 54;
17140         uint64_t status                : 2;  /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
17141                                                                  from:
17142                                                                  0x0 = BYTEn values are their reset value.
17143                                                                  0x1 = BYTEn values were set via a CSR write to this register.
17144                                                                  0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
17145                                                                  0x3 = BYTEn values came from a complete read leveling sequence. */
17146         uint64_t reserved_56_63        : 8;
17147 #endif /* Word 0 - End */
17148     } s;
17149     struct bdk_lmcx_rlevel_rankx_cn9
17150     {
17151 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17152         uint64_t reserved_63           : 1;
17153         uint64_t byte8                 : 7;  /**< [ 62: 56](R/W/H) "Read level setting.
17154                                                                  When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
17155                                                                  signals DDR#_CBS_0_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
17156                                                                  not increase the range of possible BYTE* values. The easiest way to do this is to set
17157                                                                  LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
17158                                                                  ECC DRAM, using the final BYTE0 value." */
17159         uint64_t byte7                 : 7;  /**< [ 55: 49](R/W/H) Read level setting. */
17160         uint64_t byte6                 : 7;  /**< [ 48: 42](R/W/H) Read level setting. */
17161         uint64_t byte5                 : 7;  /**< [ 41: 35](R/W/H) Read level setting. */
17162         uint64_t byte4                 : 7;  /**< [ 34: 28](R/W/H) Read level setting. */
17163         uint64_t byte3                 : 7;  /**< [ 27: 21](R/W/H) Read level setting. */
17164         uint64_t byte2                 : 7;  /**< [ 20: 14](R/W/H) Read level setting. */
17165         uint64_t byte1                 : 7;  /**< [ 13:  7](R/W/H) Read level setting. */
17166         uint64_t byte0                 : 7;  /**< [  6:  0](R/W/H) Read level setting. */
17167 #else /* Word 0 - Little Endian */
17168         uint64_t byte0                 : 7;  /**< [  6:  0](R/W/H) Read level setting. */
17169         uint64_t byte1                 : 7;  /**< [ 13:  7](R/W/H) Read level setting. */
17170         uint64_t byte2                 : 7;  /**< [ 20: 14](R/W/H) Read level setting. */
17171         uint64_t byte3                 : 7;  /**< [ 27: 21](R/W/H) Read level setting. */
17172         uint64_t byte4                 : 7;  /**< [ 34: 28](R/W/H) Read level setting. */
17173         uint64_t byte5                 : 7;  /**< [ 41: 35](R/W/H) Read level setting. */
17174         uint64_t byte6                 : 7;  /**< [ 48: 42](R/W/H) Read level setting. */
17175         uint64_t byte7                 : 7;  /**< [ 55: 49](R/W/H) Read level setting. */
17176         uint64_t byte8                 : 7;  /**< [ 62: 56](R/W/H) "Read level setting.
17177                                                                  When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
17178                                                                  signals DDR#_CBS_0_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
17179                                                                  not increase the range of possible BYTE* values. The easiest way to do this is to set
17180                                                                  LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
17181                                                                  ECC DRAM, using the final BYTE0 value." */
17182         uint64_t reserved_63           : 1;
17183 #endif /* Word 0 - End */
17184     } cn9;
17185     struct bdk_lmcx_rlevel_rankx_cn81xx
17186     {
17187 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17188         uint64_t reserved_56_63        : 8;
17189         uint64_t status                : 2;  /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
17190                                                                  from:
17191                                                                  0x0 = BYTEn values are their reset value.
17192                                                                  0x1 = BYTEn values were set via a CSR write to this register.
17193                                                                  0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
17194                                                                  0x3 = BYTEn values came from a complete read leveling sequence. */
17195         uint64_t byte8                 : 6;  /**< [ 53: 48](R/W/H) "Read level setting.
17196                                                                  When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
17197                                                                  signals DDR#_DQS_8_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
17198                                                                  not increase the range of possible BYTE* values. The easiest way to do this is to set
17199                                                                  LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
17200                                                                  ECC DRAM, using the final BYTE0 value." */
17201         uint64_t byte7                 : 6;  /**< [ 47: 42](R/W/H) Read level setting. */
17202         uint64_t byte6                 : 6;  /**< [ 41: 36](R/W/H) Read level setting. */
17203         uint64_t byte5                 : 6;  /**< [ 35: 30](R/W/H) Read level setting. */
17204         uint64_t byte4                 : 6;  /**< [ 29: 24](R/W/H) Read level setting. */
17205         uint64_t byte3                 : 6;  /**< [ 23: 18](R/W/H) Read level setting. */
17206         uint64_t byte2                 : 6;  /**< [ 17: 12](R/W/H) Read level setting. */
17207         uint64_t byte1                 : 6;  /**< [ 11:  6](R/W/H) Read level setting. */
17208         uint64_t byte0                 : 6;  /**< [  5:  0](R/W/H) Read level setting. */
17209 #else /* Word 0 - Little Endian */
17210         uint64_t byte0                 : 6;  /**< [  5:  0](R/W/H) Read level setting. */
17211         uint64_t byte1                 : 6;  /**< [ 11:  6](R/W/H) Read level setting. */
17212         uint64_t byte2                 : 6;  /**< [ 17: 12](R/W/H) Read level setting. */
17213         uint64_t byte3                 : 6;  /**< [ 23: 18](R/W/H) Read level setting. */
17214         uint64_t byte4                 : 6;  /**< [ 29: 24](R/W/H) Read level setting. */
17215         uint64_t byte5                 : 6;  /**< [ 35: 30](R/W/H) Read level setting. */
17216         uint64_t byte6                 : 6;  /**< [ 41: 36](R/W/H) Read level setting. */
17217         uint64_t byte7                 : 6;  /**< [ 47: 42](R/W/H) Read level setting. */
17218         uint64_t byte8                 : 6;  /**< [ 53: 48](R/W/H) "Read level setting.
17219                                                                  When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
17220                                                                  signals DDR#_DQS_8_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
17221                                                                  not increase the range of possible BYTE* values. The easiest way to do this is to set
17222                                                                  LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
17223                                                                  ECC DRAM, using the final BYTE0 value." */
17224         uint64_t status                : 2;  /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
17225                                                                  from:
17226                                                                  0x0 = BYTEn values are their reset value.
17227                                                                  0x1 = BYTEn values were set via a CSR write to this register.
17228                                                                  0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
17229                                                                  0x3 = BYTEn values came from a complete read leveling sequence. */
17230         uint64_t reserved_56_63        : 8;
17231 #endif /* Word 0 - End */
17232     } cn81xx;
17233     /* struct bdk_lmcx_rlevel_rankx_cn81xx cn88xx; */
17234     struct bdk_lmcx_rlevel_rankx_cn83xx
17235     {
17236 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17237         uint64_t reserved_56_63        : 8;
17238         uint64_t status                : 2;  /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
17239                                                                  from:
17240                                                                  0x0 = BYTEn values are their reset value.
17241                                                                  0x1 = BYTEn values were set via a CSR write to this register.
17242                                                                  0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
17243                                                                  0x3 = BYTEn values came from a complete read leveling sequence. */
17244         uint64_t byte8                 : 6;  /**< [ 53: 48](R/W/H) "Read level setting.
17245                                                                  When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
17246                                                                  signals DDR#_CBS_0_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
17247                                                                  not increase the range of possible BYTE* values. The easiest way to do this is to set
17248                                                                  LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
17249                                                                  ECC DRAM, using the final BYTE0 value." */
17250         uint64_t byte7                 : 6;  /**< [ 47: 42](R/W/H) Read level setting. */
17251         uint64_t byte6                 : 6;  /**< [ 41: 36](R/W/H) Read level setting. */
17252         uint64_t byte5                 : 6;  /**< [ 35: 30](R/W/H) Read level setting. */
17253         uint64_t byte4                 : 6;  /**< [ 29: 24](R/W/H) Read level setting. */
17254         uint64_t byte3                 : 6;  /**< [ 23: 18](R/W/H) Read level setting. */
17255         uint64_t byte2                 : 6;  /**< [ 17: 12](R/W/H) Read level setting. */
17256         uint64_t byte1                 : 6;  /**< [ 11:  6](R/W/H) Read level setting. */
17257         uint64_t byte0                 : 6;  /**< [  5:  0](R/W/H) Read level setting. */
17258 #else /* Word 0 - Little Endian */
17259         uint64_t byte0                 : 6;  /**< [  5:  0](R/W/H) Read level setting. */
17260         uint64_t byte1                 : 6;  /**< [ 11:  6](R/W/H) Read level setting. */
17261         uint64_t byte2                 : 6;  /**< [ 17: 12](R/W/H) Read level setting. */
17262         uint64_t byte3                 : 6;  /**< [ 23: 18](R/W/H) Read level setting. */
17263         uint64_t byte4                 : 6;  /**< [ 29: 24](R/W/H) Read level setting. */
17264         uint64_t byte5                 : 6;  /**< [ 35: 30](R/W/H) Read level setting. */
17265         uint64_t byte6                 : 6;  /**< [ 41: 36](R/W/H) Read level setting. */
17266         uint64_t byte7                 : 6;  /**< [ 47: 42](R/W/H) Read level setting. */
17267         uint64_t byte8                 : 6;  /**< [ 53: 48](R/W/H) "Read level setting.
17268                                                                  When ECC DRAM is not present in 64-bit mode (i.e. when DRAM is not attached to chip
17269                                                                  signals DDR#_CBS_0_* and DDR#_CB\<7:0\>), software should write BYTE8 to a value that does
17270                                                                  not increase the range of possible BYTE* values. The easiest way to do this is to set
17271                                                                  LMC()_RLEVEL_RANK()[BYTE8] = LMC()_RLEVEL_RANK()[BYTE0] when there is no
17272                                                                  ECC DRAM, using the final BYTE0 value." */
17273         uint64_t status                : 2;  /**< [ 55: 54](RO/H) Indicates status of the read leveling and where the BYTEn programmings in \<53:0\> came
17274                                                                  from:
17275                                                                  0x0 = BYTEn values are their reset value.
17276                                                                  0x1 = BYTEn values were set via a CSR write to this register.
17277                                                                  0x2 = Read leveling sequence currently in progress (BYTEn values are unpredictable).
17278                                                                  0x3 = BYTEn values came from a complete read leveling sequence. */
17279         uint64_t reserved_56_63        : 8;
17280 #endif /* Word 0 - End */
17281     } cn83xx;
17282 };
17283 typedef union bdk_lmcx_rlevel_rankx bdk_lmcx_rlevel_rankx_t;
17284 
17285 static inline uint64_t BDK_LMCX_RLEVEL_RANKX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
17286 static inline uint64_t BDK_LMCX_RLEVEL_RANKX(unsigned long a, unsigned long b)
17287 {
17288     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=3)))
17289         return 0x87e088000280ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x3);
17290     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
17291         return 0x87e088000280ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
17292     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=3)))
17293         return 0x87e088000280ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
17294     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=3)))
17295         return 0x87e088000280ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
17296     __bdk_csr_fatal("LMCX_RLEVEL_RANKX", 2, a, b, 0, 0);
17297 }
17298 
17299 #define typedef_BDK_LMCX_RLEVEL_RANKX(a,b) bdk_lmcx_rlevel_rankx_t
17300 #define bustype_BDK_LMCX_RLEVEL_RANKX(a,b) BDK_CSR_TYPE_RSL
17301 #define basename_BDK_LMCX_RLEVEL_RANKX(a,b) "LMCX_RLEVEL_RANKX"
17302 #define device_bar_BDK_LMCX_RLEVEL_RANKX(a,b) 0x0 /* PF_BAR0 */
17303 #define busnum_BDK_LMCX_RLEVEL_RANKX(a,b) (a)
17304 #define arguments_BDK_LMCX_RLEVEL_RANKX(a,b) (a),(b),-1,-1
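
/* Example (illustrative, not part of the auto-generated register definitions):
 * a minimal sketch of the two software conventions described above for
 * LMC()_RLEVEL_RANK(): copying a fully initialized rank's value into a rank
 * with no attached DRAM, and setting BYTE8 equal to BYTE0 when no ECC DRAM is
 * present. Raw CSR values are used; the function name is hypothetical and the
 * CSR read/write primitives are assumed and not shown. */
#if 0
static inline uint64_t example_rlevel_rank_no_ecc(uint64_t populated_rank_value)
{
    bdk_lmcx_rlevel_rankx_t r;
    r.u = populated_rank_value;        /* value copied from an already-trained rank */
    r.cn83xx.byte8 = r.cn83xx.byte0;   /* no ECC DRAM: keep BYTE8 within the BYTE0-BYTE7 range */
    return r.u;                        /* value to write to the unpopulated rank's CSR */
}
#endif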
17305 
17306 /**
17307  * Register (RSL) lmc#_rodt_mask
17308  *
17309  * LMC Read OnDieTermination Mask Register
17310  * System designers may desire to terminate DQ/DQS lines for higher frequency DDR operations,
17311  * especially on a multirank system. DDR4 DQ/DQS I/Os have built-in termination resistors that
17312  * can be turned on or off by the controller, after meeting TAOND and TAOF timing requirements.
17313  *
17314  * Each rank has its own ODT pin that fans out to all the memory parts in that DIMM. System
17315  * designers may prefer different combinations of ODT ONs for read operations into different
17316  * ranks. CNXXXX supports full programmability by way of the mask register below. Each rank
17317  * position has its own 4-bit programmable field. When the controller does a read to that rank,
17318  * it sets the 4 ODT pins to the MASK pins below. For example, when doing a read from Rank0, a
17319  * system designer may desire to terminate the lines with the resistor on DIMM0/Rank1. The mask
17320  * [RODT_D0_R0] would then be {0010}.
17321  *
17322  * CNXXXX drives the appropriate mask values on the ODT pins by default. If this feature is not
17323  * required, write 0x0 in this register. Note that, as per the JEDEC DDR4 specifications, the ODT
17324  * pin for the rank that is being read should always be 0x0.
17325  * When a given RANK is selected, the RODT mask for that rank is used. The resulting RODT mask is
17326  * driven to the DIMMs in the following manner:
17327  */
17328 union bdk_lmcx_rodt_mask
17329 {
17330     uint64_t u;
17331     struct bdk_lmcx_rodt_mask_s
17332     {
17333 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17334         uint64_t reserved_28_63        : 36;
17335         uint64_t rodt_d1_r1            : 4;  /**< [ 27: 24](R/W) Reserved.
17336                                                                  Internal:
17337                                                                  Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked. If RANK_ENA=1, [RODT_D1_R1]\<3\> must be
17338                                                                  zero. Otherwise [RODT_D1_R1]\<3:0\> is not used and must be zero. */
17339         uint64_t reserved_20_23        : 4;
17340         uint64_t rodt_d1_r0            : 4;  /**< [ 19: 16](R/W) Reserved.
17341                                                                  Internal:
17342                                                                  Read ODT mask DIMM1, RANK0. If RANK_ENA=1, [RODT_D1_R0]\<2\> must be zero. Otherwise,
17343                                                                  [RODT_D1_R0]\<3:2,1\> must be zero. */
17344         uint64_t reserved_12_15        : 4;
17345         uint64_t rodt_d0_r1            : 4;  /**< [ 11:  8](R/W) Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
17346                                                                  [RODT_D0_R1]\<1\> must be zero. Otherwise, [RODT_D0_R1]\<3:0\> is not used and must
17347                                                                  be zero. */
17348         uint64_t reserved_4_7          : 4;
17349         uint64_t rodt_d0_r0            : 4;  /**< [  3:  0](R/W) Read ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D0_R0]\<0\> must be
17350                                                                  zero. Otherwise, [RODT_D0_R0]\<1:0,3\> must be zero. */
17351 #else /* Word 0 - Little Endian */
17352         uint64_t rodt_d0_r0            : 4;  /**< [  3:  0](R/W) Read ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D0_R0]\<0\> must be
17353                                                                  zero. Otherwise, [RODT_D0_R0]\<1:0,3\> must be zero. */
17354         uint64_t reserved_4_7          : 4;
17355         uint64_t rodt_d0_r1            : 4;  /**< [ 11:  8](R/W) Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
17356                                                                  [RODT_D0_R1]\<1\> must be zero. Otherwise, [RODT_D0_R1]\<3:0\> is not used and must
17357                                                                  be zero. */
17358         uint64_t reserved_12_15        : 4;
17359         uint64_t rodt_d1_r0            : 4;  /**< [ 19: 16](R/W) Reserved.
17360                                                                  Internal:
17361                                                                  Read ODT mask DIMM1, RANK0. If RANK_ENA=1, [RODT_D1_R0]\<2\> must be zero. Otherwise,
17362                                                                  [RODT_D1_R0]\<3:2,1\> must be zero. */
17363         uint64_t reserved_20_23        : 4;
17364         uint64_t rodt_d1_r1            : 4;  /**< [ 27: 24](R/W) Reserved.
17365                                                                  Internal:
17366                                                                  Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked. If RANK_ENA=1, [RODT_D1_R1]\<3\> must be
17367                                                                  zero. Otherwise [RODT_D1_R1]\<3:0\> is not used and must be zero. */
17368         uint64_t reserved_28_63        : 36;
17369 #endif /* Word 0 - End */
17370     } s;
17371     struct bdk_lmcx_rodt_mask_cn9
17372     {
17373 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17374         uint64_t reserved_28_63        : 36;
17375         uint64_t rodt_d1_r1            : 4;  /**< [ 27: 24](R/W) Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
17376                                                                  [RODT_D1_R1]\<3\> must be zero. Otherwise [RODT_D1_R1]\<3:0\> is not used and must
17377                                                                  be zero. */
17378         uint64_t reserved_20_23        : 4;
17379         uint64_t rodt_d1_r0            : 4;  /**< [ 19: 16](R/W) Read ODT mask DIMM1, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D1_R0]\<2\> must be
17380                                                                  zero. Otherwise, [RODT_D1_R0]\<3:2,1\> must be zero. */
17381         uint64_t reserved_12_15        : 4;
17382         uint64_t rodt_d0_r1            : 4;  /**< [ 11:  8](R/W) Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
17383                                                                  [RODT_D0_R1]\<1\> must be zero. Otherwise, [RODT_D0_R1]\<3:0\> is not used and must
17384                                                                  be zero. */
17385         uint64_t reserved_4_7          : 4;
17386         uint64_t rodt_d0_r0            : 4;  /**< [  3:  0](R/W) Read ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D0_R0]\<0\> must be
17387                                                                  zero. Otherwise, [RODT_D0_R0]\<1:0,3\> must be zero. */
17388 #else /* Word 0 - Little Endian */
17389         uint64_t rodt_d0_r0            : 4;  /**< [  3:  0](R/W) Read ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D0_R0]\<0\> must be
17390                                                                  zero. Otherwise, [RODT_D0_R0]\<1:0,3\> must be zero. */
17391         uint64_t reserved_4_7          : 4;
17392         uint64_t rodt_d0_r1            : 4;  /**< [ 11:  8](R/W) Read ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
17393                                                                  [RODT_D0_R1]\<1\> must be zero. Otherwise, [RODT_D0_R1]\<3:0\> is not used and must
17394                                                                  be zero. */
17395         uint64_t reserved_12_15        : 4;
17396         uint64_t rodt_d1_r0            : 4;  /**< [ 19: 16](R/W) Read ODT mask DIMM1, RANK0. If LMC()_CONFIG[RANK_ENA]=1, [RODT_D1_R0]\<2\> must be
17397                                                                  zero. Otherwise, [RODT_D1_R0]\<3:2,1\> must be zero. */
17398         uint64_t reserved_20_23        : 4;
17399         uint64_t rodt_d1_r1            : 4;  /**< [ 27: 24](R/W) Read ODT mask DIMM1, RANK1/DIMM1 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=1,
17400                                                                  [RODT_D1_R1]\<3\> must be zero. Otherwise [RODT_D1_R1]\<3:0\> is not used and must
17401                                                                  be zero. */
17402         uint64_t reserved_28_63        : 36;
17403 #endif /* Word 0 - End */
17404     } cn9;
17405     /* struct bdk_lmcx_rodt_mask_s cn81xx; */
17406     /* struct bdk_lmcx_rodt_mask_cn9 cn88xx; */
17407     /* struct bdk_lmcx_rodt_mask_cn9 cn83xx; */
17408 };
17409 typedef union bdk_lmcx_rodt_mask bdk_lmcx_rodt_mask_t;
17410 
17411 static inline uint64_t BDK_LMCX_RODT_MASK(unsigned long a) __attribute__ ((pure, always_inline));
17412 static inline uint64_t BDK_LMCX_RODT_MASK(unsigned long a)
17413 {
17414     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
17415         return 0x87e088000268ll + 0x1000000ll * ((a) & 0x0);
17416     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
17417         return 0x87e088000268ll + 0x1000000ll * ((a) & 0x1);
17418     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
17419         return 0x87e088000268ll + 0x1000000ll * ((a) & 0x3);
17420     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
17421         return 0x87e088000268ll + 0x1000000ll * ((a) & 0x3);
17422     __bdk_csr_fatal("LMCX_RODT_MASK", 1, a, 0, 0, 0);
17423 }
17424 
17425 #define typedef_BDK_LMCX_RODT_MASK(a) bdk_lmcx_rodt_mask_t
17426 #define bustype_BDK_LMCX_RODT_MASK(a) BDK_CSR_TYPE_RSL
17427 #define basename_BDK_LMCX_RODT_MASK(a) "LMCX_RODT_MASK"
17428 #define device_bar_BDK_LMCX_RODT_MASK(a) 0x0 /* PF_BAR0 */
17429 #define busnum_BDK_LMCX_RODT_MASK(a) (a)
17430 #define arguments_BDK_LMCX_RODT_MASK(a) (a),-1,-1,-1
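
/* Example (illustrative, not part of the auto-generated register definitions):
 * a hypothetical sketch packing the DIMM0/Rank0 read-ODT nibble described
 * above. The value 0x2 ({0010}) reproduces the text's example of terminating
 * on DIMM0/Rank1 while reading Rank0, and keeps [RODT_D0_R0]<0> zero as the
 * field description requires. The CSR write itself is assumed and not shown. */
#if 0
static inline uint64_t example_rodt_mask_dimm0_rank0(void)
{
    bdk_lmcx_rodt_mask_t mask;
    mask.u = 0;
    mask.s.rodt_d0_r0 = 0x2;  /* reads from Rank0 assert ODT on DIMM0/Rank1 only */
    return mask.u;            /* value to write to LMC()_RODT_MASK */
}
#endif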
17431 
17432 /**
17433  * Register (RSL) lmc#_scramble_cfg0
17434  *
17435  * LMC Scramble Configuration 0 Register
17436  */
17437 union bdk_lmcx_scramble_cfg0
17438 {
17439     uint64_t u;
17440     struct bdk_lmcx_scramble_cfg0_s
17441     {
17442 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17443         uint64_t key                   : 64; /**< [ 63:  0](R/W/H) Scramble key for data. Prior to enabling scrambling this key should be generated from a
17444                                                                  cryptographically-secure random number generator such as RNM_RANDOM. */
17445 #else /* Word 0 - Little Endian */
17446         uint64_t key                   : 64; /**< [ 63:  0](R/W/H) Scramble key for data. Prior to enabling scrambling this key should be generated from a
17447                                                                  cryptographically-secure random number generator such as RNM_RANDOM. */
17448 #endif /* Word 0 - End */
17449     } s;
17450     /* struct bdk_lmcx_scramble_cfg0_s cn; */
17451 };
17452 typedef union bdk_lmcx_scramble_cfg0 bdk_lmcx_scramble_cfg0_t;
17453 
17454 static inline uint64_t BDK_LMCX_SCRAMBLE_CFG0(unsigned long a) __attribute__ ((pure, always_inline));
17455 static inline uint64_t BDK_LMCX_SCRAMBLE_CFG0(unsigned long a)
17456 {
17457     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
17458         return 0x87e088000320ll + 0x1000000ll * ((a) & 0x0);
17459     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
17460         return 0x87e088000320ll + 0x1000000ll * ((a) & 0x1);
17461     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
17462         return 0x87e088000320ll + 0x1000000ll * ((a) & 0x3);
17463     __bdk_csr_fatal("LMCX_SCRAMBLE_CFG0", 1, a, 0, 0, 0);
17464 }
17465 
17466 #define typedef_BDK_LMCX_SCRAMBLE_CFG0(a) bdk_lmcx_scramble_cfg0_t
17467 #define bustype_BDK_LMCX_SCRAMBLE_CFG0(a) BDK_CSR_TYPE_RSL
17468 #define basename_BDK_LMCX_SCRAMBLE_CFG0(a) "LMCX_SCRAMBLE_CFG0"
17469 #define device_bar_BDK_LMCX_SCRAMBLE_CFG0(a) 0x0 /* PF_BAR0 */
17470 #define busnum_BDK_LMCX_SCRAMBLE_CFG0(a) (a)
17471 #define arguments_BDK_LMCX_SCRAMBLE_CFG0(a) (a),-1,-1,-1
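
/* Example (illustrative, not part of the auto-generated register definitions):
 * a hypothetical sketch packing a 64-bit data-scramble key into the union
 * above. The key source (e.g. RNM_RANDOM) and the CSR write are assumed and
 * not shown; random64 is a placeholder parameter. */
#if 0
static inline uint64_t example_pack_scramble_key(uint64_t random64)
{
    bdk_lmcx_scramble_cfg0_t cfg;
    cfg.u = 0;
    cfg.s.key = random64;  /* full 64-bit key; must come from a cryptographically secure source */
    return cfg.u;          /* value to write to LMC()_SCRAMBLE_CFG0 */
}
#endif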
17472 
17473 /**
17474  * Register (RSL) lmc#_scramble_cfg1
17475  *
17476  * LMC Scramble Configuration 1 Register
17477  * These registers set the aliasing that uses the lowest, legal chip select(s).
17478  */
17479 union bdk_lmcx_scramble_cfg1
17480 {
17481     uint64_t u;
17482     struct bdk_lmcx_scramble_cfg1_s
17483     {
17484 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17485         uint64_t key                   : 64; /**< [ 63:  0](R/W/H) Scramble key for addresses. Prior to enabling scrambling this key should be generated from
17486                                                                  a cryptographically-secure random number generator such as RNM_RANDOM. */
17487 #else /* Word 0 - Little Endian */
17488         uint64_t key                   : 64; /**< [ 63:  0](R/W/H) Scramble key for addresses. Prior to enabling scrambling this key should be generated from
17489                                                                  a cryptographically-secure random number generator such as RNM_RANDOM. */
17490 #endif /* Word 0 - End */
17491     } s;
17492     /* struct bdk_lmcx_scramble_cfg1_s cn; */
17493 };
17494 typedef union bdk_lmcx_scramble_cfg1 bdk_lmcx_scramble_cfg1_t;
17495 
17496 static inline uint64_t BDK_LMCX_SCRAMBLE_CFG1(unsigned long a) __attribute__ ((pure, always_inline));
17497 static inline uint64_t BDK_LMCX_SCRAMBLE_CFG1(unsigned long a)
17498 {
17499     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
17500         return 0x87e088000328ll + 0x1000000ll * ((a) & 0x0);
17501     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
17502         return 0x87e088000328ll + 0x1000000ll * ((a) & 0x1);
17503     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
17504         return 0x87e088000328ll + 0x1000000ll * ((a) & 0x3);
17505     __bdk_csr_fatal("LMCX_SCRAMBLE_CFG1", 1, a, 0, 0, 0);
17506 }
17507 
17508 #define typedef_BDK_LMCX_SCRAMBLE_CFG1(a) bdk_lmcx_scramble_cfg1_t
17509 #define bustype_BDK_LMCX_SCRAMBLE_CFG1(a) BDK_CSR_TYPE_RSL
17510 #define basename_BDK_LMCX_SCRAMBLE_CFG1(a) "LMCX_SCRAMBLE_CFG1"
17511 #define device_bar_BDK_LMCX_SCRAMBLE_CFG1(a) 0x0 /* PF_BAR0 */
17512 #define busnum_BDK_LMCX_SCRAMBLE_CFG1(a) (a)
17513 #define arguments_BDK_LMCX_SCRAMBLE_CFG1(a) (a),-1,-1,-1
17514 
17515 /**
17516  * Register (RSL) lmc#_scramble_cfg2
17517  *
17518  * LMC Scramble Configuration 2 Register
17519  */
17520 union bdk_lmcx_scramble_cfg2
17521 {
17522     uint64_t u;
17523     struct bdk_lmcx_scramble_cfg2_s
17524     {
17525 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17526         uint64_t key                   : 64; /**< [ 63:  0](R/W/H) Scramble key for data. Prior to enabling scrambling this key should be generated from a
17527                                                                  cryptographically-secure random number generator such as RNM_RANDOM. */
17528 #else /* Word 0 - Little Endian */
17529         uint64_t key                   : 64; /**< [ 63:  0](R/W/H) Scramble key for data. Prior to enabling scrambling this key should be generated from a
17530                                                                  cryptographically-secure random number generator such as RNM_RANDOM. */
17531 #endif /* Word 0 - End */
17532     } s;
17533     /* struct bdk_lmcx_scramble_cfg2_s cn; */
17534 };
17535 typedef union bdk_lmcx_scramble_cfg2 bdk_lmcx_scramble_cfg2_t;
17536 
17537 static inline uint64_t BDK_LMCX_SCRAMBLE_CFG2(unsigned long a) __attribute__ ((pure, always_inline));
17538 static inline uint64_t BDK_LMCX_SCRAMBLE_CFG2(unsigned long a)
17539 {
17540     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
17541         return 0x87e088000338ll + 0x1000000ll * ((a) & 0x0);
17542     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
17543         return 0x87e088000338ll + 0x1000000ll * ((a) & 0x1);
17544     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
17545         return 0x87e088000338ll + 0x1000000ll * ((a) & 0x3);
17546     __bdk_csr_fatal("LMCX_SCRAMBLE_CFG2", 1, a, 0, 0, 0);
17547 }
17548 
17549 #define typedef_BDK_LMCX_SCRAMBLE_CFG2(a) bdk_lmcx_scramble_cfg2_t
17550 #define bustype_BDK_LMCX_SCRAMBLE_CFG2(a) BDK_CSR_TYPE_RSL
17551 #define basename_BDK_LMCX_SCRAMBLE_CFG2(a) "LMCX_SCRAMBLE_CFG2"
17552 #define device_bar_BDK_LMCX_SCRAMBLE_CFG2(a) 0x0 /* PF_BAR0 */
17553 #define busnum_BDK_LMCX_SCRAMBLE_CFG2(a) (a)
17554 #define arguments_BDK_LMCX_SCRAMBLE_CFG2(a) (a),-1,-1,-1
17555 
17556 /**
17557  * Register (RSL) lmc#_scrambled_fadr
17558  *
17559  * LMC Scrambled Failing (SEC/DED/NXM) Address Register
17560  * LMC()_FADR captures the failing pre-scrambled address location (split into DIMM, bunk,
17561  * bank, etc). If scrambling is off, LMC()_FADR also captures the failing physical location
17562  * in the DRAM parts. LMC()_SCRAMBLED_FADR captures the actual failing address location in
17563  * the physical DRAM parts, i.e.:
17564  *
17565  * * If scrambling is on, LMC()_SCRAMBLED_FADR contains the failing physical location in the
17566  * DRAM parts (split into DIMM, bunk, bank, etc).
17567  *
17568  * * If scrambling is off, the pre-scramble and post-scramble addresses are the same, and so the
17569  * contents of LMC()_SCRAMBLED_FADR match the contents of LMC()_FADR.
17570  *
17571  * This register only captures the first transaction with ECC errors. A DED error can overwrite
17572  * this register with its failing address if the first error was a SEC error. If you write
17573  * LMC()_CONFIG -\> SEC_ERR/DED_ERR, it clears the error bits and captures the next failing
17574  * address. If [FDIMM] is 1, that means the error is in the higher DIMM.
17575  */
17576 union bdk_lmcx_scrambled_fadr
17577 {
17578     uint64_t u;
17579     struct bdk_lmcx_scrambled_fadr_s
17580     {
17581 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17582         uint64_t reserved_43_63        : 21;
17583         uint64_t fcid                  : 3;  /**< [ 42: 40](RO/H) Reserved.
17584                                                                  Internal:
17585                                                                  Failing CID number. This field is only valid when interfacing with 3DS DRAMs
17586                                                                  (i.e., when LMC()_EXT_CONFIG[DIMM0_CID] is nonzero). Returns a value of zero otherwise. */
17587         uint64_t fill_order            : 2;  /**< [ 39: 38](RO/H) Fill order for failing transaction. */
17588         uint64_t fdimm                 : 1;  /**< [ 37: 37](RO/H) Failing DIMM number. */
17589         uint64_t fbunk                 : 1;  /**< [ 36: 36](RO/H) Failing rank number. */
17590         uint64_t fbank                 : 4;  /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
17591         uint64_t frow                  : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
17592         uint64_t fcol                  : 14; /**< [ 13:  0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 128b data that
17593                                                                  had an ECC error, i.e., FCOL\<0\> is always 0. Can be used in conjunction with
17594                                                                  LMC()_CONFIG[DED_ERR] to isolate the 64b chunk of data in error. */
17595 #else /* Word 0 - Little Endian */
17596         uint64_t fcol                  : 14; /**< [ 13:  0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 128b data that
17597                                                                  had an ECC error, i.e., FCOL\<0\> is always 0. Can be used in conjunction with
17598                                                                  LMC()_CONFIG[DED_ERR] to isolate the 64b chunk of data in error. */
17599         uint64_t frow                  : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
17600         uint64_t fbank                 : 4;  /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
17601         uint64_t fbunk                 : 1;  /**< [ 36: 36](RO/H) Failing rank number. */
17602         uint64_t fdimm                 : 1;  /**< [ 37: 37](RO/H) Failing DIMM number. */
17603         uint64_t fill_order            : 2;  /**< [ 39: 38](RO/H) Fill order for failing transaction. */
17604         uint64_t fcid                  : 3;  /**< [ 42: 40](RO/H) Reserved.
17605                                                                  Internal:
17606                                                                  Failing CID number. This field is only valid when interfacing with 3DS DRAMs
17607                                                                  (i.e., when LMC()_EXT_CONFIG[DIMM0_CID] is nonzero). Returns a value of zero otherwise. */
17608         uint64_t reserved_43_63        : 21;
17609 #endif /* Word 0 - End */
17610     } s;
17611     /* struct bdk_lmcx_scrambled_fadr_s cn81xx; */
17612     struct bdk_lmcx_scrambled_fadr_cn88xx
17613     {
17614 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17615         uint64_t reserved_43_63        : 21;
17616         uint64_t fcid                  : 3;  /**< [ 42: 40](RO/H) Reserved.
17617                                                                  Internal:
17618                                                                  Failing CID number. This field is only valid when interfacing with 3DS DRAMs
17619                                                                  (i.e., when either LMC()_EXT_CONFIG[DIMM0_CID] or LMC()_EXT_CONFIG[DIMM1_CID] is
17620                                                                  nonzero). Returns a value of zero otherwise. */
17621         uint64_t fill_order            : 2;  /**< [ 39: 38](RO/H) Fill order for failing transaction. */
17622         uint64_t fdimm                 : 1;  /**< [ 37: 37](RO/H) Failing DIMM number. */
17623         uint64_t fbunk                 : 1;  /**< [ 36: 36](RO/H) Failing rank number. */
17624         uint64_t fbank                 : 4;  /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
17625         uint64_t frow                  : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
17626         uint64_t fcol                  : 14; /**< [ 13:  0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 128b data that
17627                                                                  had an ECC error, i.e., FCOL\<0\> is always 0. Can be used in conjunction with
17628                                                                  LMC()_CONFIG[DED_ERR] to isolate the 64b chunk of data in error. */
17629 #else /* Word 0 - Little Endian */
17630         uint64_t fcol                  : 14; /**< [ 13:  0](RO/H) Failing column address \<13:0\>. Technically, represents the address of the 128b data that
17631                                                                  had an ECC error, i.e., FCOL\<0\> is always 0. Can be used in conjunction with
17632                                                                  LMC()_CONFIG[DED_ERR] to isolate the 64b chunk of data in error. */
17633         uint64_t frow                  : 18; /**< [ 31: 14](RO/H) Failing row address, bits \<17:0\>. */
17634         uint64_t fbank                 : 4;  /**< [ 35: 32](RO/H) Failing bank number, bits \<3:0\>. */
17635         uint64_t fbunk                 : 1;  /**< [ 36: 36](RO/H) Failing rank number. */
17636         uint64_t fdimm                 : 1;  /**< [ 37: 37](RO/H) Failing DIMM number. */
17637         uint64_t fill_order            : 2;  /**< [ 39: 38](RO/H) Fill order for failing transaction. */
17638         uint64_t fcid                  : 3;  /**< [ 42: 40](RO/H) Reserved.
17639                                                                  Internal:
17640                                                                  Failing CID number. This field is only valid when interfacing with 3DS DRAMs
17641                                                                  (i.e., when either LMC()_EXT_CONFIG[DIMM0_CID] or LMC()_EXT_CONFIG[DIMM1_CID] is
17642                                                                  nonzero). Returns a value of zero otherwise. */
17643         uint64_t reserved_43_63        : 21;
17644 #endif /* Word 0 - End */
17645     } cn88xx;
17646     /* struct bdk_lmcx_scrambled_fadr_cn88xx cn83xx; */
17647 };
17648 typedef union bdk_lmcx_scrambled_fadr bdk_lmcx_scrambled_fadr_t;
17649 
17650 static inline uint64_t BDK_LMCX_SCRAMBLED_FADR(unsigned long a) __attribute__ ((pure, always_inline));
17651 static inline uint64_t BDK_LMCX_SCRAMBLED_FADR(unsigned long a)
17652 {
17653     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
17654         return 0x87e088000330ll + 0x1000000ll * ((a) & 0x0);
17655     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
17656         return 0x87e088000330ll + 0x1000000ll * ((a) & 0x1);
17657     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
17658         return 0x87e088000330ll + 0x1000000ll * ((a) & 0x3);
17659     __bdk_csr_fatal("LMCX_SCRAMBLED_FADR", 1, a, 0, 0, 0);
17660 }
17661 
17662 #define typedef_BDK_LMCX_SCRAMBLED_FADR(a) bdk_lmcx_scrambled_fadr_t
17663 #define bustype_BDK_LMCX_SCRAMBLED_FADR(a) BDK_CSR_TYPE_RSL
17664 #define basename_BDK_LMCX_SCRAMBLED_FADR(a) "LMCX_SCRAMBLED_FADR"
17665 #define device_bar_BDK_LMCX_SCRAMBLED_FADR(a) 0x0 /* PF_BAR0 */
17666 #define busnum_BDK_LMCX_SCRAMBLED_FADR(a) (a)
17667 #define arguments_BDK_LMCX_SCRAMBLED_FADR(a) (a),-1,-1,-1
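
/*
 * Usage sketch (illustrative only, not part of the generated definitions):
 * decoding a raw LMC()_SCRAMBLED_FADR value through the union above. The
 * caller is assumed to have already read the raw 64-bit CSR value with its
 * own accessor; only the bit-field overlay below relies on this file.
 */
static inline void bdk_lmcx_scrambled_fadr_decode_sketch(uint64_t raw,
                                                         unsigned *dimm,
                                                         unsigned *rank,
                                                         unsigned *bank,
                                                         unsigned *row,
                                                         unsigned *col)
{
    bdk_lmcx_scrambled_fadr_t fadr;
    fadr.u = raw;          /* overlay the raw CSR value on the bit fields */
    *dimm = fadr.s.fdimm;  /* failing DIMM number */
    *rank = fadr.s.fbunk;  /* failing rank number */
    *bank = fadr.s.fbank;  /* failing bank, bits <3:0> */
    *row  = fadr.s.frow;   /* failing row, bits <17:0> */
    *col  = fadr.s.fcol;   /* failing column; FCOL<0> is always 0 */
}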
17668 
17669 /**
17670  * Register (RSL) lmc#_seq_ctl
17671  *
17672  * LMC Sequence Control Register
17673  * This register is used to initiate the various control sequences in the LMC.
17674  */
17675 union bdk_lmcx_seq_ctl
17676 {
17677     uint64_t u;
17678     struct bdk_lmcx_seq_ctl_s
17679     {
17680 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17681         uint64_t reserved_10_63        : 54;
17682         uint64_t lmc_mode              : 2;  /**< [  9:  8](RO/H) Readable internal state of LMC.
17683                                                                  0x0 = Init state. LMC is fresh out of reset. Only INIT or
17684                                                                  LMC_SEQ_SEL_E::SREF_EXIT sequence can
17685                                                                  take LMC out of this state to the normal state.
17686                                                                  0x1 = Normal state. LMC is in mission mode.
17687                                                                  0x2 = Self-refresh state. LMC and DRAMs are in Self-refresh mode. If software
17688                                                                  initiated (by running SREF_ENTRY sequence), only LMC_SEQ_SEL_E::SREF_EXIT
17689                                                                  sequence can take LMC out of this state to the normal state.
17690                                                                  0x3 = Power-down state. LMC and DRAMs are in Power-down mode. */
17691         uint64_t reserved_6_7          : 2;
17692         uint64_t seq_complete          : 1;  /**< [  5:  5](RO/H) Sequence complete. This bit is cleared when [INIT_START] is set to a one and
17693                                                                  then is set to one when the sequence is completed. */
17694         uint64_t seq_sel               : 4;  /**< [  4:  1](R/W) Selects the sequence that LMC runs after a 0-\>1 transition on [INIT_START], as
17695                                                                  enumerated by LMC_SEQ_SEL_E.
17696 
17697                                                                  LMC writes the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 CSR field values
17698                                                                  to the Mode registers in the DRAM parts (i.e. MR0-MR6) as part of some of
17699                                                                  these sequences.
17700                                                                  Refer to the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 descriptions for more
17701                                                                  details. */
17702         uint64_t init_start            : 1;  /**< [  0:  0](WO) A 0-\>1 transition starts the DDR memory sequence that is selected by
17703                                                                  LMC()_SEQ_CTL[SEQ_SEL].
17704                                                                  This register is a one-shot and clears itself each time it is set. */
17705 #else /* Word 0 - Little Endian */
17706         uint64_t init_start            : 1;  /**< [  0:  0](WO) A 0-\>1 transition starts the DDR memory sequence that is selected by
17707                                                                  LMC()_SEQ_CTL[SEQ_SEL].
17708                                                                  This register is a one-shot and clears itself each time it is set. */
17709         uint64_t seq_sel               : 4;  /**< [  4:  1](R/W) Selects the sequence that LMC runs after a 0-\>1 transition on [INIT_START], as
17710                                                                  enumerated by LMC_SEQ_SEL_E.
17711 
17712                                                                  LMC writes the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 CSR field values
17713                                                                  to the Mode registers in the DRAM parts (i.e. MR0-MR6) as part of some of
17714                                                                  these sequences.
17715                                                                  Refer to the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 descriptions for more
17716                                                                  details. */
17717         uint64_t seq_complete          : 1;  /**< [  5:  5](RO/H) Sequence complete. This bit is cleared when [INIT_START] is set to a one and
17718                                                                  then is set to one when the sequence is completed. */
17719         uint64_t reserved_6_7          : 2;
17720         uint64_t lmc_mode              : 2;  /**< [  9:  8](RO/H) Readable internal state of LMC.
17721                                                                  0x0 = Init state. LMC is fresh out of reset. Only INIT or
17722                                                                  LMC_SEQ_SEL_E::SREF_EXIT sequence can
17723                                                                  take LMC out of this state to the normal state.
17724                                                                  0x1 = Normal state. LMC is in mission mode.
17725                                                                  0x2 = Self-refresh state. LMC and DRAMs are in Self-refresh mode. If software
17726                                                                  initiated (by running SREF_ENTRY sequence), only LMC_SEQ_SEL_E::SREF_EXIT
17727                                                                  sequence can take LMC out of this state to the normal state.
17728                                                                  0x3 = Power-down state. LMC and DRAMs are in Power-down mode. */
17729         uint64_t reserved_10_63        : 54;
17730 #endif /* Word 0 - End */
17731     } s;
17732     struct bdk_lmcx_seq_ctl_cn8
17733     {
17734 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17735         uint64_t reserved_6_63         : 58;
17736         uint64_t seq_complete          : 1;  /**< [  5:  5](RO/H) Sequence complete. This bit is cleared when [INIT_START] is set to a one and
17737                                                                  then is set to one when the sequence is completed. */
17738         uint64_t seq_sel               : 4;  /**< [  4:  1](R/W) Selects the sequence that LMC runs after a 0-\>1 transition on [INIT_START], as
17739                                                                  enumerated by LMC_SEQ_SEL_E.
17740 
17741                                                                  LMC writes the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 CSR field values
17742                                                                  to the Mode registers in the DRAM parts (i.e. MR0-MR6) as part of some of
17743                                                                  these sequences.
17744                                                                  Refer to the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 descriptions for more
17745                                                                  details. */
17746         uint64_t init_start            : 1;  /**< [  0:  0](WO) A 0-\>1 transition starts the DDR memory sequence that is selected by
17747                                                                  LMC()_SEQ_CTL[SEQ_SEL].
17748                                                                  This register is a one-shot and clears itself each time it is set. */
17749 #else /* Word 0 - Little Endian */
17750         uint64_t init_start            : 1;  /**< [  0:  0](WO) A 0-\>1 transition starts the DDR memory sequence that is selected by
17751                                                                  LMC()_SEQ_CTL[SEQ_SEL].
17752                                                                  This register is a one-shot and clears itself each time it is set. */
17753         uint64_t seq_sel               : 4;  /**< [  4:  1](R/W) Selects the sequence that LMC runs after a 0-\>1 transition on [INIT_START], as
17754                                                                  enumerated by LMC_SEQ_SEL_E.
17755 
17756                                                                  LMC writes the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 CSR field values
17757                                                                  to the Mode registers in the DRAM parts (i.e. MR0-MR6) as part of some of
17758                                                                  these sequences.
17759                                                                  Refer to the LMC()_MODEREG_PARAMS0 and LMC()_MODEREG_PARAMS1 descriptions for more
17760                                                                  details. */
17761         uint64_t seq_complete          : 1;  /**< [  5:  5](RO/H) Sequence complete. This bit is cleared when [INIT_START] is set to a one and
17762                                                                  then is set to one when the sequence is completed. */
17763         uint64_t reserved_6_63         : 58;
17764 #endif /* Word 0 - End */
17765     } cn8;
17766     /* struct bdk_lmcx_seq_ctl_s cn9; */
17767 };
17768 typedef union bdk_lmcx_seq_ctl bdk_lmcx_seq_ctl_t;
17769 
17770 static inline uint64_t BDK_LMCX_SEQ_CTL(unsigned long a) __attribute__ ((pure, always_inline));
17771 static inline uint64_t BDK_LMCX_SEQ_CTL(unsigned long a)
17772 {
17773     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
17774         return 0x87e088000048ll + 0x1000000ll * ((a) & 0x0);
17775     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
17776         return 0x87e088000048ll + 0x1000000ll * ((a) & 0x1);
17777     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
17778         return 0x87e088000048ll + 0x1000000ll * ((a) & 0x3);
17779     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
17780         return 0x87e088000048ll + 0x1000000ll * ((a) & 0x3);
17781     __bdk_csr_fatal("LMCX_SEQ_CTL", 1, a, 0, 0, 0);
17782 }
17783 
17784 #define typedef_BDK_LMCX_SEQ_CTL(a) bdk_lmcx_seq_ctl_t
17785 #define bustype_BDK_LMCX_SEQ_CTL(a) BDK_CSR_TYPE_RSL
17786 #define basename_BDK_LMCX_SEQ_CTL(a) "LMCX_SEQ_CTL"
17787 #define device_bar_BDK_LMCX_SEQ_CTL(a) 0x0 /* PF_BAR0 */
17788 #define busnum_BDK_LMCX_SEQ_CTL(a) (a)
17789 #define arguments_BDK_LMCX_SEQ_CTL(a) (a),-1,-1,-1
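
/*
 * Usage sketch (illustrative only): running one of the LMC_SEQ_SEL_E
 * sequences amounts to writing [SEQ_SEL] together with a 0->1 transition on
 * [INIT_START], then polling [SEQ_COMPLETE]. The csr_read64/csr_write64
 * callbacks are hypothetical stand-ins for whatever CSR accessors the
 * caller already uses; they are not defined in this file.
 */
static inline void bdk_lmcx_seq_ctl_run_sketch(unsigned long lmc, unsigned seq,
                                               uint64_t (*csr_read64)(uint64_t addr),
                                               void (*csr_write64)(uint64_t addr, uint64_t val))
{
    uint64_t addr = BDK_LMCX_SEQ_CTL(lmc); /* per-model CSR address */
    bdk_lmcx_seq_ctl_t ctl;

    ctl.u = 0;
    ctl.s.seq_sel = seq;    /* sequence select, per LMC_SEQ_SEL_E */
    ctl.s.init_start = 1;   /* one-shot: a 0->1 transition starts the sequence */
    csr_write64(addr, ctl.u);

    do                      /* wait for the hardware to finish */
        ctl.u = csr_read64(addr);
    while (!ctl.s.seq_complete);
}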
17790 
17791 /**
17792  * Register (RSL) lmc#_slot_ctl0
17793  *
17794  * LMC Slot Control0 Register
17795  * This register is an assortment of control fields needed by the memory controller. If software
17796  * has not previously written to this register (since the last DRESET), hardware updates the
17797  * fields in this register to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
17798  * LMC()_WLEVEL_RANK(), LMC()_CONTROL, and LMC()_MODEREG_PARAMS0 registers
17799  * change. Ideally, only read this register after LMC has been initialized and
17800  * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
17801  *
17802  * The interpretation of the fields in this register depends on LMC()_CONTROL[DDR2T]:
17803  *
17804  * * If LMC()_CONTROL[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles between when
17805  * the DRAM part registers CAS commands of the first and second types from different cache
17806  * blocks.
17807  *
17808  * * If LMC()_CONTROL[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles between when the DRAM
17809  * part registers CAS commands of the first and second types from different cache blocks.
17810  * FieldValue = 0 is always illegal in this case.
17811  * The hardware-calculated minimums for these fields are shown in LMC()_SLOT_CTL0 Hardware-
17812  * Calculated Minimums.
17813  */
17814 union bdk_lmcx_slot_ctl0
17815 {
17816     uint64_t u;
17817     struct bdk_lmcx_slot_ctl0_s
17818     {
17819 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17820         uint64_t reserved_50_63        : 14;
17821         uint64_t w2r_l_init_ext        : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_INIT] register. */
17822         uint64_t w2r_init_ext          : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_INIT] register. */
17823         uint64_t w2w_l_init            : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17824                                                                  accesses to the same rank and DIMM, and same BG for DDR4. */
17825         uint64_t w2r_l_init            : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17826                                                                  to the same rank and DIMM, and same BG for DDR4. */
17827         uint64_t r2w_l_init            : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17828                                                                  to the same rank and DIMM, and same BG for DDR4. */
17829         uint64_t r2r_l_init            : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17830                                                                  to the same rank and DIMM, and same BG for DDR4. */
17831         uint64_t w2w_init              : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17832                                                                  accesses to the same rank and DIMM. */
17833         uint64_t w2r_init              : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17834                                                                  to the same rank and DIMM. */
17835         uint64_t r2w_init              : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17836                                                                  to the same rank and DIMM. */
17837         uint64_t r2r_init              : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17838                                                                  to the same rank and DIMM. */
17839 #else /* Word 0 - Little Endian */
17840         uint64_t r2r_init              : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17841                                                                  to the same rank and DIMM. */
17842         uint64_t r2w_init              : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17843                                                                  to the same rank and DIMM. */
17844         uint64_t w2r_init              : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17845                                                                  to the same rank and DIMM. */
17846         uint64_t w2w_init              : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17847                                                                  accesses to the same rank and DIMM. */
17848         uint64_t r2r_l_init            : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17849                                                                  to the same rank and DIMM, and same BG for DDR4. */
17850         uint64_t r2w_l_init            : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17851                                                                  to the same rank and DIMM, and same BG for DDR4. */
17852         uint64_t w2r_l_init            : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17853                                                                  to the same rank and DIMM, and same BG for DDR4. */
17854         uint64_t w2w_l_init            : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17855                                                                  accesses to the same rank and DIMM, and same BG for DDR4. */
17856         uint64_t w2r_init_ext          : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_INIT] register. */
17857         uint64_t w2r_l_init_ext        : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_INIT] register. */
17858         uint64_t reserved_50_63        : 14;
17859 #endif /* Word 0 - End */
17860     } s;
17861     struct bdk_lmcx_slot_ctl0_cn88xxp1
17862     {
17863 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17864         uint64_t reserved_50_63        : 14;
17865         uint64_t w2r_l_init_ext        : 1;  /**< [ 49: 49](RO) Reserved. */
17866         uint64_t w2r_init_ext          : 1;  /**< [ 48: 48](RO) Reserved. */
17867         uint64_t w2w_l_init            : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17868                                                                  accesses to the same rank and DIMM, and same BG for DDR4. */
17869         uint64_t w2r_l_init            : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17870                                                                  to the same rank and DIMM, and same BG for DDR4. */
17871         uint64_t r2w_l_init            : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17872                                                                  to the same rank and DIMM, and same BG for DDR4. */
17873         uint64_t r2r_l_init            : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17874                                                                  to the same rank and DIMM, and same BG for DDR4. */
17875         uint64_t w2w_init              : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17876                                                                  accesses to the same rank and DIMM. */
17877         uint64_t w2r_init              : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17878                                                                  to the same rank and DIMM. */
17879         uint64_t r2w_init              : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17880                                                                  to the same rank and DIMM. */
17881         uint64_t r2r_init              : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17882                                                                  to the same rank and DIMM. */
17883 #else /* Word 0 - Little Endian */
17884         uint64_t r2r_init              : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17885                                                                  to the same rank and DIMM. */
17886         uint64_t r2w_init              : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17887                                                                  to the same rank and DIMM. */
17888         uint64_t w2r_init              : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17889                                                                  to the same rank and DIMM. */
17890         uint64_t w2w_init              : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17891                                                                  accesses to the same rank and DIMM. */
17892         uint64_t r2r_l_init            : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17893                                                                  to the same rank and DIMM, and same BG for DDR4. */
17894         uint64_t r2w_l_init            : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17895                                                                  to the same rank and DIMM, and same BG for DDR4. */
17896         uint64_t w2r_l_init            : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17897                                                                  to the same rank and DIMM, and same BG for DDR4. */
17898         uint64_t w2w_l_init            : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17899                                                                  accesses to the same rank and DIMM, and same BG for DDR4. */
17900         uint64_t w2r_init_ext          : 1;  /**< [ 48: 48](RO) Reserved. */
17901         uint64_t w2r_l_init_ext        : 1;  /**< [ 49: 49](RO) Reserved. */
17902         uint64_t reserved_50_63        : 14;
17903 #endif /* Word 0 - End */
17904     } cn88xxp1;
17905     /* struct bdk_lmcx_slot_ctl0_s cn9; */
17906     /* struct bdk_lmcx_slot_ctl0_s cn81xx; */
17907     struct bdk_lmcx_slot_ctl0_cn83xx
17908     {
17909 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17910         uint64_t reserved_50_63        : 14;
17911         uint64_t w2r_l_init_ext        : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_INIT] register. */
17912         uint64_t w2r_init_ext          : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_INIT] register. */
17913         uint64_t w2w_l_init            : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17914                                                                  accesses to the same rank and DIMM, and same BG for DDR4. */
17915         uint64_t w2r_l_init            : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17916                                                                  to the same rank and DIMM, and same BG for DDR4. */
17917         uint64_t r2w_l_init            : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17918                                                                  to the same rank and DIMM, and same BG for DDR4. */
17919         uint64_t r2r_l_init            : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17920                                                                  to the same rank and DIMM, and same BG for DDR4. */
17921         uint64_t w2w_init              : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17922                                                                  accesses to the same rank and DIMM. */
17923         uint64_t w2r_init              : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17924                                                                  to the same rank and DIMM. */
17925         uint64_t r2w_init              : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17926                                                                  to the same rank and DIMM. */
17927         uint64_t r2r_init              : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17928                                                                  to the same rank and DIMM. */
17929 #else /* Word 0 - Little Endian */
17930         uint64_t r2r_init              : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17931                                                                  to the same rank and DIMM. */
17932         uint64_t r2w_init              : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17933                                                                  to the same rank and DIMM. */
17934         uint64_t w2r_init              : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17935                                                                  to the same rank and DIMM. */
17936         uint64_t w2w_init              : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17937                                                                  accesses to the same rank and DIMM. */
17938         uint64_t r2r_l_init            : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17939                                                                  to the same rank and DIMM, and same BG for DDR4. */
17940         uint64_t r2w_l_init            : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17941                                                                  to the same rank and DIMM, and same BG for DDR4. */
17942         uint64_t w2r_l_init            : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17943                                                                  to the same rank and DIMM, and same BG for DDR4. */
17944         uint64_t w2w_l_init            : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17945                                                                  accesses to the same rank and DIMM, and same BG for DDR4. */
17946         uint64_t w2r_init_ext          : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_INIT] register. */
17947         uint64_t w2r_l_init_ext        : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_INIT] register. */
17948         uint64_t reserved_50_63        : 14;
17949 #endif /* Word 0 - End */
17950     } cn83xx;
17951     struct bdk_lmcx_slot_ctl0_cn88xxp2
17952     {
17953 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
17954         uint64_t reserved_50_63        : 14;
17955         uint64_t w2r_l_init_ext        : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the W2R_L_INIT register. */
17956         uint64_t w2r_init_ext          : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the W2R_INIT register. */
17957         uint64_t w2w_l_init            : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17958                                                                  accesses to the same rank and DIMM, and same BG for DDR4. */
17959         uint64_t w2r_l_init            : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17960                                                                  to the same rank and DIMM, and same BG for DDR4. */
17961         uint64_t r2w_l_init            : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17962                                                                  to the same rank and DIMM, and same BG for DDR4. */
17963         uint64_t r2r_l_init            : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17964                                                                  to the same rank and DIMM, and same BG for DDR4. */
17965         uint64_t w2w_init              : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17966                                                                  accesses to the same rank and DIMM. */
17967         uint64_t w2r_init              : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17968                                                                  to the same rank and DIMM. */
17969         uint64_t r2w_init              : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17970                                                                  to the same rank and DIMM. */
17971         uint64_t r2r_init              : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17972                                                                  to the same rank and DIMM. */
17973 #else /* Word 0 - Little Endian */
17974         uint64_t r2r_init              : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17975                                                                  to the same rank and DIMM. */
17976         uint64_t r2w_init              : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17977                                                                  to the same rank and DIMM. */
17978         uint64_t w2r_init              : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17979                                                                  to the same rank and DIMM. */
17980         uint64_t w2w_init              : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17981                                                                  accesses to the same rank and DIMM. */
17982         uint64_t r2r_l_init            : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
17983                                                                  to the same rank and DIMM, and same BG for DDR4. */
17984         uint64_t r2w_l_init            : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
17985                                                                  to the same rank and DIMM, and same BG for DDR4. */
17986         uint64_t w2r_l_init            : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
17987                                                                  to the same rank and DIMM, and same BG for DDR4. */
17988         uint64_t w2w_l_init            : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
17989                                                                  accesses to the same rank and DIMM, and same BG for DDR4. */
17990         uint64_t w2r_init_ext          : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the W2R_INIT register. */
17991         uint64_t w2r_l_init_ext        : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the W2R_L_INIT register. */
17992         uint64_t reserved_50_63        : 14;
17993 #endif /* Word 0 - End */
17994     } cn88xxp2;
17995 };
17996 typedef union bdk_lmcx_slot_ctl0 bdk_lmcx_slot_ctl0_t;
17997 
17998 static inline uint64_t BDK_LMCX_SLOT_CTL0(unsigned long a) __attribute__ ((pure, always_inline));
17999 static inline uint64_t BDK_LMCX_SLOT_CTL0(unsigned long a)
18000 {
18001     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
18002         return 0x87e0880001f8ll + 0x1000000ll * ((a) & 0x0);
18003     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
18004         return 0x87e0880001f8ll + 0x1000000ll * ((a) & 0x1);
18005     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
18006         return 0x87e0880001f8ll + 0x1000000ll * ((a) & 0x3);
18007     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
18008         return 0x87e0880001f8ll + 0x1000000ll * ((a) & 0x3);
18009     __bdk_csr_fatal("LMCX_SLOT_CTL0", 1, a, 0, 0, 0);
18010 }
18011 
18012 #define typedef_BDK_LMCX_SLOT_CTL0(a) bdk_lmcx_slot_ctl0_t
18013 #define bustype_BDK_LMCX_SLOT_CTL0(a) BDK_CSR_TYPE_RSL
18014 #define basename_BDK_LMCX_SLOT_CTL0(a) "LMCX_SLOT_CTL0"
18015 #define device_bar_BDK_LMCX_SLOT_CTL0(a) 0x0 /* PF_BAR0 */
18016 #define busnum_BDK_LMCX_SLOT_CTL0(a) (a)
18017 #define arguments_BDK_LMCX_SLOT_CTL0(a) (a),-1,-1,-1
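
/*
 * Worked example (illustrative only): per the description above, each
 * SLOT_CTL0 spacing field maps to a minimum CK spacing of (FieldValue + 4)
 * when LMC()_CONTROL[DDR2T]=1 and (FieldValue + 3) when
 * LMC()_CONTROL[DDR2T]=0 (a field value of 0 being illegal in that case).
 */
static inline unsigned bdk_lmcx_slot_ctl_min_ck_sketch(unsigned field_value, int ddr2t)
{
    return field_value + (ddr2t ? 4 : 3); /* minimum CK cycles between the two CAS commands */
}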
18018 
18019 /**
18020  * Register (RSL) lmc#_slot_ctl1
18021  *
18022  * LMC Slot Control1 Register
18023  * This register is an assortment of control fields needed by the memory controller. If software
18024  * has not previously written to this register (since the last DRESET), hardware updates the
18025  * fields in this register to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
18026  * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
18027  * Ideally, only read this register after LMC has been initialized and
18028  * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
18029  *
18030  * The interpretation of the fields in this CSR depends on LMC()_CONTROL[DDR2T]:
18031  *
18032  * * If LMC()_CONTROL[DDR2T]=1, (FieldValue + 4) is the minimum CK cycles between when the
18033  * DRAM part registers CAS commands of the first and second types from different cache blocks.
18034  *
18035  * * If LMC()_CONTROL[DDR2T]=0, (FieldValue + 3) is the minimum CK cycles between when the DRAM
18036  * part registers CAS commands of the first and second types from different cache blocks.
18037  * FieldValue = 0 is always illegal in this case.
18038  *
18039  * The hardware calculated minimums for these fields are shown in LMC()_SLOT_CTL1 hardware
18040  * calculated minimums.
18041  */
18042 union bdk_lmcx_slot_ctl1
18043 {
18044     uint64_t u;
18045     struct bdk_lmcx_slot_ctl1_s
18046     {
18047 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
18048         uint64_t reserved_24_63        : 40;
18049         uint64_t w2w_xrank_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18050                                                                  accesses across ranks of the same DIMM. */
18051         uint64_t w2r_xrank_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18052                                                                  across ranks of the same DIMM. */
18053         uint64_t r2w_xrank_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18054                                                                  across ranks of the same DIMM. */
18055         uint64_t r2r_xrank_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18056                                                                  across ranks of the same DIMM. */
18057 #else /* Word 0 - Little Endian */
18058         uint64_t r2r_xrank_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18059                                                                  across ranks of the same DIMM. */
18060         uint64_t r2w_xrank_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18061                                                                  across ranks of the same DIMM. */
18062         uint64_t w2r_xrank_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18063                                                                  across ranks of the same DIMM. */
18064         uint64_t w2w_xrank_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18065                                                                  accesses across ranks of the same DIMM. */
18066         uint64_t reserved_24_63        : 40;
18067 #endif /* Word 0 - End */
18068     } s;
18069     /* struct bdk_lmcx_slot_ctl1_s cn; */
18070 };
18071 typedef union bdk_lmcx_slot_ctl1 bdk_lmcx_slot_ctl1_t;
18072 
18073 static inline uint64_t BDK_LMCX_SLOT_CTL1(unsigned long a) __attribute__ ((pure, always_inline));
18074 static inline uint64_t BDK_LMCX_SLOT_CTL1(unsigned long a)
18075 {
18076     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
18077         return 0x87e088000200ll + 0x1000000ll * ((a) & 0x0);
18078     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
18079         return 0x87e088000200ll + 0x1000000ll * ((a) & 0x1);
18080     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
18081         return 0x87e088000200ll + 0x1000000ll * ((a) & 0x3);
18082     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
18083         return 0x87e088000200ll + 0x1000000ll * ((a) & 0x3);
18084     __bdk_csr_fatal("LMCX_SLOT_CTL1", 1, a, 0, 0, 0);
18085 }
18086 
18087 #define typedef_BDK_LMCX_SLOT_CTL1(a) bdk_lmcx_slot_ctl1_t
18088 #define bustype_BDK_LMCX_SLOT_CTL1(a) BDK_CSR_TYPE_RSL
18089 #define basename_BDK_LMCX_SLOT_CTL1(a) "LMCX_SLOT_CTL1"
18090 #define device_bar_BDK_LMCX_SLOT_CTL1(a) 0x0 /* PF_BAR0 */
18091 #define busnum_BDK_LMCX_SLOT_CTL1(a) (a)
18092 #define arguments_BDK_LMCX_SLOT_CTL1(a) (a),-1,-1,-1
18093 
18094 /**
18095  * Register (RSL) lmc#_slot_ctl2
18096  *
18097  * LMC Slot Control2 Register
18098  * This register is an assortment of control fields needed by the memory controller. If software
18099  * has not previously written to this register (since the last DRESET), hardware updates the
18100  * fields in this register to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
18101  * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
18102  * Ideally, only read this register after LMC has been initialized and
18103  * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
18104  *
18105  * The interpretation of the fields in this CSR depends on LMC()_CONTROL[DDR2T]:
18106  *
18107  * * If LMC()_CONTROL[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles between when the
18108  * DRAM part registers CAS commands of the first and second types from different cache blocks.
18109  *
18110  * * If LMC()_CONTROL[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles between when the
18111  * DRAM part registers CAS commands of the first and second types from different cache blocks.
18112  * FieldValue = 0 is always illegal in this case.
18113  *
18114  * The hardware-calculated minimums for these fields are shown in LMC Registers.
18115  */
18116 union bdk_lmcx_slot_ctl2
18117 {
18118     uint64_t u;
18119     struct bdk_lmcx_slot_ctl2_s
18120     {
18121 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
18122         uint64_t reserved_24_63        : 40;
18123         uint64_t w2w_xdimm_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18124                                                                  accesses across DIMMs. */
18125         uint64_t w2r_xdimm_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18126                                                                  across DIMMs. */
18127         uint64_t r2w_xdimm_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18128                                                                  across DIMMs. */
18129         uint64_t r2r_xdimm_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18130                                                                  across DIMMs. */
18131 #else /* Word 0 - Little Endian */
18132         uint64_t r2r_xdimm_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18133                                                                  across DIMMs. */
18134         uint64_t r2w_xdimm_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18135                                                                  across DIMMs. */
18136         uint64_t w2r_xdimm_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18137                                                                  across DIMMs. */
18138         uint64_t w2w_xdimm_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18139                                                                  accesses across DIMMs. */
18140         uint64_t reserved_24_63        : 40;
18141 #endif /* Word 0 - End */
18142     } s;
18143     /* struct bdk_lmcx_slot_ctl2_s cn; */
18144 };
18145 typedef union bdk_lmcx_slot_ctl2 bdk_lmcx_slot_ctl2_t;
18146 
18147 static inline uint64_t BDK_LMCX_SLOT_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
18148 static inline uint64_t BDK_LMCX_SLOT_CTL2(unsigned long a)
18149 {
18150     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
18151         return 0x87e088000208ll + 0x1000000ll * ((a) & 0x0);
18152     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
18153         return 0x87e088000208ll + 0x1000000ll * ((a) & 0x1);
18154     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
18155         return 0x87e088000208ll + 0x1000000ll * ((a) & 0x3);
18156     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
18157         return 0x87e088000208ll + 0x1000000ll * ((a) & 0x3);
18158     __bdk_csr_fatal("LMCX_SLOT_CTL2", 1, a, 0, 0, 0);
18159 }
18160 
18161 #define typedef_BDK_LMCX_SLOT_CTL2(a) bdk_lmcx_slot_ctl2_t
18162 #define bustype_BDK_LMCX_SLOT_CTL2(a) BDK_CSR_TYPE_RSL
18163 #define basename_BDK_LMCX_SLOT_CTL2(a) "LMCX_SLOT_CTL2"
18164 #define device_bar_BDK_LMCX_SLOT_CTL2(a) 0x0 /* PF_BAR0 */
18165 #define busnum_BDK_LMCX_SLOT_CTL2(a) (a)
18166 #define arguments_BDK_LMCX_SLOT_CTL2(a) (a),-1,-1,-1
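
/*
 * Usage sketch (illustrative only): SLOT_CTL1 carries the cross-rank
 * (same-DIMM) spacing fields and SLOT_CTL2 the cross-DIMM ones; both decode
 * the same way through their unions. The raw values are assumed to have
 * been read by the caller's own CSR accessor.
 */
static inline unsigned bdk_lmcx_slot_ctl_w2r_sketch(uint64_t slot_ctl1_raw,
                                                    uint64_t slot_ctl2_raw,
                                                    int cross_dimm)
{
    if (cross_dimm) {
        bdk_lmcx_slot_ctl2_t c2;
        c2.u = slot_ctl2_raw;
        return c2.s.w2r_xdimm_init; /* write->read spacing across DIMMs */
    } else {
        bdk_lmcx_slot_ctl1_t c1;
        c1.u = slot_ctl1_raw;
        return c1.s.w2r_xrank_init; /* write->read spacing across ranks of one DIMM */
    }
}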
18167 
18168 /**
18169  * Register (RSL) lmc#_slot_ctl3
18170  *
18171  * LMC Slot Control3 Register
18172  * This register is an assortment of control fields needed by the memory controller. If software
18173  * has not previously written to this register (since the last DRESET), hardware updates the
18174  * fields in this register to the minimum allowed value when any of LMC()_RLEVEL_RANK(),
18175  * LMC()_WLEVEL_RANK(), LMC()_CONTROL and LMC()_MODEREG_PARAMS0 change.
18176  * Ideally, only read this register after LMC has been initialized and
18177  * LMC()_RLEVEL_RANK(), LMC()_WLEVEL_RANK() have valid data.
18178  *
18179  * The interpretation of the fields in this CSR depends on LMC()_CONTROL[DDR2T]:
18180  *
18181  * * If LMC()_CONTROL[DDR2T] = 1, (FieldValue + 4) is the minimum CK cycles between when the
18182  * DRAM part registers CAS commands of the first and second types from different cache blocks.
18183  *
18184  * * If LMC()_CONTROL[DDR2T] = 0, (FieldValue + 3) is the minimum CK cycles between when the
18185  * DRAM part registers CAS commands of the first and second types from different cache blocks.
18186  * FieldValue = 0 is always illegal in this case.
18187  *
18188  * The hardware-calculated minimums for these fields are shown in LMC Registers.
18189  */
18190 union bdk_lmcx_slot_ctl3
18191 {
18192     uint64_t u;
18193     struct bdk_lmcx_slot_ctl3_s
18194     {
18195 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
18196         uint64_t reserved_50_63        : 14;
18197         uint64_t w2r_l_xrank_init_ext  : 1;  /**< [ 49: 49](RO) Reserved. */
18198         uint64_t w2r_xrank_init_ext    : 1;  /**< [ 48: 48](RO) Reserved. */
18199         uint64_t w2w_l_xrank_init      : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18200                                                                  accesses to a different logical rank, and same BG for DDR4. */
18201         uint64_t w2r_l_xrank_init      : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18202                                                                  to a different logical rank, and same BG for DDR4. */
18203         uint64_t r2w_l_xrank_init      : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18204                                                                  to a different logical rank, and same BG for DDR4. */
18205         uint64_t r2r_l_xrank_init      : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18206                                                                  to a different logical rank, and same BG for DDR4. */
18207         uint64_t w2w_xrank_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18208                                                                  accesses to a different logical rank. */
18209         uint64_t w2r_xrank_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18210                                                                  to a different logical rank. */
18211         uint64_t r2w_xrank_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18212                                                                  to a different logical rank. */
18213         uint64_t r2r_xrank_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18214                                                                  to a different logical rank. */
18215 #else /* Word 0 - Little Endian */
18216         uint64_t r2r_xrank_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18217                                                                  to a different logical rank. */
18218         uint64_t r2w_xrank_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18219                                                                  to a different logical rank. */
18220         uint64_t w2r_xrank_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18221                                                                  to a different logical rank. */
18222         uint64_t w2w_xrank_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18223                                                                  accesses to a different logical rank. */
18224         uint64_t r2r_l_xrank_init      : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18225                                                                  to a different logical rank, and same BG for DDR4. */
18226         uint64_t r2w_l_xrank_init      : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18227                                                                  to a different logical rank, and same BG for DDR4. */
18228         uint64_t w2r_l_xrank_init      : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18229                                                                  to a different logical rank, and same BG for DDR4. */
18230         uint64_t w2w_l_xrank_init      : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18231                                                                  accesses to a different logical rank, and same BG for DDR4. */
18232         uint64_t w2r_xrank_init_ext    : 1;  /**< [ 48: 48](RO) Reserved. */
18233         uint64_t w2r_l_xrank_init_ext  : 1;  /**< [ 49: 49](RO) Reserved. */
18234         uint64_t reserved_50_63        : 14;
18235 #endif /* Word 0 - End */
18236     } s;
18237     /* struct bdk_lmcx_slot_ctl3_s cn88xxp1; */
18238     struct bdk_lmcx_slot_ctl3_cn9
18239     {
18240 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
18241         uint64_t reserved_50_63        : 14;
18242         uint64_t w2r_l_xrank_init_ext  : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_XRANK_INIT] register. */
18243         uint64_t w2r_xrank_init_ext    : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_XRANK_INIT] register. */
18244         uint64_t w2w_l_xrank_init      : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18245                                                                  accesses to a different logical rank, and same BG for DDR4. */
18246         uint64_t w2r_l_xrank_init      : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18247                                                                  to a different logical rank, and same BG for DDR4. */
18248         uint64_t r2w_l_xrank_init      : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18249                                                                  to a different logical rank, and same BG for DDR4. */
18250         uint64_t r2r_l_xrank_init      : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18251                                                                  to a different logical rank, and same BG for DDR4. */
18252         uint64_t w2w_xrank_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18253                                                                  accesses to a different logical rank. */
18254         uint64_t w2r_xrank_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18255                                                                  to a different logical rank. */
18256         uint64_t r2w_xrank_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18257                                                                  to a different logical rank. */
18258         uint64_t r2r_xrank_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18259                                                                  to a different logical rank. */
18260 #else /* Word 0 - Little Endian */
18261         uint64_t r2r_xrank_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18262                                                                  to a different logical rank. */
18263         uint64_t r2w_xrank_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18264                                                                  to a different logical rank. */
18265         uint64_t w2r_xrank_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18266                                                                  to a different logical rank. */
18267         uint64_t w2w_xrank_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18268                                                                  accesses to a different logical rank. */
18269         uint64_t r2r_l_xrank_init      : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18270                                                                  to a different logical rank, and same BG for DDR4. */
18271         uint64_t r2w_l_xrank_init      : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18272                                                                  to a different logical rank, and same BG for DDR4. */
18273         uint64_t w2r_l_xrank_init      : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18274                                                                  to a different logical rank, and same BG for DDR4. */
18275         uint64_t w2w_l_xrank_init      : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18276                                                                  accesses to a different logical rank, and same BG for DDR4. */
18277         uint64_t w2r_xrank_init_ext    : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the [W2R_XRANK_INIT] register. */
18278         uint64_t w2r_l_xrank_init_ext  : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the [W2R_L_XRANK_INIT] register. */
18279         uint64_t reserved_50_63        : 14;
18280 #endif /* Word 0 - End */
18281     } cn9;
18282     /* struct bdk_lmcx_slot_ctl3_cn9 cn81xx; */
18283     /* struct bdk_lmcx_slot_ctl3_cn9 cn83xx; */
18284     struct bdk_lmcx_slot_ctl3_cn88xxp2
18285     {
18286 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
18287         uint64_t reserved_50_63        : 14;
18288         uint64_t w2r_l_xrank_init_ext  : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the W2R_L_XRANK_INIT register. */
18289         uint64_t w2r_xrank_init_ext    : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the W2R_XRANK_INIT register. */
18290         uint64_t w2w_l_xrank_init      : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18291                                                                  accesses to a different logical rank, and same BG for DDR4. */
18292         uint64_t w2r_l_xrank_init      : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18293                                                                  to a different logical rank, and same BG for DDR4. */
18294         uint64_t r2w_l_xrank_init      : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18295                                                                  to a different logical rank, and same BG for DDR4. */
18296         uint64_t r2r_l_xrank_init      : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18297                                                                  to a different logical rank, and same BG for DDR4. */
18298         uint64_t w2w_xrank_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18299                                                                  accesses to a different logical rank. */
18300         uint64_t w2r_xrank_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18301                                                                  to a different logical rank. */
18302         uint64_t r2w_xrank_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18303                                                                  to a different logical rank. */
18304         uint64_t r2r_xrank_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18305                                                                  to a different logical rank. */
18306 #else /* Word 0 - Little Endian */
18307         uint64_t r2r_xrank_init        : 6;  /**< [  5:  0](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18308                                                                  to a different logical rank. */
18309         uint64_t r2w_xrank_init        : 6;  /**< [ 11:  6](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18310                                                                  to a different logical rank. */
18311         uint64_t w2r_xrank_init        : 6;  /**< [ 17: 12](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18312                                                                  to a different logical rank. */
18313         uint64_t w2w_xrank_init        : 6;  /**< [ 23: 18](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18314                                                                  accesses to a different logical rank. */
18315         uint64_t r2r_l_xrank_init      : 6;  /**< [ 29: 24](R/W/H) Read-to-read spacing control for back-to-back read followed by read cache block accesses
18316                                                                  to a different logical rank, and same BG for DDR4. */
18317         uint64_t r2w_l_xrank_init      : 6;  /**< [ 35: 30](R/W/H) Read-to-write spacing control for back-to-back read followed by write cache block accesses
18318                                                                  to a different logical rank, and same BG for DDR4. */
18319         uint64_t w2r_l_xrank_init      : 6;  /**< [ 41: 36](R/W/H) Write-to-read spacing control for back-to-back write followed by read cache block accesses
18320                                                                  to a different logical rank, and same BG for DDR4. */
18321         uint64_t w2w_l_xrank_init      : 6;  /**< [ 47: 42](R/W/H) Write-to-write spacing control for back-to-back write followed by write cache block
18322                                                                  accesses to a different logical rank, and same BG for DDR4. */
18323         uint64_t w2r_xrank_init_ext    : 1;  /**< [ 48: 48](R/W/H) A 1-bit extension to the W2R_XRANK_INIT register. */
18324         uint64_t w2r_l_xrank_init_ext  : 1;  /**< [ 49: 49](R/W/H) A 1-bit extension to the W2R_L_XRANK_INIT register. */
18325         uint64_t reserved_50_63        : 14;
18326 #endif /* Word 0 - End */
18327     } cn88xxp2;
18328 };
18329 typedef union bdk_lmcx_slot_ctl3 bdk_lmcx_slot_ctl3_t;
18330 
18331 static inline uint64_t BDK_LMCX_SLOT_CTL3(unsigned long a) __attribute__ ((pure, always_inline));
18332 static inline uint64_t BDK_LMCX_SLOT_CTL3(unsigned long a)
18333 {
18334     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
18335         return 0x87e088000248ll + 0x1000000ll * ((a) & 0x0);
18336     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
18337         return 0x87e088000248ll + 0x1000000ll * ((a) & 0x1);
18338     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
18339         return 0x87e088000248ll + 0x1000000ll * ((a) & 0x3);
18340     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
18341         return 0x87e088000248ll + 0x1000000ll * ((a) & 0x3);
18342     __bdk_csr_fatal("LMCX_SLOT_CTL3", 1, a, 0, 0, 0);
18343 }
18344 
18345 #define typedef_BDK_LMCX_SLOT_CTL3(a) bdk_lmcx_slot_ctl3_t
18346 #define bustype_BDK_LMCX_SLOT_CTL3(a) BDK_CSR_TYPE_RSL
18347 #define basename_BDK_LMCX_SLOT_CTL3(a) "LMCX_SLOT_CTL3"
18348 #define device_bar_BDK_LMCX_SLOT_CTL3(a) 0x0 /* PF_BAR0 */
18349 #define busnum_BDK_LMCX_SLOT_CTL3(a) (a)
18350 #define arguments_BDK_LMCX_SLOT_CTL3(a) (a),-1,-1,-1
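
/* Usage sketch (editor's addition, not part of the auto-generated definitions):
 * in the BDK this register is normally accessed through the BDK_CSR_* helpers
 * declared in libbdk-arch/bdk-csr.h, which this minimal header does not pull
 * in. Assuming those helpers and a valid node/LMC index, a read-modify-write
 * of the cross-rank read-to-read spacing could look like the snippet below;
 * the field values are placeholders, and real spacing values come from the
 * DRAM training code in libdram.
 *
 *   BDK_CSR_MODIFY(c, node, BDK_LMCX_SLOT_CTL3(lmc),
 *       c.s.r2r_xrank_init = 4;      // reads to a different logical rank
 *       c.s.r2r_l_xrank_init = 4);   // different logical rank, same BG (DDR4)
 */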
18351 
18352 /**
18353  * Register (RSL) lmc#_timing_params0
18354  *
18355  * LMC Timing Parameters Register 0
18356  */
18357 union bdk_lmcx_timing_params0
18358 {
18359     uint64_t u;
18360     struct bdk_lmcx_timing_params0_s
18361     {
18362 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
18363         uint64_t reserved_61_63        : 3;
18364         uint64_t tckesr                : 4;  /**< [ 60: 57](R/W) Indicates TCKESR constraints. Set this field as follows:
18365                                                                  _ RNDUP[TCKESR(ns) / TCYC(ns)] - 1
18366 
18367                                                                  where TCKESR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18368                                                                  frequency (not data rate). */
18369         uint64_t tzqoper               : 3;  /**< [ 56: 54](R/W) Indicates tZQoper constraints. Set this field as follows:
18370 
18371                                                                  _ RNDUP[tZQoper(nCK) / 128]
18372 
18373                                                                  where tZQoper is from the JEDEC DDR4 spec.
18374 
18375                                                                  TYP = 4. */
18376         uint64_t tbcw                  : 6;  /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
18377                                                                  _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
18378 
18379                                                                  where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
18380                                                                  data rate).
18381 
18382                                                                  TYP = 16. */
18383         uint64_t tcksre                : 4;  /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
18384                                                                  _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
18385 
18386                                                                  where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18387                                                                  frequency (not data rate).
18388 
18389                                                                  TYP = max(5nCK, 10 ns). */
18390         uint64_t trp                   : 5;  /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
18391 
18392                                                                  _ RNDUP[TRP(ns) / TCYC(ns)] - 1
18393 
18394                                                                  where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18395                                                                  frequency
18396                                                                  (not data rate).
18397 
18398                                                                  TYP TRP = 10-15 ns.
18399 
18400                                                                  TYP TRTP = max(4nCK, 7.5 ns). */
18401         uint64_t tzqinit               : 4;  /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
18402 
18403                                                                  _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
18404 
18405                                                                  where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18406                                                                  frequency (not data rate).
18407 
18408                                                                  TYP = 2 (equivalent to 512). */
18409         uint64_t tdllk                 : 4;  /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
18410 
18411                                                                  _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
18412 
18413                                                                  where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18414                                                                  frequency (not data rate).
18415 
18416                                                                  TYP = 3 (equivalent to 768).
18417 
18418                                                                  This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
18419         uint64_t tmod                  : 5;  /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
18420 
18421                                                                  _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
18422 
18423                                                                  where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18424                                                                  frequency (not data rate).
18425 
18426                                                                  TYP = max(24nCK, 15 ns). */
18427         uint64_t tmrd                  : 4;  /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
18428 
18429                                                                  _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
18430 
18431                                                                  where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18432                                                                  frequency (not data rate).
18433 
18434                                                                  TYP = 8nCK. */
18435         uint64_t txpr                  : 6;  /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
18436 
18437                                                                  _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
18438 
18439                                                                  where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18440                                                                  frequency (not data rate).
18441 
18442                                                                  TYP = max(5nCK, TRFC+10 ns). */
18443         uint64_t tcke                  : 4;  /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
18444 
18445                                                                  _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
18446 
18447                                                                  where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18448                                                                  frequency (not data rate).
18449 
18450                                                                  TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
18451 
18452                                                                  Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
18453                                                                  but does not delay the rising edge), care must be taken to set this parameter larger
18454                                                                  to account for this effective reduction in the pulse width. */
18455         uint64_t tzqcs                 : 4;  /**< [ 11:  8](R/W) Indicates TZQCS constraints. This field is set as follows:
18456 
18457                                                                  _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
18458 
18459                                                                  where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18460                                                                  frequency (not data rate).
18461 
18462                                                                  TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
18463         uint64_t reserved_0_7          : 8;
18464 #else /* Word 0 - Little Endian */
18465         uint64_t reserved_0_7          : 8;
18466         uint64_t tzqcs                 : 4;  /**< [ 11:  8](R/W) Indicates TZQCS constraints. This field is set as follows:
18467 
18468                                                                  _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
18469 
18470                                                                  where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18471                                                                  frequency (not data rate).
18472 
18473                                                                  TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
18474         uint64_t tcke                  : 4;  /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
18475 
18476                                                                  _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
18477 
18478                                                                  where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18479                                                                  frequency (not data rate).
18480 
18481                                                                  TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
18482 
18483                                                                  Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
18484                                                                  but does not delay the rising edge), care must be taken to set this parameter larger
18485                                                                  to account for this effective reduction in the pulse width. */
18486         uint64_t txpr                  : 6;  /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
18487 
18488                                                                  _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
18489 
18490                                                                  where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18491                                                                  frequency (not data rate).
18492 
18493                                                                  TYP = max(5nCK, TRFC+10 ns). */
18494         uint64_t tmrd                  : 4;  /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
18495 
18496                                                                  _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
18497 
18498                                                                  where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18499                                                                  frequency (not data rate).
18500 
18501                                                                  TYP = 8nCK. */
18502         uint64_t tmod                  : 5;  /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
18503 
18504                                                                  _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
18505 
18506                                                                  where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18507                                                                  frequency (not data rate).
18508 
18509                                                                  TYP = max(24nCK, 15 ns). */
18510         uint64_t tdllk                 : 4;  /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
18511 
18512                                                                  _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
18513 
18514                                                                  where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18515                                                                  frequency (not data rate).
18516 
18517                                                                  TYP = 3 (equivalent to 768).
18518 
18519                                                                  This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
18520         uint64_t tzqinit               : 4;  /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
18521 
18522                                                                  _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
18523 
18524                                                                  where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18525                                                                  frequency (not data rate).
18526 
18527                                                                  TYP = 2 (equivalent to 512). */
18528         uint64_t trp                   : 5;  /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
18529 
18530                                                                  _ RNDUP[TRP(ns) / TCYC(ns)] - 1
18531 
18532                                                                  where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18533                                                                  frequency
18534                                                                  (not data rate).
18535 
18536                                                                  TYP TRP = 10-15 ns.
18537 
18538                                                                  TYP TRTP = max(4nCK, 7.5 ns). */
18539         uint64_t tcksre                : 4;  /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
18540                                                                  _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
18541 
18542                                                                  where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18543                                                                  frequency (not data rate).
18544 
18545                                                                  TYP = max(5nCK, 10 ns). */
18546         uint64_t tbcw                  : 6;  /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
18547                                                                  _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
18548 
18549                                                                  where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
18550                                                                  data rate).
18551 
18552                                                                  TYP = 16. */
18553         uint64_t tzqoper               : 3;  /**< [ 56: 54](R/W) Indicates tZQoper constraints. Set this field as follows:
18554 
18555                                                                  _ RNDUP[tZQoper(nCK) / 128]
18556 
18557                                                                  where tZQoper is from the JEDEC DDR4 spec.
18558 
18559                                                                  TYP = 4. */
18560         uint64_t tckesr                : 4;  /**< [ 60: 57](R/W) Indicates TCKESR constraints. Set this field as follows:
18561                                                                  _ RNDUP[TCKESR(ns) / TCYC(ns)] - 1
18562 
18563                                                                  where TCKESR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18564                                                                  frequency (not data rate). */
18565         uint64_t reserved_61_63        : 3;
18566 #endif /* Word 0 - End */
18567     } s;
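    /* Editor's note (hedged, not from the register specification itself): most of
     * the fields above encode a nanosecond timing parameter as
     * RNDUP[t(ns) / TCYC(ns)] - 1, where TCYC(ns) is the DDR clock cycle time
     * (the clock runs at half the data rate). Worked example, assuming DDR4-2400
     * (TCYC ~= 0.833 ns) and TRP = 13.75 ns:
     *
     *   RNDUP[13.75 / 0.833] - 1 = RNDUP[16.5] - 1 = 17 - 1 = 16
     *
     * so software would program [TRP] = 16. In integer C this is typically done
     * in picoseconds, e.g. (illustrative names, not BDK symbols):
     *
     *   uint64_t trp_fld = ((trp_ps + tcyc_ps - 1) / tcyc_ps) - 1;
     */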
18568     struct bdk_lmcx_timing_params0_cn88xxp1
18569     {
18570 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
18571         uint64_t reserved_53_63        : 11;
18572         uint64_t tbcw                  : 5;  /**< [ 52: 48](R/W) Indicates tBCW constraints. Set this field as follows:
18573                                                                  _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
18574 
18575                                                                  where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
18576                                                                  data rate).
18577 
18578                                                                  TYP = 16. */
18579         uint64_t tcksre                : 4;  /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
18580                                                                  _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
18581 
18582                                                                  where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18583                                                                  frequency (not data rate).
18584 
18585                                                                  TYP = max(5nCK, 10 ns). */
18586         uint64_t trp                   : 5;  /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
18587 
18588                                                                  _ RNDUP[TRP(ns) / TCYC(ns)] - 1
18589 
18590                                                                  where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18591                                                                  frequency
18592                                                                  (not data rate).
18593 
18594                                                                  TYP TRP = 10-15 ns.
18595 
18596                                                                  TYP TRTP = max(4nCK, 7.5 ns). */
18597         uint64_t tzqinit               : 4;  /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
18598 
18599                                                                  _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
18600 
18601                                                                  where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18602                                                                  frequency (not data rate).
18603 
18604                                                                  TYP = 2 (equivalent to 512). */
18605         uint64_t tdllk                 : 4;  /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
18606 
18607                                                                  _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
18608 
18609                                                                  where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18610                                                                  frequency (not data rate).
18611 
18612                                                                  TYP = 2 (equivalent to 512).
18613 
18614                                                                  This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
18615         uint64_t tmod                  : 5;  /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
18616 
18617                                                                  _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
18618 
18619                                                                  where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18620                                                                  frequency (not data rate).
18621 
18622                                                                  TYP = max(12nCK, 15 ns). */
18623         uint64_t tmrd                  : 4;  /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
18624 
18625                                                                  _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
18626 
18627                                                                  where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18628                                                                  frequency (not data rate).
18629 
18630                                                                  TYP = 4nCK. */
18631         uint64_t txpr                  : 6;  /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
18632 
18633                                                                  _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
18634 
18635                                                                  where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18636                                                                  frequency (not data rate).
18637 
18638                                                                  TYP = max(5nCK, TRFC+10 ns). */
18639         uint64_t tcke                  : 4;  /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
18640 
18641                                                                  _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
18642 
18643                                                                  where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18644                                                                  frequency (not data rate).
18645 
18646                                                                  TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
18647 
18648                                                                  Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
18649                                                                  but does not delay the rising edge), care must be taken to set this parameter larger
18650                                                                  to account for this effective reduction in the pulse width. */
18651         uint64_t tzqcs                 : 4;  /**< [ 11:  8](R/W) Indicates TZQCS constraints. This field is set as follows:
18652 
18653                                                                  _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
18654 
18655                                                                  where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18656                                                                  frequency (not data rate).
18657 
18658                                                                  TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
18659         uint64_t reserved_0_7          : 8;
18660 #else /* Word 0 - Little Endian */
18661         uint64_t reserved_0_7          : 8;
18662         uint64_t tzqcs                 : 4;  /**< [ 11:  8](R/W) Indicates TZQCS constraints. This field is set as follows:
18663 
18664                                                                  _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
18665 
18666                                                                  where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18667                                                                  frequency (not data rate).
18668 
18669                                                                  TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
18670         uint64_t tcke                  : 4;  /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
18671 
18672                                                                  _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
18673 
18674                                                                  where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18675                                                                  frequency (not data rate).
18676 
18677                                                                  TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
18678 
18679                                                                  Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
18680                                                                  but does not delay the rising edge), care must be taken to set this parameter larger
18681                                                                  to account for this effective reduction in the pulse width. */
18682         uint64_t txpr                  : 6;  /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
18683 
18684                                                                  _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
18685 
18686                                                                  where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18687                                                                  frequency (not data rate).
18688 
18689                                                                  TYP = max(5nCK, TRFC+10 ns). */
18690         uint64_t tmrd                  : 4;  /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
18691 
18692                                                                  _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
18693 
18694                                                                  where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18695                                                                  frequency (not data rate).
18696 
18697                                                                  TYP = 4nCK. */
18698         uint64_t tmod                  : 5;  /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
18699 
18700                                                                  _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
18701 
18702                                                                  where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18703                                                                  frequency (not data rate).
18704 
18705                                                                  TYP = max(12nCK, 15 ns). */
18706         uint64_t tdllk                 : 4;  /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
18707 
18708                                                                  _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
18709 
18710                                                                  where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18711                                                                  frequency (not data rate).
18712 
18713                                                                  TYP = 2 (equivalent to 512).
18714 
18715                                                                  This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
18716         uint64_t tzqinit               : 4;  /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
18717 
18718                                                                  _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
18719 
18720                                                                  where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18721                                                                  frequency (not data rate).
18722 
18723                                                                  TYP = 2 (equivalent to 512). */
18724         uint64_t trp                   : 5;  /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
18725 
18726                                                                  _ RNDUP[TRP(ns) / TCYC(ns)] - 1
18727 
18728                                                                  where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18729                                                                  frequency
18730                                                                  (not data rate).
18731 
18732                                                                  TYP TRP = 10-15 ns.
18733 
18734                                                                  TYP TRTP = max(4nCK, 7.5 ns). */
18735         uint64_t tcksre                : 4;  /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
18736                                                                  _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
18737 
18738                                                                  where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18739                                                                  frequency (not data rate).
18740 
18741                                                                  TYP = max(5nCK, 10 ns). */
18742         uint64_t tbcw                  : 5;  /**< [ 52: 48](R/W) Indicates tBCW constraints. Set this field as follows:
18743                                                                  _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
18744 
18745                                                                  where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
18746                                                                  data rate).
18747 
18748                                                                  TYP = 16. */
18749         uint64_t reserved_53_63        : 11;
18750 #endif /* Word 0 - End */
18751     } cn88xxp1;
18752     struct bdk_lmcx_timing_params0_cn9
18753     {
18754 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
18755         uint64_t reserved_61_63        : 3;
18756         uint64_t tckesr                : 4;  /**< [ 60: 57](R/W) Indicates TCKESR constraints. Set this field as follows:
18757                                                                  _ RNDUP[TCKESR(ns) / TCYC(ns)] - 1
18758 
18759                                                                  where TCKESR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18760                                                                  frequency (not data rate). */
18761         uint64_t tzqoper               : 3;  /**< [ 56: 54](R/W) Indicates tZQoper constraints. Set this field as follows:
18762 
18763                                                                  _ RNDUP[tZQoper(nCK) / 128]
18764 
18765                                                                  where tZQoper is from the JEDEC DDR4 spec.
18766 
18767                                                                  TYP = 4. */
18768         uint64_t tbcw                  : 6;  /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
18769                                                                  _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
18770 
18771                                                                  where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
18772                                                                  data rate).
18773 
18774                                                                  TYP = 16. */
18775         uint64_t tcksre                : 4;  /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
18776                                                                  _ RNDUP[max(TCKSRE(ns), TCKOFF(ns)) / TCYC(ns)] - 1
18777 
18778                                                                  where TCKSRE is from the JEDEC DDR4 spec, TCKOFF is from the RCD spec, and TCYC(ns) is the DDR
18779                                                                  clock frequency (not data rate). */
18780         uint64_t trp                   : 5;  /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
18781 
18782                                                                  _ RNDUP[TRP(ns) / TCYC(ns)] - 1
18783 
18784                                                                  where TRP is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
18785                                                                  rate).
18786 
18787                                                                  TYP TRP = 12.5-15 ns. */
18788         uint64_t tzqinit               : 4;  /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
18789 
18790                                                                  _ RNDUP[TZQINIT(nCK) / 256]
18791 
18792                                                                  where TZQINIT is from the JEDEC DDR4 spec.
18793                                                                  TYP = 4 (equivalent to 1024 cycles). */
18794         uint64_t tdllk                 : 4;  /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
18795 
18796                                                                  _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
18797 
18798                                                                  where TDLLK is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18799                                                                  frequency (not data rate).
18800 
18801                                                                  TYP = 3 (equivalent to 768).
18802 
18803                                                                  This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
18804         uint64_t tmod                  : 5;  /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
18805 
18806                                                                  _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
18807 
18808                                                                  where TMOD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18809                                                                  frequency (not data rate).
18810 
18811                                                                  TYP = max(24nCK, 15 ns). */
18812         uint64_t tmrd                  : 4;  /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
18813 
18814                                                                  _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
18815 
18816                                                                  where TMRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18817                                                                  frequency (not data rate).
18818 
18819                                                                  TYP = 8nCK. */
18820         uint64_t txpr                  : 6;  /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
18821 
18822                                                                  _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
18823 
18824                                                                  where TXPR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18825                                                                  frequency (not data rate).
18826 
18827                                                                  TYP = max(5nCK, TRFC+10 ns). */
18828         uint64_t tcke                  : 4;  /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
18829 
18830                                                                  _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
18831 
18832                                                                  where TCKE is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18833                                                                  frequency (not data rate).
18834 
18835                                                                  TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
18836 
18837                                                                  Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
18838                                                                  but does not delay the rising edge), care must be taken to set this parameter larger
18839                                                                  to account for this effective reduction in the pulse width. */
18840         uint64_t tzqcs                 : 4;  /**< [ 11:  8](R/W) Indicates TZQCS constraints. This field is set as follows:
18841 
18842                                                                  _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
18843 
18844                                                                  where TZQCS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18845                                                                  frequency (not data rate).
18846 
18847                                                                  TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
18848         uint64_t reserved_0_7          : 8;
18849 #else /* Word 0 - Little Endian */
18850         uint64_t reserved_0_7          : 8;
18851         uint64_t tzqcs                 : 4;  /**< [ 11:  8](R/W) Indicates TZQCS constraints. This field is set as follows:
18852 
18853                                                                  _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
18854 
18855                                                                  where TZQCS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18856                                                                  frequency (not data rate).
18857 
18858                                                                  TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
18859         uint64_t tcke                  : 4;  /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
18860 
18861                                                                  _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
18862 
18863                                                                  where TCKE is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18864                                                                  frequency (not data rate).
18865 
18866                                                                  TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
18867 
18868                                                                  Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
18869                                                                  but does not delay the rising edge), care must be taken to set this parameter larger
18870                                                                  to account for this effective reduction in the pulse width. */
18871         uint64_t txpr                  : 6;  /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
18872 
18873                                                                  _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
18874 
18875                                                                  where TXPR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18876                                                                  frequency (not data rate).
18877 
18878                                                                  TYP = max(5nCK, TRFC+10 ns). */
18879         uint64_t tmrd                  : 4;  /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
18880 
18881                                                                  _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
18882 
18883                                                                  where TMRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18884                                                                  frequency (not data rate).
18885 
18886                                                                  TYP = 8nCK. */
18887         uint64_t tmod                  : 5;  /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
18888 
18889                                                                  _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
18890 
18891                                                                  where TMOD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18892                                                                  frequency (not data rate).
18893 
18894                                                                  TYP = max(24nCK, 15 ns). */
18895         uint64_t tdllk                 : 4;  /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
18896 
18897                                                                  _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
18898 
18899                                                                  where TDLLK is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18900                                                                  frequency (not data rate).
18901 
18902                                                                  TYP = 3 (equivalent to 768).
18903 
18904                                                                  This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
18905         uint64_t tzqinit               : 4;  /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
18906 
18907                                                                  _ RNDUP[TZQINIT(nCK) / 256]
18908 
18909                                                                  where TZQINIT is from the JEDEC DDR4 spec.
18910                                                                  TYP = 4 (equivalent to 1024 cycles). */
18911         uint64_t trp                   : 5;  /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
18912 
18913                                                                  _ RNDUP[TRP(ns) / TCYC(ns)] - 1
18914 
18915                                                                  where TRP is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
18916                                                                  rate).
18917 
18918                                                                  TYP TRP = 12.5-15 ns. */
18919         uint64_t tcksre                : 4;  /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
18920                                                                  _ RNDUP[max(TCKSRE(ns), TCKOFF(ns)) / TCYC(ns)] - 1
18921 
18922                                                                  where TCKSRE is from the JEDEC DDR4 spec, TCKOFF is from the RCD spec, and TCYC(ns) is the DDR
18923                                                                  clock frequency (not data rate). */
18924         uint64_t tbcw                  : 6;  /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
18925                                                                  _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
18926 
18927                                                                  where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
18928                                                                  data rate).
18929 
18930                                                                  TYP = 16. */
18931         uint64_t tzqoper               : 3;  /**< [ 56: 54](R/W) Indicates tZQoper constraints. Set this field as follows:
18932 
18933                                                                  _ RNDUP[tZQoper(nCK) / 128]
18934 
18935                                                                  where tZQoper is from the JEDEC DDR4 spec.
18936 
18937                                                                  TYP = 4. */
18938         uint64_t tckesr                : 4;  /**< [ 60: 57](R/W) Indicates TCKESR constraints. Set this field as follows:
18939                                                                  _ RNDUP[TCKESR(ns) / TCYC(ns)] - 1
18940 
18941                                                                  where TCKESR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
18942                                                                  frequency (not data rate). */
18943         uint64_t reserved_61_63        : 3;
18944 #endif /* Word 0 - End */
18945     } cn9;
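    /* Editor's note (hedged consistency check): the CN9 variant switches
     * [TZQINIT] and [TZQOPER] to clock-count encodings. With the JEDEC DDR4
     * values tZQinit = 1024 nCK and tZQoper = 512 nCK:
     *
     *   RNDUP[1024 / 256] = 4 and RNDUP[512 / 128] = 4
     *
     * which matches the TYP = 4 noted in the field descriptions above.
     */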
18946     struct bdk_lmcx_timing_params0_cn81xx
18947     {
18948 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
18949         uint64_t reserved_54_63        : 10;
18950         uint64_t tbcw                  : 6;  /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
18951                                                                  _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
18952 
18953                                                                  where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
18954                                                                  data rate).
18955 
18956                                                                  TYP = 16. */
18957         uint64_t tcksre                : 4;  /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
18958                                                                  _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
18959 
18960                                                                  where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18961                                                                  frequency (not data rate).
18962 
18963                                                                  TYP = max(5nCK, 10 ns). */
18964         uint64_t trp                   : 5;  /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
18965 
18966                                                                  _ RNDUP[TRP(ns) / TCYC(ns)] - 1
18967 
18968                                                                  where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18969                                                                  frequency
18970                                                                  (not data rate).
18971 
18972                                                                  TYP TRP = 10-15 ns.
18973 
18974                                                                  TYP TRTP = max(4nCK, 7.5 ns). */
18975         uint64_t tzqinit               : 4;  /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
18976 
18977                                                                  _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
18978 
18979                                                                  where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18980                                                                  frequency (not data rate).
18981 
18982                                                                  TYP = 2 (equivalent to 512). */
18983         uint64_t tdllk                 : 4;  /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
18984 
18985                                                                  _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
18986 
18987                                                                  where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18988                                                                  frequency (not data rate).
18989 
18990                                                                  TYP = 3 (equivalent to 768).
18991 
18992                                                                  This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
18993         uint64_t tmod                  : 5;  /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
18994 
18995                                                                  _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
18996 
18997                                                                  where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
18998                                                                  frequency (not data rate).
18999 
19000                                                                  TYP = max(24nCK, 15 ns). */
19001         uint64_t tmrd                  : 4;  /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
19002 
19003                                                                  _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
19004 
19005                                                                  where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19006                                                                  frequency (not data rate).
19007 
19008                                                                  TYP = 8nCK. */
19009         uint64_t txpr                  : 6;  /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
19010 
19011                                                                  _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
19012 
19013                                                                  where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19014                                                                  frequency (not data rate).
19015 
19016                                                                  TYP = max(5nCK, TRFC+10 ns). */
19017         uint64_t tcke                  : 4;  /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
19018 
19019                                                                  _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
19020 
19021                                                                  where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19022                                                                  frequency (not data rate).
19023 
19024                                                                  TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
19025 
19026                                                                  Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
19027                                                                  but does not delay the rising edge), care must be taken to set this parameter larger
19028                                                                  to account for this effective reduction in the pulse width. */
19029         uint64_t tzqcs                 : 4;  /**< [ 11:  8](R/W) Indicates TZQCS constraints. This field is set as follows:
19030 
19031                                                                  _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
19032 
19033                                                                  where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19034                                                                  frequency (not data rate).
19035 
19036                                                                  TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
19037         uint64_t reserved_0_7          : 8;
19038 #else /* Word 0 - Little Endian */
19039         uint64_t reserved_0_7          : 8;
19040         uint64_t tzqcs                 : 4;  /**< [ 11:  8](R/W) Indicates TZQCS constraints. This field is set as follows:
19041 
19042                                                                  _ RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))]
19043 
19044                                                                  where TZQCS is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19045                                                                  frequency (not data rate).
19046 
19047                                                                  TYP \>= 8 (greater-than-or-equal-to 128), to allow for dclk90 calibration. */
19048         uint64_t tcke                  : 4;  /**< [ 15: 12](R/W) Indicates TCKE constraints. Set this field as follows:
19049 
19050                                                                  _ RNDUP[TCKE(ns) / TCYC(ns)] - 1
19051 
19052                                                                  where TCKE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19053                                                                  frequency (not data rate).
19054 
19055                                                                  TYP = max(3nCK, 7.5/5.625/5.625/5 ns).
19056 
19057                                                                  Because a DDR4 register can shorten the pulse width of CKE (it delays the falling edge
19058                                                                  but does not delay the rising edge), care must be taken to set this parameter larger
19059                                                                  to account for this effective reduction in the pulse width. */
19060         uint64_t txpr                  : 6;  /**< [ 21: 16](R/W) Indicates TXPR constraints. Set this field as follows:
19061 
19062                                                                  _ RNDUP[TXPR(ns) / (16 * TCYC(ns))]
19063 
19064                                                                  where TXPR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19065                                                                  frequency (not data rate).
19066 
19067                                                                  TYP = max(5nCK, TRFC+10 ns). */
19068         uint64_t tmrd                  : 4;  /**< [ 25: 22](R/W) Indicates TMRD constraints. Set this field as follows:
19069 
19070                                                                  _ RNDUP[TMRD(ns) / TCYC(ns)] - 1
19071 
19072                                                                  where TMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19073                                                                  frequency (not data rate).
19074 
19075                                                                  TYP = 8nCK. */
19076         uint64_t tmod                  : 5;  /**< [ 30: 26](R/W) Indicates tMOD constraints. Set this field as follows:
19077 
19078                                                                  _ RNDUP[TMOD(ns) / TCYC(ns)] - 1
19079 
19080                                                                  where TMOD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19081                                                                  frequency (not data rate).
19082 
19083                                                                  TYP = max(24nCK, 15 ns). */
19084         uint64_t tdllk                 : 4;  /**< [ 34: 31](R/W) Indicates TDLLK constraints. Set this field as follows:
19085 
19086                                                                  _ RNDUP[TDLLK(ns) / (256 * TCYC(ns))]
19087 
19088                                                                  where TDLLK is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19089                                                                  frequency (not data rate).
19090 
19091                                                                  TYP = 3 (equivalent to 768).
19092 
19093                                                                  This parameter is used in self-refresh exit and assumed to be greater than TRFC. */
19094         uint64_t tzqinit               : 4;  /**< [ 38: 35](R/W) Indicates TZQINIT constraints. Set this field as follows:
19095 
19096                                                                  _ RNDUP[TZQINIT(ns) / (256 * TCYC(ns))]
19097 
19098                                                                  where TZQINIT is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19099                                                                  frequency (not data rate).
19100 
19101                                                                  TYP = 2 (equivalent to 512). */
19102         uint64_t trp                   : 5;  /**< [ 43: 39](R/W) Indicates TRP constraints. Set TRP as follows:
19103 
19104                                                                  _ RNDUP[TRP(ns) / TCYC(ns)] - 1
19105 
19106                                                                  where TRP and TRTP are from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19107                                                                  frequency
19108                                                                  (not data rate).
19109 
19110                                                                  TYP TRP = 10-15 ns.
19111 
19112                                                                  TYP TRTP = max(4nCK, 7.5 ns). */
19113         uint64_t tcksre                : 4;  /**< [ 47: 44](R/W) Indicates TCKSRE constraints. Set this field as follows:
19114                                                                  _ RNDUP[TCKSRE(ns) / TCYC(ns)] - 1
19115 
19116                                                                  where TCKSRE is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19117                                                                  frequency (not data rate).
19118 
19119                                                                  TYP = max(5nCK, 10 ns). */
19120         uint64_t tbcw                  : 6;  /**< [ 53: 48](R/W) Indicates tBCW constraints. Set this field as follows:
19121                                                                  _ RNDUP[TBCW(ns) / TCYC(ns)] - 1
19122 
19123                                                                  where TBCW is from the JEDEC DDR4DB spec, and TCYC(ns) is the DDR clock frequency (not
19124                                                                  data rate).
19125 
19126                                                                  TYP = 16. */
19127         uint64_t reserved_54_63        : 10;
19128 #endif /* Word 0 - End */
19129     } cn81xx;
19130     /* struct bdk_lmcx_timing_params0_cn81xx cn83xx; */
19131     /* struct bdk_lmcx_timing_params0_cn81xx cn88xxp2; */
19132 };
19133 typedef union bdk_lmcx_timing_params0 bdk_lmcx_timing_params0_t;
19134 
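/* Editor's note (illustrative sketch, not part of the auto-generated definitions):
 * the field descriptions above all follow the same pattern, RNDUP[t(ns) / (k * TCYC(ns))],
 * optionally minus one. The helpers below show one hedged way to apply those formulas.
 * The example_* names are assumptions introduced here, the raw-value member .u is assumed
 * to exist as in the other LMC unions in this file, and timings are taken in picoseconds
 * so that fractional-nanosecond parameters such as 7.5 ns stay in integer arithmetic;
 * callers must still check that each result fits its bit field. */
static inline uint64_t example_rndup_ps(uint64_t t_ps, uint64_t divisor_ps)
{
    /* Ceiling division, i.e. RNDUP[t_ps / divisor_ps]. */
    return (t_ps + divisor_ps - 1) / divisor_ps;
}

static inline bdk_lmcx_timing_params0_t
example_encode_timing_params0_cn81xx(uint64_t tcyc_ps, uint64_t trp_ps,
                                     uint64_t tzqinit_ps, uint64_t tzqcs_ps)
{
    bdk_lmcx_timing_params0_t p;
    p.u = 0;
    /* TRP:     RNDUP[TRP(ns) / TCYC(ns)] - 1 */
    p.cn81xx.trp     = example_rndup_ps(trp_ps, tcyc_ps) - 1;
    /* TZQINIT: RNDUP[TZQINIT(ns) / (256 * TCYC(ns))] */
    p.cn81xx.tzqinit = example_rndup_ps(tzqinit_ps, 256 * tcyc_ps);
    /* TZQCS:   RNDUP[(2 * TZQCS(ns)) / (16 * TCYC(ns))] */
    p.cn81xx.tzqcs   = example_rndup_ps(2 * tzqcs_ps, 16 * tcyc_ps);
    return p;
}
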
19135 static inline uint64_t BDK_LMCX_TIMING_PARAMS0(unsigned long a) __attribute__ ((pure, always_inline));
19136 static inline uint64_t BDK_LMCX_TIMING_PARAMS0(unsigned long a)
19137 {
19138     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
19139         return 0x87e088000198ll + 0x1000000ll * ((a) & 0x0);
19140     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
19141         return 0x87e088000198ll + 0x1000000ll * ((a) & 0x1);
19142     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
19143         return 0x87e088000198ll + 0x1000000ll * ((a) & 0x3);
19144     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
19145         return 0x87e088000198ll + 0x1000000ll * ((a) & 0x3);
19146     __bdk_csr_fatal("LMCX_TIMING_PARAMS0", 1, a, 0, 0, 0);
19147 }
19148 
19149 #define typedef_BDK_LMCX_TIMING_PARAMS0(a) bdk_lmcx_timing_params0_t
19150 #define bustype_BDK_LMCX_TIMING_PARAMS0(a) BDK_CSR_TYPE_RSL
19151 #define basename_BDK_LMCX_TIMING_PARAMS0(a) "LMCX_TIMING_PARAMS0"
19152 #define device_bar_BDK_LMCX_TIMING_PARAMS0(a) 0x0 /* PF_BAR0 */
19153 #define busnum_BDK_LMCX_TIMING_PARAMS0(a) (a)
19154 #define arguments_BDK_LMCX_TIMING_PARAMS0(a) (a),-1,-1,-1
19155 
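/* Editor's note (illustrative sketch): the generated address function and the field
 * layout above can be combined for a read-modify-write of LMC(a)_TIMING_PARAMS0.
 * To avoid naming a specific BDK CSR accessor, the 64-bit load/store routines are
 * passed in as hypothetical function pointers; BDK_LMCX_TIMING_PARAMS0() itself falls
 * through to __bdk_csr_fatal() for an out-of-range LMC index. */
static inline void example_update_trp(unsigned long lmc, uint64_t trp_field,
                                      uint64_t (*read64)(uint64_t addr),
                                      void (*write64)(uint64_t addr, uint64_t value))
{
    uint64_t addr = BDK_LMCX_TIMING_PARAMS0(lmc);
    bdk_lmcx_timing_params0_t p;
    p.u = read64(addr);            /* fetch the current 64-bit register value */
    p.cn81xx.trp = trp_field;      /* RNDUP[TRP(ns) / TCYC(ns)] - 1, per the field description */
    write64(addr, p.u);            /* write the whole word back */
}
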
19156 /**
19157  * Register (RSL) lmc#_timing_params1
19158  *
19159  * LMC Timing Parameters Register 1
19160  */
19161 union bdk_lmcx_timing_params1
19162 {
19163     uint64_t u;
19164     struct bdk_lmcx_timing_params1_s
19165     {
19166 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
19167         uint64_t tstab                 : 5;  /**< [ 63: 59](R/W) Indicates tSTAB constraints. Set this field as follows:
19168 
19169                                                                  _ RNDUP[tSTAB(ns) / (512 * TCYC(ns))]
19170 
19171                                                                  where tSTAB is from the JEDEC DDR4 RCD spec, and TCYC(ns) is the DDR clock
19172                                                                  frequency (not data rate). */
19173         uint64_t txp_ext               : 1;  /**< [ 58: 58](R/W) A 1-bit extension to the TXP register
19174                                                                  above. */
19175         uint64_t trcd_ext              : 1;  /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
19176         uint64_t tpdm_full_cycle_ena   : 1;  /**< [ 56: 56](R/W) When set, this field enables the addition of a one-cycle delay to the
19177                                                                  write/read latency calculation. This compensates for the case where the
19178                                                                  tPDM delay in the RCD of an RDIMM is greater than one cycle.
19179                                                                  Only valid in RDIMM mode (LMC()_CONTROL[RDIMM_ENA]=1). */
19180         uint64_t trfc_dlr              : 7;  /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
19181 
19182                                                                  _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
19183 
19184                                                                  where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
19185                                                                  frequency (not data rate).
19186 
19187                                                                  TYP = 90-120 ns.
19188 
19189                                                                  0x0 = reserved.
19190                                                                  0x1 = 8 TCYC.
19191                                                                  0x2 = 16 TCYC.
19192                                                                  0x3 = 24 TCYC.
19193                                                                  0x4 = 32 TCYC.
19194                                                                  ...
19195                                                                  0x7E = 1008 TCYC.
19196                                                                  0x7F = 1016 TCYC. */
19197         uint64_t txpdll                : 5;  /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
19198 
19199                                                                  _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
19200 
19201                                                                  where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19202                                                                  frequency (not data rate).
19203 
19204                                                                  TYP=max(10nCK, 24 ns) */
19205         uint64_t tfaw                  : 5;  /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
19206 
19207                                                                  _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
19208 
19209                                                                  where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19210                                                                  frequency (not data rate).
19211 
19212                                                                  TYP = 30-40 ns
19213 
19214                                                                  Internal:
19215                                                                  When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
19216 
19217                                                                  _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
19218 
19219                                                                  where tFAW_SLR is the Four activate window to the same logical rank from the
19220                                                                  JEDEC DDR4 3D Stacked spec. */
19221         uint64_t twldqsen              : 4;  /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
19222 
19223                                                                  _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
19224 
19225                                                                  where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19226                                                                  frequency (not data rate).
19227 
19228                                                                  TYP = max(25nCK) */
19229         uint64_t twlmrd                : 4;  /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
19230 
19231                                                                  _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
19232 
19233                                                                  where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19234                                                                  frequency (not data rate).
19235 
19236                                                                  TYP = max(40nCK) */
19237         uint64_t txp                   : 3;  /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
19238 
19239                                                                  _ RNDUP[TXP(ns) / TCYC(ns)] - 1
19240 
19241                                                                  where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19242                                                                  frequency (not data rate).
19243 
19244                                                                  TYP=max(3nCK, 7.5 ns) */
19245         uint64_t trrd                  : 3;  /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
19246 
19247                                                                  _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
19248 
19249                                                                  where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19250                                                                  frequency (not data rate).
19251 
19252                                                                  TYP = max(4nCK, 10 ns)
19253 
19254                                                                  0x0 = Reserved.
19255                                                                  0x1 = 3 TCYC.
19256                                                                  ...
19257                                                                  0x6 = 8 TCYC.
19258                                                                  0x7 = 9 TCYC.
19259 
19260                                                                  For DDR4, this is the tRRD_S parameter. */
19261         uint64_t trfc                  : 7;  /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
19262 
19263                                                                  _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
19264 
19265                                                                  where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19266                                                                  frequency (not data rate).
19267 
19268                                                                  TYP = 90-350 ns
19269 
19270                                                                  0x0 = reserved.
19271                                                                  0x1 = 8 TCYC.
19272                                                                  0x2 = 16 TCYC.
19273                                                                  0x3 = 24 TCYC.
19274                                                                  0x4 = 32 TCYC.
19275                                                                  ...
19276                                                                  0x7E = 1008 TCYC.
19277                                                                  0x7F = 1016 TCYC. */
19278         uint64_t twtr                  : 4;  /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
19279 
19280                                                                  _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
19281 
19282                                                                  where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19283                                                                  frequency (not data rate).
19284 
19285                                                                  TYP = max(4nCK, 7.5 ns)
19286 
19287                                                                  For DDR4, this CSR field represents tWTR_S.
19288 
19289                                                                  0x0 = reserved.
19290                                                                  0x1 = 2.
19291                                                                  ...
19292                                                                  0x7 = 8.
19293                                                                  0x8-0xF = reserved. */
19294         uint64_t trcd                  : 4;  /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
19295 
19296                                                                  _ RNDUP[TRCD(ns) / TCYC(ns)]
19297 
19298                                                                  where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19299                                                                  frequency (not data rate).
19300 
19301                                                                  TYP = 10-15 ns
19302 
19303                                                                  0x0 = reserved.
19304                                                                  0x1 = 2 (2 is the smallest value allowed).
19305                                                                  0x2 = 2.
19306                                                                  ...
19307                                                                  0xE = 14.
19308                                                                  0xF = reserved.
19309 
19310                                                                  In 2T mode, make this register TRCD - 1, not going below 2. */
19311         uint64_t tras                  : 6;  /**< [  9:  4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
19312 
19313                                                                  _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
19314 
19315                                                                  where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
19316                                                                  rate).
19317 
19318                                                                  TYP = 35ns - 9 * TREFI
19319 
19320                                                                  0x0 = reserved.
19321                                                                  0x1 = 2 TCYC.
19322                                                                  0x2 = 3 TCYC.
19323                                                                  ...
19324                                                                  0x3F = 64 TCYC. */
19325         uint64_t tmprr                 : 4;  /**< [  3:  0](R/W) Indicates TMPRR constraints. Set this field as follows:
19326 
19327                                                                  _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
19328 
19329                                                                  where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19330                                                                  frequency (not data rate).
19331 
19332                                                                  TYP = 1 nCK */
19333 #else /* Word 0 - Little Endian */
19334         uint64_t tmprr                 : 4;  /**< [  3:  0](R/W) Indicates TMPRR constraints. Set this field as follows:
19335 
19336                                                                  _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
19337 
19338                                                                  where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19339                                                                  frequency (not data rate).
19340 
19341                                                                  TYP = 1 nCK */
19342         uint64_t tras                  : 6;  /**< [  9:  4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
19343 
19344                                                                  _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
19345 
19346                                                                  where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
19347                                                                  rate).
19348 
19349                                                                  TYP = 35ns - 9 * TREFI
19350 
19351                                                                  0x0 = reserved.
19352                                                                  0x1 = 2 TCYC.
19353                                                                  0x2 = 3 TCYC.
19354                                                                  ...
19355                                                                  0x3F = 64 TCYC. */
19356         uint64_t trcd                  : 4;  /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
19357 
19358                                                                  _ RNDUP[TRCD(ns) / TCYC(ns)]
19359 
19360                                                                  where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19361                                                                  frequency (not data rate).
19362 
19363                                                                  TYP = 10-15 ns
19364 
19365                                                                  0x0 = reserved.
19366                                                                  0x1 = 2 (2 is the smallest value allowed).
19367                                                                  0x2 = 2.
19368                                                                  ...
19369                                                                  0xE = 14.
19370                                                                  0xF = reserved.
19371 
19372                                                                  In 2T mode, make this register TRCD - 1, not going below 2. */
19373         uint64_t twtr                  : 4;  /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
19374 
19375                                                                  _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
19376 
19377                                                                  where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19378                                                                  frequency (not data rate).
19379 
19380                                                                  TYP = max(4nCK, 7.5 ns)
19381 
19382                                                                  For DDR4, this CSR field represents tWTR_S.
19383 
19384                                                                  0x0 = reserved.
19385                                                                  0x1 = 2.
19386                                                                  ...
19387                                                                  0x7 = 8.
19388                                                                  0x8-0xF = reserved. */
19389         uint64_t trfc                  : 7;  /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
19390 
19391                                                                  _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
19392 
19393                                                                  where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19394                                                                  frequency (not data rate).
19395 
19396                                                                  TYP = 90-350 ns
19397 
19398                                                                  0x0 = reserved.
19399                                                                  0x1 = 8 TCYC.
19400                                                                  0x2 = 16 TCYC.
19401                                                                  0x3 = 24 TCYC.
19402                                                                  0x4 = 32 TCYC.
19403                                                                  ...
19404                                                                  0x7E = 1008 TCYC.
19405                                                                  0x7F = 1016 TCYC. */
19406         uint64_t trrd                  : 3;  /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
19407 
19408                                                                  _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
19409 
19410                                                                  where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19411                                                                  frequency (not data rate).
19412 
19413                                                                  TYP = max(4nCK, 10 ns)
19414 
19415                                                                  0x0 = Reserved.
19416                                                                  0x1 = 3 TCYC.
19417                                                                  ...
19418                                                                  0x6 = 8 TCYC.
19419                                                                  0x7 = 9 TCYC.
19420 
19421                                                                  For DDR4, this is the tRRD_S parameter. */
19422         uint64_t txp                   : 3;  /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
19423 
19424                                                                  _ RNDUP[TXP(ns) / TCYC(ns)] - 1
19425 
19426                                                                  where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19427                                                                  frequency (not data rate).
19428 
19429                                                                  TYP=max(3nCK, 7.5 ns) */
19430         uint64_t twlmrd                : 4;  /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
19431 
19432                                                                  _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
19433 
19434                                                                  where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19435                                                                  frequency (not data rate).
19436 
19437                                                                  TYP = max(40nCK) */
19438         uint64_t twldqsen              : 4;  /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
19439 
19440                                                                  _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
19441 
19442                                                                  where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19443                                                                  frequency (not data rate).
19444 
19445                                                                  TYP = max(25nCK) */
19446         uint64_t tfaw                  : 5;  /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
19447 
19448                                                                  _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
19449 
19450                                                                  where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19451                                                                  frequency (not data rate).
19452 
19453                                                                  TYP = 30-40 ns
19454 
19455                                                                  Internal:
19456                                                                  When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
19457 
19458                                                                  _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
19459 
19460                                                                  where tFAW_SLR is the Four activate window to the same logical rank from the
19461                                                                  JEDEC DDR4 3D Stacked spec. */
19462         uint64_t txpdll                : 5;  /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
19463 
19464                                                                  _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
19465 
19466                                                                  where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19467                                                                  frequency (not data rate).
19468 
19469                                                                  TYP=max(10nCK, 24 ns) */
19470         uint64_t trfc_dlr              : 7;  /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
19471 
19472                                                                  _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
19473 
19474                                                                  where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
19475                                                                  frequency (not data rate).
19476 
19477                                                                  TYP = 90-120 ns.
19478 
19479                                                                  0x0 = reserved.
19480                                                                  0x1 = 8 TCYC.
19481                                                                  0x2 = 16 TCYC.
19482                                                                  0x3 = 24 TCYC.
19483                                                                  0x4 = 32 TCYC.
19484                                                                  ...
19485                                                                  0x7E = 1008 TCYC.
19486                                                                  0x7F = 1016 TCYC. */
19487         uint64_t tpdm_full_cycle_ena   : 1;  /**< [ 56: 56](R/W) When set, this field enables the addition of a one-cycle delay to the
19488                                                                  write/read latency calculation. This compensates for the case where the
19489                                                                  tPDM delay in the RCD of an RDIMM is greater than one cycle.
19490                                                                  Only valid in RDIMM mode (LMC()_CONTROL[RDIMM_ENA]=1). */
19491         uint64_t trcd_ext              : 1;  /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
19492         uint64_t txp_ext               : 1;  /**< [ 58: 58](R/W) A 1-bit extension to the TXP register
19493                                                                  above. */
19494         uint64_t tstab                 : 5;  /**< [ 63: 59](R/W) Indicates tSTAB constraints. Set this field as follows:
19495 
19496                                                                  _ RNDUP[tSTAB(ns) / (512 * TCYC(ns))]
19497 
19498                                                                  where tSTAB is from the JEDEC DDR4 RCD spec, and TCYC(ns) is the DDR clock
19499                                                                  frequency (not data rate). */
19500 #endif /* Word 0 - End */
19501     } s;
19502     struct bdk_lmcx_timing_params1_cn88xxp1
19503     {
19504 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
19505         uint64_t reserved_59_63        : 5;
19506         uint64_t txp_ext               : 1;  /**< [ 58: 58](RO) Reserved. */
19507         uint64_t trcd_ext              : 1;  /**< [ 57: 57](RO) Reserved. */
19508         uint64_t tpdm_full_cycle_ena   : 1;  /**< [ 56: 56](R/W) When set, this field enables the addition of a one-cycle delay to the
19509                                                                  write/read latency calculation. This compensates for the case where the
19510                                                                  tPDM delay in the RCD of an RDIMM is greater than one cycle.
19511                                                                  Only valid in RDIMM mode (LMC()_CONTROL[RDIMM_ENA]=1). */
19512         uint64_t trfc_dlr              : 7;  /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
19513 
19514                                                                  _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
19515 
19516                                                                  where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
19517                                                                  frequency (not data rate).
19518 
19519                                                                  TYP = 90-120 ns.
19520 
19521                                                                  0x0 = reserved.
19522                                                                  0x1 = 8 TCYC.
19523                                                                  0x2 = 16 TCYC.
19524                                                                  0x3 = 24 TCYC.
19525                                                                  0x4 = 32 TCYC.
19526                                                                  ...
19527                                                                  0x7E = 1008 TCYC.
19528                                                                  0x7F = 1016 TCYC. */
19529         uint64_t txpdll                : 5;  /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
19530 
19531                                                                  _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
19532 
19533                                                                  where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19534                                                                  frequency (not data rate).
19535 
19536                                                                  TYP=max(10nCK, 24 ns) */
19537         uint64_t tfaw                  : 5;  /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
19538 
19539                                                                  _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
19540 
19541                                                                  where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19542                                                                  frequency (not data rate).
19543 
19544                                                                  TYP = 30-40 ns
19545 
19546                                                                  Internal:
19547                                                                  When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
19548 
19549                                                                  _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
19550 
19551                                                                  where tFAW_SLR is the Four activate window to the same logical rank from the
19552                                                                  JEDEC DDR4 3D Stacked spec. */
19553         uint64_t twldqsen              : 4;  /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
19554 
19555                                                                  _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
19556 
19557                                                                  where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19558                                                                  frequency (not data rate).
19559 
19560                                                                  TYP = max(25nCK) */
19561         uint64_t twlmrd                : 4;  /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
19562 
19563                                                                  _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
19564 
19565                                                                  where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19566                                                                  frequency (not data rate).
19567 
19568                                                                  TYP = max(40nCK) */
19569         uint64_t txp                   : 3;  /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
19570 
19571                                                                  _ RNDUP[TXP(ns) / TCYC(ns)] - 1
19572 
19573                                                                  where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19574                                                                  frequency (not data rate).
19575 
19576                                                                  TYP=max(3nCK, 7.5 ns) */
19577         uint64_t trrd                  : 3;  /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
19578 
19579                                                                  _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
19580 
19581                                                                  where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19582                                                                  frequency (not data rate).
19583 
19584                                                                  TYP = max(4nCK, 10 ns)
19585 
19586                                                                  0x0 = Reserved.
19587                                                                  0x1 = 3 TCYC.
19588                                                                  ...
19589                                                                  0x6 = 8 TCYC.
19590                                                                  0x7 = 9 TCYC.
19591 
19592                                                                  For DDR4, this is the tRRD_S parameter. */
19593         uint64_t trfc                  : 7;  /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
19594 
19595                                                                  _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
19596 
19597                                                                  where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19598                                                                  frequency (not data rate).
19599 
19600                                                                  TYP = 90-350 ns
19601 
19602                                                                  0x0 = reserved.
19603                                                                  0x1 = 8 TCYC.
19604                                                                  0x2 = 16 TCYC.
19605                                                                  0x3 = 24 TCYC.
19606                                                                  0x4 = 32 TCYC.
19607                                                                  ...
19608                                                                  0x7E = 1008 TCYC.
19609                                                                  0x7F = 1016 TCYC. */
19610         uint64_t twtr                  : 4;  /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
19611 
19612                                                                  _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
19613 
19614                                                                  where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19615                                                                  frequency (not data rate).
19616 
19617                                                                  TYP = max(4nCK, 7.5 ns)
19618 
19619                                                                  For DDR4, this CSR field represents tWTR_S.
19620 
19621                                                                  0x0 = reserved.
19622                                                                  0x1 = 2.
19623                                                                  ...
19624                                                                  0x7 = 8.
19625                                                                  0x8-0xF = reserved. */
19626         uint64_t trcd                  : 4;  /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
19627 
19628                                                                  _ RNDUP[TRCD(ns) / TCYC(ns)]
19629 
19630                                                                  where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19631                                                                  frequency (not data rate).
19632 
19633                                                                  TYP = 10-15 ns
19634 
19635                                                                  0x0 = reserved.
19636                                                                  0x1 = 2 (2 is the smallest value allowed).
19637                                                                  0x2 = 2.
19638                                                                  ...
19639                                                                  0xE = 14.
19640                                                                  0xF = reserved.
19641 
19642                                                                  In 2T mode, make this register TRCD - 1, not going below 2. */
19643         uint64_t tras                  : 6;  /**< [  9:  4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
19644 
19645                                                                  _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
19646 
19647                                                                  where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
19648                                                                  rate).
19649 
19650                                                                  TYP = 35ns - 9 * TREFI
19651 
19652                                                                  0x0 = reserved.
19653                                                                  0x1 = 2 TCYC.
19654                                                                  0x2 = 3 TCYC.
19655                                                                  ...
19656                                                                  0x3F = 64 TCYC. */
19657         uint64_t tmprr                 : 4;  /**< [  3:  0](R/W) Indicates TMPRR constraints. Set this field as follows:
19658 
19659                                                                  _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
19660 
19661                                                                  where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19662                                                                  frequency (not data rate).
19663 
19664                                                                  TYP = 1 nCK */
19665 #else /* Word 0 - Little Endian */
19666         uint64_t tmprr                 : 4;  /**< [  3:  0](R/W) Indicates TMPRR constraints. Set this field as follows:
19667 
19668                                                                  _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
19669 
19670                                                                  where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19671                                                                  frequency (not data rate).
19672 
19673                                                                  TYP = 1 nCK */
19674         uint64_t tras                  : 6;  /**< [  9:  4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
19675 
19676                                                                  _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
19677 
19678                                                                  where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
19679                                                                  rate).
19680 
19681                                                                  TYP = 35ns - 9 * TREFI
19682 
19683                                                                  0x0 = reserved.
19684                                                                  0x1 = 2 TCYC.
19685                                                                  0x2 = 3 TCYC.
19686                                                                  ...
19687                                                                  0x3F = 64 TCYC. */
19688         uint64_t trcd                  : 4;  /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
19689 
19690                                                                  _ RNDUP[TRCD(ns) / TCYC(ns)]
19691 
19692                                                                  where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19693                                                                  frequency (not data rate).
19694 
19695                                                                  TYP = 10-15 ns
19696 
19697                                                                  0x0 = reserved.
19698                                                                  0x1 = 2 (2 is the smallest value allowed).
19699                                                                  0x2 = 2.
19700                                                                  ...
19701                                                                  0xE = 14.
19702                                                                  0xF = reserved.
19703 
19704                                                                  In 2T mode, make this register TRCD - 1, not going below 2. */
19705         uint64_t twtr                  : 4;  /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
19706 
19707                                                                  _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
19708 
19709                                                                  where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19710                                                                  frequency (not data rate).
19711 
19712                                                                  TYP = max(4nCK, 7.5 ns)
19713 
19714                                                                  For DDR4, this CSR field represents tWTR_S.
19715 
19716                                                                  0x0 = reserved.
19717                                                                  0x1 = 2.
19718                                                                  ...
19719                                                                  0x7 = 8.
19720                                                                  0x8-0xF = reserved. */
19721         uint64_t trfc                  : 7;  /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
19722 
19723                                                                  _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
19724 
19725                                                                  where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19726                                                                  frequency (not data rate).
19727 
19728                                                                  TYP = 90-350 ns
19729 
19730                                                                  0x0 = reserved.
19731                                                                  0x1 = 8 TCYC.
19732                                                                  0x2 = 16 TCYC.
19733                                                                  0x3 = 24 TCYC.
19734                                                                  0x4 = 32 TCYC.
19735                                                                  ...
19736                                                                  0x7E = 1008 TCYC.
19737                                                                  0x7F = 1016 TCYC. */
19738         uint64_t trrd                  : 3;  /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
19739 
19740                                                                  _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
19741 
19742                                                                  where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19743                                                                  frequency (not data rate).
19744 
19745                                                                  TYP = max(4nCK, 10 ns)
19746 
19747                                                                  0x0 = Reserved.
19748                                                                  0x1 = 3 TCYC.
19749                                                                  ...
19750                                                                  0x6 = 8 TCYC.
19751                                                                  0x7 = 9 TCYC.
19752 
19753                                                                  For DDR4, this is the tRRD_S parameter. */
19754         uint64_t txp                   : 3;  /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
19755 
19756                                                                  _ RNDUP[TXP(ns) / TCYC(ns)] - 1
19757 
19758                                                                  where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19759                                                                  frequency (not data rate).
19760 
19761                                                                  TYP=max(3nCK, 7.5 ns) */
19762         uint64_t twlmrd                : 4;  /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
19763 
19764                                                                  _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
19765 
19766                                                                  where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19767                                                                  frequency (not data rate).
19768 
19769                                                                  TYP = max(40nCK) */
19770         uint64_t twldqsen              : 4;  /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
19771 
19772                                                                  _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
19773 
19774                                                                  where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19775                                                                  frequency (not data rate).
19776 
19777                                                                  TYP = max(25nCK) */
19778         uint64_t tfaw                  : 5;  /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
19779 
19780                                                                  _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
19781 
19782                                                                  where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19783                                                                  frequency (not data rate).
19784 
19785                                                                  TYP = 30-40 ns
19786 
19787                                                                  Internal:
19788                                                                  When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
19789 
19790                                                                  _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
19791 
19792                                                                  where tFAW_SLR is the Four activate window to the same logical rank from the
19793                                                                  JEDEC DDR4 3D Stacked spec. */
19794         uint64_t txpdll                : 5;  /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
19795 
19796                                                                  _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
19797 
19798                                                                  where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
19799                                                                  frequency (not data rate).
19800 
19801                                                                  TYP=max(10nCK, 24 ns) */
19802         uint64_t trfc_dlr              : 7;  /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
19803 
19804                                                                  _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
19805 
19806                                                                  where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
19807                                                                  frequency (not data rate).
19808 
19809                                                                  TYP = 90-120 ns.
19810 
19811                                                                  0x0 = reserved.
19812                                                                  0x1 = 8 TCYC.
19813                                                                  0x2 = 16 TCYC.
19814                                                                  0x3 = 24 TCYC.
19815                                                                  0x4 = 32 TCYC.
19816                                                                  ...
19817                                                                  0x7E = 1008 TCYC.
19818                                                                  0x7F = 1016 TCYC. */
19819         uint64_t tpdm_full_cycle_ena   : 1;  /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
19820                                                                  write/read latency calculation. This compensates for the case where the
19821                                                                  tPDM delay in the RCD of an RDIMM is greater than one cycle.
19822                                                                  Only valid in RDIMM mode (LMC()_CONTROL[RDIMM_ENA]=1). */
19823         uint64_t trcd_ext              : 1;  /**< [ 57: 57](RO) Reserved. */
19824         uint64_t txp_ext               : 1;  /**< [ 58: 58](RO) Reserved. */
19825         uint64_t reserved_59_63        : 5;
19826 #endif /* Word 0 - End */
19827     } cn88xxp1;
19828     struct bdk_lmcx_timing_params1_cn9
19829     {
19830 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
19831         uint64_t tstab                 : 5;  /**< [ 63: 59](R/W) Indicates tSTAB constraints. Set this field as follows:
19832 
19833                                                                  _ RNDUP[tSTAB(ns) / (512 * TCYC(ns))]
19834 
19835                                                                  where tSTAB is from the JEDEC DDR4 RCD spec, and TCYC(ns) is the DDR clock
19836                                                                  frequency (not data rate). */
19837         uint64_t txp_ext               : 1;  /**< [ 58: 58](R/W) A 1-bit extension to the TXP register
19838                                                                  above. */
19839         uint64_t trcd_ext              : 1;  /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
19840         uint64_t tpdm_full_cycle_ena   : 1;  /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
19841                                                                  write/read latency calculation. This compensates for the case where the
19842                                                                  tPDM delay in the RCD of an RDIMM is greater than one cycle.
19843                                                                  Only valid in RDIMM mode (LMC()_CONTROL[RDIMM_ENA]=1). */
19844         uint64_t trfc_dlr              : 7;  /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
19845 
19846                                                                  _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
19847 
19848                                                                  where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
19849                                                                  frequency (not data rate).
19850 
19851                                                                  TYP = 90-120 ns.
19852 
19853                                                                  0x0 = reserved.
19854                                                                  0x1 = 8 TCYC.
19855                                                                  0x2 = 16 TCYC.
19856                                                                  0x3 = 24 TCYC.
19857                                                                  0x4 = 32 TCYC.
19858                                                                  ...
19859                                                                  0x7E = 1008 TCYC.
19860                                                                  0x7F = 1016 TCYC. */
19861         uint64_t txpdll                : 5;  /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
19862 
19863                                                                  _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
19864 
19865                                                                  where TXPDLL is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19866                                                                  frequency (not data rate).
19867 
19868                                                                  TYP=max(10nCK, 24 ns) */
19869         uint64_t tfaw                  : 5;  /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
19870 
19871                                                                  _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
19872 
19873                                                                  where TFAW is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19874                                                                  frequency (not data rate).
19875 
19876                                                                  TYP = 30-40 ns
19877 
19878                                                                  Internal:
19879                                                                  When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
19880 
19881                                                                  _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
19882 
19883                                                                  where tFAW_SLR is the Four activate window to the same logical rank from the
19884                                                                  JEDEC DDR4 3D Stacked spec. */
19885         uint64_t twldqsen              : 4;  /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
19886 
19887                                                                  _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
19888 
19889                                                                  where TWLDQSEN is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19890                                                                  frequency (not data rate).
19891 
19892                                                                  TYP = max(25nCK) */
19893         uint64_t twlmrd                : 4;  /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
19894 
19895                                                                  _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
19896 
19897                                                                  where TWLMRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19898                                                                  frequency (not data rate).
19899 
19900                                                                  TYP = max(40nCK) */
19901         uint64_t txp                   : 3;  /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
19902 
19903                                                                  _ RNDUP[TXP(ns) / TCYC(ns)] - 1
19904 
19905                                                                  where TXP is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19906                                                                  frequency (not data rate).
19907 
19908                                                                  TYP=max(3nCK, 7.5 ns) */
19909         uint64_t trrd                  : 3;  /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
19910 
19911                                                                  _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
19912 
19913                                                                  where TRRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19914                                                                  frequency (not data rate).
19915 
19916                                                                  TYP = max(4nCK, 10 ns)
19917 
19918                                                                  0x0 = Reserved.
19919                                                                  0x1 = 3 TCYC.
19920                                                                  ...
19921                                                                  0x6 = 8 TCYC.
19922                                                                  0x7 = 9 TCYC.
19923 
19924                                                                  For DDR4, this is the tRRD_S parameter. */
19925         uint64_t trfc                  : 7;  /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
19926 
19927                                                                  _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
19928 
19929                                                                  where TRFC is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19930                                                                  frequency (not data rate).
19931 
19932                                                                  TYP = 90-350 ns
19933 
19934                                                                  0x0 = reserved.
19935                                                                  0x1 = 8 TCYC.
19936                                                                  0x2 = 16 TCYC.
19937                                                                  0x3 = 24 TCYC.
19938                                                                  0x4 = 32 TCYC.
19939                                                                  ...
19940                                                                  0x7E = 1008 TCYC.
19941                                                                  0x7F = 1016 TCYC. */
19942         uint64_t twtr                  : 4;  /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
19943 
19944                                                                  _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
19945 
19946                                                                  where TWTR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19947                                                                  frequency (not data rate).
19948 
19949                                                                  TYP = max(4nCK, 7.5 ns)
19950 
19951                                                                  For DDR4, this CSR field represents tWTR_S.
19952 
19953                                                                  0x0 = reserved.
19954                                                                  0x1 = 2.
19955                                                                  ...
19956                                                                  0x7 = 8.
19957                                                                  0x8-0xF = reserved. */
19958         uint64_t trcd                  : 4;  /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
19959 
19960                                                                  _ RNDUP[TRCD(ns) / TCYC(ns)]
19961 
19962                                                                  where TRCD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19963                                                                  frequency (not data rate).
19964 
19965                                                                  TYP = 10-15 ns
19966 
19967                                                                  0x0 = reserved.
19968                                                                  0x1 = 2 (2 is the smallest value allowed).
19969                                                                  0x2 = 2.
19970                                                                  ...
19971                                                                  0xE = 14.
19972                                                                  0xF = reserved.
19973 
19974                                                                  In 2T mode, set this field to TRCD - 1, but do not go below 2. */
19975         uint64_t tras                  : 6;  /**< [  9:  4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
19976 
19977                                                                  _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
19978 
19979                                                                  where TRAS is from the DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
19980                                                                  rate).
19981 
19982                                                                  TYP = 35 ns to 9 * TREFI
19983 
19984                                                                  0x0 = reserved.
19985                                                                  0x1 = 2 TCYC.
19986                                                                  0x2 = 3 TCYC.
19987                                                                  ...
19988                                                                  0x3F = 64 TCYC. */
19989         uint64_t tmprr                 : 4;  /**< [  3:  0](R/W) Indicates TMPRR constraints. Set this field as follows:
19990 
19991                                                                  _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
19992 
19993                                                                  where TMPRR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
19994                                                                  frequency (not data rate).
19995 
19996                                                                  TYP = 1 nCK */
19997 #else /* Word 0 - Little Endian */
19998         uint64_t tmprr                 : 4;  /**< [  3:  0](R/W) Indicates TMPRR constraints. Set this field as follows:
19999 
20000                                                                  _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
20001 
20002                                                                  where TMPRR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20003                                                                  frequency (not data rate).
20004 
20005                                                                  TYP = 1 nCK */
20006         uint64_t tras                  : 6;  /**< [  9:  4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
20007 
20008                                                                  _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
20009 
20010                                                                  where TRAS is from the DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
20011                                                                  rate).
20012 
20013                                                                  TYP = 35 ns to 9 * TREFI
20014 
20015                                                                  0x0 = reserved.
20016                                                                  0x1 = 2 TCYC.
20017                                                                  0x2 = 3 TCYC.
20018                                                                  ...
20019                                                                  0x3F = 64 TCYC. */
20020         uint64_t trcd                  : 4;  /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
20021 
20022                                                                  _ RNDUP[TRCD(ns) / TCYC(ns)]
20023 
20024                                                                  where TRCD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20025                                                                  frequency (not data rate).
20026 
20027                                                                  TYP = 10-15 ns
20028 
20029                                                                  0x0 = reserved.
20030                                                                  0x1 = 2 (2 is the smallest value allowed).
20031                                                                  0x2 = 2.
20032                                                                  ...
20033                                                                  0xE = 14.
20034                                                                  0xF = reserved.
20035 
20036                                                                  In 2T mode, set this field to TRCD - 1, but do not go below 2. */
20037         uint64_t twtr                  : 4;  /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
20038 
20039                                                                  _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
20040 
20041                                                                  where TWTR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20042                                                                  frequency (not data rate).
20043 
20044                                                                  TYP = max(4nCK, 7.5 ns)
20045 
20046                                                                  For DDR4, this CSR field represents tWTR_S.
20047 
20048                                                                  0x0 = reserved.
20049                                                                  0x1 = 2.
20050                                                                  ...
20051                                                                  0x7 = 8.
20052                                                                  0x8-0xF = reserved. */
20053         uint64_t trfc                  : 7;  /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
20054 
20055                                                                  _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
20056 
20057                                                                  where TRFC is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20058                                                                  frequency (not data rate).
20059 
20060                                                                  TYP = 90-350 ns
20061 
20062                                                                  0x0 = reserved.
20063                                                                  0x1 = 8 TCYC.
20064                                                                  0x2 = 16 TCYC.
20065                                                                  0x3 = 24 TCYC.
20066                                                                  0x4 = 32 TCYC.
20067                                                                  ...
20068                                                                  0x7E = 1008 TCYC.
20069                                                                  0x7F = 1016 TCYC. */
20070         uint64_t trrd                  : 3;  /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
20071 
20072                                                                  _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
20073 
20074                                                                  where TRRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20075                                                                  frequency (not data rate).
20076 
20077                                                                  TYP = max(4nCK, 10 ns)
20078 
20079                                                                  0x0 = Reserved.
20080                                                                  0x1 = 3 TCYC.
20081                                                                  ...
20082                                                                  0x6 = 8 TCYC.
20083                                                                  0x7 = 9 TCYC.
20084 
20085                                                                  For DDR4, this is the tRRD_S parameter. */
20086         uint64_t txp                   : 3;  /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
20087 
20088                                                                  _ RNDUP[TXP(ns) / TCYC(ns)] - 1
20089 
20090                                                                  where TXP is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20091                                                                  frequency (not data rate).
20092 
20093                                                                  TYP=max(3nCK, 7.5 ns) */
20094         uint64_t twlmrd                : 4;  /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
20095 
20096                                                                  _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
20097 
20098                                                                  where TWLMRD is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20099                                                                  frequency (not data rate).
20100 
20101                                                                  TYP = max(40nCK) */
20102         uint64_t twldqsen              : 4;  /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
20103 
20104                                                                  _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
20105 
20106                                                                  where TWLDQSEN is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20107                                                                  frequency (not data rate).
20108 
20109                                                                  TYP = max(25nCK) */
20110         uint64_t tfaw                  : 5;  /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
20111 
20112                                                                  _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
20113 
20114                                                                  where TFAW is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20115                                                                  frequency (not data rate).
20116 
20117                                                                  TYP = 30-40 ns
20118 
20119                                                                  Internal:
20120                                                                  When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
20121 
20122                                                                  _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
20123 
20124                                                                  where tFAW_SLR is the Four activate window to the same logical rank from the
20125                                                                  JEDEC DDR4 3D Stacked spec. */
20126         uint64_t txpdll                : 5;  /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
20127 
20128                                                                  _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
20129 
20130                                                                  where TXPDLL is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20131                                                                  frequency (not data rate).
20132 
20133                                                                  TYP=max(10nCK, 24 ns) */
20134         uint64_t trfc_dlr              : 7;  /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
20135 
20136                                                                  _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
20137 
20138                                                                  where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
20139                                                                  frequency (not data rate).
20140 
20141                                                                  TYP = 90-120 ns.
20142 
20143                                                                  0x0 = reserved.
20144                                                                  0x1 = 8 TCYC.
20145                                                                  0x2 = 16 TCYC.
20146                                                                  0x3 = 24 TCYC.
20147                                                                  0x4 = 32 TCYC.
20148                                                                  ...
20149                                                                  0x7E = 1008 TCYC.
20150                                                                  0x7F = 1016 TCYC. */
20151         uint64_t tpdm_full_cycle_ena   : 1;  /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
20152                                                                  write/read latency calculation. This compensates for the case where the
20153                                                                  tPDM delay in the RCD of an RDIMM is greater than one cycle.
20154                                                                  Only valid in RDIMM mode (LMC()_CONTROL[RDIMM_ENA]=1). */
20155         uint64_t trcd_ext              : 1;  /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
20156         uint64_t txp_ext               : 1;  /**< [ 58: 58](R/W) A 1-bit extension to the TXP register
20157                                                                  above. */
20158         uint64_t tstab                 : 5;  /**< [ 63: 59](R/W) Indicates tSTAB constraints. Set this field as follows:
20159 
20160                                                                  _ RNDUP[tSTAB(ns) / (512 * TCYC(ns))]
20161 
20162                                                                  where tSTAB is from the JEDEC DDR4 RCD spec, and TCYC(ns) is the DDR clock
20163                                                                  frequency (not data rate). */
20164 #endif /* Word 0 - End */
20165     } cn9;
20166     struct bdk_lmcx_timing_params1_cn81xx
20167     {
20168 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
20169         uint64_t reserved_59_63        : 5;
20170         uint64_t txp_ext               : 1;  /**< [ 58: 58](R/W) A 1-bit extension to the TXP register
20171                                                                  above. */
20172         uint64_t trcd_ext              : 1;  /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
20173         uint64_t tpdm_full_cycle_ena   : 1;  /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
20174                                                                  write/read latency calculation. This compensates for the case where the
20175                                                                  tPDM delay in the RCD of an RDIMM is greater than one cycle.
20176                                                                  Only valid in RDIMM mode (LMC()_CONTROL[RDIMM_ENA]=1). */
20177         uint64_t trfc_dlr              : 7;  /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
20178 
20179                                                                  _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
20180 
20181                                                                  where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
20182                                                                  frequency (not data rate).
20183 
20184                                                                  TYP = 90-120 ns.
20185 
20186                                                                  0x0 = reserved.
20187                                                                  0x1 = 8 TCYC.
20188                                                                  0x2 = 16 TCYC.
20189                                                                  0x3 = 24 TCYC.
20190                                                                  0x4 = 32 TCYC.
20191                                                                  ...
20192                                                                  0x7E = 1008 TCYC.
20193                                                                  0x7F = 1016 TCYC. */
20194         uint64_t txpdll                : 5;  /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
20195 
20196                                                                  _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
20197 
20198                                                                  where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20199                                                                  frequency (not data rate).
20200 
20201                                                                  TYP=max(10nCK, 24 ns) */
20202         uint64_t tfaw                  : 5;  /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
20203 
20204                                                                  _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
20205 
20206                                                                  where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20207                                                                  frequency (not data rate).
20208 
20209                                                                  TYP = 30-40 ns
20210 
20211                                                                  Internal:
20212                                                                  When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
20213 
20214                                                                  _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
20215 
20216                                                                  where tFAW_SLR is the Four activate window to the same logical rank from the
20217                                                                  JEDEC DDR4 3D Stacked spec. */
20218         uint64_t twldqsen              : 4;  /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
20219 
20220                                                                  _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
20221 
20222                                                                  where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20223                                                                  frequency (not data rate).
20224 
20225                                                                  TYP = max(25nCK) */
20226         uint64_t twlmrd                : 4;  /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
20227 
20228                                                                  _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
20229 
20230                                                                  where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20231                                                                  frequency (not data rate).
20232 
20233                                                                  TYP = max(40nCK) */
20234         uint64_t txp                   : 3;  /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
20235 
20236                                                                  _ RNDUP[TXP(ns) / TCYC(ns)] - 1
20237 
20238                                                                  where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20239                                                                  frequency (not data rate).
20240 
20241                                                                  TYP=max(3nCK, 7.5 ns) */
20242         uint64_t trrd                  : 3;  /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
20243 
20244                                                                  _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
20245 
20246                                                                  where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20247                                                                  frequency (not data rate).
20248 
20249                                                                  TYP = max(4nCK, 10 ns)
20250 
20251                                                                  0x0 = Reserved.
20252                                                                  0x1 = 3 TCYC.
20253                                                                  ...
20254                                                                  0x6 = 8 TCYC.
20255                                                                  0x7 = 9 TCYC.
20256 
20257                                                                  For DDR4, this is the tRRD_S parameter. */
20258         uint64_t trfc                  : 7;  /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
20259 
20260                                                                  _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
20261 
20262                                                                  where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20263                                                                  frequency (not data rate).
20264 
20265                                                                  TYP = 90-350 ns
20266 
20267                                                                  0x0 = reserved.
20268                                                                  0x1 = 8 TCYC.
20269                                                                  0x2 = 16 TCYC.
20270                                                                  0x3 = 24 TCYC.
20271                                                                  0x4 = 32 TCYC.
20272                                                                  ...
20273                                                                  0x7E = 1008 TCYC.
20274                                                                  0x7F = 1016 TCYC. */
20275         uint64_t twtr                  : 4;  /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
20276 
20277                                                                  _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
20278 
20279                                                                  where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20280                                                                  frequency (not data rate).
20281 
20282                                                                  TYP = max(4nCK, 7.5 ns)
20283 
20284                                                                  For DDR4, this CSR field represents tWTR_S.
20285 
20286                                                                  0x0 = reserved.
20287                                                                  0x1 = 2.
20288                                                                  ...
20289                                                                  0x7 = 8.
20290                                                                  0x8-0xF = reserved. */
20291         uint64_t trcd                  : 4;  /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
20292 
20293                                                                  _ RNDUP[TRCD(ns) / TCYC(ns)]
20294 
20295                                                                  where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20296                                                                  frequency (not data rate).
20297 
20298                                                                  TYP = 10-15 ns
20299 
20300                                                                  0x0 = reserved.
20301                                                                  0x1 = 2 (2 is the smallest value allowed).
20302                                                                  0x2 = 2.
20303                                                                  ...
20304                                                                  0xE = 14.
20305                                                                  0xF = reserved.
20306 
20307                                                                  In 2T mode, set this field to TRCD - 1, but do not go below 2. */
20308         uint64_t tras                  : 6;  /**< [  9:  4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
20309 
20310                                                                  _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
20311 
20312                                                                  where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
20313                                                                  rate).
20314 
20315                                                                  TYP = 35 ns to 9 * TREFI
20316 
20317                                                                  0x0 = reserved.
20318                                                                  0x1 = 2 TCYC.
20319                                                                  0x2 = 3 TCYC.
20320                                                                  ...
20321                                                                  0x3F = 64 TCYC. */
20322         uint64_t tmprr                 : 4;  /**< [  3:  0](R/W) Indicates TMPRR constraints. Set this field as follows:
20323 
20324                                                                  _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
20325 
20326                                                                  where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20327                                                                  frequency (not data rate).
20328 
20329                                                                  TYP = 1 nCK */
20330 #else /* Word 0 - Little Endian */
20331         uint64_t tmprr                 : 4;  /**< [  3:  0](R/W) Indicates TMPRR constraints. Set this field as follows:
20332 
20333                                                                  _ RNDUP[TMPRR(ns) / TCYC(ns)] - 1
20334 
20335                                                                  where TMPRR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20336                                                                  frequency (not data rate).
20337 
20338                                                                  TYP = 1 nCK */
20339         uint64_t tras                  : 6;  /**< [  9:  4](R/W) Indicates TRAS constraints. Set TRAS (CSR field) as follows:
20340 
20341                                                                  _ RNDUP[TRAS(ns)/TCYC(ns)] - 1,
20342 
20343                                                                  where TRAS is from the DDR3/DDR4 spec, and TCYC(ns) is the DDR clock frequency (not data
20344                                                                  rate).
20345 
20346                                                                  TYP = 35 ns to 9 * TREFI
20347 
20348                                                                  0x0 = reserved.
20349                                                                  0x1 = 2 TCYC.
20350                                                                  0x2 = 3 TCYC.
20351                                                                  ...
20352                                                                  0x3F = 64 TCYC. */
20353         uint64_t trcd                  : 4;  /**< [ 13: 10](R/W) Indicates TRCD constraints. Set this field as follows:
20354 
20355                                                                  _ RNDUP[TRCD(ns) / TCYC(ns)]
20356 
20357                                                                  where TRCD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20358                                                                  frequency (not data rate).
20359 
20360                                                                  TYP = 10-15 ns
20361 
20362                                                                  0x0 = reserved.
20363                                                                  0x1 = 2 (2 is the smallest value allowed).
20364                                                                  0x2 = 2.
20365                                                                  ...
20366                                                                  0xE = 14.
20367                                                                  0xF = reserved.
20368 
20369                                                                  In 2T mode, set this field to TRCD - 1, but do not go below 2. */
20370         uint64_t twtr                  : 4;  /**< [ 17: 14](R/W) Indicates TWTR constraints. Set this field as follows:
20371 
20372                                                                  _ RNDUP[TWTR(ns) / TCYC(ns)] - 1
20373 
20374                                                                  where TWTR is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20375                                                                  frequency (not data rate).
20376 
20377                                                                  TYP = max(4nCK, 7.5 ns)
20378 
20379                                                                  For DDR4, this CSR field represents tWTR_S.
20380 
20381                                                                  0x0 = reserved.
20382                                                                  0x1 = 2.
20383                                                                  ...
20384                                                                  0x7 = 8.
20385                                                                  0x8-0xF = reserved. */
20386         uint64_t trfc                  : 7;  /**< [ 24: 18](R/W) Indicates TRFC constraints. Set this field as follows:
20387 
20388                                                                  _ RNDUP[TRFC(ns) / (8 * TCYC(ns))]
20389 
20390                                                                  where TRFC is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20391                                                                  frequency (not data rate).
20392 
20393                                                                  TYP = 90-350 ns
20394 
20395                                                                  0x0 = reserved.
20396                                                                  0x1 = 8 TCYC.
20397                                                                  0x2 = 16 TCYC.
20398                                                                  0x3 = 24 TCYC.
20399                                                                  0x4 = 32 TCYC.
20400                                                                  ...
20401                                                                  0x7E = 1008 TCYC.
20402                                                                  0x7F = 1016 TCYC. */
20403         uint64_t trrd                  : 3;  /**< [ 27: 25](R/W) Indicates TRRD constraints. Set this field as follows:
20404 
20405                                                                  _ RNDUP[TRRD(ns) / TCYC(ns)] - 2,
20406 
20407                                                                  where TRRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20408                                                                  frequency (not data rate).
20409 
20410                                                                  TYP = max(4nCK, 10 ns)
20411 
20412                                                                  0x0 = Reserved.
20413                                                                  0x1 = 3 TCYC.
20414                                                                  ...
20415                                                                  0x6 = 8 TCYC.
20416                                                                  0x7 = 9 TCYC.
20417 
20418                                                                  For DDR4, this is the tRRD_S parameter. */
20419         uint64_t txp                   : 3;  /**< [ 30: 28](R/W) Indicates TXP constraints. Set this field as follows:
20420 
20421                                                                  _ RNDUP[TXP(ns) / TCYC(ns)] - 1
20422 
20423                                                                  where TXP is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20424                                                                  frequency (not data rate).
20425 
20426                                                                  TYP=max(3nCK, 7.5 ns) */
20427         uint64_t twlmrd                : 4;  /**< [ 34: 31](R/W) Indicates TWLMRD constraints. Set this field as follows:
20428 
20429                                                                  _ RNDUP[TWLMRD(ns) / (4 * TCYC(ns))]
20430 
20431                                                                  where TWLMRD is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20432                                                                  frequency (not data rate).
20433 
20434                                                                  TYP = max(40nCK) */
20435         uint64_t twldqsen              : 4;  /**< [ 38: 35](R/W) Indicates TWLDQSEN constraints. Set this field as follows:
20436 
20437                                                                  _ RNDUP[TWLDQSEN(ns) / (4 * TCYC(ns))]
20438 
20439                                                                  where TWLDQSEN is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20440                                                                  frequency (not data rate).
20441 
20442                                                                  TYP = max(25nCK) */
20443         uint64_t tfaw                  : 5;  /**< [ 43: 39](R/W) Indicates TFAW constraints. Set this field as follows:
20444 
20445                                                                  _ RNDUP[TFAW(ns) / (4 * TCYC(ns))]
20446 
20447                                                                  where TFAW is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20448                                                                  frequency (not data rate).
20449 
20450                                                                  TYP = 30-40 ns
20451 
20452                                                                  Internal:
20453                                                                  When interfacing with DIMMs that contain 3DS DRAMs, set this field as follows:
20454 
20455                                                                  _ RNDUP[tFAW_SLR(ns) / (4 * TCYC(ns))]
20456 
20457                                                                  where tFAW_SLR is the Four activate window to the same logical rank from the
20458                                                                  JEDEC DDR4 3D Stacked spec. */
20459         uint64_t txpdll                : 5;  /**< [ 48: 44](R/W) Indicates TXPDLL constraints. Set this field as follows:
20460 
20461                                                                  _ RNDUP[TXPDLL(ns) / TCYC(ns)] - 1
20462 
20463                                                                  where TXPDLL is from the JEDEC DDR3/DDR4 spec, and TCYC(ns) is the DDR clock
20464                                                                  frequency (not data rate).
20465 
20466                                                                  TYP=max(10nCK, 24 ns) */
20467         uint64_t trfc_dlr              : 7;  /**< [ 55: 49](R/W) Indicates tRFC_DLR constraints. Set this field as follows:
20468 
20469                                                                  _ RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]
20470 
20471                                                                  where tRFC_DLR is from the JEDEC 3D stacked SDRAM spec, and TCYC(ns) is the DDR clock
20472                                                                  frequency (not data rate).
20473 
20474                                                                  TYP = 90-120 ns.
20475 
20476                                                                  0x0 = reserved.
20477                                                                  0x1 = 8 TCYC.
20478                                                                  0x2 = 16 TCYC.
20479                                                                  0x3 = 24 TCYC.
20480                                                                  0x4 = 32 TCYC.
20481                                                                  ...
20482                                                                  0x7E = 1008 TCYC.
20483                                                                  0x7F = 1016 TCYC. */
20484         uint64_t tpdm_full_cycle_ena   : 1;  /**< [ 56: 56](R/W) When set, this field enables the addition of a one cycle delay to the
20485                                                                  write/read latency calculation. This compensates for the case where the
20486                                                                  tPDM delay in the RCD of an RDIMM is greater than one cycle.
20487                                                                  Only valid in RDIMM mode (LMC()_CONTROL[RDIMM_ENA]=1). */
20488         uint64_t trcd_ext              : 1;  /**< [ 57: 57](R/W) A 1-bit extension to the TRCD register. */
20489         uint64_t txp_ext               : 1;  /**< [ 58: 58](R/W) A 1-bit extension to the TXP register
20490                                                                  above. */
20491         uint64_t reserved_59_63        : 5;
20492 #endif /* Word 0 - End */
20493     } cn81xx;
20494     /* struct bdk_lmcx_timing_params1_cn81xx cn83xx; */
20495     /* struct bdk_lmcx_timing_params1_cn81xx cn88xxp2; */
20496 };
20497 typedef union bdk_lmcx_timing_params1 bdk_lmcx_timing_params1_t;
20498 
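/* Editor's note -- illustrative sketch, not part of the auto-generated
 * definitions: every timing field above is encoded with the same
 * RNDUP[t(ns) / (divisor * TCYC(ns))] - bias pattern, where TCYC(ns) is read
 * here as the DDR clock period (half the data rate).  The helper names below
 * and the use of picoseconds are assumptions made for illustration only.
 * Worked example: an 8 Gb DDR4 device has tRFC1 = 350 ns; at DDR4-2133
 * (TCYC = 0.9375 ns) the TRFC encoding is RNDUP[350 / (8 * 0.9375)] = 47
 * = 0x2F, i.e. 376 TCYC, which matches the TRFC table above. */
static inline uint64_t __bdk_lmc_rndup(uint64_t num, uint64_t den)
{
    return (num + den - 1) / den; /* round-up division, as in RNDUP[] above */
}

static inline uint64_t __bdk_lmc_timing_encode(uint64_t t_ps, uint64_t tcyc_ps,
                                               uint64_t divisor, uint64_t bias)
{
    /* TWTR: divisor=1, bias=1.  TRFC/TRFC_DLR: divisor=8, bias=0.
     * TFAW/TWLMRD/TWLDQSEN: divisor=4, bias=0.  Callers are expected to stay
     * within each field's documented range. */
    return __bdk_lmc_rndup(t_ps, divisor * tcyc_ps) - bias;
}
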
20499 static inline uint64_t BDK_LMCX_TIMING_PARAMS1(unsigned long a) __attribute__ ((pure, always_inline));
20500 static inline uint64_t BDK_LMCX_TIMING_PARAMS1(unsigned long a)
20501 {
20502     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
20503         return 0x87e0880001a0ll + 0x1000000ll * ((a) & 0x0);
20504     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
20505         return 0x87e0880001a0ll + 0x1000000ll * ((a) & 0x1);
20506     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
20507         return 0x87e0880001a0ll + 0x1000000ll * ((a) & 0x3);
20508     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
20509         return 0x87e0880001a0ll + 0x1000000ll * ((a) & 0x3);
20510     __bdk_csr_fatal("LMCX_TIMING_PARAMS1", 1, a, 0, 0, 0);
20511 }
20512 
20513 #define typedef_BDK_LMCX_TIMING_PARAMS1(a) bdk_lmcx_timing_params1_t
20514 #define bustype_BDK_LMCX_TIMING_PARAMS1(a) BDK_CSR_TYPE_RSL
20515 #define basename_BDK_LMCX_TIMING_PARAMS1(a) "LMCX_TIMING_PARAMS1"
20516 #define device_bar_BDK_LMCX_TIMING_PARAMS1(a) 0x0 /* PF_BAR0 */
20517 #define busnum_BDK_LMCX_TIMING_PARAMS1(a) (a)
20518 #define arguments_BDK_LMCX_TIMING_PARAMS1(a) (a),-1,-1,-1
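/* Editor's note: the following helper is an illustrative sketch, not part of the
 * auto-generated CSR definitions above. It shows one way to compute the
 * LMC()_TIMING_PARAMS1[TRFC_DLR] encoding described above,
 * RNDUP[tRFC_DLR(ns) / (8 * TCYC(ns))]. The helper name and the use of
 * picosecond inputs are assumptions made for this example only. */
static inline uint64_t example_encode_trfc_dlr(uint64_t trfc_dlr_ps, uint64_t tcyc_ps)
{
    /* Integer round-up of tRFC_DLR / (8 * TCYC); e.g. for TCYC = 833 ps
     * (~1.2 GHz CK) and tRFC_DLR = 90 ns (90000 ps) this returns 14,
     * i.e. 112 TCYC per the encoding table above. */
    uint64_t divisor = 8 * tcyc_ps;
    return (trfc_dlr_ps + divisor - 1) / divisor;
}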
20519 
20520 /**
20521  * Register (RSL) lmc#_timing_params2
20522  *
20523  * LMC Timing Parameters Register 2
20524  * This register sets timing parameters for DDR4.
20525  */
20526 union bdk_lmcx_timing_params2
20527 {
20528     uint64_t u;
20529     struct bdk_lmcx_timing_params2_s
20530     {
20531 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
20532         uint64_t reserved_35_63        : 29;
20533         uint64_t tcmd_gear             : 6;  /**< [ 34: 29](R/W) Indicates tCMD_GEAR constraint. Set this field as follows:
20534 
20535                                                                  _ RNDUP[tCMD_GEAR(ns) / (2 * TCYC(ns))]
20536 
20537                                                                  where tCMD_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20538                                                                  frequency (not data rate). */
20539         uint64_t tsync_gear            : 6;  /**< [ 28: 23](R/W) Indicates tSYNC_GEAR constraint. Set this field as follows:
20540 
20541                                                                  _ RNDUP[tSYNC_GEAR(ns) / (2 * TCYC(ns))]
20542 
20543                                                                  where tSYNC_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20544                                                                  frequency (not data rate). */
20545         uint64_t txs                   : 7;  /**< [ 22: 16](R/W) Indicates TXS constraints. Set this field as follows:
20546 
20547                                                                  _ RNDUP[tXS(ns) / (8 * TCYC(ns))]
20548 
20549                                                                  where tXS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20550                                                                  frequency (not data rate). */
20551         uint64_t trrd_l_ext            : 1;  /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
20552                                                                  when requiring tRRD_L of more than 9 nCK. Otherwise
20553                                                                  this bit must be zero. */
20554         uint64_t trtp                  : 4;  /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
20555                                                                  _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
20556 
20557                                                                  For DDR3, typical = max(4 nCK, 7.5ns).
20558 
20559                                                                  For DDR4 the TRTP parameter is dictated by the TWR MR bits. */
20560         uint64_t t_rw_op_max           : 4;  /**< [ 10:  7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
20561                                                                  timing of MRW and MPR operations. Set this field as follows:
20562 
20563                                                                  _ RNDUP[Maximum operation delay (cycles) / 8]
20564 
20565                                                                  Typical = 0x7. */
20566         uint64_t twtr_l                : 4;  /**< [  6:  3](R/W) Specifies tWTR_L constraints. Set this field as follows:
20567 
20568                                                                  _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
20569 
20570                                                                  where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20571                                                                  data rate).
20572 
20573                                                                  Typical = MAX(4 nCK, 7.5 ns)
20574 
20575                                                                  Internal:
20576                                                                  Seems the '- 1' is because we add one back into slot timing equation */
20577         uint64_t trrd_l                : 3;  /**< [  2:  0](R/W) Specifies tRRD_L constraints. Set this field as follows:
20578 
20579                                                                  _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
20580 
20581                                                                  where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20582                                                                  data rate).
20583 
20584                                                                  Typical = MAX(4 nCK, 7.5 ns).
20585                                                                  0x0 = reserved.
20586                                                                  0x1 = three TCYC.
20587                                                                  0x2 = four TCYC.
20588                                                                  0x3 = five TCYC.
20589                                                                  0x4 = six TCYC.
20590                                                                  0x5 = seven TCYC.
20591                                                                  0x6 = eight TCYC.
20592                                                                  0x7 = nine TCYC. */
20593 #else /* Word 0 - Little Endian */
20594         uint64_t trrd_l                : 3;  /**< [  2:  0](R/W) Specifies tRRD_L constraints. Set this field as follows:
20595 
20596                                                                  _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
20597 
20598                                                                  where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20599                                                                  data rate).
20600 
20601                                                                  Typical = MAX(4 nCK, 7.5 ns).
20602                                                                  0x0 = reserved.
20603                                                                  0x1 = three TCYC.
20604                                                                  0x2 = four TCYC.
20605                                                                  0x3 = five TCYC.
20606                                                                  0x4 = six TCYC.
20607                                                                  0x5 = seven TCYC.
20608                                                                  0x6 = eight TCYC.
20609                                                                  0x7 = nine TCYC. */
20610         uint64_t twtr_l                : 4;  /**< [  6:  3](R/W) Specifies tWTR_L constraints. Set this field as follows:
20611 
20612                                                                  _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
20613 
20614                                                                  where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20615                                                                  data rate).
20616 
20617                                                                  Typical = MAX(4 nCK, 7.5 ns)
20618 
20619                                                                  Internal:
20620                                                                  Seems the '- 1' is because we add one back into slot timing equation */
20621         uint64_t t_rw_op_max           : 4;  /**< [ 10:  7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
20622                                                                  timing of MRW and MPR operations. Set this field as follows:
20623 
20624                                                                  _ RNDUP[Maximum operation delay (cycles) / 8]
20625 
20626                                                                  Typical = 0x7. */
20627         uint64_t trtp                  : 4;  /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
20628                                                                  _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
20629 
20630                                                                  For DDR3, typical = max(4 nCK, 7.5ns).
20631 
20632                                                                  For DDR4 the TRTP parameter is dictated by the TWR MR bits. */
20633         uint64_t trrd_l_ext            : 1;  /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
20634                                                                  when requiring tRRD_L of more than 9 nCK. Otherwise
20635                                                                  this bit must be zero. */
20636         uint64_t txs                   : 7;  /**< [ 22: 16](R/W) Indicates TXS constraints. Set this field as follows:
20637 
20638                                                                  _ RNDUP[tXS(ns) / (8 * TCYC(ns))]
20639 
20640                                                                  where tXS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20641                                                                  frequency (not data rate). */
20642         uint64_t tsync_gear            : 6;  /**< [ 28: 23](R/W) Indicates tSYNC_GEAR constraint. Set this field as follows:
20643 
20644                                                                  _ RNDUP[tSYNC_GEAR(ns) / (2 * TCYC(ns))]
20645 
20646                                                                  where tSYNC_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20647                                                                  frequency (not data rate). */
20648         uint64_t tcmd_gear             : 6;  /**< [ 34: 29](R/W) Indicates tCMD_GEAR constraint. Set this field as follows:
20649 
20650                                                                  _ RNDUP[tCMD_GEAR(ns) / (2 * TCYC(ns))]
20651 
20652                                                                  where tCMD_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20653                                                                  frequency (not data rate). */
20654         uint64_t reserved_35_63        : 29;
20655 #endif /* Word 0 - End */
20656     } s;
20657     struct bdk_lmcx_timing_params2_cn8
20658     {
20659 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
20660         uint64_t reserved_16_63        : 48;
20661         uint64_t trrd_l_ext            : 1;  /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
20662                                                                  when requiring tRRD_L of more than 9 nCK. Otherwise
20663                                                                  this bit must be zero. */
20664         uint64_t trtp                  : 4;  /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
20665                                                                  _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
20666 
20667                                                                  For DDR3, typical = max(4 nCK, 7.5ns).
20668 
20669                                                                  For DDR4 the TRTP parameter is dictated by the TWR MR bits. */
20670         uint64_t t_rw_op_max           : 4;  /**< [ 10:  7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
20671                                                                  timing of MRW and MPR operations. Set this field as follows:
20672 
20673                                                                  _ RNDUP[Maximum operation delay (cycles) / 8]
20674 
20675                                                                  Typical = 0x7. */
20676         uint64_t twtr_l                : 4;  /**< [  6:  3](R/W) Specifies tWTR_L constraints. Set this field as follows:
20677 
20678                                                                  _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
20679 
20680                                                                  where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20681                                                                  data rate).
20682 
20683                                                                  Typical = MAX(4 nCK, 7.5 ns)
20684 
20685                                                                  Internal:
20686                                                                  Seems the '- 1' is because we add one back into slot timing equation */
20687         uint64_t trrd_l                : 3;  /**< [  2:  0](R/W) Specifies tRRD_L constraints. Set this field as follows:
20688 
20689                                                                  _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
20690 
20691                                                                  where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20692                                                                  data rate).
20693 
20694                                                                  Typical = MAX(4 nCK, 7.5 ns).
20695                                                                  0x0 = reserved.
20696                                                                  0x1 = three TCYC.
20697                                                                  0x2 = four TCYC.
20698                                                                  0x3 = five TCYC.
20699                                                                  0x4 = six TCYC.
20700                                                                  0x5 = seven TCYC.
20701                                                                  0x6 = eight TCYC.
20702                                                                  0x7 = nine TCYC. */
20703 #else /* Word 0 - Little Endian */
20704         uint64_t trrd_l                : 3;  /**< [  2:  0](R/W) Specifies tRRD_L constraints. Set this field as follows:
20705 
20706                                                                  _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
20707 
20708                                                                  where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20709                                                                  data rate).
20710 
20711                                                                  Typical = MAX(4 nCK, 7.5 ns).
20712                                                                  0x0 = reserved.
20713                                                                  0x1 = three TCYC.
20714                                                                  0x2 = four TCYC.
20715                                                                  0x3 = five TCYC.
20716                                                                  0x4 = six TCYC.
20717                                                                  0x5 = seven TCYC.
20718                                                                  0x6 = eight TCYC.
20719                                                                  0x7 = nine TCYC. */
20720         uint64_t twtr_l                : 4;  /**< [  6:  3](R/W) Specifies tWTR_L constraints. Set this field as follows:
20721 
20722                                                                  _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
20723 
20724                                                                  where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20725                                                                  data rate).
20726 
20727                                                                  Typical = MAX(4 nCK, 7.5 ns)
20728 
20729                                                                  Internal:
20730                                                                  Seems the '- 1' is because we add one back into slot timing equation */
20731         uint64_t t_rw_op_max           : 4;  /**< [ 10:  7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
20732                                                                  timing of MRW and MPR operations. Set this field as follows:
20733 
20734                                                                  _ RNDUP[Maximum operation delay (cycles) / 8]
20735 
20736                                                                  Typical = 0x7. */
20737         uint64_t trtp                  : 4;  /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
20738                                                                  _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
20739 
20740                                                                  For DDR3, typical = max(4 nCK, 7.5ns).
20741 
20742                                                                  For DDR4 the TRTP parameter is dictated by the TWR MR bits. */
20743         uint64_t trrd_l_ext            : 1;  /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
20744                                                                  when requiring tRRD_L of more than 9 nCK. Otherwise
20745                                                                  this bit must be zero. */
20746         uint64_t reserved_16_63        : 48;
20747 #endif /* Word 0 - End */
20748     } cn8;
20749     struct bdk_lmcx_timing_params2_cn9
20750     {
20751 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
20752         uint64_t reserved_35_63        : 29;
20753         uint64_t tcmd_gear             : 6;  /**< [ 34: 29](R/W) Indicates tCMD_GEAR constraint. Set this field as follows:
20754 
20755                                                                  _ RNDUP[tCMD_GEAR(ns) / (2 * TCYC(ns))]
20756 
20757                                                                  where tCMD_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20758                                                                  frequency (not data rate). */
20759         uint64_t tsync_gear            : 6;  /**< [ 28: 23](R/W) Indicates tSYNC_GEAR constraint. Set this field as follows:
20760 
20761                                                                  _ RNDUP[tSYNC_GEAR(ns) / (2 * TCYC(ns))]
20762 
20763                                                                  where tSYNC_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20764                                                                  frequency (not data rate). */
20765         uint64_t txs                   : 7;  /**< [ 22: 16](R/W) Indicates TXS constraints. Set this field as follows:
20766 
20767                                                                  _ RNDUP[tXS(ns) / (8 * TCYC(ns))]
20768 
20769                                                                  where tXS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20770                                                                  frequency (not data rate). */
20771         uint64_t trrd_l_ext            : 1;  /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
20772                                                                  when requiring tRRD_L of more than 9 nCK. Otherwise
20773                                                                  this bit must be zero. */
20774         uint64_t trtp                  : 4;  /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
20775                                                                  _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
20776 
20777                                                                  The TRTP parameter is dictated by the WR and RTP MR0 bits. */
20778         uint64_t t_rw_op_max           : 4;  /**< [ 10:  7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
20779                                                                  timing of MRW and MPR operations. Set this field as follows:
20780 
20781                                                                  _ RNDUP[Maximum operation delay (cycles) / 8]
20782 
20783                                                                  Typical = 0x7. */
20784         uint64_t twtr_l                : 4;  /**< [  6:  3](R/W) Specifies tWTR_L constraints. Set this field as follows:
20785 
20786                                                                  _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
20787 
20788                                                                  where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20789                                                                  data rate).
20790 
20791                                                                  Typical = MAX(4 nCK, 7.5 ns)
20792 
20793                                                                  Internal:
20794                                                                  Seems the '- 1' is because we add one back into slot timing equation */
20795         uint64_t trrd_l                : 3;  /**< [  2:  0](R/W) Specifies tRRD_L constraints. Set this field as follows:
20796 
20797                                                                  _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
20798 
20799                                                                  where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20800                                                                  data rate).
20801 
20802                                                                  Typical = MAX(4 nCK, 7.5 ns).
20803                                                                  0x0 = reserved.
20804                                                                  0x1 = three TCYC.
20805                                                                  0x2 = four TCYC.
20806                                                                  0x3 = five TCYC.
20807                                                                  0x4 = six TCYC.
20808                                                                  0x5 = seven TCYC.
20809                                                                  0x6 = eight TCYC.
20810                                                                  0x7 = nine TCYC. */
20811 #else /* Word 0 - Little Endian */
20812         uint64_t trrd_l                : 3;  /**< [  2:  0](R/W) Specifies tRRD_L constraints. Set this field as follows:
20813 
20814                                                                  _ RNDUP[tRRD_L(ns) / TCYC(ns)] - 2,
20815 
20816                                                                  where tRRD_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20817                                                                  data rate).
20818 
20819                                                                  Typical = MAX(4 nCK, 7.5 ns).
20820                                                                  0x0 = reserved.
20821                                                                  0x1 = three TCYC.
20822                                                                  0x2 = four TCYC.
20823                                                                  0x3 = five TCYC.
20824                                                                  0x4 = six TCYC.
20825                                                                  0x5 = seven TCYC.
20826                                                                  0x6 = eight TCYC.
20827                                                                  0x7 = nine TCYC. */
20828         uint64_t twtr_l                : 4;  /**< [  6:  3](R/W) Specifies tWTR_L constraints. Set this field as follows:
20829 
20830                                                                  _ RNDUP[tWTR_L(ns) / TCYC(ns)] - 1
20831 
20832                                                                  where tWTR_L is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock frequency (not the
20833                                                                  data rate).
20834 
20835                                                                  Typical = MAX(4 nCK, 7.5 ns)
20836 
20837                                                                  Internal:
20838                                                                  Seems the '- 1' is because we add one back into slot timing equation */
20839         uint64_t t_rw_op_max           : 4;  /**< [ 10:  7](R/W) Specifies the maximum delay for a read or write operation to complete, used to set the
20840                                                                  timing of MRW and MPR operations. Set this field as follows:
20841 
20842                                                                  _ RNDUP[Maximum operation delay (cycles) / 8]
20843 
20844                                                                  Typical = 0x7. */
20845         uint64_t trtp                  : 4;  /**< [ 14: 11](R/W) Specifies the TRTP parameter, in cycles. Set this field as follows:
20846                                                                  _ RNDUP[TRTP(ns) / TCYC(ns)] - 1,
20847 
20848                                                                  The TRTP parameter is dictated by the WR and RTP MR0 bits. */
20849         uint64_t trrd_l_ext            : 1;  /**< [ 15: 15](R/W) MSB of tRRD_L constraints. Set this field
20850                                                                  when requiring tRRD_L of more than 9 nCK. Otherwise
20851                                                                  this bit must be zero. */
20852         uint64_t txs                   : 7;  /**< [ 22: 16](R/W) Indicates TXS constraints. Set this field as follows:
20853 
20854                                                                  _ RNDUP[tXS(ns) / (8 * TCYC(ns))]
20855 
20856                                                                  where tXS is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20857                                                                  frequency (not data rate). */
20858         uint64_t tsync_gear            : 6;  /**< [ 28: 23](R/W) Indicates tSYNC_GEAR constraint. Set this field as follows:
20859 
20860                                                                  _ RNDUP[tSYNC_GEAR(ns) / (2 * TCYC(ns))]
20861 
20862                                                                  where tSYNC_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20863                                                                  frequency (not data rate). */
20864         uint64_t tcmd_gear             : 6;  /**< [ 34: 29](R/W) Indicates tCMD_GEAR constraint. Set this field as follows:
20865 
20866                                                                  _ RNDUP[tCMD_GEAR(ns) / (2 * TCYC(ns))]
20867 
20868                                                                  where tCMD_GEAR is from the JEDEC DDR4 spec, and TCYC(ns) is the DDR clock
20869                                                                  frequency (not data rate). */
20870         uint64_t reserved_35_63        : 29;
20871 #endif /* Word 0 - End */
20872     } cn9;
20873 };
20874 typedef union bdk_lmcx_timing_params2 bdk_lmcx_timing_params2_t;
20875 
20876 static inline uint64_t BDK_LMCX_TIMING_PARAMS2(unsigned long a) __attribute__ ((pure, always_inline));
20877 static inline uint64_t BDK_LMCX_TIMING_PARAMS2(unsigned long a)
20878 {
20879     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
20880         return 0x87e088000060ll + 0x1000000ll * ((a) & 0x0);
20881     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
20882         return 0x87e088000060ll + 0x1000000ll * ((a) & 0x1);
20883     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
20884         return 0x87e088000060ll + 0x1000000ll * ((a) & 0x3);
20885     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
20886         return 0x87e088000060ll + 0x1000000ll * ((a) & 0x3);
20887     __bdk_csr_fatal("LMCX_TIMING_PARAMS2", 1, a, 0, 0, 0);
20888 }
20889 
20890 #define typedef_BDK_LMCX_TIMING_PARAMS2(a) bdk_lmcx_timing_params2_t
20891 #define bustype_BDK_LMCX_TIMING_PARAMS2(a) BDK_CSR_TYPE_RSL
20892 #define basename_BDK_LMCX_TIMING_PARAMS2(a) "LMCX_TIMING_PARAMS2"
20893 #define device_bar_BDK_LMCX_TIMING_PARAMS2(a) 0x0 /* PF_BAR0 */
20894 #define busnum_BDK_LMCX_TIMING_PARAMS2(a) (a)
20895 #define arguments_BDK_LMCX_TIMING_PARAMS2(a) (a),-1,-1,-1
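/* Editor's note: illustrative sketch only, not part of the auto-generated CSR
 * definitions. It composes a LMC()_TIMING_PARAMS2 value using the field
 * encodings documented above: TRRD_L = RNDUP[tRRD_L/TCYC] - 2,
 * TWTR_L = RNDUP[tWTR_L/TCYC] - 1, TXS = RNDUP[tXS/(8*TCYC)]. The helper name
 * and picosecond inputs are assumptions; writing the CSR itself is not shown,
 * and the caller is assumed to supply timings at or above the minimum encodings
 * (e.g. TRRD_L 0x0 is reserved, so tRRD_L of at least three TCYC). */
static inline uint64_t example_timing_params2(uint64_t trrd_l_ps, uint64_t twtr_l_ps,
                                              uint64_t txs_ps, uint64_t tcyc_ps)
{
    bdk_lmcx_timing_params2_t tp2;
    tp2.u = 0;
    tp2.s.trrd_l = (trrd_l_ps + tcyc_ps - 1) / tcyc_ps - 2;      /* RNDUP - 2 */
    tp2.s.twtr_l = (twtr_l_ps + tcyc_ps - 1) / tcyc_ps - 1;      /* RNDUP - 1 */
    tp2.s.txs    = (txs_ps + 8 * tcyc_ps - 1) / (8 * tcyc_ps);   /* RNDUP[tXS/(8*TCYC)] */
    return tp2.u;
}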
20896 
20897 /**
20898  * Register (RSL) lmc#_wlevel_ctl
20899  *
20900  * LMC Write Level Control Register
20901  */
20902 union bdk_lmcx_wlevel_ctl
20903 {
20904     uint64_t u;
20905     struct bdk_lmcx_wlevel_ctl_s
20906     {
20907 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
20908         uint64_t reserved_22_63        : 42;
20909         uint64_t rtt_nom               : 3;  /**< [ 21: 19](R/W) LMC writes a decoded value to MR1[Rtt_Nom] of the rank during write leveling. Per JEDEC
20910                                                                  DDR3 specifications, only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are
20911                                                                  allowed during write leveling with output buffer enabled.
20912                                                                  DDR3 Spec:
20913                                                                  0x0 = LMC writes 0x1 (RZQ/4) to MR1[Rtt_Nom].
20914                                                                  0x1 = LMC writes 0x2 (RZQ/2) to MR1[Rtt_Nom].
20915                                                                  0x2 = LMC writes 0x3 (RZQ/6) to MR1[Rtt_Nom].
20916                                                                  0x3 = LMC writes 0x4 (RZQ/12) to MR1[Rtt_Nom].
20917                                                                  0x4 = LMC writes 0x5 (RZQ/8) to MR1[Rtt_Nom].
20918                                                                  0x5 = LMC writes 0x6 (Rsvd) to MR1[Rtt_Nom].
20919                                                                  0x6 = LMC writes 0x7 (Rsvd) to MR1[Rtt_Nom].
20920                                                                  0x7 = LMC writes 0x0 (Disabled) to MR1[Rtt_Nom].
20921 
20922                                                                  Internal:
20923                                                                  In DDR4 LRDIMM application, this is used to program the Data Buffer Control Word BC00
20924                                                                  during the Host Interface Write Leveling Mode:
20925                                                                  0x0 = LMC writes 0x1 (RZQ/4).
20926                                                                  0x1 = LMC writes 0x2 (RZQ/2).
20927                                                                  0x2 = LMC writes 0x3 (RZQ/6).
20928                                                                  0x3 = LMC writes 0x4 (RZQ/1).
20929                                                                  0x4 = LMC writes 0x5 (RZQ/5).
20930                                                                  0x5 = LMC writes 0x6 (RZQ/3).
20931                                                                  0x6 = LMC writes 0x7 (RZQ/7).
20932                                                                  0x7 = LMC writes 0x0 (Disabled). */
20933         uint64_t bitmask               : 8;  /**< [ 18: 11](R/W) Mask to select bit lanes on which write leveling feedback is returned when [OR_DIS] is set to one. */
20934         uint64_t or_dis                : 1;  /**< [ 10: 10](R/W) Disable ORing of bits in a byte lane when computing the write leveling bitmask. */
20935         uint64_t sset                  : 1;  /**< [  9:  9](R/W) Run write leveling on the current setting only. */
20936         uint64_t lanemask              : 9;  /**< [  8:  0](R/W) One-shot mask to select byte lane to be leveled by the write leveling sequence. Used with
20937                                                                  x16 parts where the upper and lower byte lanes need to be leveled independently.
20938 
20939                                                                  This field is also used for byte lane masking during read leveling sequence. */
20940 #else /* Word 0 - Little Endian */
20941         uint64_t lanemask              : 9;  /**< [  8:  0](R/W) One-shot mask to select byte lane to be leveled by the write leveling sequence. Used with
20942                                                                  x16 parts where the upper and lower byte lanes need to be leveled independently.
20943 
20944                                                                  This field is also used for byte lane masking during read leveling sequence. */
20945         uint64_t sset                  : 1;  /**< [  9:  9](R/W) Run write leveling on the current setting only. */
20946         uint64_t or_dis                : 1;  /**< [ 10: 10](R/W) Disable ORing of bits in a byte lane when computing the write leveling bitmask. */
20947         uint64_t bitmask               : 8;  /**< [ 18: 11](R/W) Mask to select bit lanes on which write leveling feedback is returned when [OR_DIS] is set to one. */
20948         uint64_t rtt_nom               : 3;  /**< [ 21: 19](R/W) LMC writes a decoded value to MR1[Rtt_Nom] of the rank during write leveling. Per JEDEC
20949                                                                  DDR3 specifications, only values MR1[Rtt_Nom] = 1 (RZQ/4), 2 (RZQ/2), or 3 (RZQ/6) are
20950                                                                  allowed during write leveling with output buffer enabled.
20951                                                                  DDR3 Spec:
20952                                                                  0x0 = LMC writes 0x1 (RZQ/4) to MR1[Rtt_Nom].
20953                                                                  0x1 = LMC writes 0x2 (RZQ/2) to MR1[Rtt_Nom].
20954                                                                  0x2 = LMC writes 0x3 (RZQ/6) to MR1[Rtt_Nom].
20955                                                                  0x3 = LMC writes 0x4 (RZQ/12) to MR1[Rtt_Nom].
20956                                                                  0x4 = LMC writes 0x5 (RZQ/8) to MR1[Rtt_Nom].
20957                                                                  0x5 = LMC writes 0x6 (Rsvd) to MR1[Rtt_Nom].
20958                                                                  0x6 = LMC writes 0x7 (Rsvd) to MR1[Rtt_Nom].
20959                                                                  0x7 = LMC writes 0x0 (Disabled) to MR1[Rtt_Nom].
20960 
20961                                                                  Internal:
20962                                                                  In DDR4 LRDIMM application, this is used to program the Data Buffer Control Word BC00
20963                                                                  during the Host Interface Write Leveling Mode:
20964                                                                  0x0 = LMC writes 0x1 (RZQ/4).
20965                                                                  0x1 = LMC writes 0x2 (RZQ/2).
20966                                                                  0x2 = LMC writes 0x3 (RZQ/6).
20967                                                                  0x3 = LMC writes 0x4 (RZQ/1).
20968                                                                  0x4 = LMC writes 0x5 (RZQ/5).
20969                                                                  0x5 = LMC writes 0x6 (RZQ/3).
20970                                                                  0x6 = LMC writes 0x7 (RZQ/7).
20971                                                                  0x7 = LMC writes 0x0 (Disabled). */
20972         uint64_t reserved_22_63        : 42;
20973 #endif /* Word 0 - End */
20974     } s;
20975     /* struct bdk_lmcx_wlevel_ctl_s cn8; */
20976     struct bdk_lmcx_wlevel_ctl_cn9
20977     {
20978 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
20979         uint64_t reserved_19_63        : 45;
20980         uint64_t bitmask               : 8;  /**< [ 18: 11](R/W) Mask to select bit lanes on which write leveling feedback is returned when [OR_DIS] is set to one. */
20981         uint64_t or_dis                : 1;  /**< [ 10: 10](R/W) Disable ORing of bits in a byte lane when computing the write leveling bitmask. */
20982         uint64_t sset                  : 1;  /**< [  9:  9](R/W) Run write leveling on the current setting only. */
20983         uint64_t lanemask              : 9;  /**< [  8:  0](R/W) One-shot mask to select byte lane to be leveled by the write leveling sequence. Used with
20984                                                                  x16 parts where the upper and lower byte lanes need to be leveled independently.
20985 
20986                                                                  This field is also used for byte lane masking during read leveling sequence. */
20987 #else /* Word 0 - Little Endian */
20988         uint64_t lanemask              : 9;  /**< [  8:  0](R/W) One-shot mask to select byte lane to be leveled by the write leveling sequence. Used with
20989                                                                  x16 parts where the upper and lower byte lanes need to be leveled independently.
20990 
20991                                                                  This field is also used for byte lane masking during read leveling sequence. */
20992         uint64_t sset                  : 1;  /**< [  9:  9](R/W) Run write leveling on the current setting only. */
20993         uint64_t or_dis                : 1;  /**< [ 10: 10](R/W) Disable ORing of bits in a byte lane when computing the write leveling bitmask. */
20994         uint64_t bitmask               : 8;  /**< [ 18: 11](R/W) Mask to select bit lanes on which write leveling feedback is returned when [OR_DIS] is set to one. */
20995         uint64_t reserved_19_63        : 45;
20996 #endif /* Word 0 - End */
20997     } cn9;
20998 };
20999 typedef union bdk_lmcx_wlevel_ctl bdk_lmcx_wlevel_ctl_t;
21000 
21001 static inline uint64_t BDK_LMCX_WLEVEL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
21002 static inline uint64_t BDK_LMCX_WLEVEL_CTL(unsigned long a)
21003 {
21004     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
21005         return 0x87e088000300ll + 0x1000000ll * ((a) & 0x0);
21006     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
21007         return 0x87e088000300ll + 0x1000000ll * ((a) & 0x1);
21008     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
21009         return 0x87e088000300ll + 0x1000000ll * ((a) & 0x3);
21010     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
21011         return 0x87e088000300ll + 0x1000000ll * ((a) & 0x3);
21012     __bdk_csr_fatal("LMCX_WLEVEL_CTL", 1, a, 0, 0, 0);
21013 }
21014 
21015 #define typedef_BDK_LMCX_WLEVEL_CTL(a) bdk_lmcx_wlevel_ctl_t
21016 #define bustype_BDK_LMCX_WLEVEL_CTL(a) BDK_CSR_TYPE_RSL
21017 #define basename_BDK_LMCX_WLEVEL_CTL(a) "LMCX_WLEVEL_CTL"
21018 #define device_bar_BDK_LMCX_WLEVEL_CTL(a) 0x0 /* PF_BAR0 */
21019 #define busnum_BDK_LMCX_WLEVEL_CTL(a) (a)
21020 #define arguments_BDK_LMCX_WLEVEL_CTL(a) (a),-1,-1,-1
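/* Editor's note: illustrative sketch only. Per the LANEMASK description above,
 * x16 parts may need the upper and lower byte lanes write leveled in separate
 * passes. This composes a LMC()_WLEVEL_CTL value selecting a lane subset and an
 * RTT_NOM encoding; the helper name is an assumption, and the CSR write itself
 * is not shown. */
static inline uint64_t example_wlevel_ctl(uint64_t lanemask, uint64_t rtt_nom)
{
    bdk_lmcx_wlevel_ctl_t ctl;
    ctl.u = 0;
    ctl.s.lanemask = lanemask & 0x1ff; /* one bit per byte lane 0..8 */
    ctl.s.rtt_nom  = rtt_nom & 0x7;    /* encoded per the RTT_NOM table above */
    return ctl.u;
}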
21021 
21022 /**
21023  * Register (RSL) lmc#_wlevel_dbg
21024  *
21025  * LMC Write Level Debug Register
21026  * A given read of LMC()_WLEVEL_DBG returns the write leveling pass/fail results for all
21027  * possible delay settings (i.e. the BITMASK) for only one byte in the last rank that the
21028  * hardware write leveled. LMC()_WLEVEL_DBG[BYTE] selects the particular byte. To get these
21029  * pass/fail results for a different rank, you must run the hardware write leveling again. For
21030  * example, it is possible to get the [BITMASK] results for every byte of every rank if you run
21031  * write leveling separately for each rank, probing LMC()_WLEVEL_DBG between each write-
21032  * leveling.
21033  */
21034 union bdk_lmcx_wlevel_dbg
21035 {
21036     uint64_t u;
21037     struct bdk_lmcx_wlevel_dbg_s
21038     {
21039 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
21040         uint64_t reserved_12_63        : 52;
21041         uint64_t bitmask               : 8;  /**< [ 11:  4](RO/H) Bitmask generated during write level settings sweep. If LMC()_WLEVEL_CTL[SSET]=0,
21042                                                                  [BITMASK]\<n\>=0 means write level setting n failed; [BITMASK]\<n\>=1 means write level
21043                                                                  setting n
21044                                                                  passed for
21045                                                                  0 \<= n \<= 7. [BITMASK] contains the first 8 results of the total 16 collected by LMC
21046                                                                  during
21047                                                                  the write leveling sequence.
21048 
21049                                                                  If LMC()_WLEVEL_CTL[SSET]=1, [BITMASK]\<0\>=0 means curr write level setting failed;
21050                                                                  [BITMASK]\<0\>=1 means curr write level setting passed. */
21051         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. */
21052 #else /* Word 0 - Little Endian */
21053         uint64_t byte                  : 4;  /**< [  3:  0](R/W) 0 \<= BYTE \<= 8. */
21054         uint64_t bitmask               : 8;  /**< [ 11:  4](RO/H) Bitmask generated during write level settings sweep. If LMC()_WLEVEL_CTL[SSET]=0,
21055                                                                  [BITMASK]\<n\>=0 means write level setting n failed; [BITMASK]\<n\>=1 means write level
21056                                                                  setting n
21057                                                                  passed for
21058                                                                  0 \<= n \<= 7. [BITMASK] contains the first 8 results of the total 16 collected by LMC
21059                                                                  during
21060                                                                  the write leveling sequence.
21061 
21062                                                                  If LMC()_WLEVEL_CTL[SSET]=1, [BITMASK]\<0\>=0 means curr write level setting failed;
21063                                                                  [BITMASK]\<0\>=1 means curr write level setting passed. */
21064         uint64_t reserved_12_63        : 52;
21065 #endif /* Word 0 - End */
21066     } s;
21067     /* struct bdk_lmcx_wlevel_dbg_s cn; */
21068 };
21069 typedef union bdk_lmcx_wlevel_dbg bdk_lmcx_wlevel_dbg_t;
21070 
21071 static inline uint64_t BDK_LMCX_WLEVEL_DBG(unsigned long a) __attribute__ ((pure, always_inline));
21072 static inline uint64_t BDK_LMCX_WLEVEL_DBG(unsigned long a)
21073 {
21074     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
21075         return 0x87e088000308ll + 0x1000000ll * ((a) & 0x0);
21076     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
21077         return 0x87e088000308ll + 0x1000000ll * ((a) & 0x1);
21078     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
21079         return 0x87e088000308ll + 0x1000000ll * ((a) & 0x3);
21080     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
21081         return 0x87e088000308ll + 0x1000000ll * ((a) & 0x3);
21082     __bdk_csr_fatal("LMCX_WLEVEL_DBG", 1, a, 0, 0, 0);
21083 }
21084 
21085 #define typedef_BDK_LMCX_WLEVEL_DBG(a) bdk_lmcx_wlevel_dbg_t
21086 #define bustype_BDK_LMCX_WLEVEL_DBG(a) BDK_CSR_TYPE_RSL
21087 #define basename_BDK_LMCX_WLEVEL_DBG(a) "LMCX_WLEVEL_DBG"
21088 #define device_bar_BDK_LMCX_WLEVEL_DBG(a) 0x0 /* PF_BAR0 */
21089 #define busnum_BDK_LMCX_WLEVEL_DBG(a) (a)
21090 #define arguments_BDK_LMCX_WLEVEL_DBG(a) (a),-1,-1,-1
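/* Editor's note: illustrative sketch only. As described above, software selects
 * a byte lane via [BYTE] and then reads back [BITMASK] for the last rank the
 * hardware write leveled. This shows only the field encode/decode on raw 64-bit
 * values; the actual CSR access is not shown and the helper names are
 * assumptions. */
static inline uint64_t example_wlevel_dbg_select(uint64_t byte)
{
    bdk_lmcx_wlevel_dbg_t dbg;
    dbg.u = 0;
    dbg.s.byte = byte & 0xf;           /* 0 <= BYTE <= 8 */
    return dbg.u;                      /* value to write before reading back */
}

static inline uint64_t example_wlevel_dbg_bitmask(uint64_t raw)
{
    bdk_lmcx_wlevel_dbg_t dbg;
    dbg.u = raw;                       /* value read back from the CSR */
    return dbg.s.bitmask;              /* one pass/fail bit per delay setting */
}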
21091 
21092 /**
21093  * Register (RSL) lmc#_wlevel_rank#
21094  *
21095  * LMC Write Level Rank Register
21096  * Four of these CSRs exist per LMC, one for each rank. Write level setting is measured in units
21097  * of 1/8 CK, so the below BYTEn values can range over 4 CK cycles. Assuming
21098  * LMC()_WLEVEL_CTL[SSET]=0, the BYTEn\<2:0\> values are not used during write leveling, and
21099  * they are overwritten by the hardware as part of the write leveling sequence. (Hardware sets
21100  * [STATUS] to 3 after hardware write leveling completes for the rank). Software needs to set
21101  * BYTEn\<4:3\> bits.
21102  *
21103  * Each CSR may also be written by software, but not while a write leveling sequence is in
21104  * progress. (Hardware sets [STATUS] to 1 after a CSR write.) Software initiates a hardware
21105  * write-
21106  * leveling sequence by programming LMC()_WLEVEL_CTL and writing LMC()_CONFIG[RANKMASK] and
21107  * LMC()_SEQ_CTL[INIT_START]=1 with
21108  * LMC()_SEQ_CTL[SEQ_SEL]=6.
21109  *
21110  * LMC will then step through and accumulate write leveling results for 8 unique delay settings
21111  * (twice), starting at a delay of LMC()_WLEVEL_RANK() [BYTEn\<4:3\>]* 8 CK increasing by
21112  * 1/8 CK each setting. Hardware will then set LMC()_WLEVEL_RANK()[BYTEn\<2:0\>] to
21113  * indicate the first write leveling result of 1 that followed a result of 0 during the
21114  * sequence by searching for a '1100' pattern in the generated bitmask, except that LMC will
21115  * always write LMC()_WLEVEL_RANK()[BYTEn\<0\>]=0. If hardware is unable to find a match
21116  * for a '1100' pattern, then hardware sets LMC()_WLEVEL_RANK() [BYTEn\<2:0\>] to 0x4. See
21117  * LMC()_WLEVEL_CTL.
21118  *
21119  * LMC()_WLEVEL_RANKi values for ranks i without attached DRAM should be set such that they do
21120  * not
21121  * increase the range of possible BYTE values for any byte lane. The easiest way to do this is to
21122  * set LMC()_WLEVEL_RANKi = LMC()_WLEVEL_RANKj, where j is some rank with attached DRAM whose
21123  * LMC()_WLEVEL_RANKj is already fully initialized.
21124  */
21125 union bdk_lmcx_wlevel_rankx
21126 {
21127     uint64_t u;
21128     struct bdk_lmcx_wlevel_rankx_s
21129     {
21130 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
21131         uint64_t reserved_47_63        : 17;
21132         uint64_t status                : 2;  /**< [ 46: 45](RO/H) Indicates status of the write leveling and where the BYTE* programmings in \<44:0\> came
21133                                                                  from:
21134                                                                  0x0 = BYTE* values are their reset value.
21135                                                                  0x1 = BYTE* values were set via a CSR write to this register.
21136                                                                  0x2 = Write leveling sequence currently in progress (BYTE* values are unpredictable).
21137                                                                  0x3 = BYTE* values came from a complete write leveling sequence, irrespective of which
21138                                                                  lanes are masked via LMC()_WLEVEL_CTL[LANEMASK]. */
21139         uint64_t byte8                 : 5;  /**< [ 44: 40](R/W/H) "Write level setting. Bit 0 of BYTE8 must be zero during normal operation. When ECC DRAM
21140                                                                  is not present in 64-bit mode (i.e. when DRAM is not attached to chip signals DDR#_DQS_8_*
21141                                                                  and DDR#_CB\<7:0\>), software should write BYTE8 with a value that does not increase the
21142                                                                  range of possible BYTE* values. The easiest way to do this is to set
21143                                                                  LMC()_WLEVEL_RANK()[BYTE8] = LMC()_WLEVEL_RANK()[BYTE0] when there is no
21144                                                                  ECC DRAM, using the final BYTE0 value." */
21145         uint64_t byte7                 : 5;  /**< [ 39: 35](R/W/H) Write level setting. Bit 0 of [BYTE7] must be zero during normal operation. */
21146         uint64_t byte6                 : 5;  /**< [ 34: 30](R/W/H) Write level setting. Bit 0 of [BYTE6] must be zero during normal operation. */
21147         uint64_t byte5                 : 5;  /**< [ 29: 25](R/W/H) Write level setting. Bit 0 of [BYTE5] must be zero during normal operation. */
21148         uint64_t byte4                 : 5;  /**< [ 24: 20](R/W/H) Write level setting. Bit 0 of [BYTE4] must be zero during normal operation. */
21149         uint64_t byte3                 : 5;  /**< [ 19: 15](R/W/H) Write level setting. Bit 0 of [BYTE3] must be zero during normal operation. */
21150         uint64_t byte2                 : 5;  /**< [ 14: 10](R/W/H) Write level setting. Bit 0 of [BYTE2] must be zero during normal operation. */
21151         uint64_t byte1                 : 5;  /**< [  9:  5](R/W/H) Write level setting. Bit 0 of [BYTE1] must be zero during normal operation. */
21152         uint64_t byte0                 : 5;  /**< [  4:  0](R/W/H) Write level setting. Bit 0 of [BYTE0] must be zero during normal operation. */
21153 #else /* Word 0 - Little Endian */
21154         uint64_t byte0                 : 5;  /**< [  4:  0](R/W/H) Write level setting. Bit 0 of [BYTE0] must be zero during normal operation. */
21155         uint64_t byte1                 : 5;  /**< [  9:  5](R/W/H) Write level setting. Bit 0 of [BYTE1] must be zero during normal operation. */
21156         uint64_t byte2                 : 5;  /**< [ 14: 10](R/W/H) Write level setting. Bit 0 of [BYTE2] must be zero during normal operation. */
21157         uint64_t byte3                 : 5;  /**< [ 19: 15](R/W/H) Write level setting. Bit 0 of [BYTE3] must be zero during normal operation. */
21158         uint64_t byte4                 : 5;  /**< [ 24: 20](R/W/H) Write level setting. Bit 0 of [BYTE4] must be zero during normal operation. */
21159         uint64_t byte5                 : 5;  /**< [ 29: 25](R/W/H) Write level setting. Bit 0 of [BYTE5] must be zero during normal operation. */
21160         uint64_t byte6                 : 5;  /**< [ 34: 30](R/W/H) Write level setting. Bit 0 of [BYTE6] must be zero during normal operation. */
21161         uint64_t byte7                 : 5;  /**< [ 39: 35](R/W/H) Write level setting. Bit 0 of [BYTE7] must be zero during normal operation. */
21162         uint64_t byte8                 : 5;  /**< [ 44: 40](R/W/H) "Write level setting. Bit 0 of BYTE8 must be zero during normal operation. When ECC DRAM
21163                                                                  is not present in 64-bit mode (i.e. when DRAM is not attached to chip signals DDR#_DQS_8_*
21164                                                                  and DDR#_CB\<7:0\>), software should write BYTE8 with a value that does not increase the
21165                                                                  range of possible BYTE* values. The easiest way to do this is to set
21166                                                                  LMC()_WLEVEL_RANK()[BYTE8] = LMC()_WLEVEL_RANK()[BYTE0] when there is no
21167                                                                  ECC DRAM, using the final BYTE0 value." */
21168         uint64_t status                : 2;  /**< [ 46: 45](RO/H) Indicates status of the write leveling and where the BYTE* programmings in \<44:0\> came
21169                                                                  from:
21170                                                                  0x0 = BYTE* values are their reset value.
21171                                                                  0x1 = BYTE* values were set via a CSR write to this register.
21172                                                                  0x2 = Write leveling sequence currently in progress (BYTE* values are unpredictable).
21173                                                                  0x3 = BYTE* values came from a complete write leveling sequence, irrespective of which
21174                                                                  lanes are masked via LMC()_WLEVEL_CTL[LANEMASK]. */
21175         uint64_t reserved_47_63        : 17;
21176 #endif /* Word 0 - End */
21177     } s;
21178     struct bdk_lmcx_wlevel_rankx_cn9
21179     {
21180 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
21181         uint64_t reserved_47_63        : 17;
21182         uint64_t status                : 2;  /**< [ 46: 45](RO/H) Indicates status of the write leveling and where the BYTE* programmings in \<44:0\> came
21183                                                                  from:
21184                                                                  0x0 = BYTE* values are their reset value.
21185                                                                  0x1 = BYTE* values were set via a CSR write to this register.
21186                                                                  0x2 = Write leveling sequence currently in progress (BYTE* values are unpredictable).
21187                                                                  0x3 = BYTE* values came from a complete write leveling sequence, irrespective of which
21188                                                                  lanes are masked via LMC()_WLEVEL_CTL[LANEMASK]. */
21189         uint64_t byte8                 : 5;  /**< [ 44: 40](R/W/H) "Write level setting. Bit 0 of BYTE8 must be zero during normal operation. When ECC DRAM
21190                                                                  is not present in 64-bit mode (i.e. when DRAM is not attached to chip signals DDR#_CBS_0_*
21191                                                                  and DDR#_CB\<7:0\>), software should write BYTE8 with a value that does not increase the
21192                                                                  range of possible BYTE* values. The easiest way to do this is to set
21193                                                                  LMC()_WLEVEL_RANK()[BYTE8] = LMC()_WLEVEL_RANK()[BYTE0] when there is no
21194                                                                  ECC DRAM, using the final BYTE0 value." */
21195         uint64_t byte7                 : 5;  /**< [ 39: 35](R/W/H) Write level setting. Bit 0 of [BYTE7] must be zero during normal operation. */
21196         uint64_t byte6                 : 5;  /**< [ 34: 30](R/W/H) Write level setting. Bit 0 of [BYTE6] must be zero during normal operation. */
21197         uint64_t byte5                 : 5;  /**< [ 29: 25](R/W/H) Write level setting. Bit 0 of [BYTE5] must be zero during normal operation. */
21198         uint64_t byte4                 : 5;  /**< [ 24: 20](R/W/H) Write level setting. Bit 0 of [BYTE4] must be zero during normal operation. */
21199         uint64_t byte3                 : 5;  /**< [ 19: 15](R/W/H) Write level setting. Bit 0 of [BYTE3] must be zero during normal operation. */
21200         uint64_t byte2                 : 5;  /**< [ 14: 10](R/W/H) Write level setting. Bit 0 of [BYTE2] must be zero during normal operation. */
21201         uint64_t byte1                 : 5;  /**< [  9:  5](R/W/H) Write level setting. Bit 0 of [BYTE1] must be zero during normal operation. */
21202         uint64_t byte0                 : 5;  /**< [  4:  0](R/W/H) Write level setting. Bit 0 of [BYTE0] must be zero during normal operation. */
21203 #else /* Word 0 - Little Endian */
21204         uint64_t byte0                 : 5;  /**< [  4:  0](R/W/H) Write level setting. Bit 0 of [BYTE0] must be zero during normal operation. */
21205         uint64_t byte1                 : 5;  /**< [  9:  5](R/W/H) Write level setting. Bit 0 of [BYTE1] must be zero during normal operation. */
21206         uint64_t byte2                 : 5;  /**< [ 14: 10](R/W/H) Write level setting. Bit 0 of [BYTE2] must be zero during normal operation. */
21207         uint64_t byte3                 : 5;  /**< [ 19: 15](R/W/H) Write level setting. Bit 0 of [BYTE3] must be zero during normal operation. */
21208         uint64_t byte4                 : 5;  /**< [ 24: 20](R/W/H) Write level setting. Bit 0 of [BYTE4] must be zero during normal operation. */
21209         uint64_t byte5                 : 5;  /**< [ 29: 25](R/W/H) Write level setting. Bit 0 of [BYTE5] must be zero during normal operation. */
21210         uint64_t byte6                 : 5;  /**< [ 34: 30](R/W/H) Write level setting. Bit 0 of [BYTE6] must be zero during normal operation. */
21211         uint64_t byte7                 : 5;  /**< [ 39: 35](R/W/H) Write level setting. Bit 0 of [BYTE7] must be zero during normal operation. */
21212         uint64_t byte8                 : 5;  /**< [ 44: 40](R/W/H) "Write level setting. Bit 0 of BYTE8 must be zero during normal operation. When ECC DRAM
21213                                                                  is not present in 64-bit mode (i.e. when DRAM is not attached to chip signals DDR#_CBS_0_*
21214                                                                  and DDR#_CB\<7:0\>), software should write BYTE8 with a value that does not increase the
21215                                                                  range of possible BYTE* values. The easiest way to do this is to set
21216                                                                  LMC()_WLEVEL_RANK()[BYTE8] = LMC()_WLEVEL_RANK()[BYTE0] when there is no
21217                                                                  ECC DRAM, using the final BYTE0 value." */
21218         uint64_t status                : 2;  /**< [ 46: 45](RO/H) Indicates status of the write leveling and where the BYTE* programmings in \<44:0\> came
21219                                                                  from:
21220                                                                  0x0 = BYTE* values are their reset value.
21221                                                                  0x1 = BYTE* values were set via a CSR write to this register.
21222                                                                  0x2 = Write leveling sequence currently in progress (BYTE* values are unpredictable).
21223                                                                  0x3 = BYTE* values came from a complete write leveling sequence, irrespective of which
21224                                                                  lanes are masked via LMC()_WLEVEL_CTL[LANEMASK]. */
21225         uint64_t reserved_47_63        : 17;
21226 #endif /* Word 0 - End */
21227     } cn9;
21228     /* struct bdk_lmcx_wlevel_rankx_s cn81xx; */
21229     /* struct bdk_lmcx_wlevel_rankx_s cn88xx; */
21230     /* struct bdk_lmcx_wlevel_rankx_cn9 cn83xx; */
21231 };
21232 typedef union bdk_lmcx_wlevel_rankx bdk_lmcx_wlevel_rankx_t;
21233 
21234 static inline uint64_t BDK_LMCX_WLEVEL_RANKX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
21235 static inline uint64_t BDK_LMCX_WLEVEL_RANKX(unsigned long a, unsigned long b)
21236 {
21237     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=3)))
21238         return 0x87e0880002c0ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x3);
21239     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
21240         return 0x87e0880002c0ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
21241     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=3)))
21242         return 0x87e0880002c0ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
21243     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=3)))
21244         return 0x87e0880002c0ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
21245     __bdk_csr_fatal("LMCX_WLEVEL_RANKX", 2, a, b, 0, 0);
21246 }
21247 
21248 #define typedef_BDK_LMCX_WLEVEL_RANKX(a,b) bdk_lmcx_wlevel_rankx_t
21249 #define bustype_BDK_LMCX_WLEVEL_RANKX(a,b) BDK_CSR_TYPE_RSL
21250 #define basename_BDK_LMCX_WLEVEL_RANKX(a,b) "LMCX_WLEVEL_RANKX"
21251 #define device_bar_BDK_LMCX_WLEVEL_RANKX(a,b) 0x0 /* PF_BAR0 */
21252 #define busnum_BDK_LMCX_WLEVEL_RANKX(a,b) (a)
21253 #define arguments_BDK_LMCX_WLEVEL_RANKX(a,b) (a),(b),-1,-1
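
/*
 * Illustrative sketch (not part of the auto-generated register definitions,
 * guarded out so it cannot affect compilation of this header): one way the
 * STATUS and BYTE8 notes above might be used after write leveling. It assumes
 * the BDK_CSR_READ/BDK_CSR_WRITE helpers and bdk_node_t from libbdk-arch are
 * available in the including code; the function name and the ecc_present flag
 * are hypothetical.
 */
#if 0 /* example only */
static inline void example_fixup_wlevel_byte8(bdk_node_t node, int lmc, int rank, int ecc_present)
{
    bdk_lmcx_wlevel_rankx_t wl;

    wl.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(lmc, rank));

    /* STATUS = 0x3 indicates the BYTE* fields came from a completed
       write leveling sequence. */
    if (wl.s.status != 0x3)
        return;

    /* With no ECC DRAM attached, keep BYTE8 within the range of the data
       bytes by copying the final BYTE0 value, as the BYTE8 description
       above suggests. */
    if (!ecc_present)
    {
        wl.s.byte8 = wl.s.byte0;
        BDK_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(lmc, rank), wl.u);
    }
}
#endif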
21254 
21255 /**
21256  * Register (RSL) lmc#_wodt_mask
21257  *
21258  * LMC Write OnDieTermination Mask Register
21259  * System designers may desire to terminate DQ/DQS lines for higher-frequency DDR operations,
21260  * especially on a multirank system. DDR4 DQ/DQS I/Os have built-in termination resistors that
21261  * can be turned on or off by the controller, after meeting TAOND and TAOF timing requirements.
21262  * Each rank has its own ODT pin that fans out to all of the memory parts in that DIMM. System
21263  * designers may prefer different combinations of ODT ONs for write operations into different
21264  * ranks. CNXXXX supports full programmability by way of the mask register below. Each rank
21265  * position has its own four-bit programmable field. When the controller does a write to that
21266  * rank, it sets the four ODT pins according to the mask bits below. For example, when doing a
21267  * write into Rank0, a system designer may desire to terminate the lines with the resistor on
21268  * DIMM0/Rank1. The mask [WODT_D0_R0] would then be {0010}, i.e. only the bit for DIMM0/Rank1
21269  * is set.
21270  *
21271  * CNXXXX drives the appropriate mask values on the ODT pins by default. If this feature is not
21272  * required, write 0x0 in this register. When a given RANK is selected, the WODT mask for that
21273  * RANK is used. The resulting WODT mask is driven to the DIMMs in the following manner:
21274  */
21275 union bdk_lmcx_wodt_mask
21276 {
21277     uint64_t u;
21278     struct bdk_lmcx_wodt_mask_s
21279     {
21280 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
21281         uint64_t reserved_28_63        : 36;
21282         uint64_t wodt_d1_r1            : 4;  /**< [ 27: 24](R/W) Reserved.
21283                                                                  Internal:
21284                                                                  Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked.
21285                                                                  If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R1]\<3:0\> must be zero. */
21286         uint64_t reserved_20_23        : 4;
21287         uint64_t wodt_d1_r0            : 4;  /**< [ 19: 16](R/W) Reserved.
21288                                                                  Internal:
21289                                                                  Write ODT mask DIMM1, RANK0. If [RANK_ENA]=0, [WODT_D1_R0]\<3,1\> must be zero. */
21290         uint64_t reserved_12_15        : 4;
21291         uint64_t wodt_d0_r1            : 4;  /**< [ 11:  8](R/W) Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=0,
21292                                                                  [WODT_D0_R1]\<3:0\> must be zero. */
21293         uint64_t reserved_4_7          : 4;
21294         uint64_t wodt_d0_r0            : 4;  /**< [  3:  0](R/W) Write ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D0_R0]\<3,1\> must be zero. */
21295 #else /* Word 0 - Little Endian */
21296         uint64_t wodt_d0_r0            : 4;  /**< [  3:  0](R/W) Write ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D0_R0]\<3,1\> must be zero. */
21297         uint64_t reserved_4_7          : 4;
21298         uint64_t wodt_d0_r1            : 4;  /**< [ 11:  8](R/W) Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=0,
21299                                                                  [WODT_D0_R1]\<3:0\> must be zero. */
21300         uint64_t reserved_12_15        : 4;
21301         uint64_t wodt_d1_r0            : 4;  /**< [ 19: 16](R/W) Reserved.
21302                                                                  Internal:
21303                                                                  Write ODT mask DIMM1, RANK0. If [RANK_ENA]=0, [WODT_D1_R0]\<3,1\> must be zero. */
21304         uint64_t reserved_20_23        : 4;
21305         uint64_t wodt_d1_r1            : 4;  /**< [ 27: 24](R/W) Reserved.
21306                                                                  Internal:
21307                                                                  Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked.
21308                                                                  If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R1]\<3:0\> must be zero. */
21309         uint64_t reserved_28_63        : 36;
21310 #endif /* Word 0 - End */
21311     } s;
21312     struct bdk_lmcx_wodt_mask_cn9
21313     {
21314 #if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
21315         uint64_t reserved_28_63        : 36;
21316         uint64_t wodt_d1_r1            : 4;  /**< [ 27: 24](R/W) Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked.
21317                                                                  If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R1]\<3:0\> must be zero. */
21318         uint64_t reserved_20_23        : 4;
21319         uint64_t wodt_d1_r0            : 4;  /**< [ 19: 16](R/W) Write ODT mask DIMM1, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R0]\<3,1\> must be zero. */
21320         uint64_t reserved_12_15        : 4;
21321         uint64_t wodt_d0_r1            : 4;  /**< [ 11:  8](R/W) Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=0,
21322                                                                  [WODT_D0_R1]\<3:0\> must be zero. */
21323         uint64_t reserved_4_7          : 4;
21324         uint64_t wodt_d0_r0            : 4;  /**< [  3:  0](R/W) Write ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D0_R0]\<3,1\> must be zero. */
21325 #else /* Word 0 - Little Endian */
21326         uint64_t wodt_d0_r0            : 4;  /**< [  3:  0](R/W) Write ODT mask DIMM0, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D0_R0]\<3,1\> must be zero. */
21327         uint64_t reserved_4_7          : 4;
21328         uint64_t wodt_d0_r1            : 4;  /**< [ 11:  8](R/W) Write ODT mask DIMM0, RANK1/DIMM0 in SingleRanked. If LMC()_CONFIG[RANK_ENA]=0,
21329                                                                  [WODT_D0_R1]\<3:0\> must be zero. */
21330         uint64_t reserved_12_15        : 4;
21331         uint64_t wodt_d1_r0            : 4;  /**< [ 19: 16](R/W) Write ODT mask DIMM1, RANK0. If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R0]\<3,1\> must be zero. */
21332         uint64_t reserved_20_23        : 4;
21333         uint64_t wodt_d1_r1            : 4;  /**< [ 27: 24](R/W) Write ODT mask DIMM1, RANK1/DIMM1 in SingleRanked.
21334                                                                  If LMC()_CONFIG[RANK_ENA]=0, [WODT_D1_R1]\<3:0\> must be zero. */
21335         uint64_t reserved_28_63        : 36;
21336 #endif /* Word 0 - End */
21337     } cn9;
21338     /* struct bdk_lmcx_wodt_mask_s cn81xx; */
21339     /* struct bdk_lmcx_wodt_mask_cn9 cn88xx; */
21340     /* struct bdk_lmcx_wodt_mask_cn9 cn83xx; */
21341 };
21342 typedef union bdk_lmcx_wodt_mask bdk_lmcx_wodt_mask_t;
21343 
21344 static inline uint64_t BDK_LMCX_WODT_MASK(unsigned long a) __attribute__ ((pure, always_inline));
21345 static inline uint64_t BDK_LMCX_WODT_MASK(unsigned long a)
21346 {
21347     if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
21348         return 0x87e0880001b0ll + 0x1000000ll * ((a) & 0x0);
21349     if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
21350         return 0x87e0880001b0ll + 0x1000000ll * ((a) & 0x1);
21351     if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
21352         return 0x87e0880001b0ll + 0x1000000ll * ((a) & 0x3);
21353     if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
21354         return 0x87e0880001b0ll + 0x1000000ll * ((a) & 0x3);
21355     __bdk_csr_fatal("LMCX_WODT_MASK", 1, a, 0, 0, 0);
21356 }
21357 
21358 #define typedef_BDK_LMCX_WODT_MASK(a) bdk_lmcx_wodt_mask_t
21359 #define bustype_BDK_LMCX_WODT_MASK(a) BDK_CSR_TYPE_RSL
21360 #define basename_BDK_LMCX_WODT_MASK(a) "LMCX_WODT_MASK"
21361 #define device_bar_BDK_LMCX_WODT_MASK(a) 0x0 /* PF_BAR0 */
21362 #define busnum_BDK_LMCX_WODT_MASK(a) (a)
21363 #define arguments_BDK_LMCX_WODT_MASK(a) (a),-1,-1,-1
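
/*
 * Illustrative sketch (not part of the auto-generated register definitions,
 * guarded out so it cannot affect compilation of this header): programming the
 * per-rank write ODT masks described above. The example from the register
 * description (terminate DIMM0/Rank1 while writing to DIMM0/Rank0) corresponds
 * to [WODT_D0_R0] = 0x2. It assumes the BDK_CSR_WRITE helper and bdk_node_t
 * from libbdk-arch are available; the function name and the chosen mask values
 * are hypothetical examples, not recommendations.
 */
#if 0 /* example only */
static inline void example_program_wodt_mask(bdk_node_t node, int lmc)
{
    bdk_lmcx_wodt_mask_t mask;

    /* Start from all zero: per the description above, writing 0x0 leaves
       controller-driven write ODT disabled. */
    mask.u = 0;
    mask.s.wodt_d0_r0 = 0x2; /* Writes to DIMM0/Rank0: assert ODT on DIMM0/Rank1. */
    mask.s.wodt_d0_r1 = 0x1; /* Writes to DIMM0/Rank1: assert ODT on DIMM0/Rank0. */

    BDK_CSR_WRITE(node, BDK_LMCX_WODT_MASK(lmc), mask.u);
}
#endif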
21364 
21365 #endif /* __BDK_CSRS_LMC_H__ */
21366