components/open-fabrics/libsif/include/psifapi/psif_hw_setget.h
/*
 * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_PSIF_HW_SETGET_H
#define	_PSIF_HW_SETGET_H

#ifdef __cplusplus
extern "C" {
#endif

#include "psif_api.h"

#if defined(__arm__)
#include "epsfw_misc.h"
#  define htobe64(x) eps_htobe64(x)
#  define be64toh(x) eps_be64toh(x)
#endif /* __arm__ */
#include "psif_endian.h"
#if !defined(__KERNEL__)
#include "os_header.h"
#endif
       
/*
 * PSIF_WR_INVALIDATE_LKEY: key to invalidate/flush from the DMA VT cache.
 * PSIF_WR_INVALIDATE_RKEY: key to invalidate/flush from the DMA VT cache.
 * PSIF_WR_INVALIDATE_BOTH_KEYS: key to invalidate/flush from the DMA VT cache.
 * PSIF_WR_INVALIDATE_TLB: the address vector to invalidate in the TLB.
 */
static inline void set_psif_wr_su__key(volatile struct psif_wr_su *ptr, u32 data)
{
	/* group=2 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0x00000000ffffffffull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 32));
}
static inline u32 get_psif_wr_su__key(volatile struct psif_wr_su *ptr)
{
	/* group=2 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[2]) >> 32) & 0x00000000ffffffffull));
}
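
/*
 * All accessors in this header follow the pattern above: the structure is
 * treated as an array of big-endian 64-bit words; getters byte-swap the
 * containing word and mask/shift the field out, while setters do a
 * read-modify-write of that word. A minimal usage sketch (illustrative
 * only; the zero-initialized stack variable and the chosen value are
 * assumptions, not part of this API):
 *
 *	struct psif_wr_su su;
 *	memset((void *)&su, 0, sizeof(su));
 *	set_psif_wr_su__key(&su, 0x12345678);
 *	assert(get_psif_wr_su__key(&su) == 0x12345678);
 */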
       
/*
 * Send queue sequence number. Used to map request to a particular work
 * request in the send queue.
 */
static inline void set_psif_wr__sq_seq(volatile struct psif_wr *ptr, u16 data)
{
	/* group=0 shift=0 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffffffffffff0000ull) |
		((((u64)(data)) & 0x000000000000ffffull) << 0));
}
static inline u16 get_psif_wr__sq_seq(volatile struct psif_wr *ptr)
{
	/* group=0 shift=0 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[0]) >> 0) & 0x000000000000ffffull));
}

/*
 * QP sending this request. XXX: Should name be own_qp_num as defined in QP
 * state?
 */
static inline void set_psif_wr__local_qp(volatile struct psif_wr *ptr, u32 data)
{
	/* group=0 shift=32 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xff000000ffffffffull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 32));
}
static inline u32 get_psif_wr__local_qp(volatile struct psif_wr *ptr)
{
	/* group=0 shift=32 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[0]) >> 32) & 0x0000000000ffffffull));
}

/* Completion notification identifier. */
static inline void set_psif_wr__completion(volatile struct psif_wr *ptr, u8 data)
{
	/* group=1 shift=31 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[1] = htobe64((be64toh(pte[1]) & 0xffffffff7fffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 31));
}
static inline u8 get_psif_wr__completion(volatile struct psif_wr *ptr)
{
	/* group=1 shift=31 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[1]) >> 31) & 0x0000000000000001ull));
}

/*
 * Checksum used for data protection and consistency between work request and
 * QP state.
 */
static inline void set_psif_wr__checksum(volatile struct psif_wr *ptr, u32 data)
{
	/* group=2 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0x00000000ffffffffull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 32));
}
static inline u32 get_psif_wr__checksum(volatile struct psif_wr *ptr)
{
	/* group=2 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[2]) >> 32) & 0x00000000ffffffffull));
}
       
/*
 * Index to where elements are added to the send queue by SW. SW is
 * responsible for keeping track of how many entries there are in the send
 * queue, i.e. SW needs to keep track of the head_index so it doesn't
 * overwrite entries in the send queue which are not yet completed.
 */
static inline void set_psif_sq_sw__tail_indx(volatile struct psif_sq_sw *ptr, u16 data)
{
	/* group=0 shift=32 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffff0000ffffffffull) |
		((((u64)(data)) & 0x000000000000ffffull) << 32));
}
static inline u16 get_psif_sq_sw__tail_indx(volatile struct psif_sq_sw *ptr)
{
	/* group=0 shift=32 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[0]) >> 32) & 0x000000000000ffffull));
}
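
/*
 * Posting sketch for the index above (hypothetical driver flow; the sq_sw
 * pointer, the WQE copy step and the free-slot check are assumptions):
 * software writes the new WQE at the tail slot, then publishes it by
 * advancing tail_indx.
 *
 *	u16 tail = get_psif_sq_sw__tail_indx(sq_sw);
 *	(copy the work request into send queue entry 'tail' here, then)
 *	set_psif_sq_sw__tail_indx(sq_sw, (u16)(tail + 1));
 */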
       
/*
 * Send queue sequence number used by the SQS to maintain ordering and keep
 * track of which send queue elements to fetch. This field is not in sync
 * with the field in qp_t. This number is typically a little bit before
 * the number in the qp_t as SQS has to fetch the elements from host memory.
 * This is also used as tail_index when checking if there are more elements
 * in the send queue.
 */
static inline void set_psif_sq_hw__last_seq(volatile struct psif_sq_hw *ptr, u16 data)
{
	/* group=0 shift=16 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffffffff0000ffffull) |
		((((u64)(data)) & 0x000000000000ffffull) << 16));
}
static inline u16 get_psif_sq_hw__last_seq(volatile struct psif_sq_hw *ptr)
{
	/* group=0 shift=16 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[0]) >> 16) & 0x000000000000ffffull));
}

/* QP and UF to be processed next. */
static inline void set_psif_sq_hw__sq_next(volatile struct psif_sq_hw *ptr, u32 data)
{
	/* group=0 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0x00000000ffffffffull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 32));
}
static inline u32 get_psif_sq_hw__sq_next(volatile struct psif_sq_hw *ptr)
{
	/* group=0 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[0]) >> 32) & 0x00000000ffffffffull));
}
       
/*
 * This bit is set through the doorbell. SW should check this bit plus
 * psif_next = null to ensure SW can own the SQ descriptor.
 */
static inline void set_psif_sq_hw__destroyed(volatile struct psif_sq_hw *ptr, u8 data)
{
	/* group=1 shift=27 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[1] = htobe64((be64toh(pte[1]) & 0xfffffffff7ffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 27));
}
static inline u8 get_psif_sq_hw__destroyed(volatile struct psif_sq_hw *ptr)
{
	/* group=1 shift=27 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[1]) >> 27) & 0x0000000000000001ull));
}
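
/*
 * Ownership-check sketch for the bit above (hypothetical; treating a zero
 * sq_next as the "psif_next = null" condition is an assumption):
 *
 *	if (get_psif_sq_hw__destroyed(sq_hw) &&
 *	    get_psif_sq_hw__sq_next(sq_hw) == 0) {
 *		(the SQ descriptor is owned by software again)
 *	}
 */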
       
/* Software modified index pointing to the tail receive entry in host memory. */
static inline void set_psif_rq_sw__tail_indx(volatile struct psif_rq_sw *ptr, u16 data)
{
	/* group=0 shift=32 bits=14 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffffc000ffffffffull) |
		((((u64)(data)) & 0x0000000000003fffull) << 32));
}
static inline u16 get_psif_rq_sw__tail_indx(volatile struct psif_rq_sw *ptr)
{
	/* group=0 shift=32 bits=14 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[0]) >> 32) & 0x0000000000003fffull));
}
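
/*
 * Receive-queue posting sketch (illustrative; the 14-bit wrap mask follows
 * from bits=14 above, the rest of the flow is an assumption):
 *
 *	u16 tail = get_psif_rq_sw__tail_indx(rq_sw);
 *	(write the receive entry at slot 'tail' here, then publish it)
 *	set_psif_rq_sw__tail_indx(rq_sw, (u16)((tail + 1) & 0x3fff));
 */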
       
/*
 * Hardware modified index pointing to the head of the receive queue. TSU is
 * using this to find the address of the receive queue entry.
 */
static inline void set_psif_rq_hw__head_indx(volatile struct psif_rq_hw *ptr, u16 data)
{
	/* group=0 shift=14 bits=14 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xfffffffff0003fffull) |
		((((u64)(data)) & 0x0000000000003fffull) << 14));
}
static inline u16 get_psif_rq_hw__head_indx(volatile struct psif_rq_hw *ptr)
{
	/* group=0 shift=14 bits=14 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[0]) >> 14) & 0x0000000000003fffull));
}
       
/* The descriptor is valid. */
static inline void set_psif_rq_hw__valid(volatile struct psif_rq_hw *ptr, u8 data)
{
	/* group=3 shift=55 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[3] = htobe64((be64toh(pte[3]) & 0xff7fffffffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 55));
}
static inline u8 get_psif_rq_hw__valid(volatile struct psif_rq_hw *ptr)
{
	/* group=3 shift=55 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[3]) >> 55) & 0x0000000000000001ull));
}

/*
 * Receive queue entry ID. This is added to the receive completion using this
 * receive queue entry.
 */
static inline void set_psif_rq_entry__rqe_id(volatile struct psif_rq_entry *ptr, u64 data)
{
	/* group=0 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((__be64)data);
}
static inline u64 get_psif_rq_entry__rqe_id(volatile struct psif_rq_entry *ptr)
{
	/* group=0 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u64)be64toh(pte[0]));
}
       
/*
 * This retry tag is the one used by tsu_rqs and added to the packets sent to
 * tsu_dma. It is the responsibility of tsu_rqs to update this retry tag
 * whenever the sq_sequence_number in QP state is equal to the one in the
 * request.
 */
static inline void set_psif_qp_core__retry_tag_committed(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=0 shift=0 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xfffffffffffffff8ull) |
		((((u64)(data)) & 0x0000000000000007ull) << 0));
}
static inline u8 get_psif_qp_core__retry_tag_committed(volatile struct psif_qp_core *ptr)
{
	/* group=0 shift=0 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[0]) >> 0) & 0x0000000000000007ull));
}
       
/*
 * This retry tag is updated by the error block when an error occurs. If
 * tsu_rqs reads this retry tag and it is different from the
 * retry_tag_committed, tsu_rqs must update retry_tag_committed to the value
 * of retry_tag_err when the sq_sequence_number indicates this is the valid
 * request. The sq_sequence_number has been updated by tsu_err at the same
 * time the retry_tag_err is updated.
 */
static inline void set_psif_qp_core__retry_tag_err(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=0 shift=3 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffffffffffffffc7ull) |
		((((u64)(data)) & 0x0000000000000007ull) << 3));
}
static inline u8 get_psif_qp_core__retry_tag_err(volatile struct psif_qp_core *ptr)
{
	/* group=0 shift=3 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[0]) >> 3) & 0x0000000000000007ull));
}
       
/*
 * Error retry counter initial value. Read by tsu_dma and used by tsu_cmpl to
 * calculate exp_backoff etc.
 */
static inline void set_psif_qp_core__error_retry_init(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=0 shift=32 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xfffffff8ffffffffull) |
		((((u64)(data)) & 0x0000000000000007ull) << 32));
}
static inline u8 get_psif_qp_core__error_retry_init(volatile struct psif_qp_core *ptr)
{
	/* group=0 shift=32 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[0]) >> 32) & 0x0000000000000007ull));
}

/*
 * Retry counter associated with retries to received NAK or implied NAK. If
 * it expires, a path migration will be attempted if it is armed, or the QP
 * will go to error state. Read by tsu_dma and used by tsu_cmpl.
 */
static inline void set_psif_qp_core__error_retry_count(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=0 shift=35 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffffffc7ffffffffull) |
		((((u64)(data)) & 0x0000000000000007ull) << 35));
}
static inline u8 get_psif_qp_core__error_retry_count(volatile struct psif_qp_core *ptr)
{
	/* group=0 shift=35 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[0]) >> 35) & 0x0000000000000007ull));
}

/* A hit in the set locally spun out of tsu_cmpl is found. */
static inline void set_psif_qp_core__spin_hit(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=0 shift=39 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffffff7fffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 39));
}
static inline u8 get_psif_qp_core__spin_hit(volatile struct psif_qp_core *ptr)
{
	/* group=0 shift=39 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[0]) >> 39) & 0x0000000000000001ull));
}
       
/*
 * Minimum RNR NAK timeout. This is added to RNR NAK packets and the requester
 * receiving the RNR NAK must wait until the timer has expired before the
 * retry is sent.
 */
static inline void set_psif_qp_core__min_rnr_nak_time(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=1 shift=0 bits=5 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[1] = htobe64((be64toh(pte[1]) & 0xffffffffffffffe0ull) |
		((((u64)(data)) & 0x000000000000001full) << 0));
}
static inline u8 get_psif_qp_core__min_rnr_nak_time(volatile struct psif_qp_core *ptr)
{
	/* group=1 shift=0 bits=5 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[1]) >> 0) & 0x000000000000001full));
}

/* QP State for this QP. */
static inline void set_psif_qp_core__state(volatile struct psif_qp_core *ptr, enum psif_qp_state data)
{
	/* group=1 shift=5 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[1] = htobe64((be64toh(pte[1]) & 0xffffffffffffff1full) |
		((((u64)(data)) & 0x0000000000000007ull) << 5));
}
static inline enum psif_qp_state get_psif_qp_core__state(volatile struct psif_qp_core *ptr)
{
	/* group=1 shift=5 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_qp_state)((be64toh(pte[1]) >> 5) & 0x0000000000000007ull));
}
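
/*
 * Example check on the state field (hypothetical; the PSIF_QP_STATE_RTS
 * enumerator name is assumed from psif_qp_state, which is defined
 * elsewhere):
 *
 *	if (get_psif_qp_core__state(qp) != PSIF_QP_STATE_RTS)
 *		return;	(only post requests on a QP in RTS)
 */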
       
/* QP number for the remote node. */
static inline void set_psif_qp_core__remote_qp(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=1 shift=8 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[1] = htobe64((be64toh(pte[1]) & 0xffffffff000000ffull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 8));
}
static inline u32 get_psif_qp_core__remote_qp(volatile struct psif_qp_core *ptr)
{
	/* group=1 shift=8 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[1]) >> 8) & 0x0000000000ffffffull));
}

static inline void set_psif_qp_core__retry_sq_seq(volatile struct psif_qp_core *ptr, u16 data)
{
	/* group=2 shift=32 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xffff0000ffffffffull) |
		((((u64)(data)) & 0x000000000000ffffull) << 32));
}
static inline u16 get_psif_qp_core__retry_sq_seq(volatile struct psif_qp_core *ptr)
{
	/* group=2 shift=32 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[2]) >> 32) & 0x000000000000ffffull));
}

static inline void set_psif_qp_core__sq_seq(volatile struct psif_qp_core *ptr, u16 data)
{
	/* group=2 shift=48 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0x0000ffffffffffffull) |
		((((u64)(data)) & 0x000000000000ffffull) << 48));
}
static inline u16 get_psif_qp_core__sq_seq(volatile struct psif_qp_core *ptr)
{
	/* group=2 shift=48 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[2]) >> 48) & 0x000000000000ffffull));
}
       
/*
 * Magic number used to verify use of QP state. This is done by calculating a
 * checksum of the work request incorporating the magic number. This checksum
 * is checked against the checksum in the work request.
 */
static inline void set_psif_qp_core__magic(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=3 shift=0 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[3] = htobe64((be64toh(pte[3]) & 0xffffffff00000000ull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 0));
}
static inline u32 get_psif_qp_core__magic(volatile struct psif_qp_core *ptr)
{
	/* group=3 shift=0 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[3]) >> 0) & 0x00000000ffffffffull));
}
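
/*
 * Sketch of how magic and the WR checksum relate (per the comment above;
 * 'psif_wr_csum' is a hypothetical helper name, the real checksum routine
 * lives outside this header):
 *
 *	u32 magic = get_psif_qp_core__magic(qp);
 *	set_psif_wr__checksum(wr, psif_wr_csum(wr, magic));
 */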
       
/*
 * Q-Key received in incoming IB packet is checked against this Q-Key. Q-Key
 * used on transmit if top bit of Q-Key in WR is set.
 */
static inline void set_psif_qp_core__qkey(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=4 shift=0 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[4] = htobe64((be64toh(pte[4]) & 0xffffffff00000000ull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 0));
}
static inline u32 get_psif_qp_core__qkey(volatile struct psif_qp_core *ptr)
{
	/* group=4 shift=0 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[4]) >> 0) & 0x00000000ffffffffull));
}

/*
 * Sequence number of the last ACK received. Read and written by tsu_cmpl.
 * Used to verify that the received response packet is a valid response.
 */
static inline void set_psif_qp_core__last_acked_psn(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=4 shift=40 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[4] = htobe64((be64toh(pte[4]) & 0x000000ffffffffffull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 40));
}
static inline u32 get_psif_qp_core__last_acked_psn(volatile struct psif_qp_core *ptr)
{
	/* group=4 shift=40 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[4]) >> 40) & 0x0000000000ffffffull));
}

/* Index to scatter element of in progress SEND. */
static inline void set_psif_qp_core__scatter_indx(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=5 shift=32 bits=5 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[5] = htobe64((be64toh(pte[5]) & 0xffffffe0ffffffffull) |
		((((u64)(data)) & 0x000000000000001full) << 32));
}
static inline u8 get_psif_qp_core__scatter_indx(volatile struct psif_qp_core *ptr)
{
	/* group=5 shift=32 bits=5 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[5]) >> 32) & 0x000000000000001full));
}
       
/*
 * Expected packet sequence number: Sequence number on next expected packet.
 */
static inline void set_psif_qp_core__expected_psn(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=5 shift=40 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[5] = htobe64((be64toh(pte[5]) & 0x000000ffffffffffull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 40));
}
static inline u32 get_psif_qp_core__expected_psn(volatile struct psif_qp_core *ptr)
{
	/* group=5 shift=40 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[5]) >> 40) & 0x0000000000ffffffull));
}

/*
 * TSU quality of service level. Can take values indicating low latency and
 * high throughput. This is equivalent to high/low BAR when writing doorbells
 * to PSIF. The qosl bit in the doorbell request must match this bit in the
 * QP state, otherwise the QP must be put in error. This check only applies
 * to tsu_rqs.
 */
static inline void set_psif_qp_core__qosl(volatile struct psif_qp_core *ptr, enum psif_tsu_qos data)
{
	/* group=6 shift=49 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[6] = htobe64((be64toh(pte[6]) & 0xfffdffffffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 49));
}
static inline enum psif_tsu_qos get_psif_qp_core__qosl(volatile struct psif_qp_core *ptr)
{
	/* group=6 shift=49 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_tsu_qos)((be64toh(pte[6]) >> 49) & 0x0000000000000001ull));
}
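
/*
 * Doorbell-consistency sketch for qosl (illustrative; the BAR selection
 * step is an assumption about driver code): software derives which BAR
 * (high/low latency) to ring from the QP state, so the two can never
 * disagree.
 *
 *	enum psif_tsu_qos qosl = get_psif_qp_core__qosl(qp);
 *	(ring the high- or low-latency doorbell BAR according to 'qosl')
 */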
       
/*
 * Migration state (migrated, re-arm and armed). Since path migration is
 * handled by tsu_qps, this is controlled by tsu_qps. XXX: Should error
 * handler also be able to change the path?
 */
static inline void set_psif_qp_core__mstate(volatile struct psif_qp_core *ptr, enum psif_migration data)
{
	/* group=6 shift=50 bits=2 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[6] = htobe64((be64toh(pte[6]) & 0xfff3ffffffffffffull) |
		((((u64)(data)) & 0x0000000000000003ull) << 50));
}
static inline enum psif_migration get_psif_qp_core__mstate(volatile struct psif_qp_core *ptr)
{
	/* group=6 shift=50 bits=2 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_migration)((be64toh(pte[6]) >> 50) & 0x0000000000000003ull));
}
       
/* This is an IP over IB QP. */
static inline void set_psif_qp_core__ipoib_enable(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=6 shift=53 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[6] = htobe64((be64toh(pte[6]) & 0xffdfffffffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 53));
}
static inline u8 get_psif_qp_core__ipoib_enable(volatile struct psif_qp_core *ptr)
{
	/* group=6 shift=53 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[6]) >> 53) & 0x0000000000000001ull));
}

/* IB defined capability enable for receiving Atomic operations. */
static inline void set_psif_qp_core__atomic_enable(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=6 shift=61 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[6] = htobe64((be64toh(pte[6]) & 0xdfffffffffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 61));
}
static inline u8 get_psif_qp_core__atomic_enable(volatile struct psif_qp_core *ptr)
{
	/* group=6 shift=61 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[6]) >> 61) & 0x0000000000000001ull));
}

/* IB defined capability enable for receiving RDMA WR. */
static inline void set_psif_qp_core__rdma_wr_enable(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=6 shift=62 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[6] = htobe64((be64toh(pte[6]) & 0xbfffffffffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 62));
}
static inline u8 get_psif_qp_core__rdma_wr_enable(volatile struct psif_qp_core *ptr)
{
	/* group=6 shift=62 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[6]) >> 62) & 0x0000000000000001ull));
}

/* IB defined capability enable for receiving RDMA RD. */
static inline void set_psif_qp_core__rdma_rd_enable(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=6 shift=63 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[6] = htobe64((be64toh(pte[6]) & 0x7fffffffffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 63));
}
static inline u8 get_psif_qp_core__rdma_rd_enable(volatile struct psif_qp_core *ptr)
{
	/* group=6 shift=63 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[6]) >> 63) & 0x0000000000000001ull));
}

/*
 * Transmit packet sequence number. Read and updated by tsu_dma before
 * sending packets to tsu_ibpb and tsu_cmpl.
 */
static inline void set_psif_qp_core__xmit_psn(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=7 shift=0 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[7] = htobe64((be64toh(pte[7]) & 0xffffffffff000000ull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 0));
}
static inline u32 get_psif_qp_core__xmit_psn(volatile struct psif_qp_core *ptr)
{
	/* group=7 shift=0 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[7]) >> 0) & 0x0000000000ffffffull));
}
       
/*
 * TSU Service Level used to decide the TSU VL for requests associated with
 * this QP.
 */
static inline void set_psif_qp_core__tsl(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=7 shift=55 bits=4 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[7] = htobe64((be64toh(pte[7]) & 0xf87fffffffffffffull) |
		((((u64)(data)) & 0x000000000000000full) << 55));
}
static inline u8 get_psif_qp_core__tsl(volatile struct psif_qp_core *ptr)
{
	/* group=7 shift=55 bits=4 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[7]) >> 55) & 0x000000000000000full));
}

/*
 * Maximum number of outstanding read or atomic requests allowed by the
 * remote HCA. Initialized by software.
 */
static inline void set_psif_qp_core__max_outstanding(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=7 shift=59 bits=5 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[7] = htobe64((be64toh(pte[7]) & 0x07ffffffffffffffull) |
		((((u64)(data)) & 0x000000000000001full) << 59));
}
static inline u8 get_psif_qp_core__max_outstanding(volatile struct psif_qp_core *ptr)
{
	/* group=7 shift=59 bits=5 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[7]) >> 59) & 0x000000000000001full));
}
       
/* Send Queue RNR retry count initialization value. */
static inline void set_psif_qp_core__rnr_retry_init(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=8 shift=32 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[8] = htobe64((be64toh(pte[8]) & 0xfffffff8ffffffffull) |
		((((u64)(data)) & 0x0000000000000007ull) << 32));
}
static inline u8 get_psif_qp_core__rnr_retry_init(volatile struct psif_qp_core *ptr)
{
	/* group=8 shift=32 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[8]) >> 32) & 0x0000000000000007ull));
}

/*
 * Retry counter associated with RNR NAK retries. If it expires, a path
 * migration will be attempted if it is armed, or the QP will go to error
 * state.
 */
static inline void set_psif_qp_core__rnr_retry_count(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=8 shift=35 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[8] = htobe64((be64toh(pte[8]) & 0xffffffc7ffffffffull) |
		((((u64)(data)) & 0x0000000000000007ull) << 35));
}
static inline u8 get_psif_qp_core__rnr_retry_count(volatile struct psif_qp_core *ptr)
{
	/* group=8 shift=35 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[8]) >> 35) & 0x0000000000000007ull));
}
       
/*
 * When set, RQS should only check that the orig_checksum is equal to the
 * magic number. When not set, RQS should check the checksum against the
 * checksum in the psif_wr.
 */
static inline void set_psif_qp_core__no_checksum(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=8 shift=39 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[8] = htobe64((be64toh(pte[8]) & 0xffffff7fffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 39));
}
static inline u8 get_psif_qp_core__no_checksum(volatile struct psif_qp_core *ptr)
{
	/* group=8 shift=39 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[8]) >> 39) & 0x0000000000000001ull));
}

/*
 * Transport type of the QP (RC, UC, UD, XRC, MANSP1). MANSP1 is set for
 * privileged QPs.
 */
static inline void set_psif_qp_core__transport_type(volatile struct psif_qp_core *ptr, enum psif_qp_trans data)
{
	/* group=9 shift=0 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[9] = htobe64((be64toh(pte[9]) & 0xfffffffffffffff8ull) |
		((((u64)(data)) & 0x0000000000000007ull) << 0));
}
static inline enum psif_qp_trans get_psif_qp_core__transport_type(volatile struct psif_qp_core *ptr)
{
	/* group=9 shift=0 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_qp_trans)((be64toh(pte[9]) >> 0) & 0x0000000000000007ull));
}
       
/*
 * Number of bytes received of in progress RDMA Write or SEND. The data
 * received for SENDs and RDMA WR w/Imm is needed for completions. This
 * should be added to the msg_length.
 */
static inline void set_psif_qp_core__bytes_received(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=9 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[9] = htobe64((be64toh(pte[9]) & 0x00000000ffffffffull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 32));
}
static inline u32 get_psif_qp_core__bytes_received(volatile struct psif_qp_core *ptr)
{
	/* group=9 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[9]) >> 32) & 0x00000000ffffffffull));
}

/* This QP is running IP over IB. */
static inline void set_psif_qp_core__ipoib(volatile struct psif_qp_core *ptr, u8 data)
{
	/* group=10 shift=5 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[10] = htobe64((be64toh(pte[10]) & 0xffffffffffffffdfull) |
		((((u64)(data)) & 0x0000000000000001ull) << 5));
}
static inline u8 get_psif_qp_core__ipoib(volatile struct psif_qp_core *ptr)
{
	/* group=10 shift=5 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[10]) >> 5) & 0x0000000000000001ull));
}
       
/*
 * Combined 'Last Received MSN' and 'Last Outstanding MSN', used to maintain
 * 'spin set floor' and indicate 'all retries completed', respectively.
 */
static inline void set_psif_qp_core__last_received_outstanding_msn(volatile struct psif_qp_core *ptr, u16 data)
{
	/* group=11 shift=0 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[11] = htobe64((be64toh(pte[11]) & 0xffffffffffff0000ull) |
		((((u64)(data)) & 0x000000000000ffffull) << 0));
}
static inline u16 get_psif_qp_core__last_received_outstanding_msn(volatile struct psif_qp_core *ptr)
{
	/* group=11 shift=0 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[11]) >> 0) & 0x000000000000ffffull));
}

static inline void set_psif_qp_core__path_mtu(volatile struct psif_qp_core *ptr, enum psif_path_mtu data)
{
	/* group=13 shift=4 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[13] = htobe64((be64toh(pte[13]) & 0xffffffffffffff8full) |
		((((u64)(data)) & 0x0000000000000007ull) << 4));
}
static inline enum psif_path_mtu get_psif_qp_core__path_mtu(volatile struct psif_qp_core *ptr)
{
	/* group=13 shift=4 bits=3 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_path_mtu)((be64toh(pte[13]) >> 4) & 0x0000000000000007ull));
}
       
/* This PSN is committed - ACKs sent will contain this PSN. */
static inline void set_psif_qp_core__committed_received_psn(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=13 shift=8 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[13] = htobe64((be64toh(pte[13]) & 0xffffffff000000ffull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 8));
}
static inline u32 get_psif_qp_core__committed_received_psn(volatile struct psif_qp_core *ptr)
{
	/* group=13 shift=8 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[13]) >> 8) & 0x0000000000ffffffull));
}

/*
 * Message sequence number used in AETH when sending ACKs. The number is
 * incremented every time a new inbound message is processed.
 */
static inline void set_psif_qp_core__msn(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=14 shift=0 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[14] = htobe64((be64toh(pte[14]) & 0xffffffffff000000ull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 0));
}
static inline u32 get_psif_qp_core__msn(volatile struct psif_qp_core *ptr)
{
	/* group=14 shift=0 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[14]) >> 0) & 0x0000000000ffffffull));
}
       
/*
 * This is an index to the send completion queue descriptor. The descriptor
 * points to a send completion queue, which may or may not be the same as
 * the receive completion queue.
 */
static inline void set_psif_qp_core__send_cq_indx(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=14 shift=24 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[14] = htobe64((be64toh(pte[14]) & 0xffff000000ffffffull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 24));
}
static inline u32 get_psif_qp_core__send_cq_indx(volatile struct psif_qp_core *ptr)
{
	/* group=14 shift=24 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[14]) >> 24) & 0x0000000000ffffffull));
}
       
/*
 * Committed MSN - the MSN of the newest committed request for this QP. Only
 * the bottom 16 bits of the MSN are used.
 */
static inline void set_psif_qp_core__last_committed_msn(volatile struct psif_qp_core *ptr, u16 data)
{
	/* group=14 shift=48 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[14] = htobe64((be64toh(pte[14]) & 0x0000ffffffffffffull) |
		((((u64)(data)) & 0x000000000000ffffull) << 48));
}
static inline u16 get_psif_qp_core__last_committed_msn(volatile struct psif_qp_core *ptr)
{
	/* group=14 shift=48 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[14]) >> 48) & 0x000000000000ffffull));
}

static inline void set_psif_qp_core__srq_pd(volatile struct psif_qp_core *ptr, u32 data)
{
	/* group=15 shift=0 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[15] = htobe64((be64toh(pte[15]) & 0xffffffffff000000ull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 0));
}
static inline u32 get_psif_qp_core__srq_pd(volatile struct psif_qp_core *ptr)
{
	/* group=15 shift=0 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[15]) >> 0) & 0x0000000000ffffffull));
}
       
static inline void set_psif_qp_path__remote_gid_0(volatile struct psif_qp_path *ptr, u64 data)
{
	/* group=0 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((__be64)data);
}
static inline u64 get_psif_qp_path__remote_gid_0(volatile struct psif_qp_path *ptr)
{
	/* group=0 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u64)be64toh(pte[0]));
}

static inline void set_psif_qp_path__remote_gid_1(volatile struct psif_qp_path *ptr, u64 data)
{
	/* group=1 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[1] = htobe64((__be64)data);
}
static inline u64 get_psif_qp_path__remote_gid_1(volatile struct psif_qp_path *ptr)
{
	/* group=1 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u64)be64toh(pte[1]));
}

static inline void set_psif_qp_path__remote_lid(volatile struct psif_qp_path *ptr, u16 data)
{
	/* group=2 shift=0 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xffffffffffff0000ull) |
		((((u64)(data)) & 0x000000000000ffffull) << 0));
}
static inline u16 get_psif_qp_path__remote_lid(volatile struct psif_qp_path *ptr)
{
	/* group=2 shift=0 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[2]) >> 0) & 0x000000000000ffffull));
}

static inline void set_psif_qp_path__port(volatile struct psif_qp_path *ptr, enum psif_port data)
{
	/* group=2 shift=17 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xfffffffffffdffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 17));
}
static inline enum psif_port get_psif_qp_path__port(volatile struct psif_qp_path *ptr)
{
	/* group=2 shift=17 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_port)((be64toh(pte[2]) >> 17) & 0x0000000000000001ull));
}

static inline void set_psif_qp_path__loopback(volatile struct psif_qp_path *ptr, enum psif_loopback data)
{
	/* group=2 shift=18 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xfffffffffffbffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 18));
}
static inline enum psif_loopback get_psif_qp_path__loopback(volatile struct psif_qp_path *ptr)
{
	/* group=2 shift=18 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_loopback)((be64toh(pte[2]) >> 18) & 0x0000000000000001ull));
}

static inline void set_psif_qp_path__use_grh(volatile struct psif_qp_path *ptr, enum psif_use_grh data)
{
	/* group=2 shift=19 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xfffffffffff7ffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 19));
}
static inline enum psif_use_grh get_psif_qp_path__use_grh(volatile struct psif_qp_path *ptr)
{
	/* group=2 shift=19 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_use_grh)((be64toh(pte[2]) >> 19) & 0x0000000000000001ull));
}

static inline void set_psif_qp_path__sl(volatile struct psif_qp_path *ptr, u8 data)
{
	/* group=2 shift=20 bits=4 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xffffffffff0fffffull) |
		((((u64)(data)) & 0x000000000000000full) << 20));
}
static inline u8 get_psif_qp_path__sl(volatile struct psif_qp_path *ptr)
{
	/* group=2 shift=20 bits=4 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[2]) >> 20) & 0x000000000000000full));
}
       
static inline void set_psif_qp_path__hoplmt(volatile struct psif_qp_path *ptr, u8 data)
{
	/* group=2 shift=28 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xfffffff00fffffffull) |
		((((u64)(data)) & 0x00000000000000ffull) << 28));
}
static inline u8 get_psif_qp_path__hoplmt(volatile struct psif_qp_path *ptr)
{
	/* group=2 shift=28 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[2]) >> 28) & 0x00000000000000ffull));
}

static inline void set_psif_qp_path__flowlabel(volatile struct psif_qp_path *ptr, u32 data)
{
	/* group=2 shift=44 bits=20 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0x00000fffffffffffull) |
		((((u64)(data)) & 0x00000000000fffffull) << 44));
}
static inline u32 get_psif_qp_path__flowlabel(volatile struct psif_qp_path *ptr)
{
	/* group=2 shift=44 bits=20 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[2]) >> 44) & 0x00000000000fffffull));
}

static inline void set_psif_qp_path__local_ack_timeout(volatile struct psif_qp_path *ptr, u8 data)
{
	/* group=3 shift=27 bits=5 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[3] = htobe64((be64toh(pte[3]) & 0xffffffff07ffffffull) |
		((((u64)(data)) & 0x000000000000001full) << 27));
}
static inline u8 get_psif_qp_path__local_ack_timeout(volatile struct psif_qp_path *ptr)
{
	/* group=3 shift=27 bits=5 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[3]) >> 27) & 0x000000000000001full));
}

static inline void set_psif_qp_path__ipd(volatile struct psif_qp_path *ptr, u8 data)
{
	/* group=3 shift=32 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[3] = htobe64((be64toh(pte[3]) & 0xffffff00ffffffffull) |
		((((u64)(data)) & 0x00000000000000ffull) << 32));
}
static inline u8 get_psif_qp_path__ipd(volatile struct psif_qp_path *ptr)
{
	/* group=3 shift=32 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[3]) >> 32) & 0x00000000000000ffull));
}
       
/*
 * These are the LID path bits. They are used by tsu_ibpb when generating the
 * SLID in the packet, and by tsu_rcv when checking the DLID.
 */
static inline void set_psif_qp_path__local_lid_path(volatile struct psif_qp_path *ptr, u8 data)
{
	/* group=3 shift=48 bits=7 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[3] = htobe64((be64toh(pte[3]) & 0xff80ffffffffffffull) |
		((((u64)(data)) & 0x000000000000007full) << 48));
}
static inline u8 get_psif_qp_path__local_lid_path(volatile struct psif_qp_path *ptr)
{
	/* group=3 shift=48 bits=7 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[3]) >> 48) & 0x000000000000007full));
}

static inline void set_psif_qp_path__pkey_indx(volatile struct psif_qp_path *ptr, u16 data)
{
	/* group=3 shift=55 bits=9 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[3] = htobe64((be64toh(pte[3]) & 0x007fffffffffffffull) |
		((((u64)(data)) & 0x00000000000001ffull) << 55));
}
static inline u16 get_psif_qp_path__pkey_indx(volatile struct psif_qp_path *ptr)
{
	/* group=3 shift=55 bits=9 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[3]) >> 55) & 0x00000000000001ffull));
}
       
/* L-key state for this DMA validation entry */
static inline void set_psif_key__lkey_state(volatile struct psif_key *ptr, enum psif_dma_vt_key_states data)
{
	/* group=0 shift=60 bits=2 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xcfffffffffffffffull) |
		((((u64)(data)) & 0x0000000000000003ull) << 60));
}
static inline enum psif_dma_vt_key_states get_psif_key__lkey_state(volatile struct psif_key *ptr)
{
	/* group=0 shift=60 bits=2 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_dma_vt_key_states)((be64toh(pte[0]) >> 60) & 0x0000000000000003ull));
}

/* R-key state for this DMA validation entry */
static inline void set_psif_key__rkey_state(volatile struct psif_key *ptr, enum psif_dma_vt_key_states data)
{
	/* group=0 shift=62 bits=2 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0x3fffffffffffffffull) |
		((((u64)(data)) & 0x0000000000000003ull) << 62));
}
static inline enum psif_dma_vt_key_states get_psif_key__rkey_state(volatile struct psif_key *ptr)
{
	/* group=0 shift=62 bits=2 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_dma_vt_key_states)((be64toh(pte[0]) >> 62) & 0x0000000000000003ull));
}
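
/*
 * Hedged usage sketch (helper name is hypothetical): when initializing or
 * tearing down a DMA validation entry, both key states are typically driven
 * to the same value, so the target state is taken as a parameter rather
 * than assuming any particular enum member name.
 */
static inline void example_set_key_states(volatile struct psif_key *key,
	enum psif_dma_vt_key_states state)
{
	/* Both fields live in quadword 0 of the entry. */
	set_psif_key__lkey_state(key, state);
	set_psif_key__rkey_state(key, state);
}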
       
/* Length of memory region this validation entry is associated with. */
static inline void set_psif_key__length(volatile struct psif_key *ptr, u64 data)
{
	/* group=1 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[1] = htobe64((__be64)data);
}
static inline u64 get_psif_key__length(volatile struct psif_key *ptr)
{
	/* group=1 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u64)be64toh(pte[1]));
}

static inline void set_psif_key__mmu_context(volatile struct psif_key *ptr, u64 data)
{
	/* group=2 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((__be64)data);
}
static inline u64 get_psif_key__mmu_context(volatile struct psif_key *ptr)
{
	/* group=2 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u64)be64toh(pte[2]));
}

static inline void set_psif_key__base_addr(volatile struct psif_key *ptr, u64 data)
{
	/* group=3 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[3] = htobe64((__be64)data);
}
static inline u64 get_psif_key__base_addr(volatile struct psif_key *ptr)
{
	/* group=3 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u64)be64toh(pte[3]));
}
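
/*
 * Sketch only (hypothetical helper): a validation entry describes a memory
 * region by base address and length, interpreted under the MMU context in
 * quadword 2. Writing the three region-describing quadwords together keeps
 * the entry self-consistent.
 */
static inline void example_describe_region(volatile struct psif_key *key,
	u64 base_addr, u64 length, u64 mmu_context)
{
	set_psif_key__length(key, length);           /* quadword 1 */
	set_psif_key__mmu_context(key, mmu_context); /* quadword 2 */
	set_psif_key__base_addr(key, base_addr);     /* quadword 3 */
}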
       
/* sequence number for sanity checking */
static inline void set_psif_eq_entry__seq_num(volatile struct psif_eq_entry *ptr, u32 data)
{
	/* group=7 shift=0 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[7] = htobe64((be64toh(pte[7]) & 0xffffffff00000000ull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 0));
}
static inline u32 get_psif_eq_entry__seq_num(volatile struct psif_eq_entry *ptr)
{
	/* group=7 shift=0 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[7]) >> 0) & 0x00000000ffffffffull));
}
       
/* enum psif_epsc_csr_opcode from request */
static inline void set_psif_epsc_csr_rsp__opcode(volatile struct psif_epsc_csr_rsp *ptr, enum psif_epsc_csr_opcode data)
{
	/* group=0 shift=48 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xff00ffffffffffffull) |
		((((u64)(data)) & 0x00000000000000ffull) << 48));
}
static inline enum psif_epsc_csr_opcode get_psif_epsc_csr_rsp__opcode(volatile struct psif_epsc_csr_rsp *ptr)
{
	/* group=0 shift=48 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_epsc_csr_opcode)((be64toh(pte[0]) >> 48) & 0x00000000000000ffull));
}

/* Sequence number from request */
static inline void set_psif_epsc_csr_rsp__seq_num(volatile struct psif_epsc_csr_rsp *ptr, u64 data)
{
	/* group=3 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[3] = htobe64((__be64)data);
}
static inline u64 get_psif_epsc_csr_rsp__seq_num(volatile struct psif_epsc_csr_rsp *ptr)
{
	/* group=3 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u64)be64toh(pte[3]));
}

/* Sequence number - included in response */
static inline void set_psif_epsc_csr_req__seq_num(volatile struct psif_epsc_csr_req *ptr, u16 data)
{
	/* group=0 shift=32 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffff0000ffffffffull) |
		((((u64)(data)) & 0x000000000000ffffull) << 32));
}
static inline u16 get_psif_epsc_csr_req__seq_num(volatile struct psif_epsc_csr_req *ptr)
{
	/* group=0 shift=32 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[0]) >> 32) & 0x000000000000ffffull));
}

static inline void set_psif_epsc_csr_req__opcode(volatile struct psif_epsc_csr_req *ptr, enum psif_epsc_csr_opcode data)
{
	/* group=0 shift=56 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0x00ffffffffffffffull) |
		((((u64)(data)) & 0x00000000000000ffull) << 56));
}
static inline enum psif_epsc_csr_opcode get_psif_epsc_csr_req__opcode(volatile struct psif_epsc_csr_req *ptr)
{
	/* group=0 shift=56 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_epsc_csr_opcode)((be64toh(pte[0]) >> 56) & 0x00000000000000ffull));
}
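
/*
 * Hedged sketch of the request/response pairing implied by the comments
 * above: software stamps each request with a sequence number, and a
 * response can be matched back by comparing both the echoed opcode and the
 * echoed sequence number. The helper name is hypothetical.
 */
static inline int example_rsp_matches_req(volatile struct psif_epsc_csr_req *req,
	volatile struct psif_epsc_csr_rsp *rsp)
{
	return get_psif_epsc_csr_rsp__opcode(rsp) ==
			get_psif_epsc_csr_req__opcode(req) &&
		get_psif_epsc_csr_rsp__seq_num(rsp) ==
			(u64)get_psif_epsc_csr_req__seq_num(req);
}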
       
/* Index to completion elements added by SW. */
static inline void set_psif_cq_sw__head_indx(volatile struct psif_cq_sw *ptr, u32 data)
{
	/* group=0 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0x00000000ffffffffull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 32));
}
static inline u32 get_psif_cq_sw__head_indx(volatile struct psif_cq_sw *ptr)
{
	/* group=0 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[0]) >> 32) & 0x00000000ffffffffull));
}
       
/*
 * EPS-A core number that completions are forwarded to when the proxy_en
 * bit is set.
 */
static inline void set_psif_cq_hw__eps_core(volatile struct psif_cq_hw *ptr, enum psif_eps_a_core data)
{
	/* group=0 shift=52 bits=2 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffcfffffffffffffull) |
		((((u64)(data)) & 0x0000000000000003ull) << 52));
}
static inline enum psif_eps_a_core get_psif_cq_hw__eps_core(volatile struct psif_cq_hw *ptr)
{
	/* group=0 shift=52 bits=2 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_eps_a_core)((be64toh(pte[0]) >> 52) & 0x0000000000000003ull));
}

/*
 * If set, this completion queue is proxy enabled and should send completions
 * to the EPS core indicated by the eps_core field.
 */
static inline void set_psif_cq_hw__proxy_en(volatile struct psif_cq_hw *ptr, u8 data)
{
	/* group=0 shift=54 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xffbfffffffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 54));
}
static inline u8 get_psif_cq_hw__proxy_en(volatile struct psif_cq_hw *ptr)
{
	/* group=0 shift=54 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[0]) >> 54) & 0x0000000000000001ull));
}
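
/*
 * Illustrative sketch (hypothetical helper): eps_core is only meaningful
 * when proxy_en is set, so the two fields are naturally written as a pair,
 * selecting the target core before enabling proxying.
 */
static inline void example_enable_cq_proxy(volatile struct psif_cq_hw *cq,
	enum psif_eps_a_core core)
{
	set_psif_cq_hw__eps_core(cq, core);
	set_psif_cq_hw__proxy_en(cq, 1);
}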
       
/* The descriptor is valid. */
static inline void set_psif_cq_hw__valid(volatile struct psif_cq_hw *ptr, u8 data)
{
	/* group=0 shift=60 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xefffffffffffffffull) |
		((((u64)(data)) & 0x0000000000000001ull) << 60));
}
static inline u8 get_psif_cq_hw__valid(volatile struct psif_cq_hw *ptr)
{
	/* group=0 shift=60 bits=1 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[0]) >> 60) & 0x0000000000000001ull));
}
       
/*
 * VA or PA of the base of the completion queue. If PA, the MMU context
 * above will be a bypass context. Updated by software. The head and tail
 * pointer addresses can be calculated as: Address = base_ptr +
 * (head * ($bits(completion_entry_t)/8)). The head and tail pointers use
 * the same MMU context as the base, so all need to be VAs from one address
 * space, or all need to be PAs. In typical use, VAs are used to allow
 * direct user access to the head and tail pointers.
 */
static inline void set_psif_cq_hw__base_addr(volatile struct psif_cq_hw *ptr, u64 data)
{
	/* group=2 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((__be64)data);
}
static inline u64 get_psif_cq_hw__base_addr(volatile struct psif_cq_hw *ptr)
{
	/* group=2 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u64)be64toh(pte[2]));
}
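
/*
 * Worked example of the address calculation described above, assuming one
 * completion entry occupies sizeof(struct psif_cq_entry) bytes (i.e.
 * $bits(completion_entry_t)/8) and that the struct is a complete type in
 * this translation unit. The helper name is hypothetical.
 */
static inline u64 example_cq_entry_addr(volatile struct psif_cq_hw *cq, u32 index)
{
	/* Address = base_ptr + (index * entry size in bytes). */
	return get_psif_cq_hw__base_addr(cq) +
		(u64)index * sizeof(struct psif_cq_entry);
}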
       
/* Index to completion elements to be consumed by HW. */
static inline void set_psif_cq_hw__tail_indx(volatile struct psif_cq_hw *ptr, u32 data)
{
	/* group=3 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[3] = htobe64((be64toh(pte[3]) & 0x00000000ffffffffull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 32));
}
static inline u32 get_psif_cq_hw__tail_indx(volatile struct psif_cq_hw *ptr)
{
	/* group=3 shift=32 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[3]) >> 32) & 0x00000000ffffffffull));
}
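
/*
 * Sketch only, under the semantics stated in the two index comments above
 * (head advanced by SW, tail consumed by HW): the wrapped difference of the
 * two 32-bit indices gives the number of outstanding elements; unsigned
 * arithmetic handles index wrap-around.
 */
static inline u32 example_cq_fill_level(volatile struct psif_cq_sw *sw,
	volatile struct psif_cq_hw *hw)
{
	return get_psif_cq_sw__head_indx(sw) - get_psif_cq_hw__tail_indx(hw);
}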
       
/*
 * Work queue completion ID. For receive completions this is the entry
 * number in the receive queue (the receive queue descriptor index). For
 * send completions this is the sq_sequence number.
 */
static inline void set_psif_cq_entry__wc_id(volatile struct psif_cq_entry *ptr, u64 data)
{
	/* group=0 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((__be64)data);
}
static inline u64 get_psif_cq_entry__wc_id(volatile struct psif_cq_entry *ptr)
{
	/* group=0 shift=0 bits=64 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u64)be64toh(pte[0]));
}

static inline void set_psif_cq_entry__qp(volatile struct psif_cq_entry *ptr, u32 data)
{
	/* group=1 shift=0 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[1] = htobe64((be64toh(pte[1]) & 0xffffffffff000000ull) |
		((((u64)(data)) & 0x0000000000ffffffull) << 0));
}
static inline u32 get_psif_cq_entry__qp(volatile struct psif_cq_entry *ptr)
{
	/* group=1 shift=0 bits=24 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[1]) >> 0) & 0x0000000000ffffffull));
}
       
static inline void set_psif_cq_entry__opcode(volatile struct psif_cq_entry *ptr, enum psif_wc_opcode data)
{
	/* group=1 shift=24 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[1] = htobe64((be64toh(pte[1]) & 0xffffffff00ffffffull) |
		((((u64)(data)) & 0x00000000000000ffull) << 24));
}
static inline enum psif_wc_opcode get_psif_cq_entry__opcode(volatile struct psif_cq_entry *ptr)
{
	/* group=1 shift=24 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_wc_opcode)((be64toh(pte[1]) >> 24) & 0x00000000000000ffull));
}

static inline void set_psif_cq_entry__status(volatile struct psif_cq_entry *ptr, enum psif_wc_status data)
{
	/* group=2 shift=24 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xffffffff00ffffffull) |
		((((u64)(data)) & 0x00000000000000ffull) << 24));
}
static inline enum psif_wc_status get_psif_cq_entry__status(volatile struct psif_cq_entry *ptr)
{
	/* group=2 shift=24 bits=8 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((enum psif_wc_status)((be64toh(pte[2]) >> 24) & 0x00000000000000ffull));
}
       
/* sequence number for sanity checking */
static inline void set_psif_cq_entry__seq_num(volatile struct psif_cq_entry *ptr, u32 data)
{
	/* group=7 shift=0 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[7] = htobe64((be64toh(pte[7]) & 0xffffffff00000000ull) |
		((((u64)(data)) & 0x00000000ffffffffull) << 0));
}
static inline u32 get_psif_cq_entry__seq_num(volatile struct psif_cq_entry *ptr)
{
	/* group=7 shift=0 bits=32 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u32)((be64toh(pte[7]) >> 0) & 0x00000000ffffffffull));
}
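
/*
 * Hedged sketch of the sanity check named in the comment above: software
 * tracks the sequence number it expects next and accepts a completion
 * entry only when the hardware-written seq_num matches it. The helper
 * name is hypothetical.
 */
static inline int example_cq_entry_seq_ok(volatile struct psif_cq_entry *cqe,
	u32 expected_seq)
{
	return get_psif_cq_entry__seq_num(cqe) == expected_seq;
}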
       
static inline void set_psif_ah__remote_lid(volatile struct psif_ah *ptr, u16 data)
{
	/* group=2 shift=0 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xffffffffffff0000ull) |
		((((u64)(data)) & 0x000000000000ffffull) << 0));
}
static inline u16 get_psif_ah__remote_lid(volatile struct psif_ah *ptr)
{
	/* group=2 shift=0 bits=16 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u16)((be64toh(pte[2]) >> 0) & 0x000000000000ffffull));
}

static inline void set_psif_ah__sl(volatile struct psif_ah *ptr, u8 data)
{
	/* group=2 shift=20 bits=4 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[2] = htobe64((be64toh(pte[2]) & 0xffffffffff0fffffull) |
		((((u64)(data)) & 0x000000000000000full) << 20));
}
static inline u8 get_psif_ah__sl(volatile struct psif_ah *ptr)
{
	/* group=2 shift=20 bits=4 */
	volatile __be64 *const pte = (__be64 *)ptr;
	return((u8)((be64toh(pte[2]) >> 20) & 0x000000000000000full));
}
       
static inline void set_psif_csr_mmu_config__ta_upper_twelve(volatile void *ptr, u16 data)
{
	/* group=0 shift=32 bits=12 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xfffff000ffffffffull) |
		((((u64)(data)) & 0x0000000000000fffull) << 32));
}
static inline void set_psif_csr_mmu_config__pa_upper_twelve(volatile void *ptr, u16 data)
{
	/* group=0 shift=48 bits=12 */
	volatile __be64 *const pte = (__be64 *)ptr;
	pte[0] = htobe64((be64toh(pte[0]) & 0xf000ffffffffffffull) |
		((((u64)(data)) & 0x0000000000000fffull) << 48));
}
       
/* Compile-time sanity check: the host byte order must have been determined. */
#if defined(HOST_LITTLE_ENDIAN)
#elif defined(HOST_BIG_ENDIAN)
#else
#error "Could not determine byte order in psif_hw_setget.h !?"
#endif


#ifdef __cplusplus
}
#endif


#endif	/* _PSIF_HW_SETGET_H */