components/open-fabrics/libmlx4/patches/base.patch
changeset 817 f45ca7242301
parent 715 eed3ed08f692
child 1196 cba083182ade
+#endif
+
 #define MLX4_UVERBS_MIN_ABI_VERSION	2
 #define MLX4_UVERBS_MAX_ABI_VERSION	3
 
@@ -49,6 +53,12 @@
 	struct ibv_alloc_pd_resp	ibv_resp;
 	__u32				pdn;
 	__u32				reserved;
+};
+
+struct mlx4_share_pd_resp {
+	struct ibv_share_pd_resp	ibv_resp;
+	__u32				pdn;
+	__u32				reserved;
 };
 
 struct mlx4_create_cq {
diff -r -u /tmp/839450/libmlx4-1.0.1/src/verbs.c libmlx4-1.0.1/src/verbs.c
--- /tmp/839450/libmlx4-1.0.1/src/verbs.c	Thu Mar 10 04:48:34 2011
+++ libmlx4-1.0.1/src/verbs.c	Fri Mar 11 14:40:18 2011
@@ -56,6 +56,14 @@
 	if (ret)
+	mlnx_umap_pd_data_out_t   *mdd;
+#endif
 
 	pd = malloc(sizeof *pd);
 	if (!pd)
@@ -90,11 +101,67 @@
 		return NULL;
 	}
 
+#if defined(__SVR4) && defined(__sun)
+	/*
 	pd->pdn = resp.pdn;
+#endif
 
 	return &pd->ibv_pd;
 }
 
+struct ibv_shpd *mlx4_alloc_shpd(struct ibv_pd *pd, uint64_t share_key, struct ibv_shpd *shpd)
+{
+	struct ibv_alloc_shpd cmd;
+	struct ibv_alloc_shpd_resp resp;
+
+	if (ibv_cmd_alloc_shpd(pd->context, pd, share_key, shpd, &cmd, sizeof cmd,
+			     &resp, sizeof resp)) {
+		return NULL;
+	}
+
+	return shpd;
+}
+
+
+struct ibv_pd *mlx4_share_pd(struct ibv_context *context, struct ibv_shpd *shpd, uint64_t share_key)
+{
+	struct ibv_share_pd       cmd;
+	struct mlx4_share_pd_resp resp;
+	struct mlx4_pd		 *pd;
+#if defined(__SVR4) && defined(__sun)
+	mlnx_umap_pd_data_out_t   *mdd;
+#endif
+
+	pd = malloc(sizeof *pd);
+	if (!pd)
+		return NULL;
+
+	if (ibv_cmd_share_pd(context, shpd, share_key, &pd->ibv_pd, &cmd, sizeof cmd,
+			     &resp.ibv_resp, sizeof resp)) {
+		free(pd);
+		return NULL;
+	}
+
+#if defined(__SVR4) && defined(__sun)
+	/*
+	 * kernel driver passes back the PD table index as opaque data.  This
+	 * is required for specifying the PD in user space address vectors.
+	 */
+	mdd     = (mlnx_umap_pd_data_out_t *) &resp.ibv_resp.drv_out;
+	pd->pdn = mdd->mpd_pdnum;
+#else
+	pd->pdn = resp.pdn;
+#endif
+
+	return &pd->ibv_pd;
+}
+
 int mlx4_free_pd(struct ibv_pd *pd)
 {
 	int ret;
@@ -138,6 +205,37 @@
 	return mr;
 }
 
+struct ibv_mr *mlx4_reg_mr_relaxed(struct ibv_pd *pd, void *addr, size_t length,
+			   int access)
+{
+	struct ibv_mr *mr;
+	struct ibv_reg_mr cmd;
+	int ret;
+
+	mr = malloc(sizeof *mr);
+	if (!mr)
+		return NULL;
+
+#ifdef IBV_CMD_REG_MR_RELAXED_HAS_RESP_PARAMS
+	{
+		struct ibv_reg_mr_resp resp;
+
+		ret = ibv_cmd_reg_mr_relaxed(pd, addr, length, (uintptr_t) addr,
+				     access, mr, &cmd, sizeof cmd,
+				     &resp, sizeof resp);
+	}
+#else
+	ret = ibv_cmd_reg_mr_relaxed(pd, addr, length, (uintptr_t) addr, access, mr,
+			     &cmd, sizeof cmd);
+#endif
+	if (ret) {
+		free(mr);
+		return NULL;
+	}
+
+	return mr;
+}
+
 int mlx4_dereg_mr(struct ibv_mr *mr)
 {
 	int ret;
@@ -150,6 +248,29 @@
 	return 0;
 }
 
+int mlx4_dereg_mr_relaxed(struct ibv_mr *mr)
+{
+	int ret;
+
+	ret = ibv_cmd_dereg_mr_relaxed(mr);
+	if (ret)
+		return ret;
+
+	free(mr);
+	return 0;
+}
+
+int mlx4_flush_relaxed_mr(struct ibv_pd *pd)
+{
+	int ret;
+
+	ret = ibv_cmd_flush_relaxed_mr(pd);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int align_queue_size(int req)
 {
 	int nent;
@@ -168,6 +289,10 @@
 	struct mlx4_create_cq_resp resp;
 	struct mlx4_cq		  *cq;
 	int			   ret;
+#if defined(__SVR4) && defined(__sun)
+	void                      *cqbuf;
+	mlnx_umap_cq_data_out_t   *mdd;
+#endif
 
 	/* Sanity check CQ size before proceeding */
 	if (cqe > 0x3fffff)
@@ -184,7 +309,8 @@
 
 	cqe = align_queue_size(cqe + 1);
 
-	if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cqe))
+#if !(defined(__SVR4) && defined(__sun))
+	if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cqe))
 		goto err;
 
 	cq->set_ci_db  = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);
@@ -198,15 +324,78 @@
 
 	cmd.buf_addr = (uintptr_t) cq->buf.buf;
 	cmd.db_addr  = (uintptr_t) cq->set_ci_db;
+#else
+	cq->buf.buf    = NULL;
+	*cq->set_ci_db = 0;
+#endif
 	return &cq->ibv_cq;
 
 err_db:
@@ -215,6 +404,21 @@
 err_buf:
 	mlx4_free_buf(&cq->buf);
 
+#if defined(__SVR4) && defined(__sun)
+err_destroy:
+	ibv_cmd_destroy_cq(&cq->ibv_cq);
+#endif
 err:
 	free(cq);
 
@@ -225,12 +429,16 @@
 {
 	struct mlx4_cq *cq = to_mcq(ibcq);
 	struct mlx4_resize_cq cmd;
+	struct ibv_resize_cq_resp resp;
 	struct mlx4_buf buf;
-		return EINVAL;
+ 		return EINVAL;
 
 	pthread_spin_lock(&cq->lock);
 
@@ -247,32 +455,76 @@
 		goto out;
 	}
 
+#if !(defined(__SVR4) && defined(__sun))
 	ret = mlx4_alloc_cq_buf(to_mdev(ibcq->context->device), &buf, cqe);
+	cq->cqn        = mdd->mcq_cqnum;
+#endif
 out:
 	pthread_spin_unlock(&cq->lock);
 	return ret;
@@ -287,6 +539,9 @@
 		return ret;
 
 	mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);
+#if defined(__SVR4) && defined(__sun)
+	mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->arm_db);
+#endif
 	mlx4_free_buf(&to_mcq(cq)->buf);
 	free(to_mcq(cq));
 
@@ -300,6 +555,10 @@
 	struct mlx4_create_srq_resp resp;
 	struct mlx4_srq		   *srq;
 	int			    ret;
+#if defined(__SVR4) && defined(__sun)
+	mlnx_umap_srq_data_out_t   *mdd;
+	void                       *srqbuf;
+#endif
 
 	/* Sanity check SRQ size before proceeding */
 	if (attr->attr.max_wr > 1 << 16 || attr->attr.max_sge > 64)
@@ -312,6 +571,7 @@
 	if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
 		goto err;
 
+#if !(defined(__SVR4) && defined(__sun))
 	srq->max     = align_queue_size(attr->attr.max_wr + 1);
 	srq->max_gs  = attr->attr.max_sge;
 	srq->counter = 0;
@@ -324,7 +584,23 @@
 		goto err_free;
 
 	*srq->db = 0;
+#else
+	/*
+	attr->attr.max_wr += 1;
+#endif
 	cmd.buf_addr = (uintptr_t) srq->buf.buf;
 	cmd.db_addr  = (uintptr_t) srq->db;
 
@@ -331,19 +607,97 @@
 	ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr,
 				 &cmd.ibv_cmd, sizeof cmd,
 				 &resp.ibv_resp, sizeof resp);
+#if defined(__SVR4) && defined(__sun)
+	if (ret) {
 	mlx4_free_buf(&srq->buf);
+#endif
 
 err:
 	free(srq);
@@ -357,7 +711,16 @@
 {
 	struct ibv_modify_srq cmd;
 
+#if !(defined(__SVR4) && defined(__sun))
 	return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
+	return (ret);
+#endif
 }
 
 int mlx4_query_srq(struct ibv_srq *srq,
@@ -365,7 +728,17 @@
 {
 	struct ibv_query_srq cmd;
 
+#if !(defined(__SVR4) && defined(__sun))
 	return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
+	return (ret);
+#endif
 }
 
 int mlx4_destroy_srq(struct ibv_srq *ibsrq)
@@ -447,6 +820,10 @@
 	struct mlx4_qp		 *qp;
 	int			  ret;
 	struct mlx4_context	 *context = to_mctx(pd->context);
+#if defined(__SVR4) && defined(__sun)
+	mlnx_umap_qp_data_out_t	*mdd;
+	void			*qpbuf;
+#endif
 
 
 	/* Sanity check QP size before proceeding */
@@ -457,6 +834,7 @@
 	if (!qp)
 		return NULL;
 
+#if !(defined(__SVR4) && defined(__sun))
 	mlx4_calc_sq_wqe_size(&attr->cap, attr->qp_type, qp);
 
 	/*
@@ -466,6 +844,7 @@
 	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
 	qp->sq.wqe_cnt = align_queue_size(attr->cap.max_send_wr + qp->sq_spare_wqes);
 	qp->rq.wqe_cnt = align_queue_size(attr->cap.max_recv_wr);
+#endif
 
 	if (attr->srq || attr->qp_type == IBV_QPT_XRC)
 		attr->cap.max_recv_wr = qp->rq.wqe_cnt = 0;
@@ -476,6 +855,22 @@
 			attr->cap.max_recv_wr = 1;
 	}
 
+#if defined(__SVR4) && defined(__sun)
+	if (pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE) ||
+	memset(&cmd, 0, sizeof(cmd));
+#else
 	if (mlx4_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp))
 		goto err;
 
@@ -505,17 +900,84 @@
 		; /* nothing */
 	cmd.sq_no_prefetch = 0;	/* OK for ABI 2: just a reserved field */
 	memset(cmd.reserved, 0, sizeof cmd.reserved);
+#endif
 
 		goto err_destroy;
+#endif
 	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
 
 	qp->rq.wqe_cnt = attr->cap.max_recv_wr;
@@ -536,9 +998,38 @@
 
 	return &qp->ibv_qp;
 
+#if defined(__SVR4) && defined(__sun)
+err_rq_db:
+	ibv_cmd_destroy_qp(&qp->ibv_qp);
+
 err_rq_db:
 	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
 	if (!attr->srq && attr->qp_type != IBV_QPT_XRC)
@@ -552,6 +1043,7 @@
 
 err:
 	free(qp);
+#endif
 
 	return NULL;
 }
@@ -745,6 +1237,13 @@
 				    struct ibv_cq *xrc_cq,
 				    struct ibv_srq_init_attr *attr)
 {
+#if defined(__SVR4) && defined(__sun)
+	/*
+	return NULL;
+#else
 	struct mlx4_create_xrc_srq  cmd;
 	struct mlx4_create_srq_resp resp;
 	struct mlx4_srq		   *srq;
@@ -807,6 +1306,7 @@
 	free(srq);
 
 	return NULL;
+#endif
 }
 uint32_t *mlx4_alloc_db(struct mlx4_context *context, enum mlx4_db_type type);
+#endif
 void mlx4_free_db(struct mlx4_context *context, enum mlx4_db_type type, uint32_t *db);
 
 int mlx4_query_device(struct ibv_context *context,
@@ -360,11 +369,17 @@
 		     struct ibv_port_attr *attr);
 
 struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context);
+struct ibv_shpd *mlx4_alloc_shpd(struct ibv_pd *pd, uint64_t share_key, struct ibv_shpd *shpd);
+struct ibv_pd *mlx4_share_pd(struct ibv_context *context, struct ibv_shpd *shpd, uint64_t share_key);
 int mlx4_free_pd(struct ibv_pd *pd);
 
 struct ibv_mr *mlx4_reg_mr(struct ibv_pd *pd, void *addr,
 			    size_t length, int access);
+struct ibv_mr *mlx4_reg_mr_relaxed(struct ibv_pd *pd, void *addr,
+			    size_t length, int access);
 int mlx4_dereg_mr(struct ibv_mr *mr);
+int mlx4_dereg_mr_relaxed(struct ibv_mr *mr);
+int mlx4_flush_relaxed_mr(struct ibv_pd *pd);
 
 struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
 			       struct ibv_comp_channel *channel,
@@ -388,8 +403,13 @@
 int mlx4_query_srq(struct ibv_srq *srq,
 			   struct ibv_srq_attr *attr);
 int mlx4_destroy_srq(struct ibv_srq *srq);
+#if defined(__SVR4) && defined(__sun)
+int mlx4_set_srq_buf(struct ibv_pd *pd, struct mlx4_srq *srq,
			struct mlx4_srq *srq);
+#endif
 void mlx4_free_srq_wqe(struct mlx4_srq *srq, int ind);
 int mlx4_post_srq_recv(struct ibv_srq *ibsrq,
 		       struct ibv_recv_wr *wr,
@@ -415,8 +435,14 @@
 void mlx4_calc_sq_wqe_size(struct ibv_qp_cap *cap, enum ibv_qp_type type,
 			   struct mlx4_qp *qp);
 int num_inline_segs(int data, enum ibv_qp_type type);
+#if defined(__SVR4) && defined(__sun)
+int mlx4_set_qp_buf(struct ibv_pd *pd, struct mlx4_qp *qp,
 	munmap(buf->buf, buf->length);
 }
diff -r -u /tmp/839450/libmlx4-1.0.1/src/mlx4.c libmlx4-1.0.1/src/mlx4.c
--- /tmp/839450/libmlx4-1.0.1/src/mlx4.c	Thu Mar 10 04:48:34 2011
+++ libmlx4-1.0.1/src/mlx4.c	Fri Mar 11 14:05:26 2011
@@ -112,8 +112,13 @@
 	.query_port    = mlx4_query_port,
 	.alloc_pd      = mlx4_alloc_pd,
 	.dealloc_pd    = mlx4_free_pd,
+	.alloc_shpd    = mlx4_alloc_shpd,
+	.share_pd      = mlx4_share_pd,
 	.reg_mr	       = mlx4_reg_mr,
+	.reg_mr_relaxed	       = mlx4_reg_mr_relaxed,
 	.dereg_mr      = mlx4_dereg_mr,
+	.dereg_mr_relaxed      = mlx4_dereg_mr_relaxed,
+	.flush_relaxed_mr      = mlx4_flush_relaxed_mr,
 	.create_cq     = mlx4_create_cq,
 	.poll_cq       = mlx4_poll_cq,
 	.req_notify_cq = mlx4_arm_cq,
@@ -144,6 +149,11 @@
 	struct mlx4_alloc_ucontext_resp resp;
 	int				i;
 	struct ibv_device_attr		dev_attrs;
+#if defined(__SVR4) && defined(__sun)
+	pid_t                           cur_pid;
+	uint32_t                        temp_qp_num;
+#endif
 
 	context = calloc(1, sizeof *context);
 	if (!context)
@@ -150,11 +160,30 @@
 		return NULL;
 
 	context->ibv_ctx.cmd_fd = cmd_fd;
+#if defined(__SVR4) && defined(__sun)
+	context->ibv_ctx.device = ibdev;
+	resp.bf_reg_size = 512;
+#endif
 	context->num_qps	= resp.qp_tab_size;
 	context->qp_table_shift = ffs(context->num_qps) - 1 - MLX4_QP_TABLE_BITS;
 	context->qp_table_mask	= (1 << context->qp_table_shift) - 1;
@@ -172,20 +201,44 @@
 	for (i = 0; i < MLX4_XRC_SRQ_TABLE_SIZE; ++i)
 		context->xrc_srq_table[i].refcnt = 0;
 
+#if defined(__SVR4) && defined(__sun)
+	context->db_page_list = NULL;
 					to_mdev(ibdev)->page_size);
+#endif
 		if (context->bf_page == MAP_FAILED) {
 			fprintf(stderr, PFX "Warning: BlueFlame available, "
 				"but failed to mmap() BlueFlame page.\n");
@@ -214,6 +267,7 @@
 	context->max_qp_wr = dev_attrs.max_qp_wr;
 	context->max_sge = dev_attrs.max_sge;
 	context->max_cqe = dev_attrs.max_cqe;
+#ifdef HAVE_IBV_XRC_OPS
 	if (!(dev_attrs.device_cap_flags & IBV_DEVICE_XRC)) {
 		fprintf(stderr, PFX "There is a mismatch between "
 		        "the kernel and the userspace libraries: "
@@ -220,6 +274,7 @@
 			"Kernel does not support XRC. Exiting.\n");
 		goto query_free;
 	}
+#endif
 
 	return &context->ibv_ctx;
 
@@ -240,6 +295,7 @@
 	munmap(context->uar, to_mdev(ibctx->device)->page_size);
 	if (context->bf_page)
 		munmap(context->bf_page, to_mdev(ibctx->device)->page_size);
+
 	free(context);
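
The mlx4.c ops table above also wires up the relaxed memory-registration entry points added in verbs.c (.reg_mr_relaxed, .dereg_mr_relaxed, .flush_relaxed_mr). A hedged lifecycle sketch follows; ibv_reg_mr_relaxed(), ibv_dereg_mr_relaxed(), and ibv_flush_relaxed_mr() are assumed libibverbs wrappers for those ops, and the idea that relaxed deregistration may be deferred until the PD is flushed is an interpretation of the flush op, not something the patch states.

/* Hypothetical sketch, not part of the patch: relaxed MR lifecycle. */
#include <infiniband/verbs.h>
#include <stdlib.h>

static int relaxed_mr_roundtrip(struct ibv_pd *pd, size_t len)
{
	void *buf = malloc(len);
	struct ibv_mr *mr;

	if (!buf)
		return -1;

	/* Register via the relaxed path added by this patch. */
	mr = ibv_reg_mr_relaxed(pd, buf, len, IBV_ACCESS_LOCAL_WRITE); /* assumed wrapper */
	if (!mr) {
		free(buf);
		return -1;
	}

	/* ... post work requests that reference mr->lkey ... */

	/* Relaxed deregistration: teardown may be deferred by the driver. */
	if (ibv_dereg_mr_relaxed(mr))	/* assumed wrapper */
		return -1;

	/* Force any deferred teardown on this PD before reusing the buffer. */
	if (ibv_flush_relaxed_mr(pd))	/* assumed wrapper */
		return -1;

	free(buf);
	return 0;
}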