7154945 userland FMR and shared PD OFUV libraries support
authorChristophe Juhasz <Chris.Juhasz@Sun.COM>
Wed, 16 May 2012 12:10:05 -0700
changeset 817 f45ca7242301
parent 816 5df727abb287
child 818 1c77b9213f26
7154945 userland FMR and shared PD OFUV libraries support
components/open-fabrics/libibverbs/Makefile
components/open-fabrics/libibverbs/manpages/ibv_alloc_shpd.3
components/open-fabrics/libibverbs/manpages/ibv_reg_mr_relaxed.3
components/open-fabrics/libibverbs/manpages/ibv_share_pd.3
components/open-fabrics/libibverbs/patches/base.patch
components/open-fabrics/libmlx4/patches/base.patch
components/open-fabrics/open-fabrics.p5m
--- a/components/open-fabrics/libibverbs/Makefile	Wed May 16 01:39:44 2012 -0700
+++ b/components/open-fabrics/libibverbs/Makefile	Wed May 16 12:10:05 2012 -0700
@@ -1,4 +1,4 @@
-#
+#
 # CDDL HEADER START
 #
 # The contents of this file are subject to the terms of the
@@ -34,6 +34,12 @@
 include $(WS_TOP)/make-rules/configure.mk
 include ../ofed.mk
 
+MAN3FILES =	ibv_alloc_shpd.3 \
+		ibv_reg_mr_relaxed.3 \
+		ibv_share_pd.3
+
+include $(WS_TOP)/make-rules/shared-targets.mk
+
 # add flags to get at extra bits from other components' source trees
 CPPFLAGS +=	-I$(PWD)/../libmlx4/libmlx4-1.0.1/src
 CPPFLAGS +=	-I$(PWD)/../librdmacm/librdmacm-1.0.14.1/include
@@ -71,7 +77,7 @@
 # common targets
 build:		$(BUILD_32_and_64)
 
-install:	$(INSTALL_32_and_64)
+install:	$(INSTALL_32_and_64) $(PROTOMAN3FILES)
 
 BUILD_PKG_DEPENDENCIES =	$(BUILD_TOOLS)
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/open-fabrics/libibverbs/manpages/ibv_alloc_shpd.3	Wed May 16 12:10:05 2012 -0700
@@ -0,0 +1,37 @@
+.\" -*- nroff -*-
+.\"
+.TH IBV_ALLOC_SHPD 3 2012-02-29 libibverbs "Libibverbs Programmer's Manual"
+.SH "NAME"
+ibv_alloc_shpd \- allocate a unique identifier for sharing a protection domain (PD)
+.SH "SYNOPSIS"
+.nf
+.B #include <infiniband/verbs.h>
+.sp
+.BI "struct ibv_shpd *ibv_alloc_shpd(struct ibv_pd " "*pd" ", uint64_t "
+.BI " " "                               share_key" ", struct ibv_shpd " "*shpd");
+.sp
+.fi
+.SH "DESCRIPTION"
+.B ibv_alloc_shpd()
+allocates a unique identifier required for sharing the PD  
+.I pd\fR with another process.
+.I share_key
+is a 64-bit key which needs to be provided with the
+.B ibv_share_pd()
+call by another process to share the same PD in that other process. The argument 
+.I shpd
+specifies a pointer to a user allocated area where libibverbs can write the unique identifier for the
+.I pd\fR.
+.SH "RETURN VALUE"
+.B ibv_alloc_shpd()
+returns
+.I shpd
+- a pointer to the area where the unique identifier is written - or NULL if the request fails.
+.SH "NOTES"
+.B ibv_alloc_shpd()
+can be called on a particular PD only once.
+.SH "SEE ALSO"
+.BR ibv_share_pd (3)
+.SH "AUTHORS"
+.TP
+Arun Kaimalettu <gotoarunk at gmail dot com>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/open-fabrics/libibverbs/manpages/ibv_reg_mr_relaxed.3	Wed May 16 12:10:05 2012 -0700
@@ -0,0 +1,88 @@
+.\" -*- nroff -*-
+.\"
+.TH IBV_REG_MR_RELAXED 3 2012-02-29 libibverbs "Libibverbs Programmer's Manual"
+.SH "NAME"
+ibv_reg_mr_relaxed, ibv_dereg_mr_relaxed, ibv_flush_relaxed_mr \- register or deregister a memory region (MR) in relaxed mode, flush relaxed MRs
+.SH "SYNOPSIS"
+.nf
+.B #include <infiniband/verbs.h>
+.sp
+.BI "struct ibv_mr *ibv_reg_mr_relaxed(struct ibv_pd " "*pd" ", void " "*addr" ,
+.BI "                                  size_t " "length" ", int " "access" );
+.sp
+.BI "int ibv_dereg_mr_relaxed(struct ibv_mr " "*mr" );
+.sp
+.BI "int ibv_flush_relaxed_mr(struct ibv_pd " "*pd" );
+.fi
+.SH "DESCRIPTION"
+Relaxed MRs are different from normal MRs in the following ways:
+.PP
+.PP
+ *  Registration is done using Fast Memory Registration(FMR) interface provided by the RDMA device.
+.PP
+ *  Access permissions are extended to specified memory area's last page boundary.
+.PP
+ *  There could be a finite time gap between the deregistration call and actual invalidation in the RDMA device for an MR.
+.sp
+.PP
+
+.B ibv_reg_mr_relaxed()
+registers a memory region (MR) associated with the protection domain
+.I pd\fR.
+The MR's starting address is
+.I addr
+and its size is
+.I length\fR.
+The argument
+.I access
+describes the desired memory protection attributes; for details on 
+.I access
+options see description of 
+.B ibv_reg_mr()\fR.
+.PP
+.B ibv_dereg_mr_relaxed()
+deregisters the MR
+.I mr\fR.
+This call marks 
+.I mr
+as ready to be invalidated; however actual invalidation happens later.
+.PP
+.B ibv_flush_relaxed_mr()
+forces all deregistered relaxed MRs under PD
+.I pd
+to be invalidated by the RDMA device.
+.SH "RETURN VALUE"
+.B ibv_reg_mr_relaxed()
+returns a pointer to the registered MR, or NULL if the request fails.
+The local key (\fBL_Key\fR) field
+.B lkey
+is used as the lkey field of struct ibv_sge when posting buffers with
+ibv_post_* verbs, and the remote key (\fBR_Key\fR)
+field
+.B rkey
+is used by remote processes to perform Atomic and RDMA operations.  The remote process places this
+.B rkey
+as the rkey field of struct ibv_send_wr passed to the ibv_post_send function.
+.PP
+.B ibv_dereg_mr_relaxed()
+returns 0 on success, or the value of errno on failure (which indicates the failure reason).
+.PP
+.B ibv_flush_relaxed_mr() 
+returns 0 on success, or the value of errno on failure.
+
+.SH "NOTES"
+.B ibv_reg_mr_relaxed()
+can return the transient error EAGAIN. The user may retry the operation after some time.
+.PP
+The user of relaxed memory regions should take care to avoid reliance on
+immediate deregistration behavior.  Also, because of the page granularity
+of relaxed memory regions, it is often advisable to use page sized
+registrations or to align registered regions to a page boundary.
+.SH "SEE ALSO"
+.BR ibv_alloc_pd (3),
+.BR ibv_post_send (3),
+.BR ibv_post_recv (3),
+.BR ibv_post_srq_recv (3)
+.SH "AUTHORS"
+.TP
+Arun Kaimalettu <gotoarunk at gmail dot com>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/open-fabrics/libibverbs/manpages/ibv_share_pd.3	Wed May 16 12:10:05 2012 -0700
@@ -0,0 +1,52 @@
+.\" -*- nroff -*-
+.\"
+.TH IBV_SHARE_PD 3 2012-02-29 libibverbs "Libibverbs Programmer's Manual"
+.SH "NAME"
+ibv_share_pd \- share a protection domain (PD)
+.SH "SYNOPSIS"
+.nf
+.B #include <infiniband/verbs.h>
+.sp
+.BI "struct ibv_pd *ibv_share_pd(struct ibv_context " "*context" ", "
+.BI "                            struct ibv_shpd " "*shpd" ", uint64_t " "share_key");
+.sp
+.fi
+.SH "DESCRIPTION"
+.B ibv_share_pd()
+shares the protection domain specified by a unique identifier 
+.I shpd
+for the RDMA device context 
+.I context\fR.
+.I share_key
+is the 64-bit key used to generate the unique identifier
+.I shpd\fR.
+.I ibv_pd\fRs created using
+.B ibv_share_pd()
+can be deallocated using
+.B ibv_dealloc_pd()\fR.
+Libibverbs keeps track of each instance of the shared PD and removes the PD from RDMA device when the last instance of the shared PD is deallocated.
+.SH "RETURN VALUE"
+.B ibv_share_pd()
+returns a pointer to the shared pd or NULL if the request fails.
+
+.SH "NOTES"
+Even though the same PD is shared by multiple contexts of an RDMA device or processes, the life span of each resource created in an 'ibv_pd' linked to a context or process is limited by the life span of that instance of 'ibv_pd'. e.g. The life span of an MR 
+.I mr1
+created under ibv_pd
+.I pd1
+(which is an instance of shared PD 
+.I shPD1\fR)
+will end whenever
+.I pd1
+is deallocated, even though underlying 
+.I shPD1
+may continue to live on.
+.PP
+Sharing PD is not supported among 'ibv_context' created for different RDMA devices.
+.SH "SEE ALSO"
+.BR ibv_alloc_shpd (3),
+.BR ibv_dealloc_pd (3)
+
+.SH "AUTHORS"
+.TP
+Arun Kaimalettu <gotoarunk at gmail dot com>
--- a/components/open-fabrics/libibverbs/patches/base.patch	Wed May 16 01:39:44 2012 -0700
+++ b/components/open-fabrics/libibverbs/patches/base.patch	Wed May 16 12:10:05 2012 -0700
@@ -356,6 +356,18 @@
  
  /*
   * Increment this value if any changes that break userspace ABI
[email protected]@ -47,7 +51,10 @@
+ 	IB_USER_VERBS_CMD_ALLOC_PD,
+ 	IB_USER_VERBS_CMD_DEALLOC_PD,
+ 	IB_USER_VERBS_CMD_REG_MR,
+-	IB_USER_VERBS_CMD_DEREG_MR
++	IB_USER_VERBS_CMD_DEREG_MR,
++	IB_USER_VERBS_CMD_REG_MR_RELAXED,
++	IB_USER_VERBS_CMD_DEREG_MR_RELAXED,
++	IB_USER_VERBS_CMD_FLUSH_RELAXED_MR
+ };
+ 
+ /*
 diff -r -u /tmp/846623/libibverbs-1.1.4/src/verbs.c libibverbs-1.1.4/src/verbs.c
 --- /tmp/846623/libibverbs-1.1.4/src/verbs.c	Thu Feb  3 01:53:17 2011
 +++ libibverbs-1.1.4/src/verbs.c	Fri Feb 11 04:02:33 2011
@@ -413,7 +425,90 @@
  }
  default_symver(__ibv_query_pkey, ibv_query_pkey);
  
[email protected]@ -212,6 +231,10 @@
[email protected]@ -148,6 +167,27 @@
+ }
+ default_symver(__ibv_alloc_pd, ibv_alloc_pd);
+ 
++struct ibv_shpd *__ibv_alloc_shpd(struct ibv_pd *pd, uint64_t share_key, struct ibv_shpd *shpd)
++{
++
++	shpd = pd->context->ops.alloc_shpd(pd, share_key, shpd);
++
++	return shpd;
++}
++default_symver(__ibv_alloc_shpd, ibv_alloc_shpd);
++
++struct ibv_pd *__ibv_share_pd(struct ibv_context *context, struct ibv_shpd *shpd, uint64_t share_key)
++{
++	struct ibv_pd *pd;
++
++	pd = context->ops.share_pd(context, shpd, share_key);
++	if (pd)
++		pd->context = context;
++
++	return pd;
++}
++default_symver(__ibv_share_pd, ibv_share_pd);
++
+ int __ibv_dealloc_pd(struct ibv_pd *pd)
+ {
+ 	return pd->context->ops.dealloc_pd(pd);
[email protected]@ -175,6 +215,27 @@
+ }
+ default_symver(__ibv_reg_mr, ibv_reg_mr);
+ 
++struct ibv_mr *__ibv_reg_mr_relaxed(struct ibv_pd *pd, void *addr,
++			    size_t length, int access)
++{
++	struct ibv_mr *mr;
++
++	if (ibv_dontfork_range(addr, length))
++		return NULL;
++
++	mr = pd->context->ops.reg_mr_relaxed(pd, addr, length, access);
++	if (mr) {
++		mr->context = pd->context;
++		mr->pd      = pd;
++		mr->addr    = addr;
++		mr->length  = length;
++	} else
++		ibv_dofork_range(addr, length);
++
++	return mr;
++}
++default_symver(__ibv_reg_mr_relaxed, ibv_reg_mr_relaxed);
++
+ int __ibv_dereg_mr(struct ibv_mr *mr)
+ {
+ 	int ret;
[email protected]@ -189,6 +250,26 @@
+ }
+ default_symver(__ibv_dereg_mr, ibv_dereg_mr);
+ 
++int __ibv_dereg_mr_relaxed(struct ibv_mr *mr)
++{
++	int ret;
++	void *addr	= mr->addr;
++	size_t length	= mr->length;
++
++	ret = mr->context->ops.dereg_mr_relaxed(mr);
++	if (!ret)
++		ibv_dofork_range(addr, length);
++
++	return ret;
++}
++default_symver(__ibv_dereg_mr_relaxed, ibv_dereg_mr_relaxed);
++
++int __ibv_flush_relaxed_mr(struct ibv_pd *pd)
++{
++	return pd->context->ops.flush_relaxed_mr(pd);
++}
++default_symver(__ibv_flush_relaxed_mr, ibv_flush_relaxed_mr);
++
+ static struct ibv_comp_channel *ibv_create_comp_channel_v2(struct ibv_context *context)
+ {
+ 	struct ibv_abi_compat_v2 *t = context->abi_compat;
[email protected]@ -212,6 +293,10 @@
  	struct ibv_comp_channel            *channel;
  	struct ibv_create_comp_channel      cmd;
  	struct ibv_create_comp_channel_resp resp;
@@ -424,7 +519,7 @@
  
  	if (abi_ver <= 2)
  		return ibv_create_comp_channel_v2(context);
[email protected]@ -221,7 +244,23 @@
[email protected]@ -221,7 +306,23 @@
  		return NULL;
  
  	IBV_INIT_CMD_RESP(&cmd, sizeof cmd, CREATE_COMP_CHANNEL, &resp, sizeof resp);
@@ -448,7 +543,7 @@
  		free(channel);
  		return NULL;
  	}
[email protected]@ -228,6 +267,9 @@
[email protected]@ -228,6 +329,9 @@
  
  	VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
  
@@ -517,7 +612,37 @@
 diff -r -u /tmp/846623/libibverbs-1.1.4/src/libibverbs.map libibverbs-1.1.4/src/libibverbs.map
 --- /tmp/846623/libibverbs-1.1.4/src/libibverbs.map	Thu Mar 10 06:58:21 2011
 +++ libibverbs-1.1.4/src/libibverbs.map	Mon Mar 28 13:44:44 2011
[email protected]@ -71,6 +71,7 @@
[email protected]@ -13,9 +13,14 @@
+ 		ibv_query_gid;
+ 		ibv_query_pkey;
+ 		ibv_alloc_pd;
++		ibv_alloc_shpd;
++		ibv_share_pd;
+ 		ibv_dealloc_pd;
+ 		ibv_reg_mr;
++		ibv_reg_mr_relaxed;
+ 		ibv_dereg_mr;
++		ibv_dereg_mr_relaxed;
++		ibv_flush_relaxed_mr;
+ 		ibv_create_comp_channel;
+ 		ibv_destroy_comp_channel;
+ 		ibv_create_cq;
[email protected]@ -41,9 +46,14 @@
+ 		ibv_cmd_query_gid;
+ 		ibv_cmd_query_pkey;
+ 		ibv_cmd_alloc_pd;
++		ibv_cmd_alloc_shpd;
++		ibv_cmd_share_pd;
+ 		ibv_cmd_dealloc_pd;
+ 		ibv_cmd_reg_mr;
++		ibv_cmd_reg_mr_relaxed;
+ 		ibv_cmd_dereg_mr;
++		ibv_cmd_dereg_mr_relaxed;
++		ibv_cmd_flush_relaxed_mr;
+ 		ibv_cmd_create_cq;
+ 		ibv_cmd_poll_cq;
+ 		ibv_cmd_req_notify_cq;
[email protected]@ -71,6 +81,7 @@
  		mult_to_ibv_rate;
  		ibv_get_sysfs_path;
  		ibv_read_sysfs_file;
@@ -641,7 +766,121 @@
  	return 0;
  }
  
[email protected]@ -315,7 +380,19 @@
[email protected]@ -218,6 +283,45 @@
+ 	return 0;
+ }
+ 
++int ibv_cmd_alloc_shpd(struct ibv_context *context, struct ibv_pd *pd,
++         uint64_t share_key, struct ibv_shpd *shpd,
++		     struct ibv_alloc_shpd *cmd, size_t cmd_size,
++		     struct ibv_alloc_shpd_resp *resp, size_t resp_size)
++{
++	IBV_INIT_CMD_RESP(cmd, cmd_size, ALLOC_SHPD, resp, resp_size);
++        cmd->pd_handle = pd->handle;
++	cmd->share_key = share_key;
++
++	if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
++		return errno;
++
++	VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
++
++	shpd->handle  = resp->shpd_handle;
++
++	return 0;
++}
++
++int ibv_cmd_share_pd(struct ibv_context *context, struct ibv_shpd *shpd,
++         uint64_t share_key, struct ibv_pd *pd,
++		     struct ibv_share_pd *cmd, size_t cmd_size,
++		     struct ibv_share_pd_resp *resp, size_t resp_size)
++{
++	IBV_INIT_CMD_RESP(cmd, cmd_size, SHARE_PD, resp, resp_size);
++	cmd->shpd_handle = shpd->handle;
++	cmd->share_key = share_key;
++
++	if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
++		return errno;
++
++	VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
++
++	pd->handle  = resp->pd_handle;
++	pd->context = context;
++
++	return 0;
++}
++
+ int ibv_cmd_dealloc_pd(struct ibv_pd *pd)
+ {
+ 	struct ibv_dealloc_pd cmd;
[email protected]@ -259,6 +363,34 @@
+ 	return 0;
+ }
+ 
++int ibv_cmd_reg_mr_relaxed(struct ibv_pd *pd, void *addr, size_t length,
++		   uint64_t hca_va, int access,
++		   struct ibv_mr *mr, struct ibv_reg_mr *cmd,
++		   size_t cmd_size,
++		   struct ibv_reg_mr_resp *resp, size_t resp_size)
++{
++
++	IBV_INIT_CMD_RESP(cmd, cmd_size, REG_MR_RELAXED, resp, resp_size);
++
++	cmd->start 	  = (uintptr_t) addr;
++	cmd->length 	  = length;
++	cmd->hca_va 	  = hca_va;
++	cmd->pd_handle 	  = pd->handle;
++	cmd->access_flags = access;
++
++	if (write(pd->context->cmd_fd, cmd, cmd_size) != cmd_size)
++		return errno;
++
++	VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
++
++	mr->handle  = resp->mr_handle;
++	mr->lkey    = resp->lkey;
++	mr->rkey    = resp->rkey;
++	mr->context = pd->context;
++
++	return 0;
++}
++
+ int ibv_cmd_dereg_mr(struct ibv_mr *mr)
+ {
+ 	struct ibv_dereg_mr cmd;
[email protected]@ -272,6 +404,32 @@
+ 	return 0;
+ }
+ 
++int ibv_cmd_dereg_mr_relaxed(struct ibv_mr *mr)
++{
++	struct ibv_dereg_mr cmd;
++
++	IBV_INIT_CMD(&cmd, sizeof cmd, DEREG_MR_RELAXED);
++	cmd.mr_handle = mr->handle;
++
++	if (write(mr->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
++		return errno;
++
++	return 0;
++}
++
++int ibv_cmd_flush_relaxed_mr(struct ibv_pd *pd)
++{
++	struct ibv_flush_relaxed_mr cmd;
++
++	IBV_INIT_CMD(&cmd, sizeof cmd, FLUSH_RELAXED_MR);
++	cmd.pd_handle = pd->handle;
++
++	if (write(pd->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
++		return errno;
++
++	return 0;
++}
++
+ static int ibv_cmd_create_cq_v2(struct ibv_context *context, int cqe,
+ 				struct ibv_cq *cq,
+ 				struct ibv_create_cq *new_cmd, size_t new_cmd_size,
[email protected]@ -315,7 +473,19 @@
  	cmd->user_handle   = (uintptr_t) cq;
  	cmd->cqe           = cqe;
  	cmd->comp_vector   = comp_vector;
@@ -661,7 +900,7 @@
  	cmd->reserved      = 0;
  
  	if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
[email protected]@ -637,7 +714,19 @@
[email protected]@ -637,7 +807,19 @@
  	cmd->max_send_sge    = attr->cap.max_send_sge;
  	cmd->max_recv_sge    = attr->cap.max_recv_sge;
  	cmd->max_inline_data = attr->cap.max_inline_data;
@@ -681,7 +920,7 @@
  	cmd->qp_type 	     = attr->qp_type;
  	cmd->is_srq 	     = !!attr->srq;
  	cmd->srq_handle      = attr->qp_type == IBV_QPT_XRC ?
[email protected]@ -1406,4 +1495,3 @@
[email protected]@ -1406,4 +1588,3 @@
  		return errno;
  	return 0;
  }
@@ -769,7 +1008,7 @@
  static void add_device(struct ibv_device *dev,
 diff -r -u /tmp/846623/libibverbs-1.1.4/src/enum_strs.c libibverbs-1.1.4/src/enum_strs.c
 --- /tmp/846623/libibverbs-1.1.4/src/enum_strs.c	Wed Sep 16 04:27:22 2009
-+++ libibverbs-1.1.4/src/enum_strs.c	Fri Mar 16 22:51:54 2012
++++ libibverbs-1.1.4/src/enum_strs.c	Tue Mar 20 16:27:45 2012
 @@ -85,6 +85,7 @@
  		[IBV_EVENT_SRQ_LIMIT_REACHED]	= "SRQ limit reached",
  		[IBV_EVENT_QP_LAST_WQE_REACHED]	= "last WQE reached",
@@ -944,10 +1183,15 @@
  
  /*
   * This file must be kept in sync with the kernel's version of
[email protected]@ -94,6 +98,10 @@
[email protected]@ -94,6 +98,15 @@
  	IB_USER_VERBS_CMD_QUERY_XRC_RCV_QP,
  	IB_USER_VERBS_CMD_REG_XRC_RCV_QP,
  	IB_USER_VERBS_CMD_UNREG_XRC_RCV_QP,
++	IB_USER_VERBS_CMD_REG_MR_RELAXED,
++	IB_USER_VERBS_CMD_DEREG_MR_RELAXED,
++	IB_USER_VERBS_CMD_FLUSH_RELAXED_MR,
++	IB_USER_VERBS_CMD_ALLOC_SHPD,
++	IB_USER_VERBS_CMD_SHARE_PD,
 +#if defined(__SVR4) && defined(__sun)
 +	IB_USER_VERBS_CMD_QUERY_GID,
 +	IB_USER_VERBS_CMD_QUERY_PKEY
@@ -955,7 +1199,7 @@
  };
  
  /*
[email protected]@ -235,6 +243,38 @@
[email protected]@ -235,6 +248,38 @@
  	__u8  reserved[2];
  };
  
@@ -994,7 +1238,7 @@
  struct ibv_alloc_pd {
  	__u32 command;
  	__u16 in_words;
[email protected]@ -243,9 +283,24 @@
[email protected]@ -243,10 +288,57 @@
  	__u64 driver_data[0];
  };
  
@@ -1017,9 +1261,56 @@
  };
 +#endif
  
++struct ibv_alloc_shpd {
++	__u32 command;
++	__u16 in_words;
++	__u16 out_words;
++	__u64 response;
++	__u32 pd_handle;
++	__u32 reserved;
++	__u64 share_key;
++	__u64 driver_data[0];
++};
++
++struct ibv_alloc_shpd_resp {
++	__u32 shpd_handle;
++};
++
++struct ibv_share_pd {
++	__u32 command;
++	__u16 in_words;
++	__u16 out_words;
++	__u64 response;
++	__u32 shpd_handle;
++	__u32 reserved;
++	__u64 share_key;
++	__u64 driver_data[0];
++};
++
++struct ibv_share_pd_resp {
++	__u32 pd_handle;
++        __u32 reserved;
++        ofuv_pd_drv_data_out_t drv_out;
++};
++
  struct ibv_dealloc_pd {
  	__u32 command;
[email protected]@ -304,10 +359,25 @@
+ 	__u16 in_words;
[email protected]@ -280,6 +372,13 @@
+ 	__u32 mr_handle;
+ };
+ 
++struct ibv_flush_relaxed_mr {
++	__u32 command;
++	__u16 in_words;
++	__u16 out_words;
++	__u32 pd_handle;
++};
++
+ struct ibv_create_comp_channel {
+ 	__u32 command;
+ 	__u16 in_words;
[email protected]@ -304,10 +403,25 @@
  	__u64 driver_data[0];
  };
  
@@ -1045,7 +1336,7 @@
  
  struct ibv_kern_wc {
  	__u64  wr_id;
[email protected]@ -363,7 +433,11 @@
[email protected]@ -363,7 +477,11 @@
  struct ibv_resize_cq_resp {
  	__u32 cqe;
  	__u32 reserved;
@@ -1057,7 +1348,7 @@
  };
  
  struct ibv_destroy_cq {
[email protected]@ -460,6 +534,14 @@
[email protected]@ -460,6 +578,14 @@
  	__u64 driver_data[0];
  };
  
@@ -1072,7 +1363,7 @@
  struct ibv_create_qp_resp {
  	__u32 qp_handle;
  	__u32 qpn;
[email protected]@ -469,7 +551,20 @@
[email protected]@ -469,7 +595,20 @@
  	__u32 max_recv_sge;
  	__u32 max_inline_data;
  	__u32 reserved;
@@ -1093,7 +1384,7 @@
  
  struct ibv_qp_dest {
  	__u8  dgid[16];
[email protected]@ -817,12 +912,29 @@
[email protected]@ -817,12 +956,29 @@
  	__u64 driver_data[0];
  };
  
@@ -1123,10 +1414,53 @@
  
  struct ibv_modify_srq {
  	__u32 command;
[email protected]@ -946,6 +1102,11 @@
+ 	IB_USER_VERBS_CMD_QUERY_XRC_RCV_QP_V2 = -1,
+ 	IB_USER_VERBS_CMD_REG_XRC_RCV_QP_V2 = -1,
+ 	IB_USER_VERBS_CMD_UNREG_XRC_RCV_QP_V2 = -1,
++	IB_USER_VERBS_CMD_REG_MR_RELAXED_V2 = -1,
++	IB_USER_VERBS_CMD_DEREG_MR_RELAXED_V2 = -1,
++	IB_USER_VERBS_CMD_FLUSH_RELAXED_MR_V2 = -1,
++  	IB_USER_VERBS_CMD_ALLOC_SHPD_V2 = -1,
++  	IB_USER_VERBS_CMD_SHARE_PD_V2 = -1,
+ };
+ 
+ struct ibv_destroy_cq_v1 {
 diff -r -u /tmp/846623/libibverbs-1.1.4/include/infiniband/driver.h libibverbs-1.1.4/include/infiniband/driver.h
 --- /tmp/846623/libibverbs-1.1.4/include/infiniband/driver.h	Thu Feb  3 01:53:17 2011
 +++ libibverbs-1.1.4/include/infiniband/driver.h	Fri Feb 11 04:02:20 2011
[email protected]@ -164,8 +164,8 @@
[email protected]@ -74,6 +74,14 @@
+ int ibv_cmd_alloc_pd(struct ibv_context *context, struct ibv_pd *pd,
+ 		     struct ibv_alloc_pd *cmd, size_t cmd_size,
+ 		     struct ibv_alloc_pd_resp *resp, size_t resp_size);
++int ibv_cmd_alloc_shpd(struct ibv_context *context, struct ibv_pd *pd,
++		     uint64_t share_key, struct ibv_shpd *shpd,
++		     struct ibv_alloc_shpd *cmd, size_t cmd_size,
++		     struct ibv_alloc_shpd_resp *resp, size_t resp_size);
++int ibv_cmd_share_pd(struct ibv_context *context, struct ibv_shpd *shpd,
++		     uint64_t share_key, struct ibv_pd *pd,
++		     struct ibv_share_pd *cmd, size_t cmd_size,
++		     struct ibv_share_pd_resp *resp, size_t resp_size);
+ int ibv_cmd_dealloc_pd(struct ibv_pd *pd);
+ #define IBV_CMD_REG_MR_HAS_RESP_PARAMS
+ int ibv_cmd_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
[email protected]@ -81,7 +89,15 @@
+ 		   struct ibv_mr *mr, struct ibv_reg_mr *cmd,
+ 		   size_t cmd_size,
+ 		   struct ibv_reg_mr_resp *resp, size_t resp_size);
++#define IBV_CMD_REG_MR_RELAXED_HAS_RESP_PARAMS
++int ibv_cmd_reg_mr_relaxed(struct ibv_pd *pd, void *addr, size_t length,
++		   uint64_t hca_va, int access,
++		   struct ibv_mr *mr, struct ibv_reg_mr *cmd,
++		   size_t cmd_size,
++		   struct ibv_reg_mr_resp *resp, size_t resp_size);
+ int ibv_cmd_dereg_mr(struct ibv_mr *mr);
++int ibv_cmd_dereg_mr_relaxed(struct ibv_mr *mr);
++int ibv_cmd_flush_relaxed_mr(struct ibv_pd *pd);
+ int ibv_cmd_create_cq(struct ibv_context *context, int cqe,
+ 		      struct ibv_comp_channel *channel,
+ 		      int comp_vector, struct ibv_cq *cq,
[email protected]@ -164,8 +180,8 @@
  int ibv_read_sysfs_file(const char *dir, const char *file,
  			char *buf, size_t size);
  
@@ -1160,7 +1494,18 @@
  };
  
  struct ibv_pd {
[email protected]@ -425,6 +429,14 @@
[email protected]@ -305,6 +309,10 @@
+ 	uint32_t		handle;
+ };
+ 
++struct ibv_shpd {
++	uint32_t		handle;
++};
++
+ enum ibv_rereg_mr_flags {
+ 	IBV_REREG_MR_CHANGE_TRANSLATION	= (1 << 0),
+ 	IBV_REREG_MR_CHANGE_PD		= (1 << 1),
[email protected]@ -425,6 +433,14 @@
  	uint32_t		max_inline_data;
  };
  
@@ -1175,7 +1520,20 @@
  struct ibv_qp_init_attr {
  	void		       *qp_context;
  	struct ibv_cq	       *send_cq;
[email protected]@ -749,6 +761,13 @@
[email protected]@ -743,6 +759,12 @@
+ 	int			(*detach_mcast)(struct ibv_qp *qp, const union ibv_gid *gid,
+ 						uint16_t lid);
+ 	void			(*async_event)(struct ibv_async_event *event);
++	struct ibv_mr *		(*reg_mr_relaxed)(struct ibv_pd *pd, void *addr, size_t length,
++					  int access);
++	int			(*dereg_mr_relaxed)(struct ibv_mr *mr);
++	int			(*flush_relaxed_mr)(struct ibv_pd *pd);
++	struct ibv_shpd *	(*alloc_shpd)(struct ibv_pd *pd, uint64_t share_key, struct ibv_shpd *shpd);
++	struct ibv_pd *		(*share_pd)(struct ibv_context *context, struct ibv_shpd *shpd, uint64_t share_key);
+ };
+ 
+ struct ibv_context {
[email protected]@ -749,6 +771,13 @@
  	struct ibv_device      *device;
  	struct ibv_context_ops	ops;
  	int			cmd_fd;
@@ -1189,6 +1547,55 @@
  	int			async_fd;
  	int			num_comp_vectors;
  	pthread_mutex_t		mutex;
[email protected]@ -858,6 +887,20 @@
+ struct ibv_pd *ibv_alloc_pd(struct ibv_context *context);
+ 
+ /**
++ * ibv_alloc_shpd - Mark the given protection domain as shareable & return an shpd structure
++ *                  that identifies it.
++ *                  The storage for the shpd structure needs to be provided by the client.
++ */
++struct ibv_shpd *ibv_alloc_shpd(struct ibv_pd *pd, uint64_t share_key, struct ibv_shpd *shpd);
++
++/**
++ * ibv_share_pd - share the protection domain identified by given shpd struct & return a
++ *                process linked ibv_pd struct.
++ *                the share_key given should match the share_key specified in alloc_shpd().
++ */
++struct ibv_pd *ibv_share_pd(struct ibv_context *context, struct ibv_shpd *shpd, uint64_t share_key);
++
++/**
+  * ibv_dealloc_pd - Free a protection domain
+  */
+ int ibv_dealloc_pd(struct ibv_pd *pd);
[email protected]@ -869,11 +912,27 @@
+ 			  size_t length, int access);
+ 
+ /**
++ * ibv_reg_mr_relaxed - Register a memory region using FMR
++ */
++struct ibv_mr *ibv_reg_mr_relaxed(struct ibv_pd *pd, void *addr,
++			  size_t length, int access);
++
++/**
+  * ibv_dereg_mr - Deregister a memory region
+  */
+ int ibv_dereg_mr(struct ibv_mr *mr);
+ 
+ /**
++ * ibv_dereg_mr_relaxed - Deregister a memory region registered using FMR
++ */
++int ibv_dereg_mr_relaxed(struct ibv_mr *mr);
++
++/**
++ * ibv_flush_relaxed_mr - Flush all free mr's in the protection domain
++ */
++int ibv_flush_relaxed_mr(struct ibv_pd *pd);
++
++/**
+  * ibv_create_comp_channel - Create a completion event channel
+  */
+ struct ibv_comp_channel *ibv_create_comp_channel(struct ibv_context *context);
 diff -r -u /tmp/846623/libibverbs-1.1.4/include/infiniband/arch.h libibverbs-1.1.4/include/infiniband/arch.h
 --- /tmp/846623/libibverbs-1.1.4/include/infiniband/arch.h	Wed Sep 16 04:27:22 2009
 +++ libibverbs-1.1.4/include/infiniband/arch.h	Fri Feb 11 04:02:20 2011
--- a/components/open-fabrics/libmlx4/patches/base.patch	Wed May 16 01:39:44 2012 -0700
+++ b/components/open-fabrics/libmlx4/patches/base.patch	Wed May 16 12:10:05 2012 -0700
@@ -24,6 +24,19 @@
  #define MLX4_UVERBS_MIN_ABI_VERSION	2
  #define MLX4_UVERBS_MAX_ABI_VERSION	3
  
[email protected]@ -49,6 +53,12 @@
+ 	struct ibv_alloc_pd_resp	ibv_resp;
+ 	__u32				pdn;
+ 	__u32				reserved;
++};
++
++struct mlx4_share_pd_resp {
++	struct ibv_share_pd_resp	ibv_resp;
++	__u32				pdn;
++	__u32				reserved;
+ };
+ 
+ struct mlx4_create_cq {
 diff -r -u /tmp/839450/libmlx4-1.0.1/src/verbs.c libmlx4-1.0.1/src/verbs.c
 --- /tmp/839450/libmlx4-1.0.1/src/verbs.c	Thu Mar 10 04:48:34 2011
 +++ libmlx4-1.0.1/src/verbs.c	Fri Mar 11 14:40:18 2011
@@ -52,7 +65,7 @@
  
  	pd = malloc(sizeof *pd);
  	if (!pd)
[email protected]@ -90,7 +101,16 @@
[email protected]@ -90,11 +101,67 @@
  		return NULL;
  	}
  
@@ -69,7 +82,126 @@
  
  	return &pd->ibv_pd;
  }
[email protected]@ -168,6 +188,10 @@
+ 
++struct ibv_shpd *mlx4_alloc_shpd(struct ibv_pd *pd, uint64_t share_key, struct ibv_shpd *shpd)
++{
++	struct ibv_alloc_shpd cmd;
++	struct ibv_alloc_shpd_resp resp;
++
++	if (ibv_cmd_alloc_shpd(pd->context, pd, share_key, shpd, &cmd, sizeof cmd,
++			     &resp, sizeof resp)) {
++		return NULL;
++	}
++
++	return shpd;
++}
++
++
++struct ibv_pd *mlx4_share_pd(struct ibv_context *context, struct ibv_shpd *shpd, uint64_t share_key)
++{
++	struct ibv_share_pd       cmd;
++	struct mlx4_share_pd_resp resp;
++	struct mlx4_pd		 *pd;
++#if defined(__SVR4) && defined(__sun)
++	mlnx_umap_pd_data_out_t   *mdd;
++#endif
++
++	pd = malloc(sizeof *pd);
++	if (!pd)
++		return NULL;
++
++	if (ibv_cmd_share_pd(context, shpd, share_key, &pd->ibv_pd, &cmd, sizeof cmd,
++			     &resp.ibv_resp, sizeof resp)) {
++		free(pd);
++		return NULL;
++	}
++
++#if defined(__SVR4) && defined(__sun)
++	/*
++	 * kernel driver passes back the PD table index as opaque data.  This
++	 * is required for specifying the PD in user space address vectors.
++	 */
++	mdd     = (mlnx_umap_pd_data_out_t *) &resp.ibv_resp.drv_out;
++	pd->pdn = mdd->mpd_pdnum;
++#else
++	pd->pdn = resp.pdn;
++#endif
++
++	return &pd->ibv_pd;
++}
++
+ int mlx4_free_pd(struct ibv_pd *pd)
+ {
+ 	int ret;
[email protected]@ -138,6 +205,37 @@
+ 	return mr;
+ }
+ 
++struct ibv_mr *mlx4_reg_mr_relaxed(struct ibv_pd *pd, void *addr, size_t length,
++			   int access)
++{
++	struct ibv_mr *mr;
++	struct ibv_reg_mr cmd;
++	int ret;
++
++	mr = malloc(sizeof *mr);
++	if (!mr)
++		return NULL;
++
++#ifdef IBV_CMD_REG_MR_RELAXED_HAS_RESP_PARAMS
++	{
++		struct ibv_reg_mr_resp resp;
++
++		ret = ibv_cmd_reg_mr_relaxed(pd, addr, length, (uintptr_t) addr,
++				     access, mr, &cmd, sizeof cmd,
++				     &resp, sizeof resp);
++	}
++#else
++	ret = ibv_cmd_reg_mr_relaxed(pd, addr, length, (uintptr_t) addr, access, mr,
++			     &cmd, sizeof cmd);
++#endif
++	if (ret) {
++		free(mr);
++		return NULL;
++	}
++
++	return mr;
++}
++
+ int mlx4_dereg_mr(struct ibv_mr *mr)
+ {
+ 	int ret;
[email protected]@ -150,6 +248,29 @@
+ 	return 0;
+ }
+ 
++int mlx4_dereg_mr_relaxed(struct ibv_mr *mr)
++{
++	int ret;
++
++	ret = ibv_cmd_dereg_mr_relaxed(mr);
++	if (ret)
++		return ret;
++
++	free(mr);
++	return 0;
++}
++
++int mlx4_flush_relaxed_mr(struct ibv_pd *pd)
++{
++	int ret;
++
++	ret = ibv_cmd_flush_relaxed_mr(pd);
++	if (ret)
++		return ret;
++
++	return 0;
++}
++
+ static int align_queue_size(int req)
+ {
+ 	int nent;
[email protected]@ -168,6 +289,10 @@
  	struct mlx4_create_cq_resp resp;
  	struct mlx4_cq		  *cq;
  	int			   ret;
@@ -80,7 +212,7 @@
  
  	/* Sanity check CQ size before proceeding */
  	if (cqe > 0x3fffff)
[email protected]@ -184,7 +208,8 @@
[email protected]@ -184,7 +309,8 @@
  
  	cqe = align_queue_size(cqe + 1);
  
@@ -90,7 +222,7 @@
  		goto err;
  
  	cq->set_ci_db  = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);
[email protected]@ -198,15 +223,78 @@
[email protected]@ -198,15 +324,78 @@
  
  	cmd.buf_addr = (uintptr_t) cq->buf.buf;
  	cmd.db_addr  = (uintptr_t) cq->set_ci_db;
@@ -169,7 +301,7 @@
  	return &cq->ibv_cq;
  
  err_db:
[email protected]@ -215,6 +303,21 @@
[email protected]@ -215,6 +404,21 @@
  err_buf:
  	mlx4_free_buf(&cq->buf);
  
@@ -191,7 +323,7 @@
  err:
  	free(cq);
  
[email protected]@ -225,12 +328,16 @@
[email protected]@ -225,12 +429,16 @@
  {
  	struct mlx4_cq *cq = to_mcq(ibcq);
  	struct mlx4_resize_cq cmd;
@@ -210,7 +342,7 @@
  
  	pthread_spin_lock(&cq->lock);
  
[email protected]@ -247,32 +354,76 @@
[email protected]@ -247,32 +455,76 @@
  		goto out;
  	}
  
@@ -298,7 +430,7 @@
  out:
  	pthread_spin_unlock(&cq->lock);
  	return ret;
[email protected]@ -287,6 +438,9 @@
[email protected]@ -287,6 +539,9 @@
  		return ret;
  
  	mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);
@@ -308,7 +440,7 @@
  	mlx4_free_buf(&to_mcq(cq)->buf);
  	free(to_mcq(cq));
  
[email protected]@ -300,6 +454,10 @@
[email protected]@ -300,6 +555,10 @@
  	struct mlx4_create_srq_resp resp;
  	struct mlx4_srq		   *srq;
  	int			    ret;
@@ -319,7 +451,7 @@
  
  	/* Sanity check SRQ size before proceeding */
  	if (attr->attr.max_wr > 1 << 16 || attr->attr.max_sge > 64)
[email protected]@ -312,6 +470,7 @@
[email protected]@ -312,6 +571,7 @@
  	if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
  		goto err;
  
@@ -327,7 +459,7 @@
  	srq->max     = align_queue_size(attr->attr.max_wr + 1);
  	srq->max_gs  = attr->attr.max_sge;
  	srq->counter = 0;
[email protected]@ -324,7 +483,23 @@
[email protected]@ -324,7 +584,23 @@
  		goto err_free;
  
  	*srq->db = 0;
@@ -351,7 +483,7 @@
  	cmd.buf_addr = (uintptr_t) srq->buf.buf;
  	cmd.db_addr  = (uintptr_t) srq->db;
  
[email protected]@ -331,19 +506,97 @@
[email protected]@ -331,19 +607,97 @@
  	ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr,
  				 &cmd.ibv_cmd, sizeof cmd,
  				 &resp.ibv_resp, sizeof resp);
@@ -449,7 +581,7 @@
  
  err:
  	free(srq);
[email protected]@ -357,7 +610,16 @@
[email protected]@ -357,7 +711,16 @@
  {
  	struct ibv_modify_srq cmd;
  
@@ -466,7 +598,7 @@
  }
  
  int mlx4_query_srq(struct ibv_srq *srq,
[email protected]@ -365,7 +627,17 @@
[email protected]@ -365,7 +728,17 @@
  {
  	struct ibv_query_srq cmd;
  
@@ -484,7 +616,7 @@
  }
  
  int mlx4_destroy_srq(struct ibv_srq *ibsrq)
[email protected]@ -447,6 +719,10 @@
[email protected]@ -447,6 +820,10 @@
  	struct mlx4_qp		 *qp;
  	int			  ret;
  	struct mlx4_context	 *context = to_mctx(pd->context);
@@ -495,7 +627,7 @@
  
  
  	/* Sanity check QP size before proceeding */
[email protected]@ -457,6 +733,7 @@
[email protected]@ -457,6 +834,7 @@
  	if (!qp)
  		return NULL;
  
@@ -503,7 +635,7 @@
  	mlx4_calc_sq_wqe_size(&attr->cap, attr->qp_type, qp);
  
  	/*
-@@ -466,6 +743,7 @@
+@@ -466,6 +844,7 @@
  	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
  	qp->sq.wqe_cnt = align_queue_size(attr->cap.max_send_wr + qp->sq_spare_wqes);
  	qp->rq.wqe_cnt = align_queue_size(attr->cap.max_recv_wr);
@@ -511,7 +643,7 @@
  
  	if (attr->srq || attr->qp_type == IBV_QPT_XRC)
  		attr->cap.max_recv_wr = qp->rq.wqe_cnt = 0;
-@@ -476,6 +754,22 @@
+@@ -476,6 +855,22 @@
  			attr->cap.max_recv_wr = 1;
  	}
  
@@ -534,7 +666,7 @@
  	if (mlx4_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp))
  		goto err;
  
-@@ -505,17 +799,84 @@
+@@ -505,17 +900,84 @@
  		; /* nothing */
  	cmd.sq_no_prefetch = 0;	/* OK for ABI 2: just a reserved field */
  	memset(cmd.reserved, 0, sizeof cmd.reserved);
@@ -619,7 +751,7 @@
  	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
  
  	qp->rq.wqe_cnt = attr->cap.max_recv_wr;
-@@ -536,9 +897,38 @@
+@@ -536,9 +998,38 @@
  
  	return &qp->ibv_qp;
  
@@ -658,7 +790,7 @@
  err_rq_db:
  	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
  	if (!attr->srq && attr->qp_type != IBV_QPT_XRC)
-@@ -552,6 +942,7 @@
+@@ -552,6 +1043,7 @@
  
  err:
  	free(qp);
@@ -666,7 +798,7 @@
  
  	return NULL;
  }
-@@ -745,6 +1136,13 @@
+@@ -745,6 +1237,13 @@
  				    struct ibv_cq *xrc_cq,
  				    struct ibv_srq_init_attr *attr)
  {
@@ -680,7 +812,7 @@
  	struct mlx4_create_xrc_srq  cmd;
  	struct mlx4_create_srq_resp resp;
  	struct mlx4_srq		   *srq;
-@@ -807,6 +1205,7 @@
+@@ -807,6 +1306,7 @@
  	free(srq);
  
  	return NULL;
@@ -778,7 +910,25 @@
  void mlx4_free_db(struct mlx4_context *context, enum mlx4_db_type type, uint32_t *db);
  
  int mlx4_query_device(struct ibv_context *context,
-@@ -388,8 +397,13 @@
+@@ -360,11 +369,17 @@
+ 		     struct ibv_port_attr *attr);
+ 
+ struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context);
++struct ibv_shpd *mlx4_alloc_shpd(struct ibv_pd *pd, uint64_t share_key, struct ibv_shpd *shpd);
++struct ibv_pd *mlx4_share_pd(struct ibv_context *context, struct ibv_shpd *shpd, uint64_t share_key);
+ int mlx4_free_pd(struct ibv_pd *pd);
+ 
+ struct ibv_mr *mlx4_reg_mr(struct ibv_pd *pd, void *addr,
+ 			    size_t length, int access);
++struct ibv_mr *mlx4_reg_mr_relaxed(struct ibv_pd *pd, void *addr,
++			    size_t length, int access);
+ int mlx4_dereg_mr(struct ibv_mr *mr);
++int mlx4_dereg_mr_relaxed(struct ibv_mr *mr);
++int mlx4_flush_relaxed_mr(struct ibv_pd *pd);
+ 
+ struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
+ 			       struct ibv_comp_channel *channel,
+@@ -388,8 +403,13 @@
  int mlx4_query_srq(struct ibv_srq *srq,
  			   struct ibv_srq_attr *attr);
  int mlx4_destroy_srq(struct ibv_srq *srq);
@@ -792,7 +942,7 @@
  void mlx4_free_srq_wqe(struct mlx4_srq *srq, int ind);
  int mlx4_post_srq_recv(struct ibv_srq *ibsrq,
  		       struct ibv_recv_wr *wr,
-@@ -415,8 +429,14 @@
+@@ -415,8 +435,14 @@
  void mlx4_calc_sq_wqe_size(struct ibv_qp_cap *cap, enum ibv_qp_type type,
  			   struct mlx4_qp *qp);
  int num_inline_segs(int data, enum ibv_qp_type type);
@@ -1046,7 +1196,21 @@
 diff -r -u /tmp/839450/libmlx4-1.0.1/src/mlx4.c libmlx4-1.0.1/src/mlx4.c
 --- /tmp/839450/libmlx4-1.0.1/src/mlx4.c	Thu Mar 10 04:48:34 2011
 +++ libmlx4-1.0.1/src/mlx4.c	Fri Mar 11 14:05:26 2011
-@@ -144,6 +144,11 @@
+@@ -112,8 +112,13 @@
+ 	.query_port    = mlx4_query_port,
+ 	.alloc_pd      = mlx4_alloc_pd,
+ 	.dealloc_pd    = mlx4_free_pd,
++	.alloc_shpd    = mlx4_alloc_shpd,
++	.share_pd      = mlx4_share_pd,
+ 	.reg_mr	       = mlx4_reg_mr,
++	.reg_mr_relaxed	       = mlx4_reg_mr_relaxed,
+ 	.dereg_mr      = mlx4_dereg_mr,
++	.dereg_mr_relaxed      = mlx4_dereg_mr_relaxed,
++	.flush_relaxed_mr      = mlx4_flush_relaxed_mr,
+ 	.create_cq     = mlx4_create_cq,
+ 	.poll_cq       = mlx4_poll_cq,
+ 	.req_notify_cq = mlx4_arm_cq,
+@@ -144,6 +149,11 @@
  	struct mlx4_alloc_ucontext_resp resp;
  	int				i;
  	struct ibv_device_attr		dev_attrs;
@@ -1058,7 +1222,7 @@
  
  	context = calloc(1, sizeof *context);
  	if (!context)
-@@ -150,11 +155,30 @@
+@@ -150,11 +160,30 @@
  		return NULL;
  
  	context->ibv_ctx.cmd_fd = cmd_fd;
@@ -1089,7 +1253,7 @@
  	context->num_qps	= resp.qp_tab_size;
  	context->qp_table_shift = ffs(context->num_qps) - 1 - MLX4_QP_TABLE_BITS;
  	context->qp_table_mask	= (1 << context->qp_table_shift) - 1;
-@@ -172,20 +196,44 @@
+@@ -172,20 +201,44 @@
  	for (i = 0; i < MLX4_XRC_SRQ_TABLE_SIZE; ++i)
  		context->xrc_srq_table[i].refcnt = 0;
  
@@ -1134,7 +1298,7 @@
  		if (context->bf_page == MAP_FAILED) {
  			fprintf(stderr, PFX "Warning: BlueFlame available, "
  				"but failed to mmap() BlueFlame page.\n");
-@@ -214,6 +262,7 @@
+@@ -214,6 +267,7 @@
  	context->max_qp_wr = dev_attrs.max_qp_wr;
  	context->max_sge = dev_attrs.max_sge;
  	context->max_cqe = dev_attrs.max_cqe;
@@ -1142,7 +1306,7 @@
  	if (!(dev_attrs.device_cap_flags & IBV_DEVICE_XRC)) {
  		fprintf(stderr, PFX "There is a mismatch between "
  		        "the kernel and the userspace libraries: "
-@@ -220,6 +269,7 @@
+@@ -220,6 +274,7 @@
  			"Kernel does not support XRC. Exiting.\n");
  		goto query_free;
  	}
@@ -1150,7 +1314,7 @@
  
  	return &context->ibv_ctx;
  
-@@ -240,6 +290,7 @@
+@@ -240,6 +295,7 @@
  	munmap(context->uar, to_mdev(ibctx->device)->page_size);
  	if (context->bf_page)
  		munmap(context->bf_page, to_mdev(ibctx->device)->page_size);
--- a/components/open-fabrics/open-fabrics.p5m	Wed May 16 01:39:44 2012 -0700
+++ b/components/open-fabrics/open-fabrics.p5m	Wed May 16 12:10:05 2012 -0700
@@ -306,6 +306,7 @@
 file path=usr/share/man/man1m/smpdump.1m variant.opensolaris.zone=global
 file path=usr/share/man/man1m/smpquery.1m variant.opensolaris.zone=global
 file path=usr/share/man/man3/ibv_alloc_pd.3 variant.opensolaris.zone=global
+file path=usr/share/man/man3/ibv_alloc_shpd.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/ibv_attach_mcast.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/ibv_create_ah_from_wc.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/ibv_create_ah.3 variant.opensolaris.zone=global
@@ -337,8 +338,10 @@
 file path=usr/share/man/man3/ibv_query_srq.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/ibv_rate_to_mult.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/ibv_reg_mr.3 variant.opensolaris.zone=global
+file path=usr/share/man/man3/ibv_reg_mr_relaxed.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/ibv_req_notify_cq.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/ibv_resize_cq.3 variant.opensolaris.zone=global
+file path=usr/share/man/man3/ibv_share_pd.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/rdma_accept.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/rdma_ack_cm_event.3 variant.opensolaris.zone=global
 file path=usr/share/man/man3/rdma_bind_addr.3 variant.opensolaris.zone=global