[mpich2-commits] r3945 - in mpich2/trunk: . src/include src/mpi/errhan src/mpid/ch3/channels/nemesis src/mpid/ch3/channels/nemesis/include src/mpid/ch3/channels/nemesis/nemesis/include src/mpid/ch3/channels/nemesis/nemesis/netmod src/mpid/ch3/channels/nemesis/nemesis/netmod/mx src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad src/mpid/ch3/channels/nemesis/src src/mpid/ch3/include src/mpid/ch3/src src/util/param
buntinas at mcs.anl.gov
buntinas at mcs.anl.gov
Thu Mar 5 16:44:19 CST 2009
Author: buntinas
Date: 2009-03-05 16:44:18 -0600 (Thu, 05 Mar 2009)
New Revision: 3945
Added:
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_cancel.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/uthash.h
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/Makefile.sm
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_finalize.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_impl.h
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_init.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_poll.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_register.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_send.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_test.c
Removed:
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_register.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_test.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/Makefile.sm
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_finalize.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_impl.h
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_init.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_poll.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_register.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_send.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_test.c
Modified:
mpich2/trunk/configure.in
mpich2/trunk/src/include/mpiimpl.h
mpich2/trunk/src/mpi/errhan/errnames.txt
mpich2/trunk/src/mpid/ch3/channels/nemesis/configure.in
mpich2/trunk/src/mpid/ch3/channels/nemesis/include/mpidi_ch3_post.h
mpich2/trunk/src/mpid/ch3/channels/nemesis/include/mpidi_ch3_pre.h
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/include/mpid_nem_inline.h
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/Makefile.sm
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/Makefile.sm
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_finalize.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_impl.h
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_init.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_poll.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_send.c
mpich2/trunk/src/mpid/ch3/channels/nemesis/src/ch3_progress.c
mpich2/trunk/src/mpid/ch3/include/mpidimpl.h
mpich2/trunk/src/mpid/ch3/src/ch3u_recvq.c
mpich2/trunk/src/mpid/ch3/src/mpid_irsend.c
mpich2/trunk/src/mpid/ch3/src/mpid_isend.c
mpich2/trunk/src/mpid/ch3/src/mpid_issend.c
mpich2/trunk/src/mpid/ch3/src/mpid_rsend.c
mpich2/trunk/src/mpid/ch3/src/mpid_send.c
mpich2/trunk/src/mpid/ch3/src/mpid_ssend.c
mpich2/trunk/src/mpid/ch3/src/mpid_vc.c
mpich2/trunk/src/util/param/param.c
Log:
Merging mx netmod branch into trunk. This adds override functions to the VC for overriding the MPID_Send-family functions, as well as hooks for posting receives.
Modified: mpich2/trunk/configure.in
===================================================================
--- mpich2/trunk/configure.in 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/configure.in 2009-03-05 22:44:18 UTC (rev 3945)
@@ -5100,6 +5100,11 @@
if test "$ac_cv_func_fdopen" = "yes" ; then
PAC_FUNC_NEEDS_DECL([#include <stdlib.h>],fdopen)
fi
+# setenv() sets environment variable
+AC_HAVE_FUNCS(setenv)
+if test "$ac_cv_func_setenv" = "yes" ; then
+ PAC_FUNC_NEEDS_DECL([#include <stdlib.h>],setenv)
+fi
# ----------------------------------------------------------------------------
Modified: mpich2/trunk/src/include/mpiimpl.h
===================================================================
--- mpich2/trunk/src/include/mpiimpl.h 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/include/mpiimpl.h 2009-03-05 22:44:18 UTC (rev 3945)
@@ -681,7 +681,9 @@
int MPIU_GetEnvRange( const char *envName, int *lowPtr, int *highPtr );
int MPIU_GetEnvBool( const char *envName, int *val );
int MPIU_GetEnvStr( const char *envName, const char **val );
+int MPIU_SetEnv( const char *name, const char *value, int overwrite );
+
/* See mpishared.h as well */
/* ------------------------------------------------------------------------- */
/* end of mpiparam.h*/
Modified: mpich2/trunk/src/mpi/errhan/errnames.txt
===================================================================
--- mpich2/trunk/src/mpi/errhan/errnames.txt 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpi/errhan/errnames.txt 2009-03-05 22:44:18 UTC (rev 3945)
@@ -889,6 +889,8 @@
**mx_isend %s:mx_isend failed (%s)
**mx_wait:mx_wait failed
**mx_wait %s:mx_wait failed (%s)
+**mx_cancel:mx_cancel failed
+**mx_cancel %s:mx_cancel failed (%s)
**ibv_alloc_pd:ibv_alloc_pd failed
**ibv_alloc_pd %s:ibv_alloc_pd failed for device (%s)
**ibv_create_comp_channel:ibv_create_comp_channel failed
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/configure.in
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/configure.in 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/configure.in 2009-03-05 22:44:18 UTC (rev 3945)
@@ -85,10 +85,8 @@
AC_DEFINE(HAVE_NETINET_IN_H,1,[Define if netinet/in.h exists])
fi
-AC_ARG_ENABLE(fast,
-[--enable-fast - pick the appropriate options for fast execution. This
- turns off error checking and timing collection],,
-enable_fast=no)
+AC_ARG_ENABLE(fast, [--enable-fast - pick the appropriate options for fast execution.
+This turns off error checking and timing collection],,enable_fast=no)
if test "$enable_fast" = "yes" ; then
AC_DEFINE(ENABLE_NO_YIELD,1,[Define to remove yield()s in polling loops])
fi
@@ -173,6 +171,13 @@
LDFLAGS="$LDFLAGS -L${with_psm_lib}"
fi,)
+#check for NewMadeleine options
+AC_ARG_WITH(newmad, [--with-newmad=path - specify path where pm2 software can be found],
+if test "${with_newmad}" != "yes" -a "${with_newmad}" != "no" ; then
+ LDFLAGS="$LDFLAGS `${with_newmad}/bin/pm2-config --flavor=$PM2_FLAVOR --libs`"
+ CPPFLAGS="`${with_newmad}/bin/pm2-config --flavor=$PM2_FLAVOR --cflags`"
+ LIBS="$LIBS `${with_newmad}/bin/pm2-config --flavor=$PM2_FLAVOR --libs`"
+fi,)
nemesis_nets_dirs=""
nemesis_nets_strings=""
@@ -249,21 +254,53 @@
AC_CHECK_HEADER([myriexpress.h], , [
AC_MSG_ERROR(['myriexpress.h not found. Did you specify --with-mx= or --with-mx-include=?'])
])
- #mx_init is a macro, switch to mx_finalize to find the symbol
- AC_CHECK_LIB(myriexpress, mx_finalize, , [
+ AC_CHECK_HEADER([mx_extensions.h], , [
+ AC_MSG_ERROR(['mx_extensions.h not found. Are you running a recent version of MX (al least 1.2.1)?'])
+ ])
+ #mx_init is a macro, switch to mx_finalize to find the symbol
+ AC_CHECK_LIB(myriexpress, mx_finalize, , [
AC_MSG_ERROR(['myriexpress library not found. Did you specify --with-mx= or --with-mx-lib=?'])
])
+ AC_TRY_COMPILE([
+ #include "myriexpress.h"
+ #include "mx_extensions.h"
+ #if MX_API < 0x301
+ #error You need at least MX 1.2.1 (MX_API >= 0x301)
+ #endif],
+ [int a=0;],
+ mx_api_version=yes,
+ mx_api_version=no)
+ if test "$mx_api_version" = no ; then
+ AC_MSG_ERROR(['MX API version Problem. Are you running a recent version of MX (at least 1.2.1)?'])
+ fi;
+ AC_DEFINE([ENABLE_COMM_OVERRIDES], 1, [define to add per-vc function pointers to override send and recv functions])
fi;
- if test "${net}" = "elan" ; then
+ if test "${net}" == "newmad" ; then
+ AC_CHECK_HEADER([nm_public.h], , [
+ AC_MSG_ERROR(['nm_public.h not found. Did you specify --with-newmad= ?'])
+ ])
+ AC_CHECK_HEADER([nm_sendrecv_interface.h], , [
+ AC_MSG_ERROR(['nm_sendrecv_interface.h not found. Did you specify --with-newmad= ?'])
+ ])
+ AC_CHECK_LIB(nmad,nm_core_init, , [
+ AC_MSG_ERROR(['nmad library not found. Did you specify --with-newmad= ?'])
+ ])
+ AC_ARG_ENABLE(newmad-multirail,
+ [--enable-newmad-multirail - enables multirail support in newmad module],,enable_multi=no)
+ if test "$enable_multi" = "yes" ; then
+ AC_DEFINE(MPID_MAD_MODULE_MULTIRAIL, 1, [Define to enable multirail support in newmad module])
+ fi
+ fi;
+
+ if test "${net}" == "elan" ; then
echo "=== You're about to use the experimental Nemesis/Elan network module."
echo "=== This module has not been thoroughly tested and some performance issues remain."
-
AC_CHECK_HEADER([elan/elan.h], , [
- AC_MSG_ERROR(['elan.h not found. Did you specify --with-elan= or --with-elan-include=? or --with-qsnet-include=?'])
+ AC_MSG_ERROR(['elan.h not found. Did you specify --with-elan= or --with-elan-include= or --with-qsnet-include=?'])
])
AC_CHECK_LIB(elan, elan_baseInit, , [
- AC_MSG_ERROR(['elan library not found. Did you specify --with-elan= or --with-elan-lib=? or --with-qsnet-include=?'])
+ AC_MSG_ERROR(['elan library not found. Did you specify --with-elan= or --with-elan-lib= or --with-qsnet-include=?'])
])
fi;
@@ -281,19 +318,21 @@
case "${nemesis_network}" in
elan )
- AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_ELAN, [Choose elan, gm, mx, tcp, ib, psm, or no network]) ;;
+ AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_ELAN, [Choose netmod]) ;;
gm )
- AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_GM, [Choose elan, gm, mx, tcp, ib, psm, or no network]) ;;
+ AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_GM, [Choose netmod]) ;;
mx )
- AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_MX, [Choose elan, gm, mx, tcp, ib, psm, or no network]) ;;
+ AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_MX, [Choose netmod]) ;;
+ newmad )
+ AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_NEWMAD, [Choose netmod]) ;;
tcp )
- AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_TCP, [Choose elan, gm, mx, tcp, ib, psm, or no network]) ;;
+ AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_TCP, [Choose netmod]) ;;
ib )
- AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_IB, [Choose elan, gm, mx, tcp, ib, psm, or no network]) ;;
+ AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_IB, [Choose netmod]) ;;
psm )
- AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_PSM, [Choose elan, gm, mx, tcp, ib, psm, or no network]) ;;
+ AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_PSM, [Choose netmod]) ;;
* )
- AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_NONE, [Choose elan, gm, mx, tcp, ib, psm, or no network]) ;;
+ AC_DEFINE(MPID_NEM_NET_MODULE, MPID_NEM_NONE, [Choose netmod]) ;;
esac
AC_ARG_ENABLE(nemesis-dbg-nolocal, [--enable-nemesis-dbg-nolocal - enables debugging mode where shared-memory communication is disabled],
@@ -526,4 +565,4 @@
dnl Place holder macro for finalization
PAC_SUBCONFIG_FINALIZE()
-AC_OUTPUT(Makefile src/Makefile nemesis/Makefile nemesis/src/Makefile nemesis/utils/Makefile nemesis/utils/replacements/Makefile nemesis/utils/monitor/Makefile nemesis/netmod/Makefile nemesis/netmod/elan/Makefile nemesis/netmod/gm/Makefile nemesis/netmod/mx/Makefile nemesis/netmod/tcp/Makefile nemesis/netmod/ib/Makefile nemesis/netmod/psm/Makefile nemesis/netmod/none/Makefile localdefs nemesis/include/mpid_nem_net_module_defs.h nemesis/src/mpid_nem_net_array.c)
+AC_OUTPUT(Makefile src/Makefile nemesis/Makefile nemesis/src/Makefile nemesis/utils/Makefile nemesis/utils/replacements/Makefile nemesis/utils/monitor/Makefile nemesis/netmod/Makefile nemesis/netmod/elan/Makefile nemesis/netmod/gm/Makefile nemesis/netmod/mx/Makefile nemesis/netmod/tcp/Makefile nemesis/netmod/ib/Makefile nemesis/netmod/psm/Makefile nemesis/netmod/newmad/Makefile nemesis/netmod/none/Makefile localdefs nemesis/include/mpid_nem_net_module_defs.h nemesis/src/mpid_nem_net_array.c)
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/include/mpidi_ch3_post.h
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/include/mpidi_ch3_post.h 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/include/mpidi_ch3_post.h 2009-03-05 22:44:18 UTC (rev 3945)
@@ -35,8 +35,9 @@
#define MPIDI_CH3_Progress_wait(progress_state) MPIDI_CH3I_Progress(progress_state, TRUE)
#define MPIDI_CH3_Progress_poke() MPIDI_CH3I_Progress(NULL, FALSE)
-int MPIDI_CH3I_Posted_recv_enqueued (MPID_Request *rreq);
-int MPIDI_CH3I_Posted_recv_dequeued (MPID_Request *rreq);
+void MPIDI_CH3I_Posted_recv_enqueued(MPID_Request *rreq);
+/* returns non-zero when req has been matched by channel */
+int MPIDI_CH3I_Posted_recv_dequeued(MPID_Request *rreq);
/*
* Enable optional functionality
@@ -55,4 +56,7 @@
MPIDI_msg_sz_t data_sz, MPI_Aint dt_true_lb, int rank, int tag, MPID_Comm * comm, int context_offset);
int MPID_nem_lmt_RndvRecv(struct MPIDI_VC *vc, MPID_Request *rreq);
+
+int MPIDI_CH3I_Register_anysource_notification(void (*enqueue_fn)(MPID_Request *rreq), int (*dequeue_fn)(MPID_Request *rreq));
+
#endif /* !defined(MPICH_MPIDI_CH3_POST_H_INCLUDED) */
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/include/mpidi_ch3_pre.h
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/include/mpidi_ch3_pre.h 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/include/mpidi_ch3_pre.h 2009-03-05 22:44:18 UTC (rev 3945)
@@ -19,7 +19,6 @@
/*#define HAVE_CH3_PRE_INIT*/
/* #define MPIDI_CH3_HAS_NO_DYNAMIC_PROCESS */
#define MPIDI_DEV_IMPLEMENTS_KVS
-
typedef enum MPIDI_CH3I_VC_state
{
@@ -30,26 +29,32 @@
}
MPIDI_CH3I_VC_state_t;
-/* size of private data area in vc for network modules */
+/* size of private data area in vc and req for network modules */
#define MPID_NEM_VC_NETMOD_AREA_LEN 128
+#define MPID_NEM_REQ_NETMOD_AREA_LEN 64
-
/*
* MPIDI_CH3_REQUEST_DECL (additions to MPID_Request)
*/
-#define MPIDI_CH3_REQUEST_DECL \
- struct MPIDI_CH3I_Request \
- { \
- struct MPIDI_VC *vc; \
- int noncontig; \
- MPIDI_msg_sz_t header_sz; \
- \
- MPI_Request lmt_req_id; /* request id of remote side */ \
- struct MPID_Request *lmt_req; /* pointer to original send/recv request */ \
- MPIDI_msg_sz_t lmt_data_sz; /* data size to be transferred, after checking for truncation */ \
- MPID_IOV lmt_tmp_cookie; /* temporary storage for received cookie */ \
+#define MPIDI_CH3_REQUEST_DECL \
+ struct MPIDI_CH3I_Request \
+ { \
+ struct MPIDI_VC *vc; \
+ int noncontig; \
+ MPIDI_msg_sz_t header_sz; \
+ \
+ MPI_Request lmt_req_id; /* request id of remote side */ \
+ struct MPID_Request *lmt_req; /* pointer to original send/recv request */ \
+ MPIDI_msg_sz_t lmt_data_sz; /* data size to be transferred, after checking for truncation */ \
+ MPID_IOV lmt_tmp_cookie; /* temporary storage for received cookie */ \
+ \
+ struct \
+ { \
+ char padding[MPID_NEM_REQ_NETMOD_AREA_LEN]; \
+ } netmod_area; \
} ch;
+
#if 0
#define DUMP_REQUEST(req) do { \
int i; \
@@ -71,9 +76,11 @@
#define DUMP_REQUEST(req) do { } while (0)
#endif
-#define MPIDI_POSTED_RECV_ENQUEUE_HOOK(x) MPIDI_CH3I_Posted_recv_enqueued(x)
-#define MPIDI_POSTED_RECV_DEQUEUE_HOOK(x) MPIDI_CH3I_Posted_recv_dequeued(x)
+#define MPIDI_POSTED_RECV_ENQUEUE_HOOK(req) MPIDI_CH3I_Posted_recv_enqueued(req)
+#define MPIDI_POSTED_RECV_DEQUEUE_HOOK(req) MPIDI_CH3I_Posted_recv_dequeued(req)
+
+
typedef struct MPIDI_CH3I_comm
{
/* FIXME we should really use the copy of these values that is stored in the
@@ -98,6 +105,7 @@
if (_mpi_errno) MPIU_ERR_POP (_mpi_errno); \
} while(0)
+
#define MPID_Dev_comm_destroy_hook(comm_) do { \
int _mpi_errno; \
_mpi_errno = MPIDI_CH3I_comm_destroy (comm_); \
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/include/mpid_nem_inline.h
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/include/mpid_nem_inline.h 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/include/mpid_nem_inline.h 2009-03-05 22:44:18 UTC (rev 3945)
@@ -18,8 +18,8 @@
static inline int MPID_nem_mpich2_send_header (void* buf, int size, MPIDI_VC_t *vc, int *again);
static inline int MPID_nem_mpich2_sendv (MPID_IOV **iov, int *n_iov, MPIDI_VC_t *vc, int *again);
-static inline int MPID_nem_mpich2_dequeue_fastbox (int local_rank);
-static inline int MPID_nem_mpich2_enqueue_fastbox (int local_rank);
+static inline void MPID_nem_mpich2_dequeue_fastbox (int local_rank);
+static inline void MPID_nem_mpich2_enqueue_fastbox (int local_rank);
static inline int MPID_nem_mpich2_sendv_header (MPID_IOV **iov, int *n_iov, MPIDI_VC_t *vc, int *again);
static inline int MPID_nem_recv_seqno_matches (MPID_nem_queue_ptr_t qhead);
static inline int MPID_nem_mpich2_test_recv (MPID_nem_cell_ptr_t *cell, int *in_fbox);
@@ -769,10 +769,8 @@
#define FUNCNAME MPID_nem_mpich2_dequeue_fastbox
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
-static inline int
-MPID_nem_mpich2_dequeue_fastbox (int local_rank)
+static inline void MPID_nem_mpich2_dequeue_fastbox(int local_rank)
{
- int mpi_errno = MPI_SUCCESS;
MPID_nem_fboxq_elem_t *el;
MPIU_Assert(local_rank < MPID_nem_mem_region.num_local);
@@ -780,7 +778,7 @@
el = &MPID_nem_fboxq_elem_list[local_rank];
MPIU_Assert(el->fbox != NULL);
- MPIU_ERR_CHKANDJUMP (!el->usage, mpi_errno, MPI_ERR_OTHER, "**intern");
+ MPIU_Assert(el->usage);
--el->usage;
if (el->usage == 0)
@@ -802,12 +800,7 @@
else
MPID_nem_curr_fboxq_elem = el->next;
}
- }
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+ }
}
/*
@@ -820,10 +813,8 @@
#define FUNCNAME MPID_nem_mpich2_dequeue_fastbox
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
-static inline
-int MPID_nem_mpich2_enqueue_fastbox (int local_rank)
+static inline void MPID_nem_mpich2_enqueue_fastbox(int local_rank)
{
- int mpi_errno = MPI_SUCCESS;
MPID_nem_fboxq_elem_t *el;
MPIU_Assert(local_rank < MPID_nem_mem_region.num_local);
@@ -851,9 +842,7 @@
el->next = NULL;
MPID_nem_fboxq_tail = el;
- }
-
- return mpi_errno;
+ }
}
/*
MPID_nem_recv_seqno_matches (MPID_nem_queue_ptr_t qhead)
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/Makefile.sm
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/Makefile.sm 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/Makefile.sm 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,3 +1,3 @@
-SUBDIRS_nemesis_nets_dirs = elan gm mx tcp ib none psm
+SUBDIRS_nemesis_nets_dirs = elan gm mx tcp ib none psm newmad
SUBDIRS = @nemesis_nets_dirs@
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/Makefile.sm
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/Makefile.sm 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/Makefile.sm 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,5 +1,3 @@
INCLUDES = -I. -I../../include -I${master_top_srcdir}/src/ch3/channels/nemesis/nemesis/include -I${master_top_srcdir}/src/include \
-I${top_builddir}/src/include
-lib${MPILIBNAME}_a_SOURCES = \
- mx_finalize.c mx_init.c mx_poll.c mx_send.c \
- mx_register.c mx_test.c
+lib${MPILIBNAME}_a_SOURCES = mx_finalize.c mx_init.c mx_poll.c mx_send.c mx_cancel.c
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_cancel.c (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_cancel.c)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_cancel.c (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_cancel.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,70 @@
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mx_impl.h"
+#include "my_papi_defs.h"
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_cancel_send
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mx_cancel_send(MPIDI_VC_t *vc, MPID_Request *sreq)
+{
+ mx_request_t *mx_request = NULL;
+ mx_return_t ret;
+ uint32_t result;
+ int mpi_errno = MPI_SUCCESS;
+
+ mx_request = &(REQ_FIELD(sreq,mx_request));
+ ret = mx_cancel(MPID_nem_mx_local_endpoint,mx_request,&result);
+ MPIU_ERR_CHKANDJUMP1(ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_cancel", "**mx_cancel %s", mx_strerror(ret));
+
+ if (result)
+ {
+ sreq->status.cancelled = TRUE;
+ MPID_nem_mx_pending_send_req--;
+ }
+ else
+ {
+ sreq->status.cancelled = FALSE;
+ }
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_cancel_recv
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mx_cancel_recv(MPIDI_VC_t *vc, MPID_Request *rreq)
+{
+ mx_request_t *mx_request = NULL;
+ mx_return_t ret;
+ uint32_t result;
+ int mpi_errno = MPI_SUCCESS;
+
+ mx_request = &(REQ_FIELD(rreq,mx_request));
+ ret = mx_cancel(MPID_nem_mx_local_endpoint,mx_request,&result);
+ MPIU_ERR_CHKANDJUMP1(ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_cancel", "**mx_cancel %s", mx_strerror(ret));
+
+ if (result)
+ {
+ rreq->status.cancelled = TRUE;
+ }
+ else
+ {
+ rreq->status.cancelled = FALSE;
+ }
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_finalize.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_finalize.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_finalize.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -5,7 +5,6 @@
*/
#include "mx_impl.h"
-#include "myriexpress.h"
#undef FUNCNAME
@@ -15,27 +14,18 @@
int
MPID_nem_mx_finalize()
{
- int mpi_errno = MPI_SUCCESS;
+ int mpi_errno = MPI_SUCCESS;
+ int ret ;
+
+ while(MPID_nem_mx_pending_send_req > 0)
+ MPID_nem_mx_poll(MPID_NEM_POLL_OUT);
- if (MPID_nem_mem_region.ext_procs > 0)
- {
- int ret ;
-
- while(MPID_nem_module_mx_pendings_sends > 0)
- {
- MPID_nem_mx_poll(MPID_NEM_POLL_OUT);
- }
- ret = mx_close_endpoint(MPID_nem_module_mx_local_endpoint);
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_close_endpoint", "**mx_close_endpoint %s", mx_strerror (ret));
+ ret = mx_close_endpoint(MPID_nem_mx_local_endpoint);
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_close_endpoint", "**mx_close_endpoint %s", mx_strerror (ret));
+
+ ret = mx_finalize();
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_finalize", "**mx_finalize %s", mx_strerror (ret));
- MPIU_Free( MPID_nem_module_mx_endpoints_addr );
- MPIU_Free( MPID_nem_module_mx_send_outstanding_request );
- MPIU_Free( MPID_nem_module_mx_recv_outstanding_request );
-
- ret = mx_finalize();
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_finalize", "**mx_finalize %s", mx_strerror (ret));
- }
-
fn_exit:
return mpi_errno;
fn_fail:
@@ -49,10 +39,6 @@
int
MPID_nem_mx_ckpt_shutdown ()
{
- int mpi_errno = MPI_SUCCESS;
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+ return MPI_SUCCESS;
}
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_impl.h
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_impl.h 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_impl.h 2009-03-05 22:44:18 UTC (rev 3945)
@@ -4,15 +4,21 @@
* See COPYRIGHT in top-level directory.
*/
-#ifndef MX_IMPL_H
-#define MX_IMPL_H
+#ifndef MX_MODULE_IMPL_H
+#define MX_MODULE_IMPL_H
#include <myriexpress.h>
+#include "mx_extensions.h"
#include "mpid_nem_impl.h"
-int MPID_nem_mx_init (MPID_nem_queue_ptr_t proc_recv_queue, MPID_nem_queue_ptr_t proc_free_queue, MPID_nem_cell_ptr_t proc_elements,
- int num_proc_elements, MPID_nem_cell_ptr_t module_elements, int num_module_elements,
- MPID_nem_queue_ptr_t *module_free_queue, int ckpt_restart,
- MPIDI_PG_t *pg_p, int pg_rank, char **bc_val_p, int *val_max_sz_p);
+/* #define USE_CTXT_AS_MARK */
+/* #define DEBUG_IOV */
+/* #define ONDEMAND */
+
+int MPID_nem_mx_init (MPID_nem_queue_ptr_t proc_recv_queue, MPID_nem_queue_ptr_t proc_free_queue,
+ MPID_nem_cell_ptr_t proc_elements,int num_proc_elements,
+ MPID_nem_cell_ptr_t module_elements, int num_module_elements,
+ MPID_nem_queue_ptr_t *module_free_queue, int ckpt_restart,
+ MPIDI_PG_t *pg_p, int pg_rank, char **bc_val_p, int *val_max_sz_p);
int MPID_nem_mx_finalize (void);
int MPID_nem_mx_ckpt_shutdown (void);
int MPID_nem_mx_poll(MPID_nem_poll_dir_t in_or_out);
@@ -22,120 +28,191 @@
int MPID_nem_mx_vc_init (MPIDI_VC_t *vc);
int MPID_nem_mx_vc_destroy(MPIDI_VC_t *vc);
int MPID_nem_mx_vc_terminate (MPIDI_VC_t *vc);
+int MPID_nem_mx_get_from_bc(const char *business_card, uint32_t *remote_endpoint_id, uint64_t *remote_nic_id);
+
+/* alternate interface */
+int MPID_nem_mx_iSendContig(MPIDI_VC_t *vc, MPID_Request *sreq, void *hdr, MPIDI_msg_sz_t hdr_sz,
+ void *data, MPIDI_msg_sz_t data_sz);
+int MPID_nem_mx_iStartContigMsg(MPIDI_VC_t *vc, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data,
+ MPIDI_msg_sz_t data_sz, MPID_Request **sreq_ptr);
+int MPID_nem_mx_SendNoncontig(MPIDI_VC_t *vc, MPID_Request *sreq, void *header, MPIDI_msg_sz_t hdr_sz);
-int MPID_nem_mx_test (void);
+/* Direct Routines */
+int MPID_nem_mx_directSend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int dest, int tag,
+ MPID_Comm * comm, int context_offset, MPID_Request **sreq_p);
+int MPID_nem_mx_directSsend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int dest, int tag,
+ MPID_Comm * comm, int context_offset,MPID_Request **sreq_p);
+int MPID_nem_mx_directRecv(MPIDI_VC_t *vc, MPID_Request *rreq);
+int MPID_nem_mx_cancel_send(MPIDI_VC_t *vc, MPID_Request *sreq);
+int MPID_nem_mx_cancel_recv(MPIDI_VC_t *vc, MPID_Request *rreq);
-int MPID_mem_mx_register_mem (void *p, int len);
-int MPID_nem_mx_deregister_mem (void *p, int len);
+/* Callback routine for unex msgs in MX */
+mx_unexp_handler_action_t MPID_nem_mx_get_adi_msg(void *context,mx_endpoint_addr_t source,
+ uint64_t match_info,uint32_t length,void *data);
+/* Any source management */
+void MPID_nem_mx_anysource_posted(MPID_Request *rreq);
+int MPID_nem_mx_anysource_matched(MPID_Request *rreq);
-/* completion counter is atomically decremented when operation completes */
-int MPID_nem_mx_get (void *target_p, void *source_p, int len, MPIDI_VC_t *source_vc, int *completion_ctr);
-int MPID_nem_mx_put (void *target_p, void *source_p, int len, MPIDI_VC_t *target_vc, int *completion_ctr);
+/* Dtype management */
+int MPID_nem_mx_process_sdtype(MPID_Request **sreq_p, MPI_Datatype datatype, MPID_Datatype * dt_ptr, const void *buf,
+ int count, MPIDI_msg_sz_t data_sz, mx_segment_t *mx_iov, uint32_t *num_seg,int first_free_slot);
+int MPID_nem_mx_process_rdtype(MPID_Request **rreq_p, MPID_Datatype * dt_ptr, MPIDI_msg_sz_t data_sz, mx_segment_t *mx_iov,
+ uint32_t *num_seg);
-/* large message transfer functions */
-int MPID_nem_mx_lmt_send_pre (struct iovec *iov, size_t n_iov, MPIDI_VC_t *dest, struct iovec *cookie);
-int MPID_nem_mx_lmt_recv_pre (struct iovec *iov, size_t n_iov, MPIDI_VC_t *src, struct iovec *cookie);
-int MPID_nem_mx_lmt_start_send (MPIDI_VC_t *dest, struct iovec s_cookie, struct iovec r_cookie, int *completion_ctr);
-int MPID_nem_mx_lmt_start_recv (MPIDI_VC_t *src, struct iovec s_cookie, struct iovec r_cookie, int *completion_ctr);
-int MPID_nem_mx_lmt_send_post (struct iovec cookie);
-int MPID_nem_mx_lmt_recv_post (struct iovec cookie);
+/* Connection management*/
+int MPID_nem_mx_send_conn_info (MPIDI_VC_t *vc);
-#define MPID_NEM_CELL_LEN_MX (32*1024)
-#define MPID_NEM_CELL_PAYLOAD_LEN_MX (MPID_NEM_CELL_LEN_MX - sizeof(void *))
+extern mx_endpoint_t MPID_nem_mx_local_endpoint;
+extern int MPID_nem_mx_pending_send_req;
+extern uint32_t MPID_NEM_MX_FILTER;
+extern uint64_t MPID_nem_mx_local_nic_id;
+extern uint32_t MPID_nem_mx_local_endpoint_id;
-extern uint32_t MPID_nem_module_mx_filter;
-extern mx_endpoint_t MPID_nem_module_mx_local_endpoint;
-extern mx_endpoint_addr_t *MPID_nem_module_mx_endpoints_addr;
-
-extern int MPID_nem_module_mx_send_outstanding_request_num;
-extern int MPID_nem_module_mx_pendings_sends;
-
-extern int MPID_nem_module_mx_recv_outstanding_request_num;
-extern int MPID_nem_module_mx_pendings_recvs;
-extern int *MPID_nem_module_mx_pendings_recvs_array;
-
/* The vc provides a generic buffer in which network modules can store
private fields This removes all dependencies from the VC struction
on the network module, facilitating dynamic module loading. */
typedef struct
{
- unsigned int remote_endpoint_id; /* uint32_t equivalent */
- unsigned long long remote_nic_id; /* uint64_t equivalent */
+ /* The following 2 are used to set-up the connection */
+ uint32_t remote_endpoint_id;
+ uint64_t remote_nic_id;
+ uint16_t local_connected;
+ uint16_t remote_connected;
+ /* The following is used to actually send messages */
+ mx_endpoint_addr_t remote_endpoint_addr;
+ /* Poster recv pointer for anysource management*/
+ int (* recv_posted)(MPID_Request *req, void *vc);
} MPID_nem_mx_vc_area;
/* accessor macro to private fields in VC */
-#define VC_FIELD(vc, field) (((MPID_nem_mx_vc_area *)((MPIDI_CH3I_VC *)(vc)->channel_private)->netmod_area.padding)->field)
+#define VC_FIELD(vcp, field) (((MPID_nem_mx_vc_area *)((MPIDI_CH3I_VC *)((vcp)->channel_private))->netmod_area.padding)->field)
-typedef struct MPID_nem_mx_cell
-{
- struct MPID_nem_mx_cell *next;
- mx_request_t mx_request;
-} MPID_nem_mx_cell_t, *MPID_nem_mx_cell_ptr_t;
+/* The req provides a generic buffer in which network modules can store
+ private fields This removes all dependencies from the req structure
+ on the network module, facilitating dynamic module loading. */
+typedef struct
+{
+ mx_request_t mx_request;
+} MPID_nem_mx_req_area;
+/* accessor macro to private fields in REQ */
+#define REQ_FIELD(reqp, field) (((MPID_nem_mx_req_area *)((reqp)->ch.netmod_area.padding))->field)
-typedef struct MPID_nem_mx_req_queue
-{
- MPID_nem_mx_cell_ptr_t head;
- MPID_nem_mx_cell_ptr_t tail;
-} MPID_nem_mx_req_queue_t, *MPID_nem_mx_req_queue_ptr_t;
+#if CH3_RANK_BITS == 16
+#ifdef USE_CTXT_AS_MARK
+#define NBITS_TAG 32
+#else /* USE_CTXT_AS_MARK */
+#define NBITS_TAG 31
+#endif /* USE_CTXT_AS_MARK */
+typedef int32_t Mx_Nem_tag_t;
+#elif CH3_RANK_BITS == 32
+#ifdef USE_CTXT_AS_MARK
+#define NBITS_TAG 16
+#else /* USE_CTXT_AS_MARK */
+#define NBITS_TAG 15
+#endif /* USE_CTXT_AS_MARK */
+typedef int16_t Mx_Nem_tag_t;
+#endif /* CH3_RANK_BITS */
+#ifdef USE_CTXT_AS_MARK
+#define NBITS_TYPE 0
+#else /* USE_CTXT_AS_MARK */
+#define NBITS_TYPE 1
+#endif /* USE_CTXT_AS_MARK */
+#define NBITS_RANK CH3_RANK_BITS
+#define NBITS_CTXT 16
+#define NBITS_PGRANK (sizeof(int)*8)
-#define MPID_NEM_MX_CELL_TO_REQUEST(cellp) (&((cellp)->mx_request))
-#define MPID_NEM_MX_REQ 64
-#define MPID_NEM_MX_MATCH (UINT64_C(0x666))
-#define MPID_NEM_MX_MASK (UINT64_C(0xffffffffffffffff))
+#define NEM_MX_MATCHING_BITS (NBITS_TYPE+NBITS_TAG+NBITS_RANK+NBITS_CTXT)
+#define SHIFT_TYPE (NBITS_TAG+NBITS_RANK+NBITS_CTXT)
+#define SHIFT_TAG (NBITS_RANK+NBITS_CTXT)
+#define SHIFT_RANK (NBITS_CTXT)
+#define SHIFT_PGRANK (NBITS_CTXT)
+#define SHIFT_CTXT (0)
-static inline int
-MPID_nem_mx_req_queue_empty ( MPID_nem_mx_req_queue_ptr_t qhead )
-{
- return qhead->head == NULL;
-}
+#define NEM_MX_MAX_TYPE ((UINT64_C(1)<<NBITS_TYPE) -1)
+#define NEM_MX_MAX_TAG ((UINT64_C(1)<<NBITS_TAG) -1)
+#define NEM_MX_MAX_RANK ((UINT64_C(1)<<NBITS_RANK) -1)
+#define NEM_MX_MAX_CTXT ((UINT64_C(1)<<NBITS_CTXT) -1)
+#define NEM_MX_MAX_PGRANK ((UINT64_C(1)<<NBITS_PGRANK)-1)
-static inline void
-MPID_nem_mx_req_queue_enqueue (MPID_nem_mx_req_queue_ptr_t qhead, MPID_nem_mx_cell_ptr_t element)
-{
- MPID_nem_mx_cell_ptr_t prev = qhead->tail;
- if (prev == NULL)
- {
- qhead->head = element;
- }
- else
- {
- prev->next = element;
- }
- qhead->tail = element;
-}
+#define NEM_MX_TYPE_MASK (NEM_MX_MAX_TYPE<<SHIFT_TYPE)
+#define NEM_MX_TAG_MASK (NEM_MX_MAX_TAG <<SHIFT_TAG )
+#define NEM_MX_RANK_MASK (NEM_MX_MAX_RANK<<SHIFT_RANK)
+#define NEM_MX_CTXT_MASK (NEM_MX_MAX_CTXT<<SHIFT_CTXT)
+#define NEM_MX_PGRANK_MASK (NEM_MX_MAX_PGRANK<<SHIFT_PGRANK)
-static inline void
-MPID_nem_mx_req_queue_dequeue (MPID_nem_mx_req_queue_ptr_t qhead, MPID_nem_mx_cell_ptr_t *e)
-{
- register MPID_nem_mx_cell_ptr_t _e = qhead->head;
- if(_e == NULL)
- {
- *e = NULL;
- }
- else
- {
- qhead->head = _e->next;
- if(qhead->head == NULL)
- {
- qhead->tail = NULL;
- }
- _e->next = NULL;
- *e = (MPID_nem_mx_cell_ptr_t)_e;
- }
-}
+#define NEM_MX_MATCH_INTRA (UINT64_C(0x8000000000000000))
+#define NEM_MX_MATCH_DIRECT (UINT64_C(0x0000000000000000))
+#define NEM_MX_MATCH_FULL_MASK (UINT64_C(0xffffffffffffffff))
+#define NEM_MX_MATCH_EMPTY_MASK (UINT64_C(0x0000000000000000))
+#define NEM_MX_MASK (UINT64_C(0x8000000000000000))
-extern MPID_nem_mx_cell_ptr_t MPID_nem_module_mx_send_outstanding_request;
-extern MPID_nem_mx_req_queue_ptr_t MPID_nem_module_mx_send_free_req_queue;
-extern MPID_nem_mx_req_queue_ptr_t MPID_nem_module_mx_send_pending_req_queue;
+#define NEM_MX_SET_TAG(_match, _tag) do { \
+ MPIU_Assert((_tag >= 0)&&(_tag <= (NEM_MX_MAX_TAG))); \
+ ((_match) |= (((uint64_t)((_tag)&(NEM_MX_MAX_TAG))) << SHIFT_TAG)); \
+}while(0)
+#define NEM_MX_SET_SRC(_match, _src) do { \
+    MPIU_Assert((_src >= 0)&&(_src<=(NEM_MX_MAX_RANK)));       \
+ ((_match) |= (((uint64_t)(_src)) << SHIFT_RANK)); \
+}while(0)
+#define NEM_MX_SET_CTXT(_match, _ctxt) do { \
+    MPIU_Assert((_ctxt >= 0)&&(_ctxt<=(NEM_MX_MAX_CTXT)));     \
+ ((_match) |= (((uint64_t)(_ctxt)) << SHIFT_CTXT)); \
+}while(0)
+#define NEM_MX_SET_PGRANK(_match, _pg_rank) do { \
+ ((_match) |= (((uint64_t)(_pg_rank)) << SHIFT_PGRANK)); \
+}while(0)
+#define NEM_MX_SET_ANYSRC(_match) do{ \
+ ((_match) &= ~NEM_MX_RANK_MASK);\
+}while(0)
+#define NEM_MX_SET_ANYTAG(_match) do{ \
+ ((_match) &= ~NEM_MX_TAG_MASK); \
+}while(0)
-extern MPID_nem_mx_cell_ptr_t MPID_nem_module_mx_recv_outstanding_request;
-extern MPID_nem_mx_req_queue_ptr_t MPID_nem_module_mx_recv_free_req_queue;
-extern MPID_nem_mx_req_queue_ptr_t MPID_nem_module_mx_recv_pending_req_queue;
+#define NEM_MX_MATCH_GET_TYPE(_match, _type) do{ \
+ ((_type) = ((int16_t)(((_match) & NEM_MX_TYPE_MASK) >> SHIFT_TYPE))); \
+}while(0)
+#define NEM_MX_MATCH_GET_TAG(_match, _tag) do{ \
+ ((_tag) = ((Mx_Nem_tag_t)(((_match) & NEM_MX_TAG_MASK) >> SHIFT_TAG)));\
+}while(0)
+#define NEM_MX_MATCH_GET_RANK(_match, _rank) do{ \
+ ((_rank) = ((MPIR_Rank_t)(((_match) & NEM_MX_RANK_MASK) >> SHIFT_RANK)));\
+}while(0)
+#define NEM_MX_MATCH_GET_CTXT(_match, _ctxt) do{ \
+((_ctxt) = ((MPIR_Context_id_t)(((_match) & NEM_MX_CTXT_MASK) >> SHIFT_CTXT))); \
+}while(0)
+#define NEM_MX_MATCH_GET_PGRANK(_match, _pg_rank) do{ \
+ ((_pg_rank) = ((int)(((_match) & NEM_MX_PGRANK_MASK) >> SHIFT_PGRANK))); \
+}while(0)
-extern MPID_nem_queue_ptr_t MPID_nem_module_mx_free_queue;
-extern MPID_nem_queue_ptr_t MPID_nem_process_recv_queue;
-extern MPID_nem_queue_ptr_t MPID_nem_process_free_queue;
+#ifdef USE_CTXT_AS_MARK
+#define NEM_MX_INTRA_CTXT (0x0000000c)
+#define NEM_MX_SET_MATCH(_match,_tag,_rank,_context ) do{ \
+ MPIU_Assert((_tag >= 0)&&(_tag <= (NEM_MX_MAX_TAG))); \
+ MPIU_Assert((_rank >= 0)&&(_rank<=(NEM_MX_MAX_RANK))); \
+ MPIU_Assert((_context >= 0)&&(_context<=(NEM_MX_MAX_CTXT)));\
+ (_match)=((((uint64_t)(_tag)) << SHIFT_TAG) \
+ |(((uint64_t)(_rank)) << SHIFT_RANK) \
+ |(((uint64_t)(_context)) << SHIFT_CTXT)); \
+}while(0)
+#define NEM_MX_DIRECT_MATCH(_match,_tag,_rank,_context) NEM_MX_SET_MATCH(_match,_tag,_rank,_context)
+#define NEM_MX_ADI_MATCH(_match) NEM_MX_SET_MATCH(_match,0,0,NEM_MX_INTRA_CTXT)
+#else /* USE_CTXT_AS_MARK */
+#define NEM_MX_DIRECT_TYPE (0x0)
+#define NEM_MX_INTRA_TYPE (0x1)
+#define NEM_MX_SET_MATCH(_match,_type,_tag,_rank,_context ) do{ \
+ MPIU_Assert((_tag >= 0)&&(_tag <= (NEM_MX_MAX_TAG))); \
+ MPIU_Assert((_rank >= 0)&&(_rank<=(NEM_MX_MAX_RANK))); \
+ MPIU_Assert((_context >= 0)&&(_context<=(NEM_MX_MAX_CTXT))); \
+ (_match)=((((uint64_t) (_type)) << SHIFT_TYPE) \
+ |(((uint64_t)((_tag)&(NEM_MX_MAX_TAG)))<< SHIFT_TAG) \
+ |(((uint64_t) (_rank)) << SHIFT_RANK) \
+ |(((uint64_t) (_context)) << SHIFT_CTXT));\
+}while(0)
+#define NEM_MX_DIRECT_MATCH(_match,_tag,_rank,_context) NEM_MX_SET_MATCH(_match,NEM_MX_DIRECT_TYPE,_tag,_rank,_context)
+#define NEM_MX_ADI_MATCH(_match) NEM_MX_SET_MATCH(_match,NEM_MX_INTRA_TYPE,0,0,0)
+#endif /* USE_CTXT_AS_MARK */
#endif
+
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_init.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_init.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_init.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -4,7 +4,6 @@
* See COPYRIGHT in top-level directory.
*/
-#include "myriexpress.h"
#include "mpid_nem_impl.h"
#include "mx_impl.h"
@@ -21,80 +20,58 @@
MPID_nem_mx_vc_terminate
};
+static MPIDI_Comm_ops_t comm_ops = {
+ MPID_nem_mx_directRecv, /* recv_posted */
+
+ MPID_nem_mx_directSend, /* send */
+ MPID_nem_mx_directSend, /* rsend */
+ MPID_nem_mx_directSsend, /* ssend */
+ MPID_nem_mx_directSend, /* isend */
+ MPID_nem_mx_directSend, /* irsend */
+ MPID_nem_mx_directSsend, /* issend */
+
+ NULL, /* send_init */
+ NULL, /* bsend_init */
+ NULL, /* rsend_init */
+ NULL, /* ssend_init */
+ NULL, /* startall */
+
+ MPID_nem_mx_cancel_send,/* cancel_send */
+ MPID_nem_mx_cancel_recv /* cancel_recv */
+};
+
#define MPIDI_CH3I_ENDPOINT_KEY "endpoint_id"
#define MPIDI_CH3I_NIC_KEY "nic_id"
-static uint64_t local_nic_id;
-static uint64_t remote_nic_id;
-static uint32_t local_endpoint_id;
-static uint32_t remote_endpoint_id;
+int MPID_nem_mx_pending_send_req = 0;
+uint32_t MPID_NEM_MX_FILTER = 0xabadbada;
+uint64_t MPID_nem_mx_local_nic_id;
+uint32_t MPID_nem_mx_local_endpoint_id;
+mx_endpoint_t MPID_nem_mx_local_endpoint;
-mx_endpoint_t MPID_nem_module_mx_local_endpoint;
-mx_endpoint_addr_t *MPID_nem_module_mx_endpoints_addr;
-
-MPID_nem_mx_cell_ptr_t MPID_nem_module_mx_send_outstanding_request;
-int MPID_nem_module_mx_send_outstanding_request_num;
-MPID_nem_mx_cell_ptr_t MPID_nem_module_mx_recv_outstanding_request;
-int MPID_nem_module_mx_recv_outstanding_request_num;
-
-uint32_t MPID_nem_module_mx_filter = 0xdeadbeef;
-static uint32_t MPID_nem_module_mx_timeout = MX_INFINITE;
-int MPID_nem_module_mx_pendings_sends = 0;
-int MPID_nem_module_mx_pendings_recvs = 0 ;
-int *MPID_nem_module_mx_pendings_sends_array;
-int *MPID_nem_module_mx_pendings_recvs_array;
-
-
-static MPID_nem_mx_req_queue_t _mx_send_free_req_q;
-static MPID_nem_mx_req_queue_t _mx_send_pend_req_q;
-static MPID_nem_mx_req_queue_t _mx_recv_free_req_q;
-static MPID_nem_mx_req_queue_t _mx_recv_pend_req_q;
-
-MPID_nem_mx_req_queue_ptr_t MPID_nem_module_mx_send_free_req_queue = &_mx_send_free_req_q;
-MPID_nem_mx_req_queue_ptr_t MPID_nem_module_mx_send_pending_req_queue = &_mx_send_pend_req_q;
-MPID_nem_mx_req_queue_ptr_t MPID_nem_module_mx_recv_free_req_queue = &_mx_recv_free_req_q;
-MPID_nem_mx_req_queue_ptr_t MPID_nem_module_mx_recv_pending_req_queue = &_mx_recv_pend_req_q;
-
-static MPID_nem_queue_t _free_queue;
-
-MPID_nem_queue_ptr_t MPID_nem_module_mx_free_queue = 0;
-
-MPID_nem_queue_ptr_t MPID_nem_process_recv_queue = 0;
-MPID_nem_queue_ptr_t MPID_nem_process_free_queue = 0;
-
#undef FUNCNAME
#define FUNCNAME init_mx
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int init_mx( MPIDI_PG_t *pg_p )
+static int init_mx( MPIDI_PG_t *pg_p )
{
- mx_return_t ret;
- int mpi_errno = MPI_SUCCESS;
- int index;
- MPIU_CHKPMEM_DECL(1);
+ mx_endpoint_addr_t local_endpoint_addr;
+ mx_return_t ret;
+ mx_param_t param;
+ int mpi_errno = MPI_SUCCESS;
+ int r;
+ r = MPIU_SetEnv("MX_DISABLE_SHARED", "1", 1);
+ MPIU_ERR_CHKANDJUMP(r, mpi_errno, MPI_ERR_OTHER, "**setenv");
+ r = MPIU_SetEnv("MX_DISABLE_SELF", "1", 1);
+ MPIU_ERR_CHKANDJUMP(r, mpi_errno, MPI_ERR_OTHER, "**setenv");
+
ret = mx_init();
MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_init", "**mx_init %s", mx_strerror (ret));
- /* Allocate more than needed but use only external processes */
- MPIU_CHKPMEM_MALLOC (MPID_nem_module_mx_endpoints_addr, mx_endpoint_addr_t *, MPID_nem_mem_region.num_procs * sizeof(mx_endpoint_addr_t), mpi_errno, "endpoints addr");
-#if 1
- /* Fix me : mysteriously MPIU_CHKPMEM_MALLOC fails here, when the regular MPIU_Malloc doesn't ... */
- MPID_nem_module_mx_send_outstanding_request =(MPID_nem_mx_cell_ptr_t)MPIU_Malloc(MPID_NEM_MX_REQ * sizeof(MPID_nem_mx_cell_t));
- MPID_nem_module_mx_recv_outstanding_request =(MPID_nem_mx_cell_ptr_t)MPIU_Malloc(MPID_NEM_MX_REQ * sizeof(MPID_nem_mx_cell_t));
- MPID_nem_module_mx_pendings_recvs_array =(int *)MPIU_Malloc(MPID_nem_mem_region.num_procs * sizeof(int));
-#else
- MPIU_CHKPMEM_MALLOC (MPID_nem_module_mx_send_outstanding_request, MPID_nem_mx_cell_ptr_t, MPID_NEM_MX_REQ * sizeof(MPID_nem_mx_cell_t), mpi_errno, "send outstanding req");
- MPIU_CHKPMEM_MALLOC (MPID_nem_module_mx_recv_outstanding_request, MPID_nem_mx_cell_ptr_t, MPID_NEM_MX_REQ * sizeof(MPID_nem_mx_cell_t), mpi_errno, "recv outstanding req");
- MPIU_CHKPMEM_MALLOC (MPID_nem_module_mx_pendings_recvs_array,int *, MPID_nem_mem_region.num_procs * sizeof(int), mpi_errno, "pending recvs array");
-#endif
- memset(MPID_nem_module_mx_send_outstanding_request,0,MPID_NEM_MX_REQ*sizeof(MPID_nem_mx_cell_t));
- memset(MPID_nem_module_mx_recv_outstanding_request,0,MPID_NEM_MX_REQ*sizeof(MPID_nem_mx_cell_t));
- for (index = 0 ; index < MPID_nem_mem_region.num_procs ; index++)
- {
- MPID_nem_module_mx_pendings_recvs_array[index] = 0;
- }
+ mx_set_error_handler(MX_ERRORS_RETURN);
+
/*
ret = mx_get_info(NULL, MX_NIC_COUNT, NULL, 0, &nic_count, sizeof(int));
MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_get_info", "**mx_get_info %s", mx_strerror (ret));
@@ -109,39 +86,26 @@
index++;
}while(ret != MX_SUCCESS);
*/
-
- /* mx_board_num */
- ret = mx_open_endpoint(MX_ANY_NIC,MX_ANY_ENDPOINT,MPID_nem_module_mx_filter,NULL,0,&MPID_nem_module_mx_local_endpoint);
+#ifndef USE_CTXT_AS_MARK
+ param.key = MX_PARAM_CONTEXT_ID;
+ param.val.context_id.bits = NEM_MX_MATCHING_BITS - SHIFT_TYPE;
+ param.val.context_id.shift = SHIFT_TYPE;
+ ret = mx_open_endpoint(MX_ANY_NIC,MX_ANY_ENDPOINT,MPID_NEM_MX_FILTER,¶m,1,&MPID_nem_mx_local_endpoint);
+#else
+ ret = mx_open_endpoint(MX_ANY_NIC,MX_ANY_ENDPOINT,MPID_NEM_MX_FILTER,NULL,0,&MPID_nem_mx_local_endpoint);
+#endif
MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_open_endpoint", "**mx_open_endpoint %s", mx_strerror (ret));
- ret = mx_get_endpoint_addr( MPID_nem_module_mx_local_endpoint,&MPID_nem_module_mx_endpoints_addr[MPID_nem_mem_region.rank]);
+ ret = mx_get_endpoint_addr(MPID_nem_mx_local_endpoint,&local_endpoint_addr);
MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_get_endpoint_addr", "**mx_get_endpoint_addr %s", mx_strerror (ret));
- ret = mx_decompose_endpoint_addr(MPID_nem_module_mx_endpoints_addr[MPID_nem_mem_region.rank],&local_nic_id, &local_endpoint_id);
+ ret = mx_decompose_endpoint_addr(local_endpoint_addr,&MPID_nem_mx_local_nic_id,&MPID_nem_mx_local_endpoint_id);
MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_decompose_endpoint_addr", "**mx_decompose_endpoint_addr %s", mx_strerror (ret));
- MPID_nem_module_mx_send_free_req_queue->head = NULL;
- MPID_nem_module_mx_send_free_req_queue->tail = NULL;
- MPID_nem_module_mx_send_pending_req_queue->head = NULL;
- MPID_nem_module_mx_send_pending_req_queue->tail = NULL;
-
- MPID_nem_module_mx_recv_free_req_queue->head = NULL;
- MPID_nem_module_mx_recv_free_req_queue->tail = NULL;
- MPID_nem_module_mx_recv_pending_req_queue->head = NULL;
- MPID_nem_module_mx_recv_pending_req_queue->tail = NULL;
-
- for (index = 0; index < MPID_NEM_MX_REQ ; ++index)
- {
- MPID_nem_mx_req_queue_enqueue (MPID_nem_module_mx_send_free_req_queue,&MPID_nem_module_mx_send_outstanding_request[index]);
- MPID_nem_mx_req_queue_enqueue (MPID_nem_module_mx_recv_free_req_queue,&MPID_nem_module_mx_recv_outstanding_request[index]);
- }
-
- MPIU_CHKPMEM_COMMIT();
- fn_exit:
- return mpi_errno;
- fn_fail:
- MPIU_CHKPMEM_REAP();
- goto fn_exit;
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
}
/*
@@ -170,37 +134,26 @@
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int
MPID_nem_mx_init (MPID_nem_queue_ptr_t proc_recv_queue,
- MPID_nem_queue_ptr_t proc_free_queue,
- MPID_nem_cell_ptr_t proc_elements, int num_proc_elements,
- MPID_nem_cell_ptr_t module_elements, int num_module_elements,
- MPID_nem_queue_ptr_t *module_free_queue, int ckpt_restart,
- MPIDI_PG_t *pg_p, int pg_rank,
- char **bc_val_p, int *val_max_sz_p)
+ MPID_nem_queue_ptr_t proc_free_queue,
+ MPID_nem_cell_ptr_t proc_elements, int num_proc_elements,
+ MPID_nem_cell_ptr_t module_elements, int num_module_elements,
+ MPID_nem_queue_ptr_t *module_free_queue, int ckpt_restart,
+ MPIDI_PG_t *pg_p, int pg_rank,
+ char **bc_val_p, int *val_max_sz_p)
{
int mpi_errno = MPI_SUCCESS ;
- int index;
- if( MPID_nem_mem_region.ext_procs > 0)
- {
- init_mx(pg_p);
- mpi_errno = MPID_nem_mx_get_business_card (pg_rank, bc_val_p, val_max_sz_p);
- if (mpi_errno) MPIU_ERR_POP (mpi_errno);
- }
-
- MPID_nem_process_recv_queue = proc_recv_queue;
- MPID_nem_process_free_queue = proc_free_queue;
-
- MPID_nem_module_mx_free_queue = &_free_queue;
-
- MPID_nem_queue_init (MPID_nem_module_mx_free_queue);
-
- for (index = 0; index < num_module_elements; ++index)
- {
- MPID_nem_queue_enqueue (MPID_nem_module_mx_free_queue, &module_elements[index]);
- }
-
- *module_free_queue = MPID_nem_module_mx_free_queue;
+ init_mx(pg_p);
+ mpi_errno = MPID_nem_mx_get_business_card (pg_rank, bc_val_p, val_max_sz_p);
+ if (mpi_errno) MPIU_ERR_POP (mpi_errno);
+
+ mx_register_unexp_handler(MPID_nem_mx_local_endpoint,MPID_nem_mx_get_adi_msg,NULL);
+
+ mpi_errno = MPIDI_CH3I_Register_anysource_notification(MPID_nem_mx_anysource_posted,
+ MPID_nem_mx_anysource_matched);
+ if (mpi_errno) MPIU_ERR_POP(mpi_errno);
+
fn_exit:
return mpi_errno;
fn_fail:
@@ -216,66 +169,63 @@
{
int mpi_errno = MPI_SUCCESS;
- mpi_errno = MPIU_Str_add_int_arg (bc_val_p, val_max_sz_p, MPIDI_CH3I_ENDPOINT_KEY, local_endpoint_id);
+ mpi_errno = MPIU_Str_add_int_arg (bc_val_p, val_max_sz_p, MPIDI_CH3I_ENDPOINT_KEY, MPID_nem_mx_local_endpoint_id);
if (mpi_errno != MPIU_STR_SUCCESS)
- {
- if (mpi_errno == MPIU_STR_NOMEM)
- {
- MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard_len");
- }
- else
- {
- MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard");
- }
+ {
+ if (mpi_errno == MPIU_STR_NOMEM)
+ {
+ MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard_len");
+ }
+ else
+ {
+ MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard");
+ }
goto fn_exit;
- }
-
- mpi_errno = MPIU_Str_add_binary_arg (bc_val_p, val_max_sz_p, MPIDI_CH3I_NIC_KEY, (char *)&local_nic_id, sizeof(uint64_t));
+ }
+
+ mpi_errno = MPIU_Str_add_binary_arg (bc_val_p, val_max_sz_p, MPIDI_CH3I_NIC_KEY, (char *)&MPID_nem_mx_local_nic_id, sizeof(uint64_t));
if (mpi_errno != MPIU_STR_SUCCESS)
- {
- if (mpi_errno == MPIU_STR_NOMEM)
- {
- MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard_len");
- }
- else
- {
- MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard");
- }
- goto fn_exit;
- }
+ {
+ if (mpi_errno == MPIU_STR_NOMEM)
+ {
+ MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard_len");
+ }
+ else
+ {
+ MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard");
+ }
+ goto fn_exit;
+ }
fn_exit:
return mpi_errno;
- fn_fail:
- goto fn_exit;
}
#undef FUNCNAME
#define FUNCNAME MPID_nem_mx_get_from_bc
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_mx_get_from_bc (const char *business_card, uint32_t *remote_endpoint_id, uint64_t *remote_nic_id)
+int MPID_nem_mx_get_from_bc(const char *business_card, uint32_t *remote_endpoint_id, uint64_t *remote_nic_id)
{
int mpi_errno = MPI_SUCCESS;
int len;
- uint32_t tmp_endpoint_id;
+ int tmp_endpoint_id;
- mpi_errno = MPIU_Str_get_int_arg (business_card, MPIDI_CH3I_ENDPOINT_KEY, &tmp_endpoint_id);
+ mpi_errno = MPIU_Str_get_int_arg(business_card, MPIDI_CH3I_ENDPOINT_KEY, &tmp_endpoint_id);
if (mpi_errno != MPIU_STR_SUCCESS)
- {
- /* FIXME: create a real error string for this */
- MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd");
- }
+ {
+ /* FIXME: create a real error string for this */
+ MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd");
+ }
*remote_endpoint_id = (uint32_t)tmp_endpoint_id;
mpi_errno = MPIU_Str_get_binary_arg (business_card, MPIDI_CH3I_NIC_KEY, (char *)remote_nic_id, sizeof(uint64_t), &len);
if ((mpi_errno != MPIU_STR_SUCCESS) || len != sizeof(uint64_t))
- {
- /* FIXME: create a real error string for this */
- MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd");
- }
+ {
+ /* FIXME: create a real error string for this */
+ MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd");
+ }
fn_exit:
return mpi_errno;
@@ -290,11 +240,7 @@
int
MPID_nem_mx_connect_to_root (const char *business_card, MPIDI_VC_t *new_vc)
{
- int mpi_errno = MPI_SUCCESS;
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+ return MPI_SUCCESS;
}
#undef FUNCNAME
@@ -302,37 +248,49 @@
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
int
-MPID_nem_mx_vc_init (MPIDI_VC_t *vc, const char *business_card)
+MPID_nem_mx_vc_init (MPIDI_VC_t *vc)
{
+ uint32_t threshold;
+ MPIDI_CH3I_VC *vc_ch = (MPIDI_CH3I_VC *)vc->channel_private;
int mpi_errno = MPI_SUCCESS;
- int ret;
-
+
/* first make sure that our private fields in the vc fit into the area provided */
MPIU_Assert(sizeof(MPID_nem_mx_vc_area) <= MPID_NEM_VC_NETMOD_AREA_LEN);
- if( MPID_nem_mem_region.ext_procs > 0)
- {
- mpi_errno = MPID_nem_mx_get_from_bc (business_card, &VC_FIELD(vc, remote_endpoint_id), &VC_FIELD(vc, remote_nic_id));
- /* --BEGIN ERROR HANDLING-- */
- if (mpi_errno)
- {
- MPIU_ERR_POP (mpi_errno);
- }
- /* --END ERROR HANDLING-- */
-
- ret = mx_connect(MPID_nem_module_mx_local_endpoint,
- VC_FIELD(vc, remote_nic_id),
- VC_FIELD(vc, remote_endpoint_id),
- MPID_nem_module_mx_filter,
- MPID_nem_module_mx_timeout,
- &MPID_nem_module_mx_endpoints_addr[vc->pg_rank]);
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_connect", "**mx_connect %s", mx_strerror (ret));
- }
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+#ifdef ONDEMAND
+ VC_FIELD(vc, local_connected) = 0;
+ VC_FIELD(vc, remote_connected) = 0;
+#else
+ {
+ char business_card[MPID_NEM_MAX_NETMOD_STRING_LEN];
+ int ret;
+
+ mpi_errno = vc->pg->getConnInfo(vc->pg_rank, business_card, MPID_NEM_MAX_NETMOD_STRING_LEN, vc->pg);
+ if (mpi_errno) MPIU_ERR_POP(mpi_errno);
+
+ mpi_errno = MPID_nem_mx_get_from_bc (business_card, &VC_FIELD(vc, remote_endpoint_id), &VC_FIELD(vc, remote_nic_id));
+ if (mpi_errno) MPIU_ERR_POP (mpi_errno);
+
+ ret = mx_connect(MPID_nem_mx_local_endpoint,VC_FIELD(vc, remote_nic_id),VC_FIELD(vc, remote_endpoint_id),
+ MPID_NEM_MX_FILTER,MX_INFINITE,&(VC_FIELD(vc, remote_endpoint_addr)));
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_connect", "**mx_connect %s", mx_strerror (ret));
+ mx_set_endpoint_addr_context(VC_FIELD(vc, remote_endpoint_addr),(void *)vc);
+ }
+#endif
+ mx_get_info(MPID_nem_mx_local_endpoint, MX_COPY_SEND_MAX, NULL, 0, &threshold, sizeof(uint32_t));
+
+ vc->eager_max_msg_sz = threshold;
+ vc->rndvSend_fn = NULL;
+ vc->sendNoncontig_fn = MPID_nem_mx_SendNoncontig;
+ vc->comm_ops = &comm_ops;
+
+ vc_ch->iStartContigMsg = MPID_nem_mx_iStartContigMsg;
+ vc_ch->iSendContig = MPID_nem_mx_iSendContig;
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
}
#undef FUNCNAME
@@ -345,10 +303,7 @@
/* free any resources associated with this VC here */
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+ return mpi_errno;
}
#undef FUNCNAME
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_poll.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_poll.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_poll.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,3 +1,4 @@
+
/* -*- Mode: C; c-basic-offset:4 ; -*- */
/*
* (C) 2006 by Argonne National Laboratory.
@@ -5,166 +6,400 @@
*/
#include "mx_impl.h"
-#include "myriexpress.h"
#include "my_papi_defs.h"
+#include "uthash.h"
-/*
-void mx__print_queue(MPID_nem_mx_req_queue_ptr_t qhead, int sens)
+typedef struct mpid_nem_mx_hash_struct {
+ MPID_Request *mpid_req_ptr;
+ MPIDI_VC_t *vc;
+ mx_request_t *mx_req_ptr;
+ UT_hash_handle hh1,hh2;
+}mpid_nem_mx_hash_t;
+
+static mpid_nem_mx_hash_t *mpid_nem_mx_asreqs = NULL;
+#define MPID_MEM_MX_ADD_REQ_IN_HASH(_mpi_req,_mx_req) do{ \
+ mpid_nem_mx_hash_t *s; \
+ s = MPIU_Malloc(sizeof(mpid_nem_mx_hash_t)); \
+ s->mpid_req_ptr = (_mpi_req); \
+ s->mx_req_ptr = (_mx_req); \
+ HASH_ADD(hh1, mpid_nem_mx_asreqs, mpid_req_ptr, sizeof(MPID_Request*), s); \
+}while(0)
+#define MPID_NEM_MX_GET_REQ_FROM_HASH(_mpi_req_ptr,_mx_req) do{ \
+ mpid_nem_mx_hash_t *s; \
+ HASH_FIND(hh1, mpid_nem_mx_asreqs, &(_mpi_req_ptr), sizeof(MPID_Request*), s); \
+ if(s){HASH_DELETE(hh1, mpid_nem_mx_asreqs, s); (_mx_req) = s->mx_req_ptr; } else {(_mx_req) = NULL;} \
+}while(0)
+
+static mpid_nem_mx_hash_t *mpid_nem_mx_connreqs ATTRIBUTE((unused, used))= NULL;
+#define MPID_MEM_MX_ADD_VC_IN_HASH(_vc,_mx_req) do{ \
+ mpid_nem_mx_hash_t *s; \
+ s = MPIU_Malloc(sizeof(mpid_nem_mx_hash_t)); \
+ s->vc = (_vc); \
+ s->mx_req_ptr = (_mx_req); \
+   HASH_ADD(hh2, mpid_nem_mx_connreqs, vc, sizeof(MPIDI_VC_t*), s); \
+}while(0)
+#define MPID_NEM_MX_REM_VC_FROM_HASH(_vc,_mx_req) do{ \
+ mpid_nem_mx_hash_t *s; \
+   HASH_FIND(hh2, mpid_nem_mx_connreqs, &(_vc), sizeof(MPIDI_VC_t*), s); \
+ if(s){HASH_DELETE(hh2, mpid_nem_mx_connreqs, s); (_mx_req) = s->mx_req_ptr; } else {(_mx_req) = NULL;} \
+}while(0)
+#define MPID_NEM_MX_IS_VC_IN_HASH(_vc,_ret) do{ \
+ mpid_nem_mx_hash_t *s; \
+   HASH_FIND(hh2, mpid_nem_mx_connreqs, &(_vc), sizeof(MPIDI_VC_t*), s); \
+ if(s){ (_ret) = 1; } else {(_ret) = 0;} \
+}while(0)
+
+
+static int MPID_nem_mx_handle_sreq(MPID_Request *sreq);
+static int MPID_nem_mx_handle_rreq(MPID_Request *rreq, mx_status_t status);
+
+/* For a very mysterious reason the MPID_nem_handle_pkt routine can NOT be called */
+/* from inside the MX callback: I suspect that when the RecvQ is manipulated (through */
+/* the pkt_handler func) it is somehow corrupted. This results in having a SEND req as */
+/* head of the Posted RecvQ !! */
+/* Edit : handling the packet in the callback when the data is present or latter doesn't */
+/* impact latency much so that's OK. */
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_get_adi_msg
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+mx_unexp_handler_action_t MPID_nem_mx_get_adi_msg(void *context, mx_endpoint_addr_t source, uint64_t match_info,
+ uint32_t length, void *data)
{
- MPID_nem_mx_cell_ptr_t curr = qhead->head;
- int index = 0;
+ MPIDI_VC_t *vc;
+#ifdef USE_CTXT_AS_MARK
+ MPIR_Context_id_t ctxt;
+ NEM_MX_MATCH_GET_CTXT(match_info, ctxt);
+#else
+ int16_t type;
+ NEM_MX_MATCH_GET_TYPE(match_info,type);
+#endif
- if(sens)
- fprintf(stdout,"=======================ENQUEUE=========================== \n");
- else
- fprintf(stdout,"=======================DEQUEUE=========================== \n");
-
- while(curr != NULL)
- {
- fprintf(stdout,"[%i] -- [CELL %i @%p]: [REQUEST is %i @%p][NEXT @ %p] \n",
- MPID_nem_mem_region.rank,index,curr,curr->mx_request,&(curr->mx_request),curr->next);
- curr = curr->next;
- index++;
- }
- if(sens)
- fprintf(stdout,"=======================ENQUEUE=========================== \n");
- else
- fprintf(stdout,"=======================DEQUEUE=========================== \n");
+
+#ifdef ONDEMAND
+ mx_get_endpoint_addr_context(source,(void **)(&vc));
+
+ if (vc == NULL)
+ {
+#ifdef USE_CTXT_AS_MARK
+ if(ctxt == NEM_MX_INTRA_CTXT)
+#else
+ if(type == NEM_MX_INTRA_TYPE)
+#endif
+ {
+ mx_return_t ret;
+ uint32_t result;
+ uint64_t remote_nic_id;
+ uint32_t remote_endpoint_id;
+ int pg_rank;
+ char pg_id[MPID_NEM_MAX_NETMOD_STRING_LEN];
+ MPIDI_PG_t *pg;
+ /*mx_request_t *mx_request = MPIU_Malloc(sizeof(mx_request_t)); */
+ mx_request_t mx_request;
+ mx_status_t status;
+
+ remote_nic_id = *((uint64_t *)data);
+ remote_endpoint_id = *((uint32_t *)((char *)data+sizeof(uint64_t)));
+ memcpy(pg_id,(char *)data+sizeof(uint64_t)+sizeof(uint32_t),length-sizeof(uint64_t)-sizeof(uint32_t));
+
+ MPIDI_PG_Find (pg_id, &pg);
+ NEM_MX_MATCH_GET_PGRANK(match_info,pg_rank);
+ MPIDI_PG_Get_vc(pg, pg_rank, &vc);
+
+ fprintf(stdout,"[%i]=== NULL VC : Receiver Unex Got infos (%lx) from Sender (in data) ..%li %i %s %p\n",
+ MPID_nem_mem_region.rank,match_info,remote_nic_id,remote_endpoint_id,pg_id,vc);
+ ret = mx_iconnect(MPID_nem_mx_local_endpoint, remote_nic_id,remote_endpoint_id ,
+ MPID_NEM_MX_FILTER,match_info,NULL,&mx_request);
+ MPIU_Assert(ret == MX_SUCCESS);
+
+ /*fprintf(stdout,"[%i]=== NULL VC : Receiver IConnect posted \n", MPID_nem_mem_region.rank); */
+
+ do{
+ ret = mx_test(MPID_nem_mx_local_endpoint,&mx_request,&status,&result);
+ }while((result == 0) && (ret == MX_SUCCESS));
+ MPIU_Assert(ret == MX_SUCCESS);
+
+ VC_FIELD(vc, remote_connected) = 1;
+ VC_FIELD(vc, local_connected) = 1;
+ VC_FIELD(vc, remote_endpoint_addr) = status.source;
+ mx_set_endpoint_addr_context(VC_FIELD(vc, remote_endpoint_addr),(void *)vc);
+ fprintf(stdout,"[%i]=== NULL VC : Receiver IConnect done \n", MPID_nem_mem_region.rank);
+
+
+ return MX_RECV_FINISHED;
+ }
+ else
+#ifndef USE_CTXT_AS_MARK
+if(type == NEM_MX_DIRECT_TYPE)
+#endif
+ {
+ fprintf(stdout,"[%i]=== NULL VC : callback for direct %lx \n", MPID_nem_mem_region.rank,match_info);
+ return MX_RECV_CONTINUE;
+ }
+#ifndef USE_CTXT_AS_MARK
+ else
+ {
+ fprintf(stdout,"Unknown Message Type :%i, aborting ...\n",type);
+ MPIU_Assert(0);
+ abort();
+ }
+#endif
+ }
+ else {
+#endif
+#ifdef USE_CTXT_AS_MARK
+ if(ctxt == NEM_MX_INTRA_CTXT)
+#else
+ if (type == NEM_MX_INTRA_TYPE)
+#endif
+ {
+ MPID_Request *rreq;
+ mx_request_t mx_request;
+ mx_segment_t iov;
+ mx_return_t ret;
+
+#ifdef ONDEMAND
+ if (VC_FIELD(vc, remote_connected) == 0)
+ {
+ mx_return_t ret;
+ uint64_t remote_nic_id;
+ uint32_t remote_endpoint_id;
+ int pg_rank;
+ char pg_id[MPID_NEM_MAX_NETMOD_STRING_LEN];
+ MPIDI_PG_t *pg;
+ uint32_t result;
+
+ remote_nic_id = *((uint64_t *)data);
+ remote_endpoint_id = *((uint32_t *)((char *)data+sizeof(uint64_t)));
+ memcpy(pg_id,(char *)data+sizeof(uint64_t)+sizeof(uint32_t),length-sizeof(uint64_t)-sizeof(uint32_t));
+
+ fprintf(stdout,"[%i]=== NOT NULL VC : Receiver Unex Got infos from Sender (in data) ..%li %i %s vc is %p\n",
+ MPID_nem_mem_region.rank,remote_nic_id,remote_endpoint_id,pg_id,vc);
+
+ MPIDI_PG_Find (pg_id, &pg);
+ NEM_MX_MATCH_GET_PGRANK(match_info,pg_rank);
+ MPIDI_PG_Get_vc(pg, pg_rank, &vc);
+
+ if(VC_FIELD(vc, local_connected) == 0)
+ {
+ mx_request_t mx_request;
+ mx_status_t status;
+ ret = mx_iconnect(MPID_nem_mx_local_endpoint,remote_nic_id,remote_endpoint_id,
+ MPID_NEM_MX_FILTER,match_info,NULL,&mx_request);
+ MPIU_Assert(ret == MX_SUCCESS);
+ do{
+ ret = mx_test(MPID_nem_mx_local_endpoint,&mx_request,&status,&result);
+ }while((result == 0) && (ret == MX_SUCCESS));
+ MPIU_Assert(ret == MX_SUCCESS);
+
+ VC_FIELD(vc, remote_endpoint_addr) = status.source;
+ mx_set_endpoint_addr_context(VC_FIELD(vc, remote_endpoint_addr),(void *)vc);
+ VC_FIELD(vc, local_connected) = 1;
+ }
+ VC_FIELD(vc, remote_connected) = 1;
+ fprintf(stdout,"[%i]=== NOT NULL VC : Receiver FULLY Connected with peer \n", MPID_nem_mem_region.rank);
+
+ return MX_RECV_FINISHED;
+ }
+#endif
+ rreq = MPID_Request_create();
+ MPIU_Assert (rreq != NULL);
+ MPIU_Object_set_ref (rreq, 1);
+ rreq->kind = MPID_REQUEST_RECV;
+ mx_get_endpoint_addr_context(source,(void **)(&vc));
+ rreq->ch.vc = vc;
+
+ if(length <= sizeof(MPIDI_CH3_PktGeneric_t)) {
+ iov.segment_ptr = (char*)&(rreq->dev.pending_pkt);
+ }
+ else{
+ rreq->dev.tmpbuf = MPIU_Malloc(length);
+ MPIU_Assert(rreq->dev.tmpbuf);
+ rreq->dev.tmpbuf_sz = length;
+ iov.segment_ptr = (char*)(rreq->dev.tmpbuf);
+ }
+ iov.segment_length = length;
+
+ ret = mx_irecv(MPID_nem_mx_local_endpoint,&iov,1,match_info,NEM_MX_MATCH_FULL_MASK,(void *)rreq,&mx_request);
+ MPIU_Assert(ret == MX_SUCCESS);
+
+ return MX_RECV_CONTINUE;
+ }
+ else
+#ifndef USE_CTXT_AS_MARK
+if (type == NEM_MX_DIRECT_TYPE)
+#endif
+ {
+ /* Do nothing: */
+ /* This shall be eventually matched in recv */
+ return MX_RECV_CONTINUE;
+ }
+#ifndef USE_CTXT_AS_MARK
+ else
+ {
+ fprintf(stdout,"Unknown Message Type :%i, aborting ...\n",type);
+ MPIU_Assert(0);
+ abort();
+ }
+#endif
+#ifdef ONDEMAND
+ }
+#endif
}
-*/
#undef FUNCNAME
-#define FUNCNAME MPID_nem_mx_send_from_queue
+#define FUNCNAME MPID_nem_mx_directRecv
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
-inline int
-MPID_nem_mx_send_from_queue()
+int MPID_nem_mx_directRecv(MPIDI_VC_t *vc, MPID_Request *rreq)
{
- int mpi_errno = MPI_SUCCESS;
- mx_return_t ret;
- mx_status_t status;
- uint32_t result;
-
- if (MPID_nem_module_mx_pendings_sends > 0)
- {
- MPID_nem_mx_cell_ptr_t curr_cell = MPID_nem_module_mx_send_pending_req_queue->head;
- while( curr_cell != NULL )
- {
- ret = mx_test(MPID_nem_module_mx_local_endpoint,
- MPID_NEM_MX_CELL_TO_REQUEST(curr_cell),
- &status,
- &result);
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_test", "**mx_test %s", mx_strerror (ret));
- if((result != 0) && (status.code == MX_STATUS_SUCCESS))
- {
- MPID_nem_mx_cell_ptr_t cell;
- MPID_nem_mx_req_queue_dequeue(MPID_nem_module_mx_send_pending_req_queue,&cell);
- MPID_nem_mx_req_queue_enqueue(MPID_nem_module_mx_send_free_req_queue,cell);
- MPID_nem_queue_enqueue (MPID_nem_process_free_queue, (MPID_nem_cell_ptr_t)status.context);
- MPID_nem_module_mx_pendings_sends--;
- curr_cell = curr_cell->next;
- }
- else
- {
- return;
- }
- }
- }
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+ int mpi_errno = MPI_SUCCESS;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MX_DIRECTRECV);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MX_DIRECTRECV);
+
+ if (!((MPIDI_CH3I_VC *)vc->channel_private)->is_local)
+ {
+ uint64_t match_info = NEM_MX_MATCH_DIRECT;
+ uint64_t match_mask = NEM_MX_MATCH_FULL_MASK;
+ MPIR_Rank_t source = rreq->dev.match.parts.rank;
+ MPIR_Context_id_t context = rreq->dev.match.parts.context_id;
+ Mx_Nem_tag_t tag = rreq->dev.match.parts.tag;
+ mx_segment_t mx_iov[MX_MAX_SEGMENTS];
+ mx_return_t ret;
+ uint32_t num_seg = 1;
+ MPIDI_msg_sz_t data_sz;
+ int dt_contig;
+ MPI_Aint dt_true_lb;
+ MPID_Datatype *dt_ptr;
+ /*int threshold = (vc->eager_max_msg_sz - sizeof(MPIDI_CH3_PktGeneric_t));*/
+
+ NEM_MX_DIRECT_MATCH(match_info,0,source,context);
+ if (tag == MPI_ANY_TAG)
+ {
+ NEM_MX_SET_ANYTAG(match_info);
+ NEM_MX_SET_ANYTAG(match_mask);
+ }
+ else
+ NEM_MX_SET_TAG(match_info,tag);
+
+ MPIDI_Datatype_get_info(rreq->dev.user_count,rreq->dev.datatype, dt_contig, data_sz, dt_ptr,dt_true_lb);
+ rreq->dev.OnDataAvail = NULL;
+
+ if (dt_contig)
+ {
+ mx_iov[0].segment_ptr = (char *)(rreq->dev.user_buf) + dt_true_lb;
+ mx_iov[0].segment_length = data_sz;
+ }
+ else
+ MPID_nem_mx_process_rdtype(&rreq,dt_ptr,data_sz,mx_iov,&num_seg);
+ ret = mx_irecv(MPID_nem_mx_local_endpoint,mx_iov,num_seg,match_info,match_mask,(void *)rreq, &(REQ_FIELD(rreq,mx_request)));
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_irecv", "**mx_irecv %s", mx_strerror (ret));
+ }
+
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MX_DIRECTRECV);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
}
+
+static int my__count = 0;
+#ifndef USE_CTXT_AS_MARK
#undef FUNCNAME
-#define FUNCNAME MPID_nem_mx_recv
+#define FUNCNAME MPID_nem_mx_poll
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
-inline int
-MPID_nem_mx_recv()
+int
+MPID_nem_mx_poll(MPID_nem_poll_dir_t in_or_out)
{
- int mpi_errno = MPI_SUCCESS;
- mx_segment_t seg;
- mx_return_t ret;
- mx_status_t status;
- uint32_t result;
+ int mpi_errno = MPI_SUCCESS;
+ mx_status_t status;
+ mx_return_t ret;
+ uint32_t result;
+
+ if ( ((my__count++)%100000000) == 0)
+ fprintf(stdout,"[%i] ==== Polling %i\n",MPID_nem_mem_region.rank,my__count);
+
+ /*mx_progress(MPID_nem_mx_local_endpoint); */
+
+ /* first check ADI msgs */
+ ret = mx_test_any(MPID_nem_mx_local_endpoint,NEM_MX_MATCH_INTRA,NEM_MX_MASK,&status,&result);
+ MPIU_Assert(ret == MX_SUCCESS);
+ if ((ret == MX_SUCCESS) && (result > 0))
+ {
+ MPID_Request *req = (MPID_Request *)(status.context);
+ if ((req->kind == MPID_REQUEST_SEND) || (req->kind == MPID_PREQUEST_SEND))
+ {
+ MPIU_Assert(MPIDI_Request_get_type(req) != MPIDI_REQUEST_TYPE_GET_RESP);
+ MPID_nem_mx_handle_sreq(req);
+ }
+ else if (req->kind == MPID_REQUEST_RECV)
+ {
+ MPIU_Assert(status.code != MX_STATUS_TRUNCATED);
+ if (status.msg_length <= sizeof(MPIDI_CH3_PktGeneric_t))
+ {
+ MPID_nem_handle_pkt(req->ch.vc,(char *)&(req->dev.pending_pkt),(MPIDI_msg_sz_t)(status.msg_length));
+ }
+ else
+ {
+ MPID_nem_handle_pkt(req->ch.vc,(char *)(req->dev.tmpbuf),(MPIDI_msg_sz_t)(req->dev.tmpbuf_sz));
+ MPIU_Free(req->dev.tmpbuf);
+ }
+ MPIDI_CH3_Request_destroy(req);
+ }
+ else
+ {
+ /* Error : unknown REQ type */
+ MPIU_ERR_CHKANDJUMP1(TRUE, mpi_errno, MPI_ERR_OTHER, "**intern", "**intern %s", "unknown REQ type");
+ }
+ }
- if (MPID_nem_module_mx_recv_outstanding_request_num > 0)
+ /* Then check Direct MPI msgs */
+ ret = mx_test_any(MPID_nem_mx_local_endpoint,NEM_MX_MATCH_DIRECT,NEM_MX_MASK,&status,&result);
+ MPIU_Assert(ret == MX_SUCCESS);
+ if ((ret == MX_SUCCESS) && (result > 0))
+ {
+ MPID_Request *req = (MPID_Request *)(status.context);
+
+ if ((req->kind == MPID_REQUEST_SEND) || (req->kind == MPID_PREQUEST_SEND))
+ {
+ MPIU_Assert(MPIDI_Request_get_type(req) != MPIDI_REQUEST_TYPE_GET_RESP);
+ MPID_nem_mx_handle_sreq(req);
+ }
+ else if ((req->kind == MPID_REQUEST_RECV) || (req->kind == MPID_PREQUEST_RECV))
{
- MPID_nem_mx_cell_ptr_t curr_cell = MPID_nem_module_mx_recv_pending_req_queue->head;
-
- while( curr_cell != NULL )
- {
- ret = mx_test(MPID_nem_module_mx_local_endpoint,
- MPID_NEM_MX_CELL_TO_REQUEST(curr_cell),
- &status,
- &result);
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_test", "**mx_test %s", mx_strerror (ret));
- if((result != 0) && (status.code == MX_STATUS_SUCCESS))
- {
- MPID_nem_mx_cell_ptr_t cell_req;
- MPID_nem_queue_enqueue (MPID_nem_process_recv_queue, (MPID_nem_cell_ptr_t)status.context);
- MPID_nem_mx_req_queue_dequeue(MPID_nem_module_mx_recv_pending_req_queue,&cell_req);
- MPID_nem_mx_req_queue_enqueue(MPID_nem_module_mx_recv_free_req_queue,cell_req);
- MPID_nem_module_mx_recv_outstanding_request_num--;
- curr_cell = curr_cell->next;
- }
- else
- {
- goto next_step;
- }
- }
+ int found = FALSE;
+ mx_request_t *mx_request = NULL;
+ MPIU_Assert(status.code != MX_STATUS_TRUNCATED);
+ MPIU_Assert(MPIDI_Request_get_type(req) != MPIDI_REQUEST_TYPE_GET_RESP);
+ MPIU_THREAD_CS_ENTER(MSGQUEUE,req);
+ MPID_NEM_MX_GET_REQ_FROM_HASH(req,mx_request);
+ if(mx_request != NULL)
+ {
+ MPIU_Assert(req->dev.match.parts.rank == MPI_ANY_SOURCE);
+ MPIU_Free(mx_request);
+ }
+ found = MPIDI_CH3U_Recvq_DP(req);
+ if(found){
+ MPID_nem_mx_handle_rreq(req, status);
+ }
+ MPIU_THREAD_CS_EXIT(MSGQUEUE,req);
}
-
- next_step:
- if (MPID_nem_module_mx_recv_outstanding_request_num == 0)
- {
- MPID_nem_cell_ptr_t cell = NULL;
- if (!MPID_nem_queue_empty(MPID_nem_module_mx_free_queue) && !MPID_nem_mx_req_queue_empty(MPID_nem_module_mx_recv_free_req_queue))
- {
- MPID_nem_mx_cell_ptr_t cell_req;
- mx_request_t *request;
- uint32_t num_seg ;
-
- MPID_nem_mx_req_queue_dequeue(MPID_nem_module_mx_recv_free_req_queue,&cell_req);
- request = MPID_NEM_MX_CELL_TO_REQUEST(cell_req);
-
- MPID_nem_queue_dequeue (MPID_nem_module_mx_free_queue, &cell);
- seg.segment_ptr = (void *)(MPID_NEM_CELL_TO_PACKET (cell));
- seg.segment_length = MPID_NEM_CELL_PAYLOAD_LEN ;
-
- ret = mx_irecv(MPID_nem_module_mx_local_endpoint,
- &seg,1,
- MPID_NEM_MX_MATCH,
- MPID_NEM_MX_MASK,
- (void *)cell,
- request);
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_irecv", "**mx_irecv %s", mx_strerror (ret));
-
-
- ret = mx_test(MPID_nem_module_mx_local_endpoint,
- request,
- &status,
- &result);
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_test", "**mx_test %s", mx_strerror (ret));
- if((result != 0) && (status.code == MX_STATUS_SUCCESS))
- {
- MPID_nem_queue_enqueue (MPID_nem_process_recv_queue, cell);
- MPID_nem_mx_req_queue_enqueue(MPID_nem_module_mx_recv_free_req_queue,cell_req);
- }
- else
- {
- MPID_nem_mx_req_queue_enqueue(MPID_nem_module_mx_recv_pending_req_queue,cell_req);
- MPID_nem_module_mx_recv_outstanding_request_num++;
- }
- }
- }
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+ else
+ {
+ /* Error : unknown REQ type */
+ MPIU_ERR_CHKANDJUMP1(TRUE, mpi_errno, MPI_ERR_OTHER, "**intern", "**intern %s", "unknown REQ type");
+ }
+ }
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
}
+#else /*USE_CTXT_AS_MARK */
#undef FUNCNAME
#define FUNCNAME MPID_nem_mx_poll
@@ -173,21 +408,384 @@
int
MPID_nem_mx_poll(MPID_nem_poll_dir_t in_or_out)
{
- int mpi_errno = MPI_SUCCESS;
+ int mpi_errno = MPI_SUCCESS;
+ mx_status_t status;
+ mx_return_t ret;
+ uint32_t result;
+
+ mx_progress(MPID_nem_mx_local_endpoint);
- if (in_or_out == MPID_NEM_POLL_OUT)
- {
- MPID_nem_mx_send_from_queue();
- MPID_nem_mx_recv();
+ if ( ((my__count++)%100000000) == 0)
+ fprintf(stdout,"[%i] ==== Polling %i\n",MPID_nem_mem_region.rank,my__count);
+
+ ret = mx_test_any(MPID_nem_mx_local_endpoint,NEM_MX_MATCH_EMPTY_MASK,NEM_MX_MATCH_EMPTY_MASK,&status,&result);
+ MPIU_Assert(ret == MX_SUCCESS);
+ if ((ret == MX_SUCCESS) && (result > 0))
+ {
+ MPIR_Context_id_t ctxt;
+ MPID_Request *req = (MPID_Request *)(status.context);
+ NEM_MX_MATCH_GET_CTXT(status.match_info, ctxt);
+
+ if(ctxt == NEM_MX_INTRA_CTXT)
+ {
+ if ((req->kind == MPID_REQUEST_SEND) || (req->kind == MPID_PREQUEST_SEND))
+ {
+ MPIU_Assert(MPIDI_Request_get_type(req) != MPIDI_REQUEST_TYPE_GET_RESP);
+ MPID_nem_mx_handle_sreq(req);
+ }
+ else if (req->kind == MPID_REQUEST_RECV)
+ {
+ MPIU_Assert(status.code != MX_STATUS_TRUNCATED);
+ if (status.msg_length <= sizeof(MPIDI_CH3_PktGeneric_t))
+ {
+ MPID_nem_handle_pkt(req->ch.vc,(char *)&(req->dev.pending_pkt),(MPIDI_msg_sz_t)(status.msg_length));
+ }
+ else
+ {
+ MPID_nem_handle_pkt(req->ch.vc,(char *)(req->dev.tmpbuf),(MPIDI_msg_sz_t)(req->dev.tmpbuf_sz));
+ MPIU_Free(req->dev.tmpbuf);
+ }
+ MPIDI_CH3_Request_destroy(req);
+ }
+ else
+ {
+ MPIU_Assert(0);
+ }
}
- else
+ else
{
- MPID_nem_mx_recv();
- MPID_nem_mx_send_from_queue();
+ if ((req->kind == MPID_REQUEST_SEND) || (req->kind == MPID_PREQUEST_SEND))
+ {
+ MPIU_Assert(MPIDI_Request_get_type(req) != MPIDI_REQUEST_TYPE_GET_RESP);
+ MPID_nem_mx_handle_sreq(req);
+ }
+ else if ((req->kind == MPID_REQUEST_RECV) || (req->kind == MPID_PREQUEST_RECV))
+ {
+ int found = FALSE;
+ mx_request_t *mx_request = NULL;
+ MPIU_Assert(status.code != MX_STATUS_TRUNCATED);
+ MPIU_Assert(MPIDI_Request_get_type(req) != MPIDI_REQUEST_TYPE_GET_RESP);
+ MPIU_THREAD_CS_ENTER(MSGQUEUE,req);
+ MPID_NEM_MX_GET_REQ_FROM_HASH(req,mx_request);
+ if(mx_request != NULL)
+ {
+ MPIU_Assert(req->dev.match.parts.rank == MPI_ANY_SOURCE);
+ MPIU_Free(mx_request);
+ }
+ found = MPIDI_CH3U_Recvq_DP(req);
+ if(found){
+ MPID_nem_mx_handle_rreq(req, status);
+ }
+ MPIU_THREAD_CS_EXIT(MSGQUEUE,req);
+ }
+ else
+ {
+ MPIU_Assert(0);
+ }
}
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+ }
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
}
+#endif /*USE_CTXT_AS_MARK */
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_handle_sreq
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+static int
+MPID_nem_mx_handle_sreq(MPID_Request *req)
+{
+ int mpi_errno = MPI_SUCCESS;
+ int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);
+ if ((req->dev.datatype_ptr != NULL) && (req->dev.tmpbuf != NULL))
+ {
+ MPIU_Free(req->dev.tmpbuf);
+ }
+ reqFn = req->dev.OnDataAvail;
+ if (!reqFn){
+ MPIDI_CH3U_Request_complete(req);
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, ".... complete");
+ }
+ else{
+ MPIDI_VC_t *vc = req->ch.vc;
+ int complete = 0;
+ mpi_errno = reqFn(vc, req, &complete);
+ if (mpi_errno) MPIU_ERR_POP(mpi_errno);
+ if(complete)
+ {
+ MPIDI_CH3U_Request_complete(req);
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, ".... complete");
+ }
+ }
+ MPID_nem_mx_pending_send_req--;
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_handle_rreq
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+static int
+MPID_nem_mx_handle_rreq(MPID_Request *req, mx_status_t status)
+{
+ int mpi_errno = MPI_SUCCESS;
+ uint64_t match_info = status.match_info;
+ int complete = FALSE;
+ int dt_contig;
+ MPI_Aint dt_true_lb;
+ MPIDI_msg_sz_t userbuf_sz;
+ MPID_Datatype *dt_ptr;
+ MPIDI_msg_sz_t data_sz;
+ MPIDI_VC_t *vc;
+
+ NEM_MX_MATCH_GET_RANK(match_info,req->status.MPI_SOURCE);
+ NEM_MX_MATCH_GET_TAG(match_info,req->status.MPI_TAG);
+ req->status.count = status.xfer_length;
+ req->dev.recv_data_sz = status.xfer_length;
+
+ MPIDI_Datatype_get_info(req->dev.user_count, req->dev.datatype, dt_contig, userbuf_sz, dt_ptr, dt_true_lb);
+ /*fprintf(stdout," ===> userbuf_size is %i, msg_length is %i, xfer_length is %i\n",userbuf_sz,status.msg_length,status.xfer_length); */
+
+ if (status.xfer_length <= userbuf_sz) {
+ data_sz = req->dev.recv_data_sz;
+ }
+ else
+ {
+ MPIU_DBG_MSG_FMT(CH3_OTHER,VERBOSE,(MPIU_DBG_FDEST,
+ "receive buffer too small; message truncated, msg_sz="
+ MPIDI_MSG_SZ_FMT ", userbuf_sz="
+ MPIDI_MSG_SZ_FMT,
+ req->dev.recv_data_sz, userbuf_sz));
+ req->status.MPI_ERROR = MPIR_Err_create_code(MPI_SUCCESS,
+ MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_TRUNCATE,
+ "**truncate", "**truncate %d %d %d %d",
+ req->status.MPI_SOURCE, req->status.MPI_TAG,
+ req->dev.recv_data_sz, userbuf_sz );
+ req->status.count = userbuf_sz;
+ data_sz = userbuf_sz;
+ }
+
+ if ((!dt_contig)&&(req->dev.tmpbuf != NULL))
+ {
+ MPIDI_msg_sz_t last;
+ last = req->dev.recv_data_sz;
+ MPID_Segment_unpack( req->dev.segment_ptr, 0, &last, req->dev.tmpbuf );
+ MPIU_Free(req->dev.tmpbuf);
+ if (last != data_sz) {
+ req->status.count = (int)last;
+ if (req->dev.recv_data_sz <= userbuf_sz) {
+ MPIU_ERR_SETSIMPLE(req->status.MPI_ERROR,MPI_ERR_TYPE,"**dtypemismatch");
+ }
+ }
+ }
+
+ mx_get_endpoint_addr_context(status.source,(void **)(&vc));
+#ifdef ONDEMAND
+ if( vc == NULL)
+ {
+ char business_card[MPID_NEM_MAX_NETMOD_STRING_LEN];
+ mx_return_t ret;
+ mx_request_t mx_request;
+ mx_status_t status;
+ uint32_t result;
+
+ MPIDI_Comm_get_vc(req->comm, req->status.MPI_SOURCE, &vc);
+ mpi_errno = vc->pg->getConnInfo(vc->pg_rank, business_card, MPID_NEM_MAX_NETMOD_STRING_LEN, vc->pg);
+ if (mpi_errno) MPIU_ERR_POP(mpi_errno);
+ mpi_errno = MPID_nem_mx_get_from_bc (business_card, &VC_FIELD(vc, remote_endpoint_id), &VC_FIELD(vc, remote_nic_id));
+ if (mpi_errno) MPIU_ERR_POP (mpi_errno);
+
+ ret = mx_iconnect(MPID_nem_mx_local_endpoint,VC_FIELD(vc, remote_nic_id),
+ VC_FIELD(vc, remote_endpoint_id),MPID_NEM_MX_FILTER,match_info,NULL,&mx_request);
+ MPIU_Assert(ret == MX_SUCCESS);
+ do{
+ ret = mx_test(MPID_nem_mx_local_endpoint,&mx_request,&status,&result);
+ }while((result == 0) && (ret == MX_SUCCESS));
+ MPIU_Assert(ret == MX_SUCCESS);
+
+ fprintf(stdout,"[%i]=== Connected on recv with %i ... %p \n", MPID_nem_mem_region.rank,vc->lpid,vc);
+ VC_FIELD(vc, remote_endpoint_addr) = status.source;
+ VC_FIELD(vc, local_connected) = 1;
+ VC_FIELD(vc, remote_connected) = 1;
+ mx_set_endpoint_addr_context(VC_FIELD(vc, remote_endpoint_addr),(void *)vc);
+ fprintf(stdout,"[%i]=== Connected 2 on recv with %i ... %p \n", MPID_nem_mem_region.rank,vc->lpid,vc);
+ }
+#endif
+ MPIDI_CH3U_Handle_recv_req(vc, req, &complete);
+ MPIU_Assert(complete == TRUE);
+ fn_exit:
+ return mpi_errno;
+ fn_fail: ATTRIBUTE((unused))
+ goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_anysource_posted
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+void MPID_nem_mx_anysource_posted(MPID_Request *rreq)
+{
+ /* This function is called whenever an anyource request has been
+ posted to the posted receive queue. */
+ MPIR_Context_id_t context;
+ Mx_Nem_tag_t tag;
+ uint64_t match_info = 0;
+ uint64_t match_mask = NEM_MX_MATCH_FULL_MASK;
+ mx_request_t *mx_request = MPIU_Malloc(sizeof(mx_request_t));
+ mx_segment_t mx_iov[MX_MAX_SEGMENTS];
+ uint32_t num_seg = 1;
+ mx_return_t ret;
+ MPIDI_msg_sz_t data_sz;
+ int dt_contig;
+ MPI_Aint dt_true_lb;
+ MPID_Datatype *dt_ptr;
+
+ MPIDI_Datatype_get_info(rreq->dev.user_count,rreq->dev.datatype, dt_contig, data_sz, dt_ptr,dt_true_lb);
+
+ tag = rreq->dev.match.parts.tag;
+ context = rreq->dev.match.parts.context_id;
+ NEM_MX_DIRECT_MATCH(match_info,0,0,context);
+ if (tag == MPI_ANY_TAG)
+ {
+ NEM_MX_SET_ANYTAG(match_info);
+ NEM_MX_SET_ANYTAG(match_mask);
+ }
+ else
+ NEM_MX_SET_TAG(match_info,tag);
+ NEM_MX_SET_ANYSRC(match_info);
+ NEM_MX_SET_ANYSRC(match_mask);
+
+ if (dt_contig)
+ {
+ mx_iov[0].segment_ptr = (char *)(rreq->dev.user_buf) + dt_true_lb;
+ mx_iov[0].segment_length = data_sz;
+ }
+ else
+ MPID_nem_mx_process_rdtype(&rreq,dt_ptr,data_sz,mx_iov,&num_seg);
+ ret = mx_irecv(MPID_nem_mx_local_endpoint,mx_iov,num_seg,match_info,match_mask,(void *)rreq,mx_request);
+ /* FIXME: this function can't return an error because it's called
+ from a recvq function that doesn't check for errors. For now,
+ I'm replacing the chkandjump with an assertp. */
+ /* MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_irecv", "**mx_irecv %s", mx_strerror (ret)); */
+ MPIU_Assertp(ret == MX_SUCCESS);
+
+ MPID_MEM_MX_ADD_REQ_IN_HASH(rreq,mx_request);
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_anysource_matched
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mx_anysource_matched(MPID_Request *rreq)
+{
+ /* This function is called when an anysource request in the posted
+ receive queue is matched and dequeued. It returns 0 if the req
+ was not matched by mx; non-zero otherwise. */
+ mx_request_t *mx_request = NULL;
+ mx_return_t ret;
+ uint32_t result;
+ int matched = FALSE;
+
+ MPID_NEM_MX_GET_REQ_FROM_HASH(rreq,mx_request);
+ if(mx_request != NULL)
+ {
+ ret = mx_cancel(MPID_nem_mx_local_endpoint,mx_request,&result);
+ if (ret == MX_SUCCESS)
+ {
+ if (result != 1)
+ {
+ mx_status_t status;
+ MPIU_Assert(MPIDI_Request_get_type(rreq) != MPIDI_REQUEST_TYPE_GET_RESP);
+ do{
+ ret = mx_test(MPID_nem_mx_local_endpoint,mx_request,&status,&result);
+ }while((result == 0) && (ret == MX_SUCCESS));
+ MPIU_Assert(ret == MX_SUCCESS);
+ MPID_nem_mx_handle_rreq(rreq, status);
+ matched = TRUE;
+ }
+ else
+ {
+ MPID_Segment_free(rreq->dev.segment_ptr);
+ }
+ MPIU_Free(mx_request);
+ }
+ }
+ return matched;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_process_rdtype
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mx_process_rdtype(MPID_Request **rreq_p, MPID_Datatype * dt_ptr, MPIDI_msg_sz_t data_sz, mx_segment_t *mx_iov, uint32_t *num_seg)
+{
+ MPID_Request *rreq =*rreq_p;
+ MPID_IOV *iov;
+ MPIDI_msg_sz_t last;
+ int num_entries = MX_MAX_SEGMENTS;
+ int iov_num_ub = rreq->dev.user_count * dt_ptr->n_contig_blocks;
+ int n_iov = iov_num_ub;
+ int mpi_errno = MPI_SUCCESS;
+ int index;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MX_PROCESS_RDTYPE);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MX_PROCESS_RDTYPE);
+
+ if (rreq->dev.segment_ptr == NULL)
+ {
+ rreq->dev.segment_ptr = MPID_Segment_alloc( );
+ MPIU_ERR_CHKANDJUMP1((rreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_allo\
+c");
+ }
+ MPID_Segment_init(rreq->dev.user_buf, rreq->dev.user_count, rreq->dev.datatype, rreq->dev.segment_ptr, 0);
+ rreq->dev.segment_first = 0;
+ rreq->dev.segment_size = data_sz;
+ last = rreq->dev.segment_size;
+ iov = MPIU_Malloc(iov_num_ub*sizeof(MPID_IOV));
+ MPID_Segment_unpack_vector(rreq->dev.segment_ptr, rreq->dev.segment_first, &last, iov, &n_iov);
+ MPIU_Assert(last == rreq->dev.segment_size);
+
+#ifdef DEBUG_IOV
+ fprintf(stdout,"=============== %i entries (free slots : %i)\n",n_iov,num_entries);
+ for(index = 0; index < n_iov; index++)
+ fprintf(stdout,"[%i]======= Recv iov[%i] = ptr : %p, len : %i \n",
+ MPID_nem_mem_region.rank,index,iov[index].MPID_IOV_BUF,iov[index].MPID_IOV_LEN);
+#endif
+ if(n_iov <= num_entries)
+ {
+ for(index = 0; index < n_iov ; index++)
+ {
+ (mx_iov)[index].segment_ptr = iov[index].MPID_IOV_BUF;
+ (mx_iov)[index].segment_length = iov[index].MPID_IOV_LEN;
+ }
+ rreq->dev.tmpbuf = NULL;
+ *num_seg = n_iov;
+ }
+ else
+ {
+ int packsize = 0;
+ NMPI_Pack_size(rreq->dev.user_count, rreq->dev.datatype, rreq->comm->handle, &packsize);
+ rreq->dev.tmpbuf = MPIU_Malloc((size_t) packsize);
+ MPIU_Assert(rreq->dev.tmpbuf);
+ rreq->dev.tmpbuf_sz = packsize;
+ mx_iov[0].segment_ptr = (char *) rreq->dev.tmpbuf;
+ mx_iov[0].segment_length = (uint32_t) packsize;
+ *num_seg = 1 ;
+ }
+ MPIU_Free(iov);
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MX_PROCESS_RDTYPE);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_register.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_register.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_register.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,32 +0,0 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
-/*
- * (C) 2006 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#include "mx_impl.h"
-#include "myriexpress.h"
-
-int
-MPID_nem_mx_register_mem (void *p, int len)
-{
- /*
- if (gm_register_memory (MPID_nem_module_gm_port, p, len) == GM_SUCCESS)
- return 0;
- else
- return -1;
- */
-}
-
-int
-MPID_nem_mx_deregister_mem (void *p, int len)
-{
- /*
- if (gm_deregister_memory (MPID_nem_module_gm_port, p, len) == GM_SUCCESS)
- return 0;
- else
- return -1;
- */
-}
-
-
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_send.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_send.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_send.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -5,90 +5,542 @@
*/
#include "mx_impl.h"
-#include "myriexpress.h"
#include "my_papi_defs.h"
#undef FUNCNAME
-#define FUNCNAME MPID_nem_mx_send
+#define FUNCNAME MPID_nem_mx_iSendContig
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mx_iSendContig(MPIDI_VC_t *vc, MPID_Request *sreq, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data, MPIDI_msg_sz_t data_sz)
+{
+ int mpi_errno = MPI_SUCCESS;
+ mx_request_t mx_request;
+ mx_segment_t mx_iov[3];
+ uint32_t num_seg = 1;
+ mx_return_t ret;
+ uint64_t match_info = 0;
+ /*MPIDI_CH3_Pkt_type_t type = ((MPIDI_CH3_Pkt_t *)(hdr))->type; */
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MX_ISENDCONTIGMSG);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MX_ISENDCONTIGMSG);
+
+ MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "mx_iSendContig");
+ MPIDI_DBG_Print_packet((MPIDI_CH3_Pkt_t *)hdr);
+
+#ifdef ONDEMAND
+ if( VC_FIELD(vc, local_connected) == 0)
+ {
+ MPID_nem_mx_send_conn_info(vc);
+ }
+#endif
+
+ NEM_MX_ADI_MATCH(match_info);
+ memcpy(&(sreq->dev.pending_pkt),(char *)hdr,sizeof(MPIDI_CH3_PktGeneric_t));
+ mx_iov[0].segment_ptr = (char *)&(sreq->dev.pending_pkt);
+ mx_iov[0].segment_length = sizeof(MPIDI_CH3_PktGeneric_t);
+ num_seg = 1;
+ if(data_sz)
+ {
+ mx_iov[1].segment_ptr = data;
+ mx_iov[1].segment_length = data_sz;
+ num_seg += 1;
+ }
+
+ ret = mx_isend(MPID_nem_mx_local_endpoint,mx_iov,num_seg,VC_FIELD(vc,remote_endpoint_addr),match_info,(void*)sreq,&mx_request);
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_isend", "**mx_isend %s", mx_strerror (ret));
+ MPID_nem_mx_pending_send_req++;
+ sreq->ch.vc = vc;
+
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MX_ISENDCONTIGMSG);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_iStartContigMsg
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mx_iStartContigMsg(MPIDI_VC_t *vc, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data, MPIDI_msg_sz_t data_sz, MPID_Request **sreq_ptr)
+{
+ MPID_Request *sreq = NULL;
+ int mpi_errno = MPI_SUCCESS;
+ mx_request_t mx_request;
+ mx_segment_t mx_iov[2];
+ uint32_t num_seg = 1;
+ mx_return_t ret;
+ uint64_t match_info = 0;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MX_ISTARTCONTIGMSG);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MX_ISTARTCONTIGMSG);
+ MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "mx_iSendContig");
+ MPIDI_DBG_Print_packet((MPIDI_CH3_Pkt_t *)hdr);
+
+#ifdef ONDEMAND
+ if( VC_FIELD(vc, local_connected) == 0)
+ {
+ MPID_nem_mx_send_conn_info(vc);
+ }
+#endif
+ /* create a request */
+ sreq = MPID_Request_create();
+ MPIU_Assert (sreq != NULL);
+ MPIU_Object_set_ref (sreq, 2);
+ sreq->kind = MPID_REQUEST_SEND;
+ sreq->dev.OnDataAvail = 0;
+
+ NEM_MX_ADI_MATCH(match_info);
+ /*fprintf(stdout,"[%i]=== Startcontigmsg sending (%lx) to %i... \n",MPID_nem_mem_region.rank,match_info,vc->lpid); */
+
+ memcpy(&(sreq->dev.pending_pkt),(char *)hdr,sizeof(MPIDI_CH3_PktGeneric_t));
+ mx_iov[0].segment_ptr = (char *)&(sreq->dev.pending_pkt);
+ mx_iov[0].segment_length = sizeof(MPIDI_CH3_PktGeneric_t);
+ num_seg = 1;
+ if (data_sz)
+ {
+ mx_iov[1].segment_ptr = (char *)data;
+ mx_iov[1].segment_length = data_sz;
+ num_seg += 1;
+ }
+
+ ret = mx_isend(MPID_nem_mx_local_endpoint,mx_iov,num_seg,VC_FIELD(vc,remote_endpoint_addr),match_info,(void *)sreq,&mx_request);
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_isend", "**mx_isend %s", mx_strerror (ret));
+ MPID_nem_mx_pending_send_req++;
+ sreq->ch.vc = vc;
+
+ fn_exit:
+ *sreq_ptr = sreq;
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MX_ISTARTCONTIGMSG);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_SendNoncontig
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mx_SendNoncontig(MPIDI_VC_t *vc, MPID_Request *sreq, void *header, MPIDI_msg_sz_t hdr_sz)
+{
+ mx_segment_t mx_iov[MX_MAX_SEGMENTS];
+ uint32_t num_seg = 1;
+ int mpi_errno = MPI_SUCCESS;
+ mx_request_t mx_request;
+ mx_return_t ret;
+ uint64_t match_info;
+ MPIDI_msg_sz_t data_sz;
+ int dt_contig;
+ MPI_Aint dt_true_lb;
+ MPID_Datatype *dt_ptr;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MX_SENDNONCONTIGMSG);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MX_SENDNONCONTIGMSG);
+ MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
+ MPIU_Assert(MPID_IOV_LIMIT < MX_MAX_SEGMENTS);
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "MPID_nem_mx_iSendNoncontig");
+
+#ifdef ONDEMAND
+ if( VC_FIELD(vc, local_connected) == 0)
+ {
+ MPID_nem_mx_send_conn_info(vc);
+ }
+#endif
+
+ NEM_MX_ADI_MATCH(match_info);
+ memcpy(&(sreq->dev.pending_pkt),(char *)header,sizeof(MPIDI_CH3_PktGeneric_t));
+ mx_iov[0].segment_ptr = (char *)&(sreq->dev.pending_pkt);
+ mx_iov[0].segment_length = sizeof(MPIDI_CH3_PktGeneric_t);
+ num_seg = 1;
+
+ MPIDI_Datatype_get_info(sreq->dev.user_count,sreq->dev.datatype, dt_contig, data_sz, dt_ptr,dt_true_lb);
+ if(data_sz)
+ {
+ if( data_sz <= vc->eager_max_msg_sz)
+ {
+ MPID_nem_mx_process_sdtype(&sreq,sreq->dev.datatype,dt_ptr,sreq->dev.user_buf,sreq->dev.user_count,
+ data_sz, mx_iov,&num_seg,1);
+ }
+ else
+ {
+ int packsize = 0;
+ MPI_Aint last;
+ MPIU_Assert(sreq->dev.segment_ptr == NULL);
+ sreq->dev.segment_ptr = MPID_Segment_alloc( );
+ MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+ NMPI_Pack_size(sreq->dev.user_count, sreq->dev.datatype, sreq->comm->handle, &packsize);
+ sreq->dev.tmpbuf = MPIU_Malloc((size_t) packsize);
+ MPIU_Assert(sreq->dev.tmpbuf);
+ MPID_Segment_init(sreq->dev.user_buf, sreq->dev.user_count, sreq->dev.datatype, sreq->dev.segment_ptr, 0);
+ last = data_sz;
+ MPID_Segment_pack(sreq->dev.segment_ptr, 0, &last, sreq->dev.tmpbuf);
+ mx_iov[1].segment_ptr = (char *) sreq->dev.tmpbuf;
+ mx_iov[1].segment_length = (uint32_t) last;
+ num_seg++;
+ }
+ }
+
+ MPIU_Assert(num_seg <= MX_MAX_SEGMENTS);
+ ret = mx_isend(MPID_nem_mx_local_endpoint,mx_iov,num_seg,VC_FIELD(vc,remote_endpoint_addr),match_info,(void *)sreq,&mx_request);
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_isend", "**mx_isend %s", mx_strerror (ret));
+
+ MPID_nem_mx_pending_send_req++;
+ sreq->ch.vc = vc;
+
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MX_SENDNONCONTIGMSG);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_directSend
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_mx_directSend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag, MPID_Comm * comm, int context_offset,MPID_Request **request)
+{
+ MPID_Request *sreq = NULL;
+ mx_segment_t mx_iov[MX_MAX_SEGMENTS];
+ uint32_t num_seg = 1;
+ mx_return_t ret;
+ uint64_t mx_matching;
+ int mpi_errno = MPI_SUCCESS;
+ MPID_Datatype *dt_ptr;
+ int dt_contig;
+ MPIDI_msg_sz_t data_sz;
+ MPI_Aint dt_true_lb;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MX_DIRECTSEND);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MX_DIRECTSEND);
+
+ MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
+ MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
+ MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
+ sreq->partner_request = NULL;
+
+#ifdef ONDEMAND
+ if( VC_FIELD(vc, local_connected) == 0)
+ {
+ MPID_nem_mx_send_conn_info(vc);
+ fprintf(stdout,"[%i]=== DirectSend info Sender FULLY connected with %i %p \n", MPID_nem_mem_region.rank,vc->lpid,vc);
+ }
+#endif
+
+ MPIDI_VC_FAI_send_seqnum(vc, seqnum);
+ MPIDI_Request_set_seqnum(sreq, seqnum);
+ sreq->ch.vc = vc;
+ sreq->dev.OnDataAvail = NULL;
+
+ NEM_MX_DIRECT_MATCH(mx_matching,tag,comm->rank,comm->context_id + context_offset);
+ if(data_sz)
+ {
+ if (dt_contig)
+ {
+ mx_iov[0].segment_ptr = (char*)buf + dt_true_lb;
+ mx_iov[0].segment_length = data_sz;
+ }
+ else
+ {
+ if( data_sz <= vc->eager_max_msg_sz)
+ {
+ MPID_nem_mx_process_sdtype(&sreq,datatype,dt_ptr,buf,count,data_sz,mx_iov,&num_seg,0);
+#ifdef DEBUG_IOV
+ {
+ int index;
+ fprintf(stdout,"==========================\n");
+ for(index = 0; index < num_seg; index++)
+ fprintf(stdout,"[%i]======= MX iov[%i] = ptr : %p, len : %i \n",
+ MPID_nem_mem_region.rank,index,mx_iov[index].segment_ptr,mx_iov[index].segment_length);
+ }
+#endif
+ }
+ else
+ {
+ int packsize = 0;
+ MPI_Aint last;
+ sreq->dev.segment_ptr = MPID_Segment_alloc( );
+ MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+ NMPI_Pack_size(count, datatype, comm->handle, &packsize);
+ sreq->dev.tmpbuf = MPIU_Malloc((size_t) packsize);
+ MPIU_Assert(sreq->dev.tmpbuf);
+ MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
+ last = data_sz;
+ MPID_Segment_pack(sreq->dev.segment_ptr, 0, &last, sreq->dev.tmpbuf);
+ mx_iov[0].segment_ptr = (char *) sreq->dev.tmpbuf;
+ mx_iov[0].segment_length = (uint32_t) last;
+ }
+ }
+ }
+ else
+ {
+ mx_iov[0].segment_ptr = NULL;
+ mx_iov[0].segment_length = 0;
+ }
+
+ ret = mx_isend(MPID_nem_mx_local_endpoint,mx_iov,num_seg,VC_FIELD(vc,remote_endpoint_addr),
+ mx_matching,(void *)sreq,&(REQ_FIELD(sreq,mx_request)));
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_isend", "**mx_isend %s", mx_strerror (ret));
+ MPID_nem_mx_pending_send_req++;
+ fn_exit:
+ *request = sreq;
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MX_DIRECTSEND);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_directSsend
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+/* Synchronous-mode direct send over MX.
+ *
+ * Builds an MX segment list for (buf,count,datatype) -- the user buffer
+ * itself when the datatype is contiguous, an iovec built by
+ * MPID_nem_mx_process_sdtype for small non-contiguous data, or a packed
+ * copy in sreq->dev.tmpbuf for large non-contiguous data -- then posts
+ * an mx_issend() (synchronous MX primitive).  The new send request is
+ * returned to the caller in *request. */
+int MPID_nem_mx_directSsend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag, MPID_Comm * comm, int context_offset,MPID_Request **request)
+{
+ MPID_Request *sreq = NULL;
+ uint32_t num_seg = 1;
+ mx_segment_t mx_iov[MX_MAX_SEGMENTS];
+ mx_return_t ret;
+ uint64_t mx_matching;
+ int mpi_errno = MPI_SUCCESS;
+ MPID_Datatype *dt_ptr;
+ int dt_contig;
+ MPIDI_msg_sz_t data_sz;
+ MPI_Aint dt_true_lb;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MX_DIRECTSSEND);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MX_DIRECTSSEND);
+
+ MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
+ MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
+ MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
+ sreq->partner_request = NULL;
+
+#ifdef ONDEMAND
+ /* connect lazily on the first send to this VC */
+ if( VC_FIELD(vc, local_connected) == 0)
+ {
+ MPID_nem_mx_send_conn_info(vc);
+ fprintf(stdout,"[%i]=== DirectSsend info Sender FULLY connected with %i %p \n", MPID_nem_mem_region.rank,vc->lpid,vc);
+ }
+#endif
+ MPIDI_VC_FAI_send_seqnum(vc, seqnum);
+ MPIDI_Request_set_seqnum(sreq, seqnum);
+ sreq->ch.vc = vc;
+ sreq->dev.OnDataAvail = NULL;
+
+ NEM_MX_DIRECT_MATCH(mx_matching,tag,comm->rank,comm->context_id + context_offset);
+
+ if(data_sz)
+ {
+ if (dt_contig)
+ {
+ /* contiguous datatype: send straight from the user buffer */
+ mx_iov[0].segment_ptr = (char*)(buf) + dt_true_lb;
+ mx_iov[0].segment_length = data_sz;
+ }
+ else
+ {
+ if( data_sz <= vc->eager_max_msg_sz)
+ {
+ /* small non-contiguous data: describe it with an MX iovec */
+ MPID_nem_mx_process_sdtype(&sreq,datatype,dt_ptr,buf,count,data_sz,mx_iov,&num_seg,0);
+ }
+ else
+ {
+ /* large non-contiguous data: pack it into a temporary buffer
+ (tmpbuf is released when the send request completes) */
+ int packsize = 0;
+ MPI_Aint last;
+ sreq->dev.segment_ptr = MPID_Segment_alloc( );
+ MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+ NMPI_Pack_size(count, datatype, comm->handle, &packsize);
+ sreq->dev.tmpbuf = MPIU_Malloc((size_t) packsize);
+ MPIU_Assert(sreq->dev.tmpbuf);
+ MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
+ last = data_sz;
+ MPID_Segment_pack(sreq->dev.segment_ptr, 0, &last, sreq->dev.tmpbuf);
+ mx_iov[0].segment_ptr = (char *) sreq->dev.tmpbuf;
+ mx_iov[0].segment_length = (uint32_t) last;
+ }
+ }
+ }
+ else
+ {
+ /* zero-byte message: a single empty segment */
+ mx_iov[0].segment_ptr = NULL;
+ mx_iov[0].segment_length = 0;
+ }
+
+ ret = mx_issend(MPID_nem_mx_local_endpoint,mx_iov,num_seg,VC_FIELD(vc,remote_endpoint_addr),
+ mx_matching,(void *)sreq,&(REQ_FIELD(sreq,mx_request)));
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_isend", "**mx_isend %s", mx_strerror (ret));
+ MPID_nem_mx_pending_send_req++;
+
+ fn_exit:
+ /* FIX: hand the request back to the caller.  This assignment was
+ missing; the sibling MPID_nem_mx_directSend performs it, and the
+ request parameter is otherwise never written. */
+ *request = sreq;
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MX_DIRECTSSEND);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_process_sdtype
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+/* Build an MX segment list describing a non-contiguous send datatype.
+ *
+ * Unrolls (buf,count,datatype) into an MPID_IOV vector and copies it
+ * into mx_iov starting at first_free_slot.  If the vector does not fit
+ * in the remaining MX_MAX_SEGMENTS slots, the data of the entries that
+ * do not fit is flattened into sreq->dev.tmpbuf, which becomes the last
+ * MX segment (tmpbuf is released when the send request completes).
+ * On return *num_seg is the total number of valid mx_iov entries,
+ * including the first_free_slot entries owned by the caller. */
+int MPID_nem_mx_process_sdtype(MPID_Request **sreq_p, MPI_Datatype datatype, MPID_Datatype * dt_ptr, const void *buf, int count, MPIDI_msg_sz_t data_sz, mx_segment_t *mx_iov, uint32_t *num_seg,int first_free_slot)
+{
+ MPID_Request *sreq =*sreq_p;
+ MPID_IOV *iov;
+ MPIDI_msg_sz_t last;
+ int num_entries = MX_MAX_SEGMENTS - first_free_slot;
+ int iov_num_ub = count * dt_ptr->n_contig_blocks; /* upper bound on iov entries */
+ int n_iov = iov_num_ub;
+ int mpi_errno = MPI_SUCCESS;
+ int index;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MX_PROCESS_SDTYPE);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MX_PROCESS_SDTYPE);
+
+ sreq->dev.segment_ptr = MPID_Segment_alloc( );
+ MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+ MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
+ sreq->dev.segment_first = 0;
+ sreq->dev.segment_size = data_sz;
+ last = sreq->dev.segment_size;
+ iov = MPIU_Malloc(iov_num_ub*sizeof(MPID_IOV));
+ MPIU_Assert(iov); /* FIX: check the allocation, as is done for tmpbuf below */
+
+ MPID_Segment_pack_vector(sreq->dev.segment_ptr, sreq->dev.segment_first, &last, iov, &n_iov);
+ MPIU_Assert(last == sreq->dev.segment_size);
+
+#ifdef DEBUG_IOV
+ fprintf(stdout,"=============== %i entries (free slots : %i)\n",n_iov,num_entries);
+ for(index = 0; index < n_iov; index++)
+ fprintf(stdout,"[%i]======= Send iov[%i] = ptr : %p, len : %i \n",
+ MPID_nem_mem_region.rank,index,iov[index].MPID_IOV_BUF,iov[index].MPID_IOV_LEN);
+#endif
+
+ if(n_iov <= num_entries)
+ {
+ /* the whole iovec fits in the available MX segments */
+ for(index = 0; index < n_iov ; index++)
+ {
+ (mx_iov)[first_free_slot+index].segment_ptr = iov[index].MPID_IOV_BUF;
+ (mx_iov)[first_free_slot+index].segment_length = iov[index].MPID_IOV_LEN;
+ }
+ /* FIX: count the caller's first_free_slot entries in the total, for
+ consistency with the overflow branch below (MX_MAX_SEGMENTS ==
+ first_free_slot + num_entries).  Unchanged when first_free_slot
+ is 0, as in all visible callers. */
+ *num_seg = first_free_slot + n_iov;
+ }
+ else
+ {
+ /* too many entries: keep the first num_entries-1 as-is and flatten
+ the rest into one contiguous temporary buffer */
+ int size_to_copy = 0;
+ int offset = 0;
+ int last_entry = num_entries - 1;
+ for(index = 0; index < n_iov ; index++)
+ {
+ if (index <= (last_entry-1))
+ {
+ (mx_iov)[first_free_slot+index].segment_ptr = iov[index].MPID_IOV_BUF;
+ (mx_iov)[first_free_slot+index].segment_length = iov[index].MPID_IOV_LEN;
+ }
+ else
+ {
+ size_to_copy += iov[index].MPID_IOV_LEN;
+ }
+ }
+ sreq->dev.tmpbuf = MPIU_Malloc(size_to_copy);
+ MPIU_Assert(sreq->dev.tmpbuf);
+ for(index = last_entry; index < n_iov; index++)
+ {
+ memcpy((char *)(sreq->dev.tmpbuf) + offset, iov[index].MPID_IOV_BUF, iov[index].MPID_IOV_LEN);
+ offset += iov[index].MPID_IOV_LEN;
+ }
+ (mx_iov)[MX_MAX_SEGMENTS-1].segment_ptr = sreq->dev.tmpbuf;
+ (mx_iov)[MX_MAX_SEGMENTS-1].segment_length = size_to_copy;
+ *num_seg = MX_MAX_SEGMENTS ;
+ }
+ MPIU_Free(iov);
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MX_PROCESS_SDTYPE);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_send_conn_info
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+/* On-demand sender-side connection setup: fetch the peer's business
+ * card, open an MX connection to its endpoint, then send our own
+ * nic id / endpoint id / process-group id so the receiver can identify
+ * us.  NOTE(review): completion is busy-polled and failures are handled
+ * with asserts rather than error returns -- presumably acceptable for
+ * connection setup; confirm before production use. */
 int
-MPID_nem_mx_send (MPIDI_VC_t *vc, MPID_nem_cell_ptr_t cell, int datalen)
+MPID_nem_mx_send_conn_info (MPIDI_VC_t *vc)
 {
- MPID_nem_mx_cell_ptr_t cell_req;
- mx_request_t *request;
- mx_segment_t seg;
- mx_return_t ret;
- mx_status_t status;
- uint32_t result;
- int data_size;
- int dest = vc->lpid;
- int mpi_errno = MPI_SUCCESS;
+ char business_card[MPID_NEM_MAX_NETMOD_STRING_LEN];
+ uint64_t match_info;
+ uint32_t num_seg = 3;
+ mx_segment_t mx_iov[3];
+ mx_request_t mx_request;
+ mx_return_t ret;
+ mx_status_t status;
+ uint32_t result;
+ int mpi_errno = MPI_SUCCESS;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_MX_SEND_CONN_INFO);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_MX_SEND_CONN_INFO);
+
+ /* look up the peer's business card and extract its MX nic/endpoint ids */
+ mpi_errno = vc->pg->getConnInfo(vc->pg_rank, business_card, MPID_NEM_MAX_NETMOD_STRING_LEN, vc->pg);
+ if (mpi_errno) MPIU_ERR_POP(mpi_errno);
- MPIU_Assert (datalen <= MPID_NEM_MPICH2_DATA_LEN);
+ mpi_errno = MPID_nem_mx_get_from_bc (business_card, &VC_FIELD(vc, remote_endpoint_id), &VC_FIELD(vc, remote_nic_id));
+ if (mpi_errno) MPIU_ERR_POP (mpi_errno);
+
+ fprintf(stdout,"[%i]=== Sender connecting to %i \n", MPID_nem_mem_region.rank,vc->lpid);
+
+ /* FIXME: match_info is used uninitialized */
+ ret = mx_iconnect(MPID_nem_mx_local_endpoint,VC_FIELD(vc, remote_nic_id),
+ VC_FIELD(vc, remote_endpoint_id),MPID_NEM_MX_FILTER,match_info,NULL,&mx_request);
+ MPIU_Assert(ret == MX_SUCCESS);
+ /* busy-poll until the connect request completes */
+ do{
+ ret = mx_test(MPID_nem_mx_local_endpoint,&mx_request,&status,&result);
+ }while((result == 0) && (ret == MX_SUCCESS));
+ MPIU_Assert(ret == MX_SUCCESS);
+ VC_FIELD(vc, remote_endpoint_addr) = status.source;
+ VC_FIELD(vc, local_connected) = 1;
+ mx_set_endpoint_addr_context(VC_FIELD(vc, remote_endpoint_addr),(void *)vc);
+
+ fprintf(stdout,"[%i]=== Sending conn info connection to %i \n", MPID_nem_mem_region.rank,vc->lpid);
+
+ /* build the ADI match value carrying our process-group rank */
+ NEM_MX_ADI_MATCH(match_info);
+ NEM_MX_SET_PGRANK(match_info,MPIDI_Process.my_pg_rank);
+
+ /* conn-info payload: local nic id, local endpoint id, pg id string */
+ mx_iov[0].segment_ptr = (char*)&MPID_nem_mx_local_nic_id;
+ mx_iov[0].segment_length = sizeof(uint64_t);
+ mx_iov[1].segment_ptr = (char*)&MPID_nem_mx_local_endpoint_id;
+ mx_iov[1].segment_length = sizeof(uint32_t);
+ mx_iov[2].segment_ptr = MPIDI_Process.my_pg->id;
+ mx_iov[2].segment_length = strlen(MPIDI_Process.my_pg->id) + 1;
- if ( MPID_nem_mx_req_queue_empty(MPID_nem_module_mx_send_free_req_queue))
- {
- MPID_nem_mx_cell_ptr_t curr_cell = MPID_nem_module_mx_send_pending_req_queue->head;
- ret = mx_wait(MPID_nem_module_mx_local_endpoint,
- MPID_NEM_MX_CELL_TO_REQUEST(curr_cell),
- MX_INFINITE,
- &status,
- &result);
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_wait", "**mx_wait %s", mx_strerror (ret));
- if((result != 0) && (status.code == MX_STATUS_SUCCESS))
- {
- MPID_nem_queue_enqueue (MPID_nem_process_free_queue, (MPID_nem_cell_ptr_t)status.context);
- MPID_nem_mx_req_queue_dequeue(MPID_nem_module_mx_send_pending_req_queue,&cell_req);
- MPID_nem_module_mx_pendings_sends--;
- goto regular_step;
- }
- }
- else
- {
- MPID_nem_mx_req_queue_dequeue(MPID_nem_module_mx_send_free_req_queue,&cell_req);
- regular_step:
- {
- request = MPID_NEM_MX_CELL_TO_REQUEST(cell_req);
- data_size = datalen + MPID_NEM_MPICH2_HEAD_LEN;
- seg.segment_ptr = (void *)(MPID_NEM_CELL_TO_PACKET (cell));
- seg.segment_length = data_size ;
-
- ret = mx_isend(MPID_nem_module_mx_local_endpoint,
- &seg,1,
- MPID_nem_module_mx_endpoints_addr[dest],
- MPID_NEM_MX_MATCH,
- (void *)cell,
- request);
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_isend", "**mx_isend %s", mx_strerror (ret));
-
- if(MPID_nem_module_mx_pendings_sends == 0)
- {
- ret = mx_test(MPID_nem_module_mx_local_endpoint,
- request,
- &status,
- &result);
- MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_test", "**mx_test %s", mx_strerror (ret));
- if((result != 0) && (status.code == MX_STATUS_SUCCESS))
- {
- MPID_nem_queue_enqueue (MPID_nem_process_free_queue, (MPID_nem_cell_ptr_t)status.context);
- MPID_nem_mx_req_queue_enqueue(MPID_nem_module_mx_send_free_req_queue,cell_req);
- }
- else
- {
- MPID_nem_mx_req_queue_enqueue(MPID_nem_module_mx_send_pending_req_queue,cell_req);
- MPID_nem_module_mx_pendings_sends++;
- }
- }
- else
- {
- MPID_nem_mx_req_queue_enqueue(MPID_nem_module_mx_send_pending_req_queue,cell_req);
- MPID_nem_module_mx_pendings_sends++;
- }
- }
- }
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+ ret = mx_isend(MPID_nem_mx_local_endpoint,mx_iov,num_seg,VC_FIELD(vc,remote_endpoint_addr),match_info,NULL,&mx_request);
+ MPIU_ERR_CHKANDJUMP1 (ret != MX_SUCCESS, mpi_errno, MPI_ERR_OTHER, "**mx_isend", "**mx_isend %s", mx_strerror (ret));
+ /* busy-poll until the conn-info send completes */
+ do{
+ ret = mx_test(MPID_nem_mx_local_endpoint,&mx_request,&status,&result);
+ }while((result == 0) && (ret == MX_SUCCESS));
+ MPIU_Assert(ret == MX_SUCCESS);
+ fprintf(stdout,"[%i]=== Send conn info to %i (%Lx) (%Li %i %s) ... \n",
+ MPID_nem_mem_region.rank,vc->lpid,match_info,MPID_nem_mx_local_nic_id,MPID_nem_mx_local_endpoint_id,(char *)MPIDI_Process.my_pg->id);
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_MX_SEND_CONN_INFO);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
 }
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_mx_send
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+/* Legacy cell-based send entry point.  This revision sends directly from
+ * the user buffer (see MPID_nem_mx_directSend/directSsend), so this path
+ * is reduced to a no-op stub that always reports success.
+ * NOTE(review): the cell and datalen arguments are silently dropped --
+ * confirm no caller still routes data through this function. */
+int
+MPID_nem_mx_send (MPIDI_VC_t *vc, MPID_nem_cell_ptr_t cell, int datalen)
+{
+ int mpi_errno = MPI_SUCCESS;
+ return mpi_errno;
+}
+
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_test.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_test.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/mx_test.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,14 +0,0 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
-/*
- * (C) 2006 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#include "mx_impl.h"
-#include "myriexpress.h"
-
-int
-MPID_nem_mx_test()
-{
- return 0;
-}
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/uthash.h (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/uthash.h)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/uthash.h (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/mx/uthash.h 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,612 @@
+/*
+Copyright (c) 2003-2009, Troy D. Hanson http://uthash.sourceforge.net
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <string.h> /* memcmp,strlen */
+#include <stddef.h> /* ptrdiff_t */
+
+#ifndef UTHASH_H
+#define UTHASH_H
+
+#define UTHASH_VERSION 1.5
+
+/* C++ requires extra stringent casting */
+#if defined __cplusplus
+#define TYPEOF(x) (typeof(x))
+#else
+#define TYPEOF(x)
+#endif
+
+
+#define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */
+#define uthash_bkt_malloc(sz) malloc(sz) /* malloc fcn for UT_hash_bucket's */
+#define uthash_bkt_free(ptr) free(ptr) /* free fcn for UT_hash_bucket's */
+#define uthash_tbl_malloc(sz) malloc(sz) /* malloc fcn for UT_hash_table */
+#define uthash_tbl_free(ptr) free(ptr) /* free fcn for UT_hash_table */
+
+#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
+#define uthash_expand_fyi(tbl) /* can be defined to log expands */
+
+/* initial number of buckets */
+#define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */
+#define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */
+#define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */
+
+/* calculate the element whose hash handle address is hhe */
+#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)hhp) - (tbl)->hho))
+
+#define HASH_FIND(hh,head,keyptr,keylen,out) \
+do { \
+ unsigned _hf_bkt,_hf_hashv; \
+ out=TYPEOF(out)head; \
+ if (head) { \
+ HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
+ HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
+ keyptr,keylen,out); \
+ } \
+} while (0)
+
+#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
+ HASH_ADD_KEYPTR(hh,head,&add->fieldname,keylen_in,add)
+
+#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
+do { \
+ unsigned _ha_bkt; \
+ add->hh.next = NULL; \
+ add->hh.key = (char*)keyptr; \
+ add->hh.keylen = keylen_in; \
+ if (!(head)) { \
+ head = add; \
+ (head)->hh.prev = NULL; \
+ (head)->hh.tbl = (UT_hash_table*)uthash_tbl_malloc( \
+ sizeof(UT_hash_table)); \
+ if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
+ memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
+ (head)->hh.tbl->tail = &(add->hh); \
+ (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
+ (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
+ (head)->hh.tbl->hho = (char*)(&add->hh) - (char*)(add); \
+ (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_bkt_malloc( \
+ HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
+ if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
+ memset((head)->hh.tbl->buckets, 0, \
+ HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
+ } else { \
+ (head)->hh.tbl->tail->next = add; \
+ add->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
+ (head)->hh.tbl->tail = &(add->hh); \
+ } \
+ (head)->hh.tbl->num_items++; \
+ add->hh.tbl = (head)->hh.tbl; \
+ HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
+ (add)->hh.hashv, _ha_bkt); \
+ HASH_ADD_TO_BKT(hh,(head)->hh.tbl->buckets[_ha_bkt],add); \
+ HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
+ HASH_FSCK(hh,head); \
+} while(0)
+
+#define HASH_TO_BKT( hashv, num_bkts, bkt ) bkt = ((hashv) & ((num_bkts) - 1))
+
+/* delete "delptr" from the hash table.
+ * "the usual" patch-up process for the app-order doubly-linked-list.
+ * The use of _hd_hh_del below deserves special explanation.
+ * These used to be expressed using (delptr) but that led to a bug
+ * if someone used the same symbol for the head and deletee, like
+ * HASH_DELETE(hh,users,users);
+ * We want that to work, but by changing the head (users) below
+ * we were forfeiting our ability to further refer to the deletee (users)
+ * in the patch-up process. Solution: use scratch space in the table to
+ * copy the deletee pointer, then the latter references are via that
+ * scratch pointer rather than through the repointed (users) symbol.
+ */
+#define HASH_DELETE(hh,head,delptr) \
+do { \
+ unsigned _hd_bkt; \
+ struct UT_hash_handle *_hd_hh_del; \
+ if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
+ uthash_bkt_free((head)->hh.tbl->buckets ); \
+ uthash_tbl_free((head)->hh.tbl); \
+ head = NULL; \
+ } else { \
+ _hd_hh_del = &((delptr)->hh); \
+ if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
+ (head)->hh.tbl->tail = \
+ (UT_hash_handle*)((char*)((delptr)->hh.prev) + \
+ (head)->hh.tbl->hho); \
+ } \
+ if ((delptr)->hh.prev) { \
+ ((UT_hash_handle*)((char*)((delptr)->hh.prev) + \
+ (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
+ } else { \
+ head = TYPEOF(head)((delptr)->hh.next); \
+ } \
+ if (_hd_hh_del->next) { \
+ ((UT_hash_handle*)((char*)_hd_hh_del->next + \
+ (head)->hh.tbl->hho))->prev = \
+ _hd_hh_del->prev; \
+ } \
+ HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
+ HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
+ (head)->hh.tbl->num_items--; \
+ } \
+ HASH_FSCK(hh,head); \
+} while (0)
+
+
+/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
+#define HASH_FIND_STR(head,findstr,out) \
+ HASH_FIND(hh,head,findstr,strlen(findstr),out)
+#define HASH_ADD_STR(head,strfield,add) \
+ HASH_ADD(hh,head,strfield,strlen(add->strfield),add)
+#define HASH_FIND_INT(head,findint,out) \
+ HASH_FIND(hh,head,findint,sizeof(int),out)
+#define HASH_ADD_INT(head,intfield,add) \
+ HASH_ADD(hh,head,intfield,sizeof(int),add)
+#define HASH_DEL(head,delptr) \
+ HASH_DELETE(hh,head,delptr)
+
+/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
+ * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
+ */
+#ifdef HASH_DEBUG
+#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
+#define HASH_FSCK(hh,head) \
+do { \
+ unsigned _bkt_i; \
+ unsigned _count, _bkt_count; \
+ char *_prev; \
+ struct UT_hash_handle *_thh; \
+ if (head) { \
+ _count = 0; \
+ for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
+ _bkt_count = 0; \
+ _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
+ _prev = NULL; \
+ while (_thh) { \
+ if (_prev != (char*)(_thh->hh_prev)) { \
+ HASH_OOPS("invalid hh_prev %p, actual %p\n", \
+ _thh->hh_prev, _prev ); \
+ } \
+ _bkt_count++; \
+ _prev = (char*)(_thh); \
+ _thh = _thh->hh_next; \
+ } \
+ _count += _bkt_count; \
+ if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
+ HASH_OOPS("invalid bucket count %d, actual %d\n", \
+ (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
+ } \
+ } \
+ if (_count != (head)->hh.tbl->num_items) { \
+ HASH_OOPS("invalid hh item count %d, actual %d\n", \
+ (head)->hh.tbl->num_items, _count ); \
+ } \
+ /* traverse hh in app order; check next/prev integrity, count */ \
+ _count = 0; \
+ _prev = NULL; \
+ _thh = &(head)->hh; \
+ while (_thh) { \
+ _count++; \
+ if (_prev !=(char*)(_thh->prev)) { \
+ HASH_OOPS("invalid prev %p, actual %p\n", \
+ _thh->prev, _prev ); \
+ } \
+ _prev = ELMT_FROM_HH((head)->hh.tbl, _thh); \
+ _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
+ (head)->hh.tbl->hho) : NULL ); \
+ } \
+ if (_count != (head)->hh.tbl->num_items) { \
+ HASH_OOPS("invalid app item count %d, actual %d\n", \
+ (head)->hh.tbl->num_items, _count ); \
+ } \
+ } \
+} while (0)
+#else
+#define HASH_FSCK(hh,head)
+#endif
+
+/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
+ * the descriptor to which this macro is defined for tuning the hash function.
+ * The app can #include <unistd.h> to get the prototype for write(2). */
+#ifdef HASH_EMIT_KEYS
+#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
+do { \
+ unsigned _klen = fieldlen; \
+ write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
+ write(HASH_EMIT_KEYS, keyptr, fieldlen); \
+} while (0)
+#else
+#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
+#endif
+
+/* default to Jenkins unless specified e.g. DHASH_FUNCTION=HASH_SAX */
+#ifdef HASH_FUNCTION
+#define HASH_FCN HASH_FUNCTION
+#else
+#define HASH_FCN HASH_JEN
+#endif
+
+/* The Bernstein hash function, used in Perl prior to v5.6 */
+#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _hb_keylen=keylen; \
+ char *_hb_key=(char*)key; \
+ (hashv) = 0; \
+ while (_hb_keylen--) { (hashv) = ((hashv) * 33) + *_hb_key++; } \
+ bkt = (hashv) & (num_bkts-1); \
+} while (0)
+
+
+/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
+ * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
+#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _sx_i; \
+ hashv = 0; \
+ for(_sx_i=0; _sx_i < keylen; _sx_i++) \
+ hashv ^= (hashv << 5) + (hashv >> 2) + key[_sx_i]; \
+ bkt = hashv & (num_bkts-1); \
+} while (0)
+
+#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _fn_i; \
+ hashv = 2166136261UL; \
+ for(_fn_i=0; _fn_i < keylen; _fn_i++) \
+ hashv = (hashv * 16777619) ^ key[_fn_i]; \
+ bkt = hashv & (num_bkts-1); \
+} while(0);
+
+#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _ho_i; \
+ hashv = 0; \
+ for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
+ hashv += key[_ho_i]; \
+ hashv += (hashv << 10); \
+ hashv ^= (hashv >> 6); \
+ } \
+ hashv += (hashv << 3); \
+ hashv ^= (hashv >> 11); \
+ hashv += (hashv << 15); \
+ bkt = hashv & (num_bkts-1); \
+} while(0)
+
+#define HASH_JEN_MIX(a,b,c) \
+do { \
+ a -= b; a -= c; a ^= ( c >> 13 ); \
+ b -= c; b -= a; b ^= ( a << 8 ); \
+ c -= a; c -= b; c ^= ( b >> 13 ); \
+ a -= b; a -= c; a ^= ( c >> 12 ); \
+ b -= c; b -= a; b ^= ( a << 16 ); \
+ c -= a; c -= b; c ^= ( b >> 5 ); \
+ a -= b; a -= c; a ^= ( c >> 3 ); \
+ b -= c; b -= a; b ^= ( a << 10 ); \
+ c -= a; c -= b; c ^= ( b >> 15 ); \
+} while (0)
+
+#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \
+do { \
+ unsigned _hj_i,_hj_j,_hj_k; \
+ char *_hj_key=(char*)key; \
+ hashv = 0xfeedbeef; \
+ _hj_i = _hj_j = 0x9e3779b9; \
+ _hj_k = keylen; \
+ while (_hj_k >= 12) { \
+ _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
+ + ( (unsigned)_hj_key[2] << 16 ) \
+ + ( (unsigned)_hj_key[3] << 24 ) ); \
+ _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
+ + ( (unsigned)_hj_key[6] << 16 ) \
+ + ( (unsigned)_hj_key[7] << 24 ) ); \
+ hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
+ + ( (unsigned)_hj_key[10] << 16 ) \
+ + ( (unsigned)_hj_key[11] << 24 ) ); \
+ \
+ HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
+ \
+ _hj_key += 12; \
+ _hj_k -= 12; \
+ } \
+ hashv += keylen; \
+ switch ( _hj_k ) { \
+ case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \
+ case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \
+ case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \
+ case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \
+ case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \
+ case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \
+ case 5: _hj_j += _hj_key[4]; \
+ case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \
+ case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \
+ case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \
+ case 1: _hj_i += _hj_key[0]; \
+ } \
+ HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
+ bkt = hashv & (num_bkts-1); \
+} while(0)
+
+
+/* key comparison function; return 0 if keys equal */
+#define HASH_KEYCMP(a,b,len) memcmp(a,b,len)
+
+/* iterate over items in a known bucket to find desired item */
+#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
+out = TYPEOF(out)((head.hh_head) ? ELMT_FROM_HH(tbl,head.hh_head) : NULL); \
+while (out) { \
+ if (out->hh.keylen == keylen_in) { \
+ if ((HASH_KEYCMP(out->hh.key,keyptr,keylen_in)) == 0) break; \
+ } \
+ out= TYPEOF(out)((out->hh.hh_next) ? \
+ ELMT_FROM_HH(tbl,out->hh.hh_next) : NULL); \
+}
+
+/* add an item to a bucket */
+#define HASH_ADD_TO_BKT(hh,head,add) \
+ head.count++; \
+ add->hh.hh_next = head.hh_head; \
+ add->hh.hh_prev = NULL; \
+ if (head.hh_head) head.hh_head->hh_prev = &add->hh; \
+ head.hh_head=&add->hh; \
+ if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \
+ && add->hh.tbl->noexpand != 1) { \
+ HASH_EXPAND_BUCKETS(add->hh.tbl); \
+ }
+
+/* remove an item from a given bucket */
+#define HASH_DEL_IN_BKT(hh,head,hh_del) \
+ (head).count--; \
+ if ((head).hh_head == hh_del) { \
+ (head).hh_head = hh_del->hh_next; \
+ } \
+ if (hh_del->hh_prev) { \
+ hh_del->hh_prev->hh_next = hh_del->hh_next; \
+ } \
+ if (hh_del->hh_next) { \
+ hh_del->hh_next->hh_prev = hh_del->hh_prev; \
+ }
+
+/* Bucket expansion has the effect of doubling the number of buckets
+ * and redistributing the items into the new buckets. Ideally the
+ * items will distribute more or less evenly into the new buckets
+ * (the extent to which this is true is a measure of the quality of
+ * the hash function as it applies to the key domain).
+ *
+ * With the items distributed into more buckets, the chain length
+ * (item count) in each bucket is reduced. Thus by expanding buckets
+ * the hash keeps a bound on the chain length. This bounded chain
+ * length is the essence of how a hash provides constant time lookup.
+ *
+ * The calculation of tbl->ideal_chain_maxlen below deserves some
+ * explanation. First, keep in mind that we're calculating the ideal
+ * maximum chain length based on the *new* (doubled) bucket count.
+ * In fractions this is just n/b (n=number of items,b=new num buckets).
+ * Since the ideal chain length is an integer, we want to calculate
+ * ceil(n/b). We don't depend on floating point arithmetic in this
+ * hash, so to calculate ceil(n/b) with integers we could write
+ *
+ * ceil(n/b) = (n/b) + ((n%b)?1:0)
+ *
+ * and in fact a previous version of this hash did just that.
+ * But now we have improved things a bit by recognizing that b is
+ * always a power of two. We keep its base 2 log handy (call it lb),
+ * so now we can write this with a bit shift and logical AND:
+ *
+ * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
+ *
+ */
+#define HASH_EXPAND_BUCKETS(tbl) \
+do { \
+ unsigned _he_bkt; \
+ unsigned _he_bkt_i; \
+ struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
+ UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
+ _he_new_buckets = (UT_hash_bucket*)uthash_bkt_malloc( \
+ 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
+ if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
+ memset(_he_new_buckets, 0, \
+ 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
+ tbl->ideal_chain_maxlen = \
+ (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
+ ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
+ tbl->nonideal_items = 0; \
+ for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
+ { \
+ _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
+ while (_he_thh) { \
+ _he_hh_nxt = _he_thh->hh_next; \
+ HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
+ _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
+ if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
+ tbl->nonideal_items++; \
+ _he_newbkt->expand_mult = _he_newbkt->count / \
+ tbl->ideal_chain_maxlen; \
+ } \
+ _he_thh->hh_prev = NULL; \
+ _he_thh->hh_next = _he_newbkt->hh_head; \
+ if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
+ _he_thh; \
+ _he_newbkt->hh_head = _he_thh; \
+ _he_thh = _he_hh_nxt; \
+ } \
+ } \
+ tbl->num_buckets *= 2; \
+ tbl->log2_num_buckets++; \
+ uthash_bkt_free( tbl->buckets ); \
+ tbl->buckets = _he_new_buckets; \
+ tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
+ (tbl->ineff_expands+1) : 0; \
+ if (tbl->ineff_expands > 1) { \
+ tbl->noexpand=1; \
+ uthash_noexpand_fyi(tbl); \
+ } \
+ uthash_expand_fyi(tbl); \
+} while(0)
+
+
+/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
+/* Note that HASH_SORT assumes the hash handle name to be hh.
+ * HASH_SRT was added to allow the hash handle name to be passed in. */
+#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
+#define HASH_SRT(hh,head,cmpfcn) \
+do { \
+ unsigned _hs_i; \
+ unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
+ struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
+ if (head) { \
+ _hs_insize = 1; \
+ _hs_looping = 1; \
+ _hs_list = &((head)->hh); \
+ while (_hs_looping) { \
+ _hs_p = _hs_list; \
+ _hs_list = NULL; \
+ _hs_tail = NULL; \
+ _hs_nmerges = 0; \
+ while (_hs_p) { \
+ _hs_nmerges++; \
+ _hs_q = _hs_p; \
+ _hs_psize = 0; \
+ for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
+ _hs_psize++; \
+ _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
+ ((void*)((char*)(_hs_q->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ if (! (_hs_q) ) break; \
+ } \
+ _hs_qsize = _hs_insize; \
+ while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
+ if (_hs_psize == 0) { \
+ _hs_e = _hs_q; \
+ _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
+ ((void*)((char*)(_hs_q->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ _hs_qsize--; \
+ } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
+ _hs_e = _hs_p; \
+ _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
+ ((void*)((char*)(_hs_p->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ _hs_psize--; \
+ } else if (( \
+ cmpfcn(TYPEOF(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
+ TYPEOF(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
+ ) <= 0) { \
+ _hs_e = _hs_p; \
+ _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
+ ((void*)((char*)(_hs_p->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ _hs_psize--; \
+ } else { \
+ _hs_e = _hs_q; \
+ _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
+ ((void*)((char*)(_hs_q->next) + \
+ (head)->hh.tbl->hho)) : NULL); \
+ _hs_qsize--; \
+ } \
+ if ( _hs_tail ) { \
+ _hs_tail->next = ((_hs_e) ? \
+ ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
+ } else { \
+ _hs_list = _hs_e; \
+ } \
+ _hs_e->prev = ((_hs_tail) ? \
+ ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
+ _hs_tail = _hs_e; \
+ } \
+ _hs_p = _hs_q; \
+ } \
+ _hs_tail->next = NULL; \
+ if ( _hs_nmerges <= 1 ) { \
+ _hs_looping=0; \
+ (head)->hh.tbl->tail = _hs_tail; \
+ (head) = TYPEOF(head)ELMT_FROM_HH((head)->hh.tbl, _hs_list); \
+ } \
+ _hs_insize *= 2; \
+ } \
+ HASH_FSCK(hh,head); \
+ } \
+} while (0)
+
+/* obtain a count of items in the hash */
+#define HASH_COUNT(head) HASH_CNT(hh,head)
+#define HASH_CNT(hh,head) (head?(head->hh.tbl->num_items):0)
+
+typedef struct UT_hash_bucket {
+ struct UT_hash_handle *hh_head;
+ unsigned count;
+
+ /* expand_mult is normally set to 0. In this situation, the max chain length
+ * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
+ * the bucket's chain exceeds this length, bucket expansion is triggered).
+ * However, setting expand_mult to a non-zero value delays bucket expansion
+ * (that would be triggered by additions to this particular bucket)
+ * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
+ * (The multiplier is simply expand_mult+1). The whole idea of this
+ * multiplier is to reduce bucket expansions, since they are expensive, in
+ * situations where we know that a particular bucket tends to be overused.
+ * It is better to let its chain length grow to a longer yet-still-bounded
+ * value, than to do an O(n) bucket expansion too often.
+ */
+ unsigned expand_mult;
+
+} UT_hash_bucket;
+
+typedef struct UT_hash_table {
+ UT_hash_bucket *buckets;
+ unsigned num_buckets, log2_num_buckets;
+ unsigned num_items;
+ struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
+ ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */
+
+ /* in an ideal situation (all buckets used equally), no bucket would have
+ * more than ceil(#items/#buckets) items. that's the ideal chain length. */
+ unsigned ideal_chain_maxlen;
+
+ /* nonideal_items is the number of items in the hash whose chain position
+ * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
+ * hash distribution; reaching them in a chain traversal takes >ideal steps */
+ unsigned nonideal_items;
+
+ /* ineffective expands occur when a bucket doubling was performed, but
+ * afterward, more than half the items in the hash had nonideal chain
+ * positions. If this happens on two consecutive expansions we inhibit any
+ * further expansion, as it's not helping; this happens when the hash
+ * function isn't a good fit for the key domain. When expansion is inhibited
+ * the hash will still work, albeit no longer in constant time. */
+ unsigned ineff_expands, noexpand;
+
+
+} UT_hash_table;
+
+
+typedef struct UT_hash_handle {
+ struct UT_hash_table *tbl;
+ void *prev; /* prev element in app order */
+ void *next; /* next element in app order */
+ struct UT_hash_handle *hh_prev; /* previous hh in bucket order */
+ struct UT_hash_handle *hh_next; /* next hh in bucket order */
+ void *key; /* ptr to enclosing struct's key */
+ unsigned keylen; /* enclosing struct's key len */
+ unsigned hashv; /* result of hash-fcn(key) */
+} UT_hash_handle;
+
+#endif /* UTHASH_H */
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad)
Property changes on: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad
___________________________________________________________________
Name: svn:ignore
+ configure
configure.lineno
Makefile
Makefile.in
config.log
config.status
config.system
localdefs
*.o
*.lo
*.bb
*.bbg
*.da
*.gcov
*Debug*
*Release*
*.cache
.deps
.state-cache
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/Makefile.sm
===================================================================
--- mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/Makefile.sm 2009-03-04 22:01:42 UTC (rev 3937)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/Makefile.sm 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,5 +0,0 @@
-INCLUDES = -I. -I../../include -I${master_top_srcdir}/src/ch3/channels/nemesis/nemesis/include -I${master_top_srcdir}/src/include \
- -I${top_builddir}/src/include
-lib${MPILIBNAME}_a_SOURCES = \
- newmad_finalize.c newmad_init.c newmad_poll.c newmad_send.c \
- newmad_register.c newmad_test.c newmad_cancel.c
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/Makefile.sm (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/Makefile.sm)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/Makefile.sm (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/Makefile.sm 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,5 @@
+INCLUDES = -I. -I../../include -I${master_top_srcdir}/src/ch3/channels/nemesis/nemesis/include -I${master_top_srcdir}/src/include \
+ -I${top_builddir}/src/include
+lib${MPILIBNAME}_a_SOURCES = \
+ newmad_finalize.c newmad_init.c newmad_poll.c newmad_send.c \
+ newmad_register.c newmad_test.c newmad_cancel.c
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_finalize.c
===================================================================
--- mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_finalize.c 2009-03-04 22:01:42 UTC (rev 3937)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_finalize.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,38 +0,0 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
-/*
- * (C) 2006 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#include "newmad_impl.h"
-
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_finalize
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_newmad_finalize()
-{
- int mpi_errno = MPI_SUCCESS;
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_ckpt_shutdown
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_newmad_ckpt_shutdown ()
-{
- int mpi_errno = MPI_SUCCESS;
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_finalize.c (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_finalize.c)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_finalize.c (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_finalize.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,38 @@
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "newmad_impl.h"
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_finalize
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_newmad_finalize()
+{
+ int mpi_errno = MPI_SUCCESS;
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_ckpt_shutdown
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_newmad_ckpt_shutdown ()
+{
+ int mpi_errno = MPI_SUCCESS;
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_impl.h
===================================================================
--- mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_impl.h 2009-03-04 22:01:42 UTC (rev 3937)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_impl.h 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,172 +0,0 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
-/*
- * (C) 2006 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#ifndef NEWMAD_MODULE_IMPL_H
-#define NEWMAD_MODULE_IMPL_H
-//#include <linux/types.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <stdint.h>
-#include <nm_public.h>
-#include <nm_sendrecv_interface.h>
-#include <nm_predictions.h>
-#include "mpid_nem_impl.h"
-
-int MPID_nem_newmad_init (MPID_nem_queue_ptr_t proc_recv_queue, MPID_nem_queue_ptr_t proc_free_queue,
- MPID_nem_cell_ptr_t proc_elements,int num_proc_elements,
- MPID_nem_cell_ptr_t module_elements, int num_module_elements,
- MPID_nem_queue_ptr_t *module_free_queue, int ckpt_restart,
- MPIDI_PG_t *pg_p, int pg_rank, char **bc_val_p, int *val_max_sz_p);
-int MPID_nem_newmad_finalize (void);
-int MPID_nem_newmad_ckpt_shutdown (void);
-int MPID_nem_newmad_poll(MPID_nem_poll_dir_t in_or_out);
-int MPID_nem_newmad_send (MPIDI_VC_t *vc, MPID_nem_cell_ptr_t cell, int datalen);
-int MPID_nem_newmad_get_business_card (int my_rank, char **bc_val_p, int *val_max_sz_p);
-int MPID_nem_newmad_connect_to_root (const char *business_card, MPIDI_VC_t *new_vc);
-int MPID_nem_newmad_vc_init (MPIDI_VC_t *vc);
-int MPID_nem_newmad_vc_destroy(MPIDI_VC_t *vc);
-int MPID_nem_newmad_vc_terminate (MPIDI_VC_t *vc);
-
-/* alternate interface */
-int MPID_nem_newmad_iSendContig(MPIDI_VC_t *vc, MPID_Request *sreq, void *hdr, MPIDI_msg_sz_t hdr_sz,
- void *data, MPIDI_msg_sz_t data_sz);
-int MPID_nem_newmad_iStartContigMsg(MPIDI_VC_t *vc, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data,
- MPIDI_msg_sz_t data_sz, MPID_Request **sreq_ptr);
-int MPID_nem_newmad_SendNoncontig(MPIDI_VC_t *vc, MPID_Request *sreq, void *header, MPIDI_msg_sz_t hdr_sz);
-
-/* Direct Routines */
-int MPID_nem_newmad_directSend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int dest, int tag,
- MPID_Comm * comm, int context_offset, MPID_Request **sreq_p);
-int MPID_nem_newmad_directSsend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int dest, int tag,
- MPID_Comm * comm, int context_offset,MPID_Request **sreq_p);
-int MPID_nem_newmad_directRecv(MPIDI_VC_t *vc, MPID_Request *rreq);
-int MPID_nem_newmad_cancel_send(MPIDI_VC_t *vc, MPID_Request *sreq);
-int MPID_nem_newmad_cancel_recv(MPIDI_VC_t *vc, MPID_Request *rreq);
-
-/* Any source management */
-int MPID_nem_newmad_anysource_posted(MPID_Request *rreq);
-int MPID_nem_newmad_anysource_matched(MPID_Request *rreq);
-
-/* Callbacks for events */
-void MPID_nem_newmad_get_adi_msg(nm_sr_event_t event, nm_sr_event_info_t*info);
-void MPID_nem_newmad_handle_sreq(nm_sr_event_t event, nm_sr_event_info_t*info);
-void MPID_nem_newmad_handle_rreq(nm_sr_event_t event, nm_sr_event_info_t*info);
-
-/* Dtype management */
-int MPID_nem_newmad_process_sdtype(MPID_Request **sreq_p, MPI_Datatype datatype, MPID_Datatype * dt_ptr, const void *buf,
- int count, MPIDI_msg_sz_t data_sz, struct iovec **newmad_iov, int *num_iov, int first_taken);
-int MPID_nem_newmad_process_rdtype(MPID_Request **rreq_p, MPID_Datatype * dt_ptr, MPIDI_msg_sz_t data_sz,struct iovec **newmad_iov,
- int *num_iov);
-
-/* Connection management*/
-int MPID_nem_newmad_send_conn_info (MPIDI_VC_t *vc);
-
-#define MPID_NEM_NMAD_MAX_STRING_SIZE ((MPID_NEM_MAX_NETMOD_STRING_LEN)/2)
-#define MPID_NEM_NMAD_MAX_NETS 4
-
-typedef nm_gate_id_t mpid_nem_newmad_p_gate_t;
-
-typedef struct MPID_nem_newmad_vc_area_internal
-{
- char hostname[MPID_NEM_NMAD_MAX_STRING_SIZE];
- char url[MPID_NEM_NMAD_MAX_NETS][MPID_NEM_NMAD_MAX_STRING_SIZE];
- uint8_t drv_id[MPID_NEM_NMAD_MAX_NETS];
- mpid_nem_newmad_p_gate_t p_gate;
-} MPID_nem_newmad_vc_area_internal_t;
-
-/* The current max size for the whole structure is 128 bytes */
-typedef struct
-{
- MPID_nem_newmad_vc_area_internal_t *area;
-} MPID_nem_newmad_vc_area;
-/* accessor macro to private fields in VC */
-#define VC_FIELD(vcp, field) (((MPID_nem_newmad_vc_area *)((MPIDI_CH3I_VC *)((vcp)->channel_private))->netmod_area.padding)->area->field)
-
-/* The req provides a generic buffer in which network modules can store
- private fields This removes all dependencies from the req structure
- on the network module, facilitating dynamic module loading. */
-typedef struct
-{
- nm_sr_request_t newmad_req;
-} MPID_nem_newmad_req_area;
-/* accessor macro to private fields in REQ */
-#define REQ_FIELD(reqp, field) (((MPID_nem_newmad_req_area *)((reqp)->ch.netmod_area.padding))->field)
-
-#if CH3_RANK_BITS == 16
-#define NBITS_TAG 32
-typedef int32_t Nmad_Nem_tag_t;
-#elif CH3_RANK_BITS == 32
-#define NBITS_TAG 16
-typedef int16_t Nmad_Nem_tag_t;
-#endif /* CH3_RANK_BITS */
-
-#define NBITS_RANK CH3_RANK_BITS
-#define NBITS_CTXT 16
-#define NBITS_PGRANK (sizeof(int)*8)
-
-#define NEM_NMAD_MATCHING_BITS (NBITS_TAG+NBITS_RANK+NBITS_CTXT)
-#define SHIFT_TAG (NBITS_RANK+NBITS_CTXT)
-#define SHIFT_RANK (NBITS_CTXT)
-#define SHIFT_PGRANK (NBITS_CTXT)
-#define SHIFT_CTXT (0)
-
-#define NEM_NMAD_MAX_TAG ((UINT64_C(1)<<NBITS_TAG) -1)
-#define NEM_NMAD_MAX_RANK ((UINT64_C(1)<<NBITS_RANK) -1)
-#define NEM_NMAD_MAX_CTXT ((UINT64_C(1)<<NBITS_CTXT) -1)
-#define NEM_NMAD_MAX_PGRANK ((UINT64_C(1)<<NBITS_PGRANK)-1)
-
-#define NEM_NMAD_TAG_MASK (NEM_NMAD_MAX_TAG <<SHIFT_TAG )
-#define NEM_NMAD_RANK_MASK (NEM_NMAD_MAX_RANK<<SHIFT_RANK)
-#define NEM_NMAD_CTXT_MASK (NEM_NMAD_MAX_CTXT<<SHIFT_CTXT)
-#define NEM_NMAD_PGRANK_MASK (NEM_NMAD_MAX_PGRANK<<SHIFT_PGRANK)
-
-#define NEM_NMAD_SET_TAG(_match, _tag) do { \
- MPIU_Assert((_tag >= 0)&&(_tag <= (NEM_NMAD_MAX_TAG))); \
- ((_match) |= (((nm_tag_t)((_tag)&(NEM_NMAD_MAX_TAG))) << SHIFT_TAG)); \
- }while(0)
-#define NEM_NMAD_SET_SRC(_match, _src) do { \
- MPIU_Assert(_src >= 0)&&(_src<=(NEM_NMAD_MAX_RANK))); \
- ((_match) |= (((nm_tag_t)(_src)) << SHIFT_RANK)); \
- }while(0)
-#define NEM_NMAD_SET_CTXT(_match, _ctxt) do { \
- MPIU_Assert(_ctxt >= 0)&&(_ctxt<=(NEM_NMAD_MAX_CTXT)));\
- ((_match) |= (((nm_tag_t)(_ctxt)) << SHIFT_CTXT)); \
- }while(0)
-#define NEM_NMAD_SET_PGRANK(_match, _pg_rank) do { \
- ((_match) |= (((nm_tag_t)(_pg_rank)) << SHIFT_PGRANK));\
- }while(0)
-
-#define NEM_NMAD_MATCH_GET_TAG(_match, _tag) do{ \
- ((_tag) = ((Nmad_Nem_tag_t)(((_match) & NEM_NMAD_TAG_MASK) >> SHIFT_TAG)));\
- }while(0)
-#define NEM_NMAD_MATCH_GET_RANK(_match, _rank) do{ \
- ((_rank) = ((MPIR_Rank_t)(((_match) & NEM_NMAD_RANK_MASK) >> SHIFT_RANK)));\
- }while(0)
-#define NEM_NMAD_MATCH_GET_CTXT(_match, _ctxt) do{ \
- ((_ctxt) = ((MPIR_Context_id_t)(((_match) & NEM_NMAD_CTXT_MASK) >> SHIFT_CTXT)));\
- }while(0)
-#define NEM_NMAD_MATCH_GET_PGRANK(_match, _pg_rank) do{ \
- ((_pg_rank) = ((int)(((_match) & NEM_NMAD_PGRANK_MASK) >> SHIFT_PGRANK))); \
- }while(0)
-
-#define NEM_NMAD_INTRA_CTXT (0x0000000c)
-#define NEM_NMAD_SET_MATCH(_match,_tag,_rank,_context ) do{ \
- MPIU_Assert((_tag >= 0) &&(_tag <= (NEM_NMAD_MAX_TAG))); \
- MPIU_Assert((_rank >= 0) &&(_rank<=(NEM_NMAD_MAX_RANK))); \
- MPIU_Assert((_context >= 0)&&(_context<=(NEM_NMAD_MAX_CTXT))); \
- (_match)=((((nm_tag_t)(_tag)) << SHIFT_TAG) \
- |(((nm_tag_t)(_rank)) << SHIFT_RANK) \
- |(((nm_tag_t)(_context))<< SHIFT_CTXT)); \
- }while(0)
-#define NEM_NMAD_DIRECT_MATCH(_match,_tag,_rank,_context) NEM_NMAD_SET_MATCH(_match,_tag,_rank,_context)
-#define NEM_NMAD_ADI_MATCH(_match) NEM_NMAD_SET_MATCH(_match,0,0,NEM_NMAD_INTRA_CTXT)
-
-extern nm_core_t mpid_nem_newmad_pcore;
-extern mpid_nem_newmad_p_gate_t *mpid_nem_newmad_gate_to_rank;
-extern int mpid_nem_newmad_pending_send_req;
-
-#endif //NEWMAD_MODULE_IMPL_H
-
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_impl.h (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_impl.h)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_impl.h (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_impl.h 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,172 @@
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#ifndef NEWMAD_MODULE_IMPL_H
+#define NEWMAD_MODULE_IMPL_H
+//#include <linux/types.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <stdint.h>
+#include <nm_public.h>
+#include <nm_sendrecv_interface.h>
+#include <nm_predictions.h>
+#include "mpid_nem_impl.h"
+
+int MPID_nem_newmad_init (MPID_nem_queue_ptr_t proc_recv_queue, MPID_nem_queue_ptr_t proc_free_queue,
+ MPID_nem_cell_ptr_t proc_elements,int num_proc_elements,
+ MPID_nem_cell_ptr_t module_elements, int num_module_elements,
+ MPID_nem_queue_ptr_t *module_free_queue, int ckpt_restart,
+ MPIDI_PG_t *pg_p, int pg_rank, char **bc_val_p, int *val_max_sz_p);
+int MPID_nem_newmad_finalize (void);
+int MPID_nem_newmad_ckpt_shutdown (void);
+int MPID_nem_newmad_poll(MPID_nem_poll_dir_t in_or_out);
+int MPID_nem_newmad_send (MPIDI_VC_t *vc, MPID_nem_cell_ptr_t cell, int datalen);
+int MPID_nem_newmad_get_business_card (int my_rank, char **bc_val_p, int *val_max_sz_p);
+int MPID_nem_newmad_connect_to_root (const char *business_card, MPIDI_VC_t *new_vc);
+int MPID_nem_newmad_vc_init (MPIDI_VC_t *vc);
+int MPID_nem_newmad_vc_destroy(MPIDI_VC_t *vc);
+int MPID_nem_newmad_vc_terminate (MPIDI_VC_t *vc);
+
+/* alternate interface */
+int MPID_nem_newmad_iSendContig(MPIDI_VC_t *vc, MPID_Request *sreq, void *hdr, MPIDI_msg_sz_t hdr_sz,
+ void *data, MPIDI_msg_sz_t data_sz);
+int MPID_nem_newmad_iStartContigMsg(MPIDI_VC_t *vc, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data,
+ MPIDI_msg_sz_t data_sz, MPID_Request **sreq_ptr);
+int MPID_nem_newmad_SendNoncontig(MPIDI_VC_t *vc, MPID_Request *sreq, void *header, MPIDI_msg_sz_t hdr_sz);
+
+/* Direct Routines */
+int MPID_nem_newmad_directSend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int dest, int tag,
+ MPID_Comm * comm, int context_offset, MPID_Request **sreq_p);
+int MPID_nem_newmad_directSsend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int dest, int tag,
+ MPID_Comm * comm, int context_offset,MPID_Request **sreq_p);
+int MPID_nem_newmad_directRecv(MPIDI_VC_t *vc, MPID_Request *rreq);
+int MPID_nem_newmad_cancel_send(MPIDI_VC_t *vc, MPID_Request *sreq);
+int MPID_nem_newmad_cancel_recv(MPIDI_VC_t *vc, MPID_Request *rreq);
+
+/* Any source management */
+int MPID_nem_newmad_anysource_posted(MPID_Request *rreq);
+int MPID_nem_newmad_anysource_matched(MPID_Request *rreq);
+
+/* Callbacks for events */
+void MPID_nem_newmad_get_adi_msg(nm_sr_event_t event, nm_sr_event_info_t*info);
+void MPID_nem_newmad_handle_sreq(nm_sr_event_t event, nm_sr_event_info_t*info);
+void MPID_nem_newmad_handle_rreq(nm_sr_event_t event, nm_sr_event_info_t*info);
+
+/* Dtype management */
+int MPID_nem_newmad_process_sdtype(MPID_Request **sreq_p, MPI_Datatype datatype, MPID_Datatype * dt_ptr, const void *buf,
+ int count, MPIDI_msg_sz_t data_sz, struct iovec **newmad_iov, int *num_iov, int first_taken);
+int MPID_nem_newmad_process_rdtype(MPID_Request **rreq_p, MPID_Datatype * dt_ptr, MPIDI_msg_sz_t data_sz,struct iovec **newmad_iov,
+ int *num_iov);
+
+/* Connection management*/
+int MPID_nem_newmad_send_conn_info (MPIDI_VC_t *vc);
+
+#define MPID_NEM_NMAD_MAX_STRING_SIZE ((MPID_NEM_MAX_NETMOD_STRING_LEN)/2)
+#define MPID_NEM_NMAD_MAX_NETS 4
+
+typedef nm_gate_id_t mpid_nem_newmad_p_gate_t;
+
+typedef struct MPID_nem_newmad_vc_area_internal
+{
+ char hostname[MPID_NEM_NMAD_MAX_STRING_SIZE];
+ char url[MPID_NEM_NMAD_MAX_NETS][MPID_NEM_NMAD_MAX_STRING_SIZE];
+ uint8_t drv_id[MPID_NEM_NMAD_MAX_NETS];
+ mpid_nem_newmad_p_gate_t p_gate;
+} MPID_nem_newmad_vc_area_internal_t;
+
+/* The current max size for the whole structure is 128 bytes */
+typedef struct
+{
+ MPID_nem_newmad_vc_area_internal_t *area;
+} MPID_nem_newmad_vc_area;
+/* accessor macro to private fields in VC */
+#define VC_FIELD(vcp, field) (((MPID_nem_newmad_vc_area *)((MPIDI_CH3I_VC *)((vcp)->channel_private))->netmod_area.padding)->area->field)
+
+/* The req provides a generic buffer in which network modules can store
+ private fields This removes all dependencies from the req structure
+ on the network module, facilitating dynamic module loading. */
+typedef struct
+{
+ nm_sr_request_t newmad_req;
+} MPID_nem_newmad_req_area;
+/* accessor macro to private fields in REQ */
+#define REQ_FIELD(reqp, field) (((MPID_nem_newmad_req_area *)((reqp)->ch.netmod_area.padding))->field)
+
+#if CH3_RANK_BITS == 16
+#define NBITS_TAG 32
+typedef int32_t Nmad_Nem_tag_t;
+#elif CH3_RANK_BITS == 32
+#define NBITS_TAG 16
+typedef int16_t Nmad_Nem_tag_t;
+#endif /* CH3_RANK_BITS */
+
+#define NBITS_RANK CH3_RANK_BITS
+#define NBITS_CTXT 16
+#define NBITS_PGRANK (sizeof(int)*8)
+
+#define NEM_NMAD_MATCHING_BITS (NBITS_TAG+NBITS_RANK+NBITS_CTXT)
+#define SHIFT_TAG (NBITS_RANK+NBITS_CTXT)
+#define SHIFT_RANK (NBITS_CTXT)
+#define SHIFT_PGRANK (NBITS_CTXT)
+#define SHIFT_CTXT (0)
+
+#define NEM_NMAD_MAX_TAG ((UINT64_C(1)<<NBITS_TAG) -1)
+#define NEM_NMAD_MAX_RANK ((UINT64_C(1)<<NBITS_RANK) -1)
+#define NEM_NMAD_MAX_CTXT ((UINT64_C(1)<<NBITS_CTXT) -1)
+#define NEM_NMAD_MAX_PGRANK ((UINT64_C(1)<<NBITS_PGRANK)-1)
+
+#define NEM_NMAD_TAG_MASK (NEM_NMAD_MAX_TAG <<SHIFT_TAG )
+#define NEM_NMAD_RANK_MASK (NEM_NMAD_MAX_RANK<<SHIFT_RANK)
+#define NEM_NMAD_CTXT_MASK (NEM_NMAD_MAX_CTXT<<SHIFT_CTXT)
+#define NEM_NMAD_PGRANK_MASK (NEM_NMAD_MAX_PGRANK<<SHIFT_PGRANK)
+
+#define NEM_NMAD_SET_TAG(_match, _tag) do { \
+ MPIU_Assert((_tag >= 0)&&(_tag <= (NEM_NMAD_MAX_TAG))); \
+ ((_match) |= (((nm_tag_t)((_tag)&(NEM_NMAD_MAX_TAG))) << SHIFT_TAG)); \
+ }while(0)
+#define NEM_NMAD_SET_SRC(_match, _src) do { \
+ MPIU_Assert(_src >= 0)&&(_src<=(NEM_NMAD_MAX_RANK))); \
+ ((_match) |= (((nm_tag_t)(_src)) << SHIFT_RANK)); \
+ }while(0)
+#define NEM_NMAD_SET_CTXT(_match, _ctxt) do { \
+ MPIU_Assert(_ctxt >= 0)&&(_ctxt<=(NEM_NMAD_MAX_CTXT)));\
+ ((_match) |= (((nm_tag_t)(_ctxt)) << SHIFT_CTXT)); \
+ }while(0)
+#define NEM_NMAD_SET_PGRANK(_match, _pg_rank) do { \
+ ((_match) |= (((nm_tag_t)(_pg_rank)) << SHIFT_PGRANK));\
+ }while(0)
+
+#define NEM_NMAD_MATCH_GET_TAG(_match, _tag) do{ \
+ ((_tag) = ((Nmad_Nem_tag_t)(((_match) & NEM_NMAD_TAG_MASK) >> SHIFT_TAG)));\
+ }while(0)
+#define NEM_NMAD_MATCH_GET_RANK(_match, _rank) do{ \
+ ((_rank) = ((MPIR_Rank_t)(((_match) & NEM_NMAD_RANK_MASK) >> SHIFT_RANK)));\
+ }while(0)
+#define NEM_NMAD_MATCH_GET_CTXT(_match, _ctxt) do{ \
+ ((_ctxt) = ((MPIR_Context_id_t)(((_match) & NEM_NMAD_CTXT_MASK) >> SHIFT_CTXT)));\
+ }while(0)
+#define NEM_NMAD_MATCH_GET_PGRANK(_match, _pg_rank) do{ \
+ ((_pg_rank) = ((int)(((_match) & NEM_NMAD_PGRANK_MASK) >> SHIFT_PGRANK))); \
+ }while(0)
+
+#define NEM_NMAD_INTRA_CTXT (0x0000000c)
+#define NEM_NMAD_SET_MATCH(_match,_tag,_rank,_context ) do{ \
+ MPIU_Assert((_tag >= 0) &&(_tag <= (NEM_NMAD_MAX_TAG))); \
+ MPIU_Assert((_rank >= 0) &&(_rank<=(NEM_NMAD_MAX_RANK))); \
+ MPIU_Assert((_context >= 0)&&(_context<=(NEM_NMAD_MAX_CTXT))); \
+ (_match)=((((nm_tag_t)(_tag)) << SHIFT_TAG) \
+ |(((nm_tag_t)(_rank)) << SHIFT_RANK) \
+ |(((nm_tag_t)(_context))<< SHIFT_CTXT)); \
+ }while(0)
+#define NEM_NMAD_DIRECT_MATCH(_match,_tag,_rank,_context) NEM_NMAD_SET_MATCH(_match,_tag,_rank,_context)
+#define NEM_NMAD_ADI_MATCH(_match) NEM_NMAD_SET_MATCH(_match,0,0,NEM_NMAD_INTRA_CTXT)
+
+extern nm_core_t mpid_nem_newmad_pcore;
+extern mpid_nem_newmad_p_gate_t *mpid_nem_newmad_gate_to_rank;
+extern int mpid_nem_newmad_pending_send_req;
+
+#endif //NEWMAD_MODULE_IMPL_H
+
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_init.c
===================================================================
--- mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_init.c 2009-03-04 22:01:42 UTC (rev 3937)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_init.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,385 +0,0 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
-/*
- * (C) 2006 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#include "mpid_nem_impl.h"
-#include "newmad_impl.h"
-
-#define MPIDI_CH3I_HOSTNAME_KEY "hostname_id"
-
-MPID_nem_netmod_funcs_t MPIDI_nem_newmad_funcs = {
- MPID_nem_newmad_init,
- MPID_nem_newmad_finalize,
- MPID_nem_newmad_ckpt_shutdown,
- MPID_nem_newmad_poll,
- MPID_nem_newmad_send,
- MPID_nem_newmad_get_business_card,
- MPID_nem_newmad_connect_to_root,
- MPID_nem_newmad_vc_init,
- MPID_nem_newmad_vc_destroy,
- MPID_nem_newmad_vc_terminate
-};
-
-static MPIDI_Comm_ops_t comm_ops = {
- MPID_nem_newmad_directRecv, /* recv_posted */
-
- MPID_nem_newmad_directSend, /* send */
- MPID_nem_newmad_directSend, /* rsend */
- MPID_nem_newmad_directSsend, /* ssend */
- MPID_nem_newmad_directSend, /* isend */
- MPID_nem_newmad_directSend, /* irsend */
- MPID_nem_newmad_directSsend, /* issend */
-
- NULL, /* send_init */
- NULL, /* bsend_init */
- NULL, /* rsend_init */
- NULL, /* ssend_init */
- NULL, /* startall */
-
- MPID_nem_newmad_cancel_send,/* cancel_send */
- MPID_nem_newmad_cancel_recv /* cancel_recv */
-};
-
-//typedef int (*nm_driver_load)(struct nm_drv_ops*);
-
-static int mpid_nem_newmad_myrank;
-static nm_drv_id_t drv_id[MPID_NEM_NMAD_MAX_NETS];
-static char *url[MPID_NEM_NMAD_MAX_NETS];
-static char url_keys[MPID_NEM_NMAD_MAX_NETS][MPID_NEM_NMAD_MAX_STRING_SIZE] = {"url_id0","url_id1","url_id2","url_id3"};
-static int mpid_nem_newmad_num_rails = 1 ;
-nm_core_t mpid_nem_newmad_pcore;
-int mpid_nem_newmad_pending_send_req = 0;
-mpid_nem_newmad_p_gate_t *mpid_nem_newmad_gate_to_rank = NULL;
-
-
-#ifdef MPID_MAD_MODULE_MULTIRAIL
-static puk_component_t *p_driver_load_array;
-static void mpid_nem_newmad_rails(void)
-{
- int index = 0;
- mpid_nem_newmad_num_rails = 0 ;
-#if defined CONFIG_IBVERBS
- mpid_nem_newmad_num_rails++;
-#endif
-#if defined CONFIG_MX
- mpid_nem_newmad_num_rails++;
-#endif
-#if defined CONFIG_GM
- mpid_nem_newmad_num_rails++;
-#endif
-#if defined CONFIG_QSNET
- mpid_nem_newmad_num_rails++;
-#endif
-#if defined CONFIG_TCP
- mpid_nem_newmad_num_rails++;
-#endif
-
- p_driver_load_array = (puk_component_t *)MPIU_Malloc( mpid_nem_newmad_num_rails*sizeof(puk_component_t));
-
-#if defined CONFIG_IBVERBS
- p_driver_load_array[index++] = nm_core_component_load("driver", "ibverbs");
-#endif
-#if defined CONFIG_MX
- p_driver_load_array[index++] = nm_core_component_load("driver", "mx");
-#endif
-#if defined CONFIG_GM
- p_driver_load_array[index++] = nm_core_component_load("driver", "gm");
-#endif
-#if defined CONFIG_QSNET
- p_driver_load_array[index++] = nm_core_component_load("driver", "qsnet");
-#endif
-#if defined CONFIG_TCP
- p_driver_load_array[index++] = nm_core_component_load("driver", "tcp");
-#endif
-}
-#else //MPID_MAD_MODULE_MULTIRAIL
-static puk_component_t p_driver_load;
-static void mpid_nem_newmad_rails(void)
-{
-# if defined CONFIG_IBVERBS
- p_driver_load = nm_core_component_load("driver", "ibverbs");
-# elif defined CONFIG_MX
- p_driver_load = nm_core_component_load("driver", "mx");
-# elif defined CONFIG_GM
- p_driver_load = nm_core_component_load("driver", "gm");
-# elif defined CONFIG_QSNET
- p_driver_load = nm_core_component_load("driver", "qsnet");
-# elif defined CONFIG_TCP
- p_driver_load = nm_core_component_load("driver", "tcp");
-# endif
-}
-#endif //MULTIRAIL
-
-
-#undef FUNCNAME
-#define FUNCNAME init_mad
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-static int init_mad( MPIDI_PG_t *pg_p )
-{
-#if 0
-#ifdef CONFIG_TCP
-#ifndef MPID_MAD_MODULE_MULTIRAIL
- char hostname[16];
-#endif //TCP
-#endif //MPID_MAD_MODULE_MULTIRAIL
-#endif
- int index = 0;
- int ret;
- int mpi_errno = MPI_SUCCESS;
- char *dummy_argv[1] = {NULL};
- int dummy_argc = 1;
-
- ret = nm_core_init(&dummy_argc,dummy_argv, &mpid_nem_newmad_pcore);
- if (ret != NM_ESUCCESS){
- fprintf(stdout,"nm_core_init returned err = %d\n", ret);
- }
-
- mpid_nem_newmad_rails();
-#ifdef MPID_MAD_MODULE_MULTIRAIL
- fprintf(stdout,"Number of rails : %i\n",nem_mad_num_rail);
-#endif //MPID_MAD_MODULE_MULTIRAIL
-
- ret = nm_sr_init(mpid_nem_newmad_pcore);
- if(ret != NM_ESUCCESS) {
- fprintf(stdout,"nm_so_pack_interface_init return err = %d\n", ret);
- }
-#ifdef MPID_MAD_MODULE_MULTIRAIL
-#warning "========== MAD MODULE MULTIRAIL CODE ENABLED ============="
- ret = nm_core_driver_load_init_some(mpid_nem_newmad_pcore, mpid_nem_newmad_num_rails,
- p_driver_load_array, drv_id, url);
-#else //MPID_MAD_MODULE_MULTIRAIL
- ret = nm_core_driver_load_init(mpid_nem_newmad_pcore,
- p_driver_load, &drv_id[0], &url[0]);
-#endif //MPID_MAD_MODULE_MULTIRAIL
- if (ret != NM_ESUCCESS) {
- fprintf(stdout,"nm_core_driver_init(some) returned ret = %d\n", ret);
- }
-
-#if 0
-#ifdef CONFIG_TCP
-#ifndef MPID_MAD_MODULE_MULTIRAIL
- {
- gethostname(hostname, 16);
- strcat(hostname,":");
- strcat(hostname,url[0]);
- url[0] = (char *)MPIU_Malloc(strlen(hostname)+1);
- strcpy(url[0],hostname);
- }
-#endif //!MULTIRAIL
-#endif // TCP
-#endif
-
- nm_ns_init(mpid_nem_newmad_pcore);
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-
-
-/*
- int
- MPID_nem_newmad_init(MPID_nem_queue_ptr_t proc_recv_queue, MPID_nem_queue_ptr_t proc_free_queue, MPID_nem_cell_ptr_t proc_elements, int num_proc_elements,
- MPID_nem_cell_ptr_t module_elements, int num_module_elements,
- MPID_nem_queue_ptr_t *module_free_queue)
-
- IN
- proc_recv_queue -- main recv queue for the process
- proc_free_queue -- main free queueu for the process
- proc_elements -- pointer to the process' queue elements
- num_proc_elements -- number of process' queue elements
- module_elements -- pointer to queue elements to be used by this module
- num_module_elements -- number of queue elements for this module
- ckpt_restart -- true if this is a restart from a checkpoint. In a restart, the network needs to be brought up again, but
- we want to keep things like sequence numbers.
- OUT
- free_queue -- pointer to the free queue for this module. The process will return elements to
- this queue
-*/
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_init
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_newmad_init (MPID_nem_queue_ptr_t proc_recv_queue,
- MPID_nem_queue_ptr_t proc_free_queue,
- MPID_nem_cell_ptr_t proc_elements, int num_proc_elements,
- MPID_nem_cell_ptr_t module_elements, int num_module_elements,
- MPID_nem_queue_ptr_t *module_free_queue, int ckpt_restart,
- MPIDI_PG_t *pg_p, int pg_rank,
- char **bc_val_p, int *val_max_sz_p)
-{
- int mpi_errno = MPI_SUCCESS ;
- int index;
-
- fprintf(stdout,"Size of MPID_nem_mad_module_vc_area_internal_t : %i | size of nm_sr_request_t :%i | Size of req_area : %i\n",
- sizeof(MPID_nem_newmad_vc_area_internal_t),sizeof(nm_sr_request_t), sizeof(MPID_nem_newmad_req_area));
- MPIU_Assert( sizeof(MPID_nem_newmad_vc_area_internal_t) <= MPID_NEM_VC_NETMOD_AREA_LEN);
- MPIU_Assert( sizeof(MPID_nem_newmad_req_area) <= MPID_NEM_REQ_NETMOD_AREA_LEN);
-
- mpid_nem_newmad_myrank = pg_rank;
- for (index = 0; index < MPID_NEM_NMAD_MAX_NETS ; index++)
- {
- drv_id[index] = -1;
- url[index] = NULL;
- }
-
- init_mad(pg_p);
-
- mpi_errno = MPID_nem_mad_module_get_business_card (pg_rank,bc_val_p, val_max_sz_p);
- if (mpi_errno) MPIU_ERR_POP (mpi_errno);
-
- nm_sr_monitor(mpid_nem_newmad_pcore, NM_SR_EVENT_RECV_UNEXPECTED, &MPID_nem_newmad_get_adi_msg);
- nm_sr_monitor(mpid_nem_newmad_pcore, NM_SR_EVENT_SEND_COMPLETED, &MPID_nem_newmad_handle_sreq);
- //nm_sr_monitor(mpid_nem_newmad_pcore, NM_SR_EVENT_RECV_COMPLETED, &MPID_nem_newmad_handle_rreq);
-
- mpi_errno = MPIDI_CH3I_Register_anysource_notification(MPID_nem_newmad_anysource_posted,
- MPID_nem_newmad_anysource_matched);
- if (mpi_errno) MPIU_ERR_POP(mpi_errno);
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_get_business_card
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_newmad_get_business_card (int my_rank, char **bc_val_p, int *val_max_sz_p)
-{
- int mpi_errno = MPI_SUCCESS;
- char name[MPID_NEM_MAX_NETMOD_STRING_LEN];
- int index;
-
- gethostname(name,MPID_NEM_MAX_NETMOD_STRING_LEN);
-
- mpi_errno = MPIU_Str_add_binary_arg (bc_val_p, val_max_sz_p, MPIDI_CH3I_HOSTNAME_KEY, name, strlen(name));
- if (mpi_errno != MPIU_STR_SUCCESS){
- if (mpi_errno == MPIU_STR_NOMEM){
- MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard_len");
- }
- else{
- MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard");
- }
- goto fn_exit;
- }
-
- for(index = 0 ; index < mpid_nem_newmad_num_rails ; index ++){
- mpi_errno = MPIU_Str_add_binary_arg (bc_val_p, val_max_sz_p, url_keys[index], url[index], strlen(url[index]));
- if (mpi_errno != MPIU_STR_SUCCESS){
- if (mpi_errno == MPIU_STR_NOMEM){
- MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard_len");
- }
- else{
- MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard");
- }
- goto fn_exit;
- }
- }
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_get_from_bc
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_newmad_get_from_bc (const char *business_card, char *hostname, char *url, int index)
-{
- int mpi_errno = MPI_SUCCESS;
- int len;
-
- mpi_errno = MPIU_Str_get_binary_arg (business_card, MPIDI_CH3I_HOSTNAME_KEY, hostname,
- MPID_NEM_MAX_NETMOD_STRING_LEN, &len);
- if ((mpi_errno != MPIU_STR_SUCCESS)){
- MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd");
- }
-
- mpi_errno = MPIU_Str_get_binary_arg (business_card, url_keys[index], url,
- MPID_NEM_MAX_NETMOD_STRING_LEN, &len);
- if ((mpi_errno != MPIU_STR_SUCCESS)){
- MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd");
- }
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_connect_to_root
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_newmad_connect_to_root (const char *business_card, MPIDI_VC_t *new_vc)
-{
- int mpi_errno = MPI_SUCCESS;
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_vc_init
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_newmad_vc_init (MPIDI_VC_t *vc)
-{
- int mpi_errno = MPI_SUCCESS;
- MPIDI_CH3I_VC *vc_ch = (MPIDI_CH3I_VC *)vc->channel_private;
-
-
-
- vc->eager_max_msg_sz = 32768;
- vc->rndvSend_fn = NULL;
- vc->sendNoncontig_fn = MPID_nem_newmad_SendNoncontig;
- vc->comm_ops = &comm_ops;
-
- vc_ch->iStartContigMsg = MPID_nem_newmad_iStartContigMsg;
- vc_ch->iSendContig = MPID_nem_newmad_iSendContig;
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_vc_destroy
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_vc_destroy(MPIDI_VC_t *vc)
-{
- int mpi_errno = MPI_SUCCESS;
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_vc_terminate
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_vc_terminate (MPIDI_VC_t *vc)
-{
- return MPI_SUCCESS;
-}
-
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_init.c (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_init.c)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_init.c (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_init.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,385 @@
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "mpid_nem_impl.h"
+#include "newmad_impl.h"
+
+#define MPIDI_CH3I_HOSTNAME_KEY "hostname_id"
+
+MPID_nem_netmod_funcs_t MPIDI_nem_newmad_funcs = {
+ MPID_nem_newmad_init,
+ MPID_nem_newmad_finalize,
+ MPID_nem_newmad_ckpt_shutdown,
+ MPID_nem_newmad_poll,
+ MPID_nem_newmad_send,
+ MPID_nem_newmad_get_business_card,
+ MPID_nem_newmad_connect_to_root,
+ MPID_nem_newmad_vc_init,
+ MPID_nem_newmad_vc_destroy,
+ MPID_nem_newmad_vc_terminate
+};
+
+static MPIDI_Comm_ops_t comm_ops = {
+ MPID_nem_newmad_directRecv, /* recv_posted */
+
+ MPID_nem_newmad_directSend, /* send */
+ MPID_nem_newmad_directSend, /* rsend */
+ MPID_nem_newmad_directSsend, /* ssend */
+ MPID_nem_newmad_directSend, /* isend */
+ MPID_nem_newmad_directSend, /* irsend */
+ MPID_nem_newmad_directSsend, /* issend */
+
+ NULL, /* send_init */
+ NULL, /* bsend_init */
+ NULL, /* rsend_init */
+ NULL, /* ssend_init */
+ NULL, /* startall */
+
+ MPID_nem_newmad_cancel_send,/* cancel_send */
+ MPID_nem_newmad_cancel_recv /* cancel_recv */
+};
+
+//typedef int (*nm_driver_load)(struct nm_drv_ops*);
+
+static int mpid_nem_newmad_myrank;
+static nm_drv_id_t drv_id[MPID_NEM_NMAD_MAX_NETS];
+static char *url[MPID_NEM_NMAD_MAX_NETS];
+static char url_keys[MPID_NEM_NMAD_MAX_NETS][MPID_NEM_NMAD_MAX_STRING_SIZE] = {"url_id0","url_id1","url_id2","url_id3"};
+static int mpid_nem_newmad_num_rails = 1 ;
+nm_core_t mpid_nem_newmad_pcore;
+int mpid_nem_newmad_pending_send_req = 0;
+mpid_nem_newmad_p_gate_t *mpid_nem_newmad_gate_to_rank = NULL;
+
+
+#ifdef MPID_MAD_MODULE_MULTIRAIL
+static puk_component_t *p_driver_load_array;
+static void mpid_nem_newmad_rails(void)
+{
+ int index = 0;
+ mpid_nem_newmad_num_rails = 0 ;
+#if defined CONFIG_IBVERBS
+ mpid_nem_newmad_num_rails++;
+#endif
+#if defined CONFIG_MX
+ mpid_nem_newmad_num_rails++;
+#endif
+#if defined CONFIG_GM
+ mpid_nem_newmad_num_rails++;
+#endif
+#if defined CONFIG_QSNET
+ mpid_nem_newmad_num_rails++;
+#endif
+#if defined CONFIG_TCP
+ mpid_nem_newmad_num_rails++;
+#endif
+
+ p_driver_load_array = (puk_component_t *)MPIU_Malloc( mpid_nem_newmad_num_rails*sizeof(puk_component_t));
+
+#if defined CONFIG_IBVERBS
+ p_driver_load_array[index++] = nm_core_component_load("driver", "ibverbs");
+#endif
+#if defined CONFIG_MX
+ p_driver_load_array[index++] = nm_core_component_load("driver", "mx");
+#endif
+#if defined CONFIG_GM
+ p_driver_load_array[index++] = nm_core_component_load("driver", "gm");
+#endif
+#if defined CONFIG_QSNET
+ p_driver_load_array[index++] = nm_core_component_load("driver", "qsnet");
+#endif
+#if defined CONFIG_TCP
+ p_driver_load_array[index++] = nm_core_component_load("driver", "tcp");
+#endif
+}
+#else //MPID_MAD_MODULE_MULTIRAIL
+static puk_component_t p_driver_load;
+static void mpid_nem_newmad_rails(void)
+{
+# if defined CONFIG_IBVERBS
+ p_driver_load = nm_core_component_load("driver", "ibverbs");
+# elif defined CONFIG_MX
+ p_driver_load = nm_core_component_load("driver", "mx");
+# elif defined CONFIG_GM
+ p_driver_load = nm_core_component_load("driver", "gm");
+# elif defined CONFIG_QSNET
+ p_driver_load = nm_core_component_load("driver", "qsnet");
+# elif defined CONFIG_TCP
+ p_driver_load = nm_core_component_load("driver", "tcp");
+# endif
+}
+#endif //MULTIRAIL
+
+
+#undef FUNCNAME
+#define FUNCNAME init_mad
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+static int init_mad( MPIDI_PG_t *pg_p )
+{
+#if 0
+#ifdef CONFIG_TCP
+#ifndef MPID_MAD_MODULE_MULTIRAIL
+ char hostname[16];
+#endif //TCP
+#endif //MPID_MAD_MODULE_MULTIRAIL
+#endif
+ int index = 0;
+ int ret;
+ int mpi_errno = MPI_SUCCESS;
+ char *dummy_argv[1] = {NULL};
+ int dummy_argc = 1;
+
+ ret = nm_core_init(&dummy_argc,dummy_argv, &mpid_nem_newmad_pcore);
+ if (ret != NM_ESUCCESS){
+ fprintf(stdout,"nm_core_init returned err = %d\n", ret);
+ }
+
+ mpid_nem_newmad_rails();
+#ifdef MPID_MAD_MODULE_MULTIRAIL
+ fprintf(stdout,"Number of rails : %i\n",nem_mad_num_rail);
+#endif //MPID_MAD_MODULE_MULTIRAIL
+
+ ret = nm_sr_init(mpid_nem_newmad_pcore);
+ if(ret != NM_ESUCCESS) {
+ fprintf(stdout,"nm_so_pack_interface_init return err = %d\n", ret);
+ }
+#ifdef MPID_MAD_MODULE_MULTIRAIL
+#warning "========== MAD MODULE MULTIRAIL CODE ENABLED ============="
+ ret = nm_core_driver_load_init_some(mpid_nem_newmad_pcore, mpid_nem_newmad_num_rails,
+ p_driver_load_array, drv_id, url);
+#else //MPID_MAD_MODULE_MULTIRAIL
+ ret = nm_core_driver_load_init(mpid_nem_newmad_pcore,
+ p_driver_load, &drv_id[0], &url[0]);
+#endif //MPID_MAD_MODULE_MULTIRAIL
+ if (ret != NM_ESUCCESS) {
+ fprintf(stdout,"nm_core_driver_init(some) returned ret = %d\n", ret);
+ }
+
+#if 0
+#ifdef CONFIG_TCP
+#ifndef MPID_MAD_MODULE_MULTIRAIL
+ {
+ gethostname(hostname, 16);
+ strcat(hostname,":");
+ strcat(hostname,url[0]);
+ url[0] = (char *)MPIU_Malloc(strlen(hostname)+1);
+ strcpy(url[0],hostname);
+ }
+#endif //!MULTIRAIL
+#endif // TCP
+#endif
+
+ nm_ns_init(mpid_nem_newmad_pcore);
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+
+
+/*
+ int
+ MPID_nem_newmad_init(MPID_nem_queue_ptr_t proc_recv_queue, MPID_nem_queue_ptr_t proc_free_queue, MPID_nem_cell_ptr_t proc_elements, int num_proc_elements,
+ MPID_nem_cell_ptr_t module_elements, int num_module_elements,
+ MPID_nem_queue_ptr_t *module_free_queue)
+
+ IN
+ proc_recv_queue -- main recv queue for the process
+ proc_free_queue -- main free queueu for the process
+ proc_elements -- pointer to the process' queue elements
+ num_proc_elements -- number of process' queue elements
+ module_elements -- pointer to queue elements to be used by this module
+ num_module_elements -- number of queue elements for this module
+ ckpt_restart -- true if this is a restart from a checkpoint. In a restart, the network needs to be brought up again, but
+ we want to keep things like sequence numbers.
+ OUT
+ free_queue -- pointer to the free queue for this module. The process will return elements to
+ this queue
+*/
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_init
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_newmad_init (MPID_nem_queue_ptr_t proc_recv_queue,
+ MPID_nem_queue_ptr_t proc_free_queue,
+ MPID_nem_cell_ptr_t proc_elements, int num_proc_elements,
+ MPID_nem_cell_ptr_t module_elements, int num_module_elements,
+ MPID_nem_queue_ptr_t *module_free_queue, int ckpt_restart,
+ MPIDI_PG_t *pg_p, int pg_rank,
+ char **bc_val_p, int *val_max_sz_p)
+{
+ int mpi_errno = MPI_SUCCESS ;
+ int index;
+
+ fprintf(stdout,"Size of MPID_nem_mad_module_vc_area_internal_t : %i | size of nm_sr_request_t :%i | Size of req_area : %i\n",
+ sizeof(MPID_nem_newmad_vc_area_internal_t),sizeof(nm_sr_request_t), sizeof(MPID_nem_newmad_req_area));
+ MPIU_Assert( sizeof(MPID_nem_newmad_vc_area_internal_t) <= MPID_NEM_VC_NETMOD_AREA_LEN);
+ MPIU_Assert( sizeof(MPID_nem_newmad_req_area) <= MPID_NEM_REQ_NETMOD_AREA_LEN);
+
+ mpid_nem_newmad_myrank = pg_rank;
+ for (index = 0; index < MPID_NEM_NMAD_MAX_NETS ; index++)
+ {
+ drv_id[index] = -1;
+ url[index] = NULL;
+ }
+
+ init_mad(pg_p);
+
+ mpi_errno = MPID_nem_mad_module_get_business_card (pg_rank,bc_val_p, val_max_sz_p);
+ if (mpi_errno) MPIU_ERR_POP (mpi_errno);
+
+ nm_sr_monitor(mpid_nem_newmad_pcore, NM_SR_EVENT_RECV_UNEXPECTED, &MPID_nem_newmad_get_adi_msg);
+ nm_sr_monitor(mpid_nem_newmad_pcore, NM_SR_EVENT_SEND_COMPLETED, &MPID_nem_newmad_handle_sreq);
+ //nm_sr_monitor(mpid_nem_newmad_pcore, NM_SR_EVENT_RECV_COMPLETED, &MPID_nem_newmad_handle_rreq);
+
+ mpi_errno = MPIDI_CH3I_Register_anysource_notification(MPID_nem_newmad_anysource_posted,
+ MPID_nem_newmad_anysource_matched);
+ if (mpi_errno) MPIU_ERR_POP(mpi_errno);
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_get_business_card
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_newmad_get_business_card (int my_rank, char **bc_val_p, int *val_max_sz_p)
+{
+ int mpi_errno = MPI_SUCCESS;
+ char name[MPID_NEM_MAX_NETMOD_STRING_LEN];
+ int index;
+
+ gethostname(name,MPID_NEM_MAX_NETMOD_STRING_LEN);
+
+ mpi_errno = MPIU_Str_add_binary_arg (bc_val_p, val_max_sz_p, MPIDI_CH3I_HOSTNAME_KEY, name, strlen(name));
+ if (mpi_errno != MPIU_STR_SUCCESS){
+ if (mpi_errno == MPIU_STR_NOMEM){
+ MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard_len");
+ }
+ else{
+ MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard");
+ }
+ goto fn_exit;
+ }
+
+ for(index = 0 ; index < mpid_nem_newmad_num_rails ; index ++){
+ mpi_errno = MPIU_Str_add_binary_arg (bc_val_p, val_max_sz_p, url_keys[index], url[index], strlen(url[index]));
+ if (mpi_errno != MPIU_STR_SUCCESS){
+ if (mpi_errno == MPIU_STR_NOMEM){
+ MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard_len");
+ }
+ else{
+ MPIU_ERR_SET(mpi_errno, MPI_ERR_OTHER, "**buscard");
+ }
+ goto fn_exit;
+ }
+ }
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_get_from_bc
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_newmad_get_from_bc (const char *business_card, char *hostname, char *url, int index)
+{
+ int mpi_errno = MPI_SUCCESS;
+ int len;
+
+ mpi_errno = MPIU_Str_get_binary_arg (business_card, MPIDI_CH3I_HOSTNAME_KEY, hostname,
+ MPID_NEM_MAX_NETMOD_STRING_LEN, &len);
+ if ((mpi_errno != MPIU_STR_SUCCESS)){
+ MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd");
+ }
+
+ mpi_errno = MPIU_Str_get_binary_arg (business_card, url_keys[index], url,
+ MPID_NEM_MAX_NETMOD_STRING_LEN, &len);
+ if ((mpi_errno != MPIU_STR_SUCCESS)){
+ MPIU_ERR_SETANDJUMP(mpi_errno,MPI_ERR_OTHER, "**argstr_hostd");
+ }
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_connect_to_root
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_newmad_connect_to_root (const char *business_card, MPIDI_VC_t *new_vc)
+{
+ int mpi_errno = MPI_SUCCESS;
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_vc_init
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_newmad_vc_init (MPIDI_VC_t *vc)
+{
+ int mpi_errno = MPI_SUCCESS;
+ MPIDI_CH3I_VC *vc_ch = (MPIDI_CH3I_VC *)vc->channel_private;
+
+
+
+ vc->eager_max_msg_sz = 32768;
+ vc->rndvSend_fn = NULL;
+ vc->sendNoncontig_fn = MPID_nem_newmad_SendNoncontig;
+ vc->comm_ops = &comm_ops;
+
+ vc_ch->iStartContigMsg = MPID_nem_newmad_iStartContigMsg;
+ vc_ch->iSendContig = MPID_nem_newmad_iSendContig;
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_vc_destroy
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_vc_destroy(MPIDI_VC_t *vc)
+{
+ int mpi_errno = MPI_SUCCESS;
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_vc_terminate
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_vc_terminate (MPIDI_VC_t *vc)
+{
+ return MPI_SUCCESS;
+}
+
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_poll.c
===================================================================
--- mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_poll.c 2009-03-04 22:01:42 UTC (rev 3937)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_poll.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,182 +0,0 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
-/*
- * (C) 2006 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#include "newmad_impl.h"
-#include "my_papi_defs.h"
-#include "uthash.h"
-
-typedef struct mpid_nem_nmad_hash_struct {
- MPID_Request *mpid_req_ptr;
- nm_sr_request_t *nmad_req_ptr;
- UT_hash_handle hh;
-}mpid_nem_nmad_hash_t;
-
-static mpid_nem_nmad_hash_t *mpid_nem_nmad_asreqs = NULL;
-#define MPID_MEM_NMAD_ADD_REQ_IN_HASH(_mpi_req,_nmad_req) do{ \
- mpid_nem_nmad_hash_t *s; \
- s = MPIU_Malloc(sizeof(mpid_nem_nmad_hash_t)); \
- s->mpid_req_ptr = (_mpi_req); \
- s->nmad_req_ptr = (_nmad_req); \
- HASH_ADD(hh, mpid_nem_nmad_asreqs, mpid_req_ptr, sizeof(MPID_Request*), s); \
- }while(0)
-#define MPID_NEM_NMAD_GET_REQ_FROM_HASH(_mpi_req_ptr,_nmad_req) do{ \
- mpid_nem_nmad_hash_t *s; \
- HASH_FIND(hh, mpid_nem_nmad_asreqs, &(_mpi_req_ptr), sizeof(MPID_Request*), s); \
- if(s){HASH_DELETE(hh, mpid_nem_nmad_asreqs, s); (_nmad_req) = s->nmad_req_ptr; } else {(_nmad_req) = NULL;} \
- }while(0)
-
-
-
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_get_adi_msg
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-void MPID_nem_newmad_get_adi_msg(nm_sr_event_t event, nm_sr_event_info_t*info)
-{
- nm_tag_t match_info = info->recv_unexpected.tag;
- MPIR_Context_id_t ctxt;
- NEM_NMAD_MATCH_GET_CTXT(match_info, ctxt);
- if(ctxt == NEM_NMAD_INTRA_CTXT)
- {
- nm_gate_t from = info->recv_unexpected.p_gate;
- MPID_Request *rreq;
- MPIDI_VC_t *vc;
- struct iovec mad_iov;
- int num_iov = 1;
- int length = 0; //=info->...
-
- rreq = MPID_Request_create();
- MPIU_Assert (rreq != NULL);
- MPIU_Object_set_ref (rreq, 1);
- rreq->kind = MPID_REQUEST_RECV;
-
- //get vc from gate
- rreq->ch.vc = vc;
-
- if(length <= sizeof(MPIDI_CH3_PktGeneric_t)) {
- mad_iov.iov_base = (char*)&(rreq->dev.pending_pkt);
- }
- else{
- rreq->dev.tmpbuf = MPIU_Malloc(length);
- MPIU_Assert(rreq->dev.tmpbuf);
- rreq->dev.tmpbuf_sz = length;
- mad_iov.iov_base = (char*)(rreq->dev.tmpbuf);
- }
- mad_iov.iov_len = length;
-
- nm_sr_irecv_with_ref(mpid_nem_newmad_pcore, from, match_info, mad_iov.iov_base,
- length, &(REQ_FIELD(rreq,newmad_req)), (void *)&rreq);
- }
- return;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_directRecv
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_directRecv(MPIDI_VC_t *vc, MPID_Request *rreq)
-{
- int mpi_errno = MPI_SUCCESS;
- MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_DIRECTRECV);
- MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_DIRECTRECV);
-
-
-
-
-
- fn_exit:
- MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_DIRECTRECV);
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_poll
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_newmad_poll(MPID_nem_poll_dir_t in_or_out)
-{
- int mpi_errno = MPI_SUCCESS;
- nm_sr_request_t *p_out_req = NULL;
- MPID_Request *rreq = NULL;
-
- // nm_sr_progress(mpid_nem_newmad_pcore);
- nm_sr_recv_success(mpid_nem_newmad_pcore, &p_out_req);
-
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_handle_sreq
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-void
-MPID_nem_newmad_handle_sreq(nm_sr_event_t event, nm_sr_event_info_t*info)
-{
-
- int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);
- nm_sr_request_t *p_request = info->send_completed.p_request;
- MPID_Request *req;
- nm_sr_get_ref(mpid_nem_newmad_pcore,p_request,(void *)&req);
- MPIU_Assert(MPIDI_Request_get_type(req) != MPIDI_REQUEST_TYPE_GET_RESP);
- reqFn = req->dev.OnDataAvail;
- if (!reqFn){
- MPIDI_CH3U_Request_complete(req);
- MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, ".... complete");
- }
- else{
- MPIDI_VC_t *vc = req->ch.vc;
- int complete = 0;
- reqFn(vc, req, &complete);
- if(complete)
- {
- MPIDI_CH3U_Request_complete(req);
- MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, ".... complete");
- }
- }
- mpid_nem_newmad_pending_send_req--;
- return;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_anysource_posted
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_anysource_posted(MPID_Request *rreq)
-{
- /* This function is called whenever an anyource request has been
- posted to the posted receive queue. */
- int mpi_errno = MPI_SUCCESS;
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_anysource_matched
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_anysource_matched(MPID_Request *rreq)
-{
- /* This function is called when an anysource request in the posted
- receive queue is matched and dequeued */
- int mpi_errno = MPI_SUCCESS;
-
- fn_exit:
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_poll.c (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_poll.c)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_poll.c (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_poll.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,182 @@
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "newmad_impl.h"
+#include "my_papi_defs.h"
+#include "uthash.h"
+
+typedef struct mpid_nem_nmad_hash_struct {
+ MPID_Request *mpid_req_ptr;
+ nm_sr_request_t *nmad_req_ptr;
+ UT_hash_handle hh;
+}mpid_nem_nmad_hash_t;
+
+static mpid_nem_nmad_hash_t *mpid_nem_nmad_asreqs = NULL;
+#define MPID_MEM_NMAD_ADD_REQ_IN_HASH(_mpi_req,_nmad_req) do{ \
+ mpid_nem_nmad_hash_t *s; \
+ s = MPIU_Malloc(sizeof(mpid_nem_nmad_hash_t)); \
+ s->mpid_req_ptr = (_mpi_req); \
+ s->nmad_req_ptr = (_nmad_req); \
+ HASH_ADD(hh, mpid_nem_nmad_asreqs, mpid_req_ptr, sizeof(MPID_Request*), s); \
+ }while(0)
+#define MPID_NEM_NMAD_GET_REQ_FROM_HASH(_mpi_req_ptr,_nmad_req) do{ \
+ mpid_nem_nmad_hash_t *s; \
+ HASH_FIND(hh, mpid_nem_nmad_asreqs, &(_mpi_req_ptr), sizeof(MPID_Request*), s); \
+ if(s){HASH_DELETE(hh, mpid_nem_nmad_asreqs, s); (_nmad_req) = s->nmad_req_ptr; } else {(_nmad_req) = NULL;} \
+ }while(0)
+
+
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_get_adi_msg
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+void MPID_nem_newmad_get_adi_msg(nm_sr_event_t event, nm_sr_event_info_t*info)
+{
+ nm_tag_t match_info = info->recv_unexpected.tag;
+ MPIR_Context_id_t ctxt;
+ NEM_NMAD_MATCH_GET_CTXT(match_info, ctxt);
+ if(ctxt == NEM_NMAD_INTRA_CTXT)
+ {
+ nm_gate_t from = info->recv_unexpected.p_gate;
+ MPID_Request *rreq;
+ MPIDI_VC_t *vc;
+ struct iovec mad_iov;
+ int num_iov = 1;
+ int length = 0; //=info->...
+
+ rreq = MPID_Request_create();
+ MPIU_Assert (rreq != NULL);
+ MPIU_Object_set_ref (rreq, 1);
+ rreq->kind = MPID_REQUEST_RECV;
+
+ //get vc from gate
+ rreq->ch.vc = vc;
+
+ if(length <= sizeof(MPIDI_CH3_PktGeneric_t)) {
+ mad_iov.iov_base = (char*)&(rreq->dev.pending_pkt);
+ }
+ else{
+ rreq->dev.tmpbuf = MPIU_Malloc(length);
+ MPIU_Assert(rreq->dev.tmpbuf);
+ rreq->dev.tmpbuf_sz = length;
+ mad_iov.iov_base = (char*)(rreq->dev.tmpbuf);
+ }
+ mad_iov.iov_len = length;
+
+ nm_sr_irecv_with_ref(mpid_nem_newmad_pcore, from, match_info, mad_iov.iov_base,
+ length, &(REQ_FIELD(rreq,newmad_req)), (void *)&rreq);
+ }
+ return;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_directRecv
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_directRecv(MPIDI_VC_t *vc, MPID_Request *rreq)
+{
+ int mpi_errno = MPI_SUCCESS;
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_DIRECTRECV);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_DIRECTRECV);
+
+
+
+
+
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_DIRECTRECV);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_poll
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_newmad_poll(MPID_nem_poll_dir_t in_or_out)
+{
+ int mpi_errno = MPI_SUCCESS;
+ nm_sr_request_t *p_out_req = NULL;
+ MPID_Request *rreq = NULL;
+
+ // nm_sr_progress(mpid_nem_newmad_pcore);
+ nm_sr_recv_success(mpid_nem_newmad_pcore, &p_out_req);
+
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_handle_sreq
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+void
+MPID_nem_newmad_handle_sreq(nm_sr_event_t event, nm_sr_event_info_t*info)
+{
+
+ int (*reqFn)(MPIDI_VC_t *, MPID_Request *, int *);
+ nm_sr_request_t *p_request = info->send_completed.p_request;
+ MPID_Request *req;
+ nm_sr_get_ref(mpid_nem_newmad_pcore,p_request,(void *)&req);
+ MPIU_Assert(MPIDI_Request_get_type(req) != MPIDI_REQUEST_TYPE_GET_RESP);
+ reqFn = req->dev.OnDataAvail;
+ if (!reqFn){
+ MPIDI_CH3U_Request_complete(req);
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, ".... complete");
+ }
+ else{
+ MPIDI_VC_t *vc = req->ch.vc;
+ int complete = 0;
+ reqFn(vc, req, &complete);
+ if(complete)
+ {
+ MPIDI_CH3U_Request_complete(req);
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, ".... complete");
+ }
+ }
+ mpid_nem_newmad_pending_send_req--;
+ return;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_anysource_posted
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_anysource_posted(MPID_Request *rreq)
+{
+ /* This function is called whenever an anyource request has been
+ posted to the posted receive queue. */
+ int mpi_errno = MPI_SUCCESS;
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_anysource_matched
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_anysource_matched(MPID_Request *rreq)
+{
+ /* This function is called when an anysource request in the posted
+ receive queue is matched and dequeued */
+ int mpi_errno = MPI_SUCCESS;
+
+ fn_exit:
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_register.c
===================================================================
--- mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_register.c 2009-03-04 22:01:42 UTC (rev 3937)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_register.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,21 +0,0 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
-/*
- * (C) 2006 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#include "newmad_impl.h"
-
-int
-MPID_nem_newmad_register_mem (void *p, int len)
-{
- return 0;
-}
-
-int
-MPID_nem_newmad_deregister_mem (void *p, int len)
-{
- return 0;
-}
-
-
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_register.c (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_register.c)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_register.c (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_register.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,21 @@
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "newmad_impl.h"
+
+int
+MPID_nem_newmad_register_mem (void *p, int len)
+{
+ return 0;
+}
+
+int
+MPID_nem_newmad_deregister_mem (void *p, int len)
+{
+ return 0;
+}
+
+
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_send.c
===================================================================
--- mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_send.c 2009-03-04 22:01:42 UTC (rev 3937)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_send.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,329 +0,0 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
-/*
- * (C) 2006 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#include "newmad_impl.h"
-#include "my_papi_defs.h"
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_iSendContig
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_iSendContig(MPIDI_VC_t *vc, MPID_Request *sreq, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data, MPIDI_msg_sz_t data_sz)
-{
- int mpi_errno = MPI_SUCCESS;
- nm_tag_t match_info = 0;
- struct iovec mad_iov[2];
- int num_iov = 1;
-
- MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_ISENDCONTIG);
- MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_ISENDCONTIG);
-
- MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
- MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "mx_iSendContig");
- MPIDI_DBG_Print_packet((MPIDI_CH3_Pkt_t *)hdr);
-
- NEM_NMAD_ADI_MATCH(match_info);
- memcpy(&(sreq->dev.pending_pkt),(char *)hdr,sizeof(MPIDI_CH3_PktGeneric_t));
- mad_iov[0].iov_base = (char *)&(sreq->dev.pending_pkt);
- mad_iov[0].iov_len = sizeof(MPIDI_CH3_PktGeneric_t);
- if(data_sz)
- {
- mad_iov[1].iov_base = data;
- mad_iov[1].iov_len = data_sz;
- num_iov += 1;
- }
-
- nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
- mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
- mpid_nem_newmad_pending_send_req++;
- sreq->ch.vc = vc;
-
- fn_exit:
- MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_ISENDCONTIG);
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_iStartContigMsg
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_iStartContigMsg(MPIDI_VC_t *vc, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data, MPIDI_msg_sz_t data_sz, MPID_Request **sreq_ptr)
-{
- MPID_Request *sreq = NULL;
- nm_tag_t match_info = 0;
- struct iovec mad_iov[2];
- int num_iov = 1;
- int mpi_errno = MPI_SUCCESS;
-
- MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_ISTARTCONTIGMSG);
- MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_ISTARTCONTIGMSG);
- MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
- MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "mx_iSendContig");
- MPIDI_DBG_Print_packet((MPIDI_CH3_Pkt_t *)hdr);
-
- /* create a request */
- sreq = MPID_Request_create();
- MPIU_Assert (sreq != NULL);
- MPIU_Object_set_ref (sreq, 2);
- sreq->kind = MPID_REQUEST_SEND;
- sreq->dev.OnDataAvail = 0;
-
- NEM_NMAD_ADI_MATCH(match_info);
- memcpy(&(sreq->dev.pending_pkt),(char *)hdr,sizeof(MPIDI_CH3_PktGeneric_t));
- mad_iov[0].iov_base = (char *)&(sreq->dev.pending_pkt);
- mad_iov[0].iov_len = sizeof(MPIDI_CH3_PktGeneric_t);
- if (data_sz)
- {
- mad_iov[1].iov_base = (char *)data;
- mad_iov[1].iov_len = data_sz;
- num_iov += 1;
- }
-
- nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
- mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
- mpid_nem_newmad_pending_send_req++;
- sreq->ch.vc = vc;
-
- fn_exit:
- *sreq_ptr = sreq;
- MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_ISTARTCONTIGMSG);
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_SendNoncontig
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_SendNoncontig(MPIDI_VC_t *vc, MPID_Request *sreq, void *header, MPIDI_msg_sz_t hdr_sz)
-{
- int mpi_errno = MPI_SUCCESS;
- nm_tag_t match_info = 0;
- struct iovec *mad_iov;
- int num_iov = 1;
- MPIDI_msg_sz_t data_sz;
- int dt_contig;
- MPI_Aint dt_true_lb;
- MPID_Datatype *dt_ptr;
-
- MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_SENDNONCONTIGMSG);
- MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_SENDNONCONTIGMSG);
- MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
- MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "MPID_nem_newmad_iSendNoncontig");
-
- MPIDI_Datatype_get_info(sreq->dev.user_count,sreq->dev.datatype, dt_contig, data_sz, dt_ptr,dt_true_lb);
-
- if(data_sz)
- {
- MPID_nem_mx_process_sdtype(&sreq,sreq->dev.datatype,dt_ptr,sreq->dev.user_buf,
- sreq->dev.user_count,data_sz, &mad_iov,&num_iov,1);
- }
- else
- {
- mad_iov = MPIU_Malloc(sizeof(struct iovec));
- }
-
- NEM_NMAD_ADI_MATCH(match_info);
- memcpy(&(sreq->dev.pending_pkt),(char *)header,sizeof(MPIDI_CH3_PktGeneric_t));
- mad_iov[0].iov_base = (char *)&(sreq->dev.pending_pkt);
- mad_iov[0].iov_len = sizeof(MPIDI_CH3_PktGeneric_t);
-
- nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
- mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
-
- MPIU_Free(mad_iov); /* FIXME : is this safe ? */
-
- fn_exit:
- MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_SENDNONCONTIGMSG);
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_directSend
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_directSend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
- MPID_Comm * comm, int context_offset, MPID_Request **sreq_p)
-{
- MPID_Request *sreq = NULL;
- nm_tag_t match_info = 0;
- struct iovec *mad_iov;
- int num_iov = 0;
- int mpi_errno = MPI_SUCCESS;
- MPID_Datatype *dt_ptr;
- int dt_contig;
- MPIDI_msg_sz_t data_sz;
- MPI_Aint dt_true_lb;
-
- MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSEND);
- MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSEND);
-
- MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
-
- MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
- MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
- sreq->partner_request = NULL;
- MPIDI_VC_FAI_send_seqnum(vc, seqnum);
- MPIDI_Request_set_seqnum(sreq, seqnum);
- sreq->ch.vc = vc;
- sreq->dev.OnDataAvail = NULL;
-
- NEM_NMAD_DIRECT_MATCH(match_info,tag,comm->rank,comm->context_id + context_offset);
-
- if(data_sz)
- {
- if (dt_contig)
- {
- mad_iov = MPIU_Malloc(sizeof(struct iovec));
- mad_iov[0].iov_base = (char*)(buf + dt_true_lb);
- mad_iov[0].iov_len = data_sz;
- num_iov += 1;
- }
- else
- {
- MPID_nem_mx_process_sdtype(&sreq,datatype,dt_ptr,buf,count,data_sz,&mad_iov,&num_iov,0);
- }
- nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
- mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
- }
- else
- {
- nm_sr_isend(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
- NULL, 0, &(REQ_FIELD(sreq,newmad_req)));
- }
- mpid_nem_newmad_pending_send_req++;
- MPIU_Free(mad_iov);
-
- fn_exit:
- *sreq_p = sreq;
- MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSEND);
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_directSsend
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_directSsend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
- MPID_Comm * comm, int context_offset, MPID_Request **sreq_p)
-{
- MPID_Request *sreq = NULL;
- nm_tag_t match_info = 0;
- struct iovec *mad_iov;
- int num_iov = 0;
- int mpi_errno = MPI_SUCCESS;
- MPID_Datatype *dt_ptr;
- int dt_contig;
- MPIDI_msg_sz_t data_sz;
- MPI_Aint dt_true_lb;
-
- MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSSEND);
- MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSSEND);
-
- MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
-
- MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
- MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
- sreq->partner_request = NULL;
- MPIDI_VC_FAI_send_seqnum(vc, seqnum);
- MPIDI_Request_set_seqnum(sreq, seqnum);
- sreq->ch.vc = vc;
- sreq->dev.OnDataAvail = NULL;
-
- NEM_NMAD_DIRECT_MATCH(match_info,tag,comm->rank,comm->context_id + context_offset);
-
- if(data_sz)
- {
- if (dt_contig)
- {
- mad_iov = MPIU_Malloc(sizeof(struct iovec));
- mad_iov[0].iov_base = (char*)(buf + dt_true_lb);
- mad_iov[0].iov_len = data_sz;
- num_iov += 1;
- }
- else
- {
- MPID_nem_mx_process_sdtype(&sreq,datatype,dt_ptr,buf,count,data_sz,&mad_iov,&num_iov,0);
- }
- /* FIXME issend !*/
- nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
- mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
- }
- else
- {
- /* FIXME issend !*/
- nm_sr_isend(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
- NULL, 0, &(REQ_FIELD(sreq,newmad_req)));
- }
- mpid_nem_newmad_pending_send_req++;
- MPIU_Free(mad_iov);
-
- fn_exit:
- *sreq_p = sreq;
- MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSSEND);
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_process_sdtype
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPID_nem_newmad_process_sdtype(MPID_Request **sreq_p, MPI_Datatype datatype, MPID_Datatype * dt_ptr, const void *buf,
- int count, MPIDI_msg_sz_t data_sz, struct iovec **mad_iov, int *num_iov, int first_taken)
-{
- MPID_Request *sreq =*sreq_p;
- MPIDI_msg_sz_t last;
- int iov_num_ub = count * dt_ptr->n_contig_blocks;
- int n_iov = iov_num_ub;
- int mpi_errno = MPI_SUCCESS;
-
- MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_PROCESS_SDTYPE);
- MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_PROCESS_SDTYPE);
-
- sreq->dev.segment_ptr = MPID_Segment_alloc( );
- MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
- MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
- sreq->dev.segment_first = 0;
- sreq->dev.segment_size = data_sz;
- last = sreq->dev.segment_size;
- (*mad_iov) = MPIU_Malloc((iov_num_ub+first_taken)*sizeof(struct iovec));
-
- MPID_Segment_pack_vector(sreq->dev.segment_ptr, sreq->dev.segment_first, &last,
- (MPID_IOV *)((*mad_iov)+(first_taken*sizeof(struct iovec))), &n_iov);
- MPIU_Assert(last == sreq->dev.segment_size);
- *num_iov = n_iov + first_taken;
-
- fn_exit:
- MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_PROCESS_SDTYPE);
- return mpi_errno;
- fn_fail:
- goto fn_exit;
-}
-
-#undef FUNCNAME
-#define FUNCNAME MPID_nem_newmad_send
-#undef FCNAME
-#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int
-MPID_nem_newmad_send (MPIDI_VC_t *vc, MPID_nem_cell_ptr_t cell, int datalen)
-{
- int mpi_errno = MPI_SUCCESS;
- return mpi_errno;
-}
-
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_send.c (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_send.c)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_send.c (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_send.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,329 @@
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "newmad_impl.h"
+#include "my_papi_defs.h"
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_iSendContig
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_iSendContig(MPIDI_VC_t *vc, MPID_Request *sreq, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data, MPIDI_msg_sz_t data_sz)
+{
+ int mpi_errno = MPI_SUCCESS;
+ nm_tag_t match_info = 0;
+ struct iovec mad_iov[2];
+ int num_iov = 1;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_ISENDCONTIG);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_ISENDCONTIG);
+
+ MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "mx_iSendContig");
+ MPIDI_DBG_Print_packet((MPIDI_CH3_Pkt_t *)hdr);
+
+ NEM_NMAD_ADI_MATCH(match_info);
+ memcpy(&(sreq->dev.pending_pkt),(char *)hdr,sizeof(MPIDI_CH3_PktGeneric_t));
+ mad_iov[0].iov_base = (char *)&(sreq->dev.pending_pkt);
+ mad_iov[0].iov_len = sizeof(MPIDI_CH3_PktGeneric_t);
+ if(data_sz)
+ {
+ mad_iov[1].iov_base = data;
+ mad_iov[1].iov_len = data_sz;
+ num_iov += 1;
+ }
+
+ nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
+ mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
+ mpid_nem_newmad_pending_send_req++;
+ sreq->ch.vc = vc;
+
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_ISENDCONTIG);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_iStartContigMsg
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_iStartContigMsg(MPIDI_VC_t *vc, void *hdr, MPIDI_msg_sz_t hdr_sz, void *data, MPIDI_msg_sz_t data_sz, MPID_Request **sreq_ptr)
+{
+ MPID_Request *sreq = NULL;
+ nm_tag_t match_info = 0;
+ struct iovec mad_iov[2];
+ int num_iov = 1;
+ int mpi_errno = MPI_SUCCESS;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_ISTARTCONTIGMSG);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_ISTARTCONTIGMSG);
+ MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "mx_iSendContig");
+ MPIDI_DBG_Print_packet((MPIDI_CH3_Pkt_t *)hdr);
+
+ /* create a request */
+ sreq = MPID_Request_create();
+ MPIU_Assert (sreq != NULL);
+ MPIU_Object_set_ref (sreq, 2);
+ sreq->kind = MPID_REQUEST_SEND;
+ sreq->dev.OnDataAvail = 0;
+
+ NEM_NMAD_ADI_MATCH(match_info);
+ memcpy(&(sreq->dev.pending_pkt),(char *)hdr,sizeof(MPIDI_CH3_PktGeneric_t));
+ mad_iov[0].iov_base = (char *)&(sreq->dev.pending_pkt);
+ mad_iov[0].iov_len = sizeof(MPIDI_CH3_PktGeneric_t);
+ if (data_sz)
+ {
+ mad_iov[1].iov_base = (char *)data;
+ mad_iov[1].iov_len = data_sz;
+ num_iov += 1;
+ }
+
+ nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
+ mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
+ mpid_nem_newmad_pending_send_req++;
+ sreq->ch.vc = vc;
+
+ fn_exit:
+ *sreq_ptr = sreq;
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_ISTARTCONTIGMSG);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_SendNoncontig
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_SendNoncontig(MPIDI_VC_t *vc, MPID_Request *sreq, void *header, MPIDI_msg_sz_t hdr_sz)
+{
+ int mpi_errno = MPI_SUCCESS;
+ nm_tag_t match_info = 0;
+ struct iovec *mad_iov;
+ int num_iov = 1;
+ MPIDI_msg_sz_t data_sz;
+ int dt_contig;
+ MPI_Aint dt_true_lb;
+ MPID_Datatype *dt_ptr;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_SENDNONCONTIGMSG);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_SENDNONCONTIGMSG);
+ MPIU_Assert(hdr_sz <= sizeof(MPIDI_CH3_Pkt_t));
+ MPIU_DBG_MSG(CH3_CHANNEL, VERBOSE, "MPID_nem_newmad_iSendNoncontig");
+
+ MPIDI_Datatype_get_info(sreq->dev.user_count,sreq->dev.datatype, dt_contig, data_sz, dt_ptr,dt_true_lb);
+
+ if(data_sz)
+ {
+ MPID_nem_mx_process_sdtype(&sreq,sreq->dev.datatype,dt_ptr,sreq->dev.user_buf,
+ sreq->dev.user_count,data_sz, &mad_iov,&num_iov,1);
+ }
+ else
+ {
+ mad_iov = MPIU_Malloc(sizeof(struct iovec));
+ }
+
+ NEM_NMAD_ADI_MATCH(match_info);
+ memcpy(&(sreq->dev.pending_pkt),(char *)header,sizeof(MPIDI_CH3_PktGeneric_t));
+ mad_iov[0].iov_base = (char *)&(sreq->dev.pending_pkt);
+ mad_iov[0].iov_len = sizeof(MPIDI_CH3_PktGeneric_t);
+
+ nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
+ mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
+
+ MPIU_Free(mad_iov); /* FIXME : is this safe ? */
+
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_SENDNONCONTIGMSG);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_directSend
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_directSend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+ MPID_Comm * comm, int context_offset, MPID_Request **sreq_p)
+{
+ MPID_Request *sreq = NULL;
+ nm_tag_t match_info = 0;
+ struct iovec *mad_iov;
+ int num_iov = 0;
+ int mpi_errno = MPI_SUCCESS;
+ MPID_Datatype *dt_ptr;
+ int dt_contig;
+ MPIDI_msg_sz_t data_sz;
+ MPI_Aint dt_true_lb;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSEND);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSEND);
+
+ MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
+ MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
+ MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
+ sreq->partner_request = NULL;
+ MPIDI_VC_FAI_send_seqnum(vc, seqnum);
+ MPIDI_Request_set_seqnum(sreq, seqnum);
+ sreq->ch.vc = vc;
+ sreq->dev.OnDataAvail = NULL;
+
+ NEM_NMAD_DIRECT_MATCH(match_info,tag,comm->rank,comm->context_id + context_offset);
+
+ if(data_sz)
+ {
+ if (dt_contig)
+ {
+ mad_iov = MPIU_Malloc(sizeof(struct iovec));
+ mad_iov[0].iov_base = (char*)(buf + dt_true_lb);
+ mad_iov[0].iov_len = data_sz;
+ num_iov += 1;
+ }
+ else
+ {
+ MPID_nem_mx_process_sdtype(&sreq,datatype,dt_ptr,buf,count,data_sz,&mad_iov,&num_iov,0);
+ }
+ nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
+ mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
+ }
+ else
+ {
+ nm_sr_isend(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
+ NULL, 0, &(REQ_FIELD(sreq,newmad_req)));
+ }
+ mpid_nem_newmad_pending_send_req++;
+ MPIU_Free(mad_iov);
+
+ fn_exit:
+ *sreq_p = sreq;
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSEND);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_directSsend
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_directSsend(MPIDI_VC_t *vc, const void * buf, int count, MPI_Datatype datatype, int rank, int tag,
+ MPID_Comm * comm, int context_offset, MPID_Request **sreq_p)
+{
+ MPID_Request *sreq = NULL;
+ nm_tag_t match_info = 0;
+ struct iovec *mad_iov;
+ int num_iov = 0;
+ int mpi_errno = MPI_SUCCESS;
+ MPID_Datatype *dt_ptr;
+ int dt_contig;
+ MPIDI_msg_sz_t data_sz;
+ MPI_Aint dt_true_lb;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSSEND);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSSEND);
+
+ MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
+ MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
+ MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SEND);
+ sreq->partner_request = NULL;
+ MPIDI_VC_FAI_send_seqnum(vc, seqnum);
+ MPIDI_Request_set_seqnum(sreq, seqnum);
+ sreq->ch.vc = vc;
+ sreq->dev.OnDataAvail = NULL;
+
+ NEM_NMAD_DIRECT_MATCH(match_info,tag,comm->rank,comm->context_id + context_offset);
+
+ if(data_sz)
+ {
+ if (dt_contig)
+ {
+ mad_iov = MPIU_Malloc(sizeof(struct iovec));
+ mad_iov[0].iov_base = (char*)(buf + dt_true_lb);
+ mad_iov[0].iov_len = data_sz;
+ num_iov += 1;
+ }
+ else
+ {
+ MPID_nem_mx_process_sdtype(&sreq,datatype,dt_ptr,buf,count,data_sz,&mad_iov,&num_iov,0);
+ }
+ /* FIXME issend !*/
+ nm_sr_isend_iov(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
+ mad_iov, num_iov, &(REQ_FIELD(sreq,newmad_req)));
+ }
+ else
+ {
+ /* FIXME issend !*/
+ nm_sr_isend(mpid_nem_newmad_pcore, VC_FIELD(vc, p_gate), match_info,
+ NULL, 0, &(REQ_FIELD(sreq,newmad_req)));
+ }
+ mpid_nem_newmad_pending_send_req++;
+ MPIU_Free(mad_iov);
+
+ fn_exit:
+ *sreq_p = sreq;
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_DIRECTSSEND);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_process_sdtype
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPID_nem_newmad_process_sdtype(MPID_Request **sreq_p, MPI_Datatype datatype, MPID_Datatype * dt_ptr, const void *buf,
+ int count, MPIDI_msg_sz_t data_sz, struct iovec **mad_iov, int *num_iov, int first_taken)
+{
+ MPID_Request *sreq =*sreq_p;
+ MPIDI_msg_sz_t last;
+ int iov_num_ub = count * dt_ptr->n_contig_blocks;
+ int n_iov = iov_num_ub;
+ int mpi_errno = MPI_SUCCESS;
+
+ MPIDI_STATE_DECL(MPID_STATE_MPID_NEM_NEWMAD_PROCESS_SDTYPE);
+ MPIDI_FUNC_ENTER(MPID_STATE_MPID_NEM_NEWMAD_PROCESS_SDTYPE);
+
+ sreq->dev.segment_ptr = MPID_Segment_alloc( );
+ MPIU_ERR_CHKANDJUMP1((sreq->dev.segment_ptr == NULL), mpi_errno, MPI_ERR_OTHER, "**nomem", "**nomem %s", "MPID_Segment_alloc");
+ MPID_Segment_init(buf, count, datatype, sreq->dev.segment_ptr, 0);
+ sreq->dev.segment_first = 0;
+ sreq->dev.segment_size = data_sz;
+ last = sreq->dev.segment_size;
+ (*mad_iov) = MPIU_Malloc((iov_num_ub+first_taken)*sizeof(struct iovec));
+
+ MPID_Segment_pack_vector(sreq->dev.segment_ptr, sreq->dev.segment_first, &last,
+ (MPID_IOV *)((*mad_iov)+(first_taken*sizeof(struct iovec))), &n_iov);
+ MPIU_Assert(last == sreq->dev.segment_size);
+ *num_iov = n_iov + first_taken;
+
+ fn_exit:
+ MPIDI_FUNC_EXIT(MPID_STATE_MPID_NEM_NEWMAD_PROCESS_SDTYPE);
+ return mpi_errno;
+ fn_fail:
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPID_nem_newmad_send
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int
+MPID_nem_newmad_send (MPIDI_VC_t *vc, MPID_nem_cell_ptr_t cell, int datalen)
+{
+ int mpi_errno = MPI_SUCCESS;
+ return mpi_errno;
+}
+
Deleted: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_test.c
===================================================================
--- mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_test.c 2009-03-04 22:01:42 UTC (rev 3937)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_test.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -1,13 +0,0 @@
-/* -*- Mode: C; c-basic-offset:4 ; -*- */
-/*
- * (C) 2006 by Argonne National Laboratory.
- * See COPYRIGHT in top-level directory.
- */
-
-#include "newmad_impl.h"
-
-int
-MPID_nem_newmad_test()
-{
- return 0;
-}
Copied: mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_test.c (from rev 3937, mpich2/branches/dev/mx-netmod/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_test.c)
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_test.c (rev 0)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/nemesis/netmod/newmad/newmad_test.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -0,0 +1,13 @@
+/* -*- Mode: C; c-basic-offset:4 ; -*- */
+/*
+ * (C) 2006 by Argonne National Laboratory.
+ * See COPYRIGHT in top-level directory.
+ */
+
+#include "newmad_impl.h"
+
+int
+MPID_nem_newmad_test()
+{
+ return 0;
+}
Modified: mpich2/trunk/src/mpid/ch3/channels/nemesis/src/ch3_progress.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/channels/nemesis/src/ch3_progress.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/channels/nemesis/src/ch3_progress.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -18,10 +18,10 @@
static MPIDI_CH3_PktHandler_Fcn *pktArray[PKTARRAY_SIZE];
#ifndef MPIDI_POSTED_RECV_ENQUEUE_HOOK
-#define MPIDI_POSTED_RECV_ENQUEUE_HOOK(x) do {} while (0)
+#define MPIDI_POSTED_RECV_ENQUEUE_HOOK(x) do{}while(0)
#endif
#ifndef MPIDI_POSTED_RECV_DEQUEUE_HOOK
-#define MPIDI_POSTED_RECV_DEQUEUE_HOOK(x) do {} while (0)
+#define MPIDI_POSTED_RECV_DEQUEUE_HOOK(x) 0
#endif
#ifdef BY_PASS_PROGRESS
@@ -47,6 +47,18 @@
struct MPID_Request *MPIDI_CH3I_sendq_tail[CH3_NUM_QUEUES] = {0};
struct MPID_Request *MPIDI_CH3I_active_send[CH3_NUM_QUEUES] = {0};
+/* qn_ent and friends are used to keep a list of notification
+ callbacks for posted and matched anysources */
+typedef struct qn_ent
+{
+ struct qn_ent *next;
+ void (*enqueue_fn)(MPID_Request *rreq);
+ int (*dequeue_fn)(MPID_Request *rreq);
+} qn_ent_t;
+
+static qn_ent_t *qn_head = NULL;
+
+
#undef FUNCNAME
#define FUNCNAME MPIDI_CH3I_Progress
#undef FCNAME
@@ -715,81 +727,166 @@
#undef FUNCNAME
+#define FUNCNAME MPIDI_CH3I_Register_anysource_notification
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+int MPIDI_CH3I_Register_anysource_notification(void (*enqueue_fn)(MPID_Request *rreq), int (*dequeue_fn)(MPID_Request *rreq))
+{
+ int mpi_errno = MPI_SUCCESS;
+ qn_ent_t *ent;
+ MPIU_CHKPMEM_DECL(1);
+
+ MPIU_CHKPMEM_MALLOC(ent, qn_ent_t *, sizeof(qn_ent_t), mpi_errno, "queue entry");
+
+ ent->enqueue_fn = enqueue_fn;
+ ent->dequeue_fn = dequeue_fn;
+ ent->next = qn_head;
+ qn_head = ent;
+
+ fn_exit:
+ MPIU_CHKPMEM_COMMIT();
+ return mpi_errno;
+ fn_fail:
+ MPIU_CHKPMEM_REAP();
+ goto fn_exit;
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPIDI_CH3I_Anysource_posted
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+static void anysource_posted(MPID_Request *rreq)
+{
+ qn_ent_t *ent = qn_head;
+
+ /* call all of the registered handlers */
+ while (ent)
+ {
+ if (ent->enqueue_fn)
+ {
+ ent->enqueue_fn(rreq);
+ }
+ ent = ent->next;
+ }
+}
+
+#undef FUNCNAME
+#define FUNCNAME MPIDI_CH3I_Anysource_matched
+#undef FCNAME
+#define FCNAME MPIDI_QUOTE(FUNCNAME)
+static int anysource_matched(MPID_Request *rreq)
+{
+ int matched = FALSE;
+ qn_ent_t *ent = qn_head;
+
+ /* call all of the registered handlers */
+ while(ent) {
+ if (ent->dequeue_fn)
+ {
+ int m;
+
+ m = ent->dequeue_fn(rreq);
+
+ /* this is a crude check to check if the req has been
+ matched by more than one netmod. When MPIU_Assert() is
+ defined to empty, the extra matched=m is optimized
+ away. */
+ MPIU_Assert(!m || !matched);
+ matched = m;
+ }
+ ent = ent->next;
+ }
+
+ return matched;
+}
+
+#undef FUNCNAME
#define FUNCNAME MPIDI_CH3I_Posted_recv_enqueued
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPIDI_CH3I_Posted_recv_enqueued (MPID_Request *rreq)
+void MPIDI_CH3I_Posted_recv_enqueued(MPID_Request *rreq)
{
- int mpi_errno = MPI_SUCCESS;
- int local_rank = -1;
- MPIDI_VC_t *vc;
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_POSTED_RECV_ENQUEUED);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_POSTED_RECV_ENQUEUED);
- /* don't enqueue for anysource */
- if (rreq->dev.match.parts.rank < 0)
- goto fn_exit;
- /* don't enqueue a fastbox for yourself */
- MPIU_Assert(rreq->comm != NULL);
- if (rreq->dev.match.parts.rank == rreq->comm->rank)
- goto fn_exit;
- /* don't enqueue non-local processes */
- MPIDI_Comm_get_vc(rreq->comm, rreq->dev.match.parts.rank, &vc);
- MPIU_Assert(vc != NULL);
- if (!((MPIDI_CH3I_VC *)vc->channel_private)->is_local)
- goto fn_exit;
+ if ((rreq)->dev.match.parts.rank == MPI_ANY_SOURCE)
+ /* call anysource handler */
+ anysource_posted(rreq);
+ else
+ {
+ int local_rank = -1;
+ MPIDI_VC_t *vc;
- /* Translate the communicator rank to a local rank. Note that there is an
- implicit assumption here that because is_local is true above, that these
- processes are in the same PG. */
- local_rank = MPID_NEM_LOCAL_RANK(vc->pg_rank);
+#ifdef ENABLE_COMM_OVERRIDES
+ /* call vc-specific handler */
+ MPIDI_Comm_get_vc((rreq)->comm, (rreq)->dev.match.parts.rank, &vc);
+ if (vc->comm_ops && vc->comm_ops->recv_posted)
+ vc->comm_ops->recv_posted(vc, rreq);
+#endif
+
+ /* enqueue fastbox */
+
+ /* don't enqueue a fastbox for yourself */
+ MPIU_Assert(rreq->comm != NULL);
+ if (rreq->dev.match.parts.rank == rreq->comm->rank)
+ goto fn_exit;
- mpi_errno = MPID_nem_mpich2_enqueue_fastbox (local_rank);
- if (mpi_errno) MPIU_ERR_POP (mpi_errno);
+ /* don't enqueue non-local processes */
+ if (!((MPIDI_CH3I_VC *)vc->channel_private)->is_local)
+ goto fn_exit;
+ /* Translate the communicator rank to a local rank. Note that there is an
+ implicit assumption here that because is_local is true above, that these
+ processes are in the same PG. */
+ local_rank = MPID_NEM_LOCAL_RANK(vc->pg_rank);
+
+ MPID_nem_mpich2_enqueue_fastbox(local_rank);
+ }
+
fn_exit:
MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_POSTED_RECV_ENQUEUED);
- return mpi_errno;
- fn_fail:
- goto fn_exit;
}
+/* returns non-zero when req has been matched by channel */
#undef FUNCNAME
#define FUNCNAME MPIDI_CH3I_Posted_recv_dequeued
#undef FCNAME
#define FCNAME MPIDI_QUOTE(FUNCNAME)
-int MPIDI_CH3I_Posted_recv_dequeued (MPID_Request *rreq)
+int MPIDI_CH3I_Posted_recv_dequeued(MPID_Request *rreq)
{
- int mpi_errno = MPI_SUCCESS;
int local_rank = -1;
MPIDI_VC_t *vc;
+ int matched = FALSE;
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3I_POSTED_RECV_DEQUEUED);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3I_POSTED_RECV_DEQUEUED);
- if (rreq->dev.match.parts.rank < 0)
- goto fn_exit;
+
+ if (rreq->dev.match.parts.rank == MPI_ANY_SOURCE)
+ {
+ matched = anysource_matched(rreq);
+ }
+ else
+ {
+ if (rreq->dev.match.parts.rank == rreq->comm->rank)
+ goto fn_exit;
+
+ /* don't use MPID_NEM_IS_LOCAL, it doesn't handle dynamic processes */
+ MPIDI_Comm_get_vc(rreq->comm, rreq->dev.match.parts.rank, &vc);
+ MPIU_Assert(vc != NULL);
+ if (!((MPIDI_CH3I_VC *)vc->channel_private)->is_local)
+ goto fn_exit;
- if (rreq->dev.match.parts.rank == rreq->comm->rank)
- goto fn_exit;
+ /* Translate the communicator rank to a local rank. Note that there is an
+ implicit assumption here that because is_local is true above, that these
+ processes are in the same PG. */
+ local_rank = MPID_NEM_LOCAL_RANK(vc->pg_rank);
- /* don't use MPID_NEM_IS_LOCAL, it doesn't handle dynamic processes */
- MPIDI_Comm_get_vc(rreq->comm, rreq->dev.match.parts.rank, &vc);
- MPIU_Assert(vc != NULL);
- if (!((MPIDI_CH3I_VC *)vc->channel_private)->is_local)
- goto fn_exit;
-
- /* Translate the communicator rank to a local rank. Note that there is an
- implicit assumption here that because is_local is true above, that these
- processes are in the same PG. */
- local_rank = MPID_NEM_LOCAL_RANK(vc->pg_rank);
-
- mpi_errno = MPID_nem_mpich2_dequeue_fastbox (local_rank);
- if (mpi_errno) MPIU_ERR_POP (mpi_errno);
-
+ MPID_nem_mpich2_dequeue_fastbox(local_rank);
+ }
+
fn_exit:
MPIDI_FUNC_EXIT(MPID_STATE_MPIDI_CH3I_POSTED_RECV_DEQUEUED);
- return mpi_errno;
- fn_fail:
- goto fn_exit;
+ return matched;
}
+
Modified: mpich2/trunk/src/mpid/ch3/include/mpidimpl.h
===================================================================
--- mpich2/trunk/src/mpid/ch3/include/mpidimpl.h 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/include/mpidimpl.h 2009-03-05 22:44:18 UTC (rev 3945)
@@ -627,6 +627,50 @@
struct MPID_Comm;
+#ifdef ENABLE_COMM_OVERRIDES
+typedef struct MPIDI_Comm_ops
+{
+ /* Overriding calls in case of matching-capable interfaces */
+ int (*recv_posted)(struct MPIDI_VC *vc, struct MPID_Request *req);
+
+ int (*send)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request);
+ int (*rsend)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request);
+ int (*ssend)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request );
+ int (*isend)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request );
+ int (*irsend)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request );
+ int (*issend)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request );
+
+ int (*send_init)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request );
+ int (*bsend_init)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request);
+ int (*rsend_init)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request );
+ int (*ssend_init)(struct MPIDI_VC *vc, const void *buf, int count, MPI_Datatype datatype,
+ int dest, int tag, MPID_Comm *comm, int context_offset,
+ struct MPID_Request **request );
+ int (*startall)(struct MPIDI_VC *vc, int count, struct MPID_Request *requests[]);
+
+ int (*cancel_send)(struct MPIDI_VC *vc, struct MPID_Request *sreq);
+ int (*cancel_recv)(struct MPIDI_VC *vc, struct MPID_Request *rreq);
+} MPIDI_Comm_ops_t;
+#endif
+
typedef struct MPIDI_VC
{
/* XXX - need better comment */
@@ -690,6 +734,10 @@
int (* sendNoncontig_fn)( struct MPIDI_VC *vc, struct MPID_Request *sreq,
void *header, MPIDI_msg_sz_t hdr_sz );
+#ifdef ENABLE_COMM_OVERRIDES
+ MPIDI_Comm_ops_t *comm_ops;
+#endif
+
#ifdef MPICH_IS_THREADED
#if MPIU_THREAD_GRANULARITY == MPIU_THREAD_GRANULARITY_PER_OBJECT
MPID_Thread_mutex_t pobj_mutex;
Modified: mpich2/trunk/src/mpid/ch3/src/ch3u_recvq.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/src/ch3u_recvq.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/src/ch3u_recvq.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -6,12 +6,21 @@
#include "mpidimpl.h"
-/* These are needed for nemesis to know from where to expect a message */
+/* MPIDI_POSTED_RECV_ENQUEUE_HOOK(req): Notifies channel that req has
+ been enqueued on the posted recv queue. Returns void. */
#ifndef MPIDI_POSTED_RECV_ENQUEUE_HOOK
-#define MPIDI_POSTED_RECV_ENQUEUE_HOOK(x)
+#define MPIDI_POSTED_RECV_ENQUEUE_HOOK(req) do{}while(0)
#endif
+/* MPIDI_POSTED_RECV_DEQUEUE_HOOK(req): Notifies channel that req has
+ been dequeued from the posted recv queue. Returns non-zero if the
+ channel has already matched the request; 0 otherwise. This happens
+ when the channel supports shared-memory and network communication
+ with a network capable of matching, and the same request is matched
+ by the network and, e.g., shared-memory. When that happens the
+ dequeue functions below should, either search for the next matching
+ request, or report that no request was found. */
#ifndef MPIDI_POSTED_RECV_DEQUEUE_HOOK
-#define MPIDI_POSTED_RECV_DEQUEUE_HOOK(x)
+#define MPIDI_POSTED_RECV_DEQUEUE_HOOK(req) 0
#endif
/* FIXME:
@@ -361,8 +370,7 @@
recvq_posted_head = rreq;
}
recvq_posted_tail = rreq;
- /* This is for nemesis to know from where to expect a message */
- MPIDI_POSTED_RECV_ENQUEUE_HOOK (rreq);
+ MPIDI_POSTED_RECV_ENQUEUE_HOOK(rreq);
}
found = FALSE;
@@ -393,10 +401,11 @@
int found;
MPID_Request * cur_rreq;
MPID_Request * prev_rreq;
+ int dequeue_failed;
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_RECVQ_DP);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_RECVQ_DP);
-
+
found = FALSE;
prev_rreq = NULL;
@@ -413,9 +422,11 @@
if (cur_rreq->dev.next == NULL) {
recvq_posted_tail = prev_rreq;
}
- /* This is for nemesis to know from where to expect a message */
- MPIDI_POSTED_RECV_DEQUEUE_HOOK (rreq);
- found = TRUE;
+ /* Notify channel that rreq has been dequeued and check if
+ it has already matched rreq, fail if so */
+ dequeue_failed = MPIDI_POSTED_RECV_DEQUEUE_HOOK(rreq);
+ if (!dequeue_failed)
+ found = TRUE;
break;
}
@@ -458,10 +469,12 @@
int found;
MPID_Request * rreq;
MPID_Request * prev_rreq;
+ int channel_matched;
MPIDI_STATE_DECL(MPID_STATE_MPIDI_CH3U_RECVQ_FDP_OR_AEU);
MPIDI_FUNC_ENTER(MPID_STATE_MPIDI_CH3U_RECVQ_FDP_OR_AEU);
-
+
+ top_loop:
prev_rreq = NULL;
rreq = recvq_posted_head;
@@ -477,10 +490,13 @@
if (rreq->dev.next == NULL) {
recvq_posted_tail = prev_rreq;
}
- found = TRUE;
- /* This is for the channel to know from where to expect a
- * message */
- MPIDI_POSTED_RECV_DEQUEUE_HOOK (rreq);
+
+ /* give channel a chance to match the request, try again if so */
+ channel_matched = MPIDI_POSTED_RECV_DEQUEUE_HOOK(rreq);
+ if (channel_matched)
+ goto top_loop;
+
+ found = TRUE;
goto lock_exit;
}
prev_rreq = rreq;
Modified: mpich2/trunk/src/mpid/ch3/src/mpid_irsend.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/src/mpid_irsend.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/src/mpid_irsend.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -55,10 +55,18 @@
goto fn_exit;
}
+ MPIDI_Comm_get_vc(comm, rank, &vc);
+
+#ifdef ENABLE_COMM_OVERRIDES
+ if (vc->comm_ops && vc->comm_ops->irsend)
+ {
+ mpi_errno = vc->comm_ops->irsend( vc, buf, count, datatype, rank, tag, comm, context_offset, &sreq);
+ goto fn_exit;
+ }
+#endif
+
MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
- MPIDI_Comm_get_vc(comm, rank, &vc);
-
MPIDI_Pkt_init(ready_pkt, MPIDI_CH3_PKT_READY_SEND);
ready_pkt->match.parts.rank = comm->rank;
ready_pkt->match.parts.tag = tag;
Modified: mpich2/trunk/src/mpid/ch3/src/mpid_isend.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/src/mpid_isend.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/src/mpid_isend.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -66,11 +66,19 @@
goto fn_exit;
}
+ MPIDI_Comm_get_vc(comm, rank, &vc);
+
+#ifdef ENABLE_COMM_OVERRIDES
+ if (vc->comm_ops && vc->comm_ops->isend)
+ {
+ mpi_errno = vc->comm_ops->isend( vc, buf, count, datatype, rank, tag, comm, context_offset, &sreq);
+ goto fn_exit;
+ }
+#endif
+
MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr,
dt_true_lb);
- MPIDI_Comm_get_vc(comm, rank, &vc);
-
if (data_sz == 0)
{
MPIDI_CH3_Pkt_t upkt;
Modified: mpich2/trunk/src/mpid/ch3/src/mpid_issend.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/src/mpid_issend.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/src/mpid_issend.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -52,10 +52,18 @@
goto fn_exit;
}
+ MPIDI_Comm_get_vc(comm, rank, &vc);
+
+#ifdef ENABLE_COMM_OVERRIDES
+ if (vc->comm_ops && vc->comm_ops->issend)
+ {
+ mpi_errno = vc->comm_ops->issend( vc, buf, count, datatype, rank, tag, comm, context_offset, &sreq);
+ goto fn_exit;
+ }
+#endif
+
MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
- MPIDI_Comm_get_vc(comm, rank, &vc);
-
if (data_sz == 0)
{
mpi_errno = MPIDI_CH3_EagerSyncZero( &sreq, rank, tag, comm,
Modified: mpich2/trunk/src/mpid/ch3/src/mpid_rsend.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/src/mpid_rsend.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/src/mpid_rsend.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -51,9 +51,18 @@
goto fn_exit;
}
- MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
MPIDI_Comm_get_vc(comm, rank, &vc);
+#ifdef ENABLE_COMM_OVERRIDES
+ if (vc->comm_ops && vc->comm_ops->rsend)
+ {
+ mpi_errno = vc->comm_ops->rsend( vc, buf, count, datatype, rank, tag, comm, context_offset, &sreq);
+ goto fn_exit;
+ }
+#endif
+
+ MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
if (data_sz == 0)
{
MPIDI_CH3_Pkt_t upkt;
Modified: mpich2/trunk/src/mpid/ch3/src/mpid_send.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/src/mpid_send.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/src/mpid_send.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -63,11 +63,20 @@
goto fn_exit;
}
+ MPIDI_Comm_get_vc(comm, rank, &vc);
+
+#ifdef ENABLE_COMM_OVERRIDES
+ if (vc->comm_ops && vc->comm_ops->send)
+ {
+ mpi_errno = vc->comm_ops->send( vc, buf, count, datatype, rank, tag, comm, context_offset, &sreq);
+ goto fn_exit;
+ }
+#endif
+
MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr,
dt_true_lb);
- MPIDI_Comm_get_vc(comm, rank, &vc);
-
+
if (data_sz == 0)
{
MPIDI_CH3_Pkt_t upkt;
Modified: mpich2/trunk/src/mpid/ch3/src/mpid_ssend.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/src/mpid_ssend.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/src/mpid_ssend.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -64,12 +64,22 @@
goto fn_exit;
}
- MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
MPIDI_Comm_get_vc(comm, rank, &vc);
+#ifdef ENABLE_COMM_OVERRIDES
+ if (vc->comm_ops && vc->comm_ops->ssend)
+ {
+ mpi_errno = vc->comm_ops->ssend( vc, buf, count, datatype, rank, tag, comm, context_offset, &sreq);
+ goto fn_exit;
+ }
+#endif
+
+
+ MPIDI_Datatype_get_info(count, datatype, dt_contig, data_sz, dt_ptr, dt_true_lb);
+
MPIDI_Request_create_sreq(sreq, mpi_errno, goto fn_exit);
MPIDI_Request_set_type(sreq, MPIDI_REQUEST_TYPE_SSEND);
-
+
if (data_sz == 0)
{
mpi_errno = MPIDI_CH3_EagerSyncZero( &sreq, rank, tag, comm,
Modified: mpich2/trunk/src/mpid/ch3/src/mpid_vc.c
===================================================================
--- mpich2/trunk/src/mpid/ch3/src/mpid_vc.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/mpid/ch3/src/mpid_vc.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -693,10 +693,14 @@
vc->node_id = -1;
MPIDI_VC_Init_seqnum_send(vc);
MPIDI_VC_Init_seqnum_recv(vc);
- vc->rndvSend_fn = MPIDI_CH3_RndvSend;
- vc->rndvRecv_fn = MPIDI_CH3_RecvRndv;
- vc->eager_max_msg_sz = MPIDI_CH3_EAGER_MAX_MSG_SIZE;
- vc->sendNoncontig_fn = MPIDI_CH3_SendNoncontig_iov;
+ vc->rndvSend_fn = MPIDI_CH3_RndvSend;
+ vc->rndvRecv_fn = MPIDI_CH3_RecvRndv;
+ vc->eager_max_msg_sz = MPIDI_CH3_EAGER_MAX_MSG_SIZE;
+ vc->sendNoncontig_fn = MPIDI_CH3_SendNoncontig_iov;
+#ifdef ENABLE_COMM_OVERRIDES
+ vc->comm_ops = NULL;
+#endif
+
MPIU_CALL(MPIDI_CH3,VC_Init( vc ));
MPIU_DBG_PrintVCState(vc);
Modified: mpich2/trunk/src/util/param/param.c
===================================================================
--- mpich2/trunk/src/util/param/param.c 2009-03-05 21:27:15 UTC (rev 3944)
+++ mpich2/trunk/src/util/param/param.c 2009-03-05 22:44:18 UTC (rev 3945)
@@ -14,6 +14,11 @@
#include <stdio.h>
#include <ctype.h>
+
+#if defined( HAVE_SETENV ) && defined( NEEDS_SETENV_DECL )
+extern int setenv(const char *name, const char *value, int overwrite);
+#endif
+
#ifndef isascii
#define isascii(c) (((c)&~0x7f)==0)
#endif
@@ -416,3 +421,8 @@
return 0;
}
+
+int MPIU_SetEnv( const char *name, const char *value, int overwrite )
+{
+ return setenv( name, value, overwrite );
+}
More information about the mpich2-commits
mailing list