loaded modules

Currently Loaded Modulefiles:
  1) intel/11.1.069(default)    5) libnuma/2.0.7
  2) openmpi/1.5.4(default)     6) sbank/1.2
  3) git/1.8.3.4                7) cmake/2.8.6
  4) acml/5.0.0_fma4(default)

limits

cputime      unlimited
filesize     unlimited
datasize     unlimited
stacksize    unlimited
coredumpsize unlimited
memoryuse    unlimited
vmemoryuse   unlimited
descriptors  64000
memorylocked unlimited
maxproc      16000

Environment Variables

USER=d3m956
LOGNAME=d3m956
HOME=/people/d3m956
PATH=/share/apps/cmake/2.8.6//bin:/share/apps/sbank/1.2//bin:/share/apps/libnuma/2.0.7//bin:/share/apps/git/1.8.3.4//bin:/share/apps/intel/11.1/069/bin/intel64:/pic/people/scicons/bin:/usr/java/latest/bin:/share/apps/openmpi/1.5.4/intel/11.1.069/bin:/usr/lib64/qt-3.3/bin:/pic/people/scicons/bin:/usr/NX/bin:/usr/kerberos/bin:/usr/java/latest/bin:/usr/local/bin:/bin:/usr/bin:/usr/X11R6/bin:/opt/ganglia/bin:/opt/ganglia/sbin:/opt/hpss/bin:/opt/pdsh/bin:/opt/rocks/bin:/opt/rocks/sbin:/opt/ganglia/bin:/opt/ganglia/sbin:/opt/hpss/bin:/opt/rocks/bin:/opt/rocks/sbin
MAIL=/var/spool/mail/d3m956
SHELL=/bin/tcsh
SSH_CLIENT=130.20.35.206 60625 22
SSH_CONNECTION=130.20.35.206 60625 130.20.68.15 22
SSH_TTY=/dev/pts/18
TERM=xterm
KRB5CCNAME=FILE:/tmp/krb5cc_22956_1Emwt1
HOSTTYPE=x86_64-linux
VENDOR=unknown
OSTYPE=linux
MACHTYPE=x86_64
SHLVL=2
PWD=/pic/projects/ds/DS_PETSc/ss16
GROUP=users
HOST=node0660.local
REMOTEHOST=we19351.pnl.gov
HOSTNAME=node0660
INPUTRC=/etc/inputrc
ANT_HOME=/opt/rocks
LS_COLORS=no=00:fi=00:di=00;34:ln=00;36:pi=40;33:so=00;35:bd=40;33;01:cd=40;33;01:or=01;05;37;41:mi=01;05;37;41:ex=00;32:*.cmd=00;32:*.exe=00;32:*.com=00;32:*.btm=00;32:*.bat=00;32:*.sh=00;32:*.csh=00;32:*.tar=00;31:*.tgz=00;31:*.arj=00;31:*.taz=00;31:*.lzh=00;31:*.zip=00;31:*.z=00;31:*.Z=00;31:*.gz=00;31:*.bz2=00;31:*.bz=00;31:*.tz=00;31:*.rpm=00;31:*.cpio=00;31:*.jpg=00;35:*.gif=00;35:*.bmp=00;35:*.xbm=00;35:*.xpm=00;35:*.png=00;35:*.tif=00;35:
G_BROKEN_FILENAMES=1
SSH_ASKPASS=/usr/libexec/openssh/gnome-ssh-askpass
HPSS_PRIMARY_AUTHN_MECH=krb5
JAVA_HOME=/usr/java/latest
LANG=en_US.UTF-8
LESSOPEN=|/usr/bin/lesspipe.sh %s
MODULE_VERSION=3.2.8
MODULE_VERSION_STACK=3.2.8
MODULESHOME=/share/apps/modules/Modules/3.2.8
MODULEPATH=$MODULESHOME/modulefiles/environment:$MODULESHOME/modulefiles/development/compilers:$MODULESHOME/modulefiles/development/mpi:$MODULESHOME/modulefiles/development/mlib:$MODULESHOME/modulefiles/development/tools:$MODULESHOME/modulefiles/apps:$MODULESHOME/modulefiles/libs:/share/apps/modules/Modules/versions
NXDIR=/usr/NX
PDSHROOT=/opt/pdsh
MANPATH=/share/apps/sbank/1.2//share/man:/share/apps/libnuma/2.0.7//share/man:/share/apps/git/1.8.3.4//share/man:/share/apps/intel/11.1/069/man/en_US/:/share/apps/openmpi/1.5.4/intel/11.1.069/man:/usr/share/man::
SBATCH_NO_REQUEUE=1
SRUN_NO_REQUEUE=1
SLURM_NO_REQUEUE=1
QTDIR=/usr/lib64/qt-3.3
QTINC=/usr/lib64/qt-3.3/include
QTLIB=/usr/lib64/qt-3.3/lib
ROCKS_ROOT=/opt/rocks
LD_LIBRARY_PATH=/share/apps/libnuma/2.0.7//lib64:/share/apps/intel/11.1/069/lib/intel64:/share/apps/acml/5.0.0/ifort64_fma4/lib:/share/apps/openmpi/1.5.4/intel/11.1.069/lib:/pic/projects/ds/petsc-dev.6.06.13/arch-complex-opt/lib
PETSC_DIR=/pic/projects/ds/petsc-dev.6.06.13
PETSC_ARCH=arch-complex-opt
SLURM_JOB_NAME=DS
SLURM_PRIO_PROCESS=0
SLURM_SUBMIT_DIR=/pic/projects/ds/DS_PETSc/ss16
SLURM_JOB_ID=3714735
SLURM_JOB_NUM_NODES=1
SLURM_JOB_NODELIST=node0660
SLURM_NODE_ALIASES=(null)
SLURM_JOB_CPUS_PER_NODE=32
ENVIRONMENT=BATCH
SLURM_JOBID=3714735
SLURM_NNODES=1
SLURM_NODELIST=node0660
SLURM_NTASKS=32
SLURM_NPROCS=32
SLURM_TASKS_PER_NODE=32
SLURM_TOPOLOGY_ADDR=root.ql027.node0660
SLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.node
TMPDIR=/tmp
SLURM_TASK_PID=15021
SLURM_CPUS_ON_NODE=32
SBATCH_CPU_BIND_VERBOSE=quiet
SBATCH_CPU_BIND_TYPE=mask_cpu:
SBATCH_CPU_BIND_LIST=0xFFFFFFFF
SBATCH_CPU_BIND=quiet,mask_cpu:0xFFFFFFFF
SLURM_NODEID=0
SLURM_PROCID=0
SLURM_LOCALID=0
SLURM_GTIDS=0
SLURM_CHECKPOINT_IMAGE_DIR=/pic/projects/ds/DS_PETSc/ss16
SLURMD_NODENAME=node0660
LOADEDMODULES=intel/11.1.069:openmpi/1.5.4:git/1.8.3.4:acml/5.0.0_fma4:libnuma/2.0.7:sbank/1.2:cmake/2.8.6
PNNL_COMPILER=intel
PNNL_COMPILER_VERSION=11.1.069
_LMFILES_=/share/apps/modules/Modules/3.2.8/modulefiles/development/compilers/intel/11.1.069:/share/apps/modules/Modules/3.2.8/modulefiles/development/mpi/openmpi/1.5.4:/share/apps/modules/Modules/3.2.8/modulefiles/development/tools/git/1.8.3.4:/share/apps/modules/Modules/3.2.8/modulefiles/development/mlib/acml/5.0.0_fma4:/share/apps/modules/Modules/3.2.8/modulefiles/development/tools/libnuma/2.0.7:/share/apps/modules/Modules/3.2.8/modulefiles/development/tools/sbank/1.2:/share/apps/modules/Modules/3.2.8/modulefiles/development/tools/cmake/2.8.6
IBV_FORK_SAFE=1
MPI_ROOT=/share/apps/openmpi/1.5.4/intel/11.1.069
PNNL_MPI=openmpi
PNNL_MPI_VERSION=1.5.4
MLIB_CFLAGS=-I/share/apps/acml/5.0.0/ifort64_fma4/include
MLIB_FFLAGS=-I/share/apps/acml/5.0.0/ifort64_fma4/include
MLIB_LDFLAGS=/share/apps/acml/5.0.0/ifort64_fma4/lib/libacml.a
CMAKE=/share/apps/cmake/2.8.6/

ldd output

Number of buses: 1081
Number of branches: 1689
Number of swing buses: 1
Number of PQ buses: 793
Number of PV buses: 287
Number of generators: 288
Number of switches: 4
Initialization time: 0.040288
Alloc main data time: 0.0446651
Read input data time: 0.061626
Ext2int_gen time: 0.00050211
Build admittance matrix time: 0.522901
20 steps, ftime 0.1
9 steps, ftime 0.05
571 steps, ftime 2.855
Run simulation time: 351.276
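(A quick cross-check against the PETSc log below: the three step groups above, 20 + 9 + 571, add up to the 600 TSStep calls reported in the event table, and the 351.276 s "Run simulation time" matches the 3.5095e+02 s logged for TSStep.)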
************************************************************************************************************************
***             WIDEN YOUR WINDOW TO 120 CHARACTERS.  Use 'enscript -r -fCourier9' to print this document            ***
************************************************************************************************************************

---------------------------------------------- PETSc Performance Summary: ----------------------------------------------

dynSim on a arch-complex-opt named node0660.local with 32 processors, by d3m956 Mon Aug 12 20:10:15 2013
Using Petsc Development GIT revision: a0a914e661bf6402b8edabe0f5a2dad46323f69f  GIT Date: 2013-06-05 14:18:39 -0500

                         Max       Max/Min        Avg      Total
Time (sec):           3.520e+02      1.00000   3.520e+02
Objects:              2.367e+04      1.00025   2.367e+04
Flops:                2.877e+09      1.00371   2.872e+09  9.190e+10
Flops/sec:            8.174e+06      1.00371   8.159e+06  2.611e+08
MPI Messages:         2.590e+05      1.00097   2.589e+05  8.284e+06
MPI Message Lengths:  1.494e+08      1.00359   5.756e+02  4.768e+09
MPI Reductions:       6.682e+04      1.00009

Flop counting convention: 1 flop = 1 real number operation of type (multiply/divide/add/subtract)
                            e.g., VecAXPY() for real vectors of length N --> 2N flops
                            and VecAXPY() for complex vectors of length N --> 8N flops

Summary of Stages:   ----- Time ------  ----- Flops -----  --- Messages ---  -- Message Lengths --  -- Reductions --
                        Avg     %Total     Avg     %Total   counts   %Total     Avg         %Total   counts   %Total
 0:      Main Stage: 3.5199e+02 100.0%  9.1901e+10 100.0%  8.284e+06 100.0%  5.756e+02      100.0%  6.682e+04 100.0%

------------------------------------------------------------------------------------------------------------------------
See the 'Profiling' chapter of the users' manual for details on interpreting output.
Phase summary info:
   Count: number of times phase was executed
   Time and Flops: Max - maximum over all processors
                   Ratio - ratio of maximum to minimum over all processors
   Mess: number of messages sent
   Avg. len: average message length (bytes)
   Reduct: number of global reductions
   Global: entire computation
   Stage: stages of a computation. Set stages with PetscLogStagePush() and PetscLogStagePop().
      %T - percent time in this phase         %f - percent flops in this phase
      %M - percent messages in this phase     %L - percent message lengths in this phase
      %R - percent reductions in this phase
   Total Mflop/s: 10e-6 * (sum of flops over all processors)/(max time over all processors)
------------------------------------------------------------------------------------------------------------------------

      ##########################################################
      #                                                        #
      #                       WARNING!!!                       #
      #                                                        #
      #   The code for various complex numbers numerical       #
      #   kernels uses C++, which generally is not well        #
      #   optimized.  For performance that is about 4-5 times  #
      #   faster, specify --with-fortran-kernels=1             #
      #   when running ./configure.py.                         #
      #                                                        #
      ##########################################################
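Everything in this run is attributed to the default Main Stage. The phase summary notes that finer-grained stages are set with PetscLogStagePush()/PetscLogStagePop(); as a minimal, hypothetical sketch (not taken from the dynSim source), separating assembly from time stepping would look roughly like this, with each registered stage then getting its own block in the -log_summary report:

  /* Hypothetical example of user-defined logging stages; not from dynSim. */
  #include <petscsys.h>

  int main(int argc, char **argv)
  {
    PetscErrorCode ierr;
    PetscLogStage  assembly, stepping;

    ierr = PetscInitialize(&argc, &argv, NULL, NULL);CHKERRQ(ierr);

    ierr = PetscLogStageRegister("Admittance assembly", &assembly);CHKERRQ(ierr);
    ierr = PetscLogStageRegister("Time stepping", &stepping);CHKERRQ(ierr);

    ierr = PetscLogStagePush(assembly);CHKERRQ(ierr);
    /* ... build the admittance matrix and other data here ... */
    ierr = PetscLogStagePop();CHKERRQ(ierr);

    ierr = PetscLogStagePush(stepping);CHKERRQ(ierr);
    /* ... TSSolve()/SNESSolve() loop here ... */
    ierr = PetscLogStagePop();CHKERRQ(ierr);

    ierr = PetscFinalize();   /* -log_summary output is written here */
    return 0;
  }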
------------------------------------------------------------------------------------------------------------------------
Event                Count      Time (sec)     Flops                             --- Global ---  --- Stage ---   Total
                   Max Ratio  Max     Ratio   Max  Ratio  Mess   Avg len Reduct  %T %f %M %L %R  %T %f %M %L %R Mflop/s
------------------------------------------------------------------------------------------------------------------------

--- Event Stage 0: Main Stage

VecDot              1782 1.0 6.6991e-01 5.6 5.06e+05 1.0 0.0e+00 0.0e+00 1.8e+03  0 0 0 0 3   0 0 0 0 3     24
VecMDot             6546 1.0 3.2755e+00 5.2 4.40e+06 1.0 0.0e+00 0.0e+00 6.5e+03  1 0 0 0 10  1 0 0 0 10    43
VecNorm            11892 1.0 1.7802e+0113.8 3.42e+06 1.0 0.0e+00 0.0e+00 1.2e+04  3 0 0 0 18  3 0 0 0 18     6
VecScale            8328 1.0 1.3907e-02 1.2 1.20e+06 1.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0   2759
VecCopy             6546 1.0 9.1767e-03 1.5 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0      0
VecSet             20246 1.0 1.6136e-01 1.2 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0      0
VecAXPY             2382 1.0 7.0758e-03 1.1 6.86e+05 1.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0   3102
VecAXPBYCZ          2982 1.0 6.9971e-03 1.5 1.29e+06 1.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0   5891
VecWAXPY            1782 1.0 2.7928e-03 1.2 2.57e+05 1.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0   2940
VecMAXPY            8328 1.0 1.4402e-02 1.1 6.35e+06 1.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0  14103
VecAssemblyBegin    4010.0 3.0029e-03 1.4 0.00e+00 0.0 0.0e+00 0.0e+00 1.2e+01  0 0 0 0 0   0 0 0 0 0      0
VecAssemblyEnd      4010.0 1.4067e-05 4.9 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0      0
VecScatterBegin    15742 1.0 2.6506e+00 1.3 0.00e+00 0.0 8.3e+06 5.8e+02 7.4e+03  1 0100100 11  1 0100100 11  0
VecScatterEnd       8328 1.0 3.7505e+00 4.3 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00  1 0 0 0 0   1 0 0 0 0      0
VecReduceArith      4164 1.0 5.5809e-03 1.3 1.18e+06 1.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0   6781
VecReduceComm       2382 1.0 5.6911e+0036.4 0.00e+00 0.0 0.0e+00 0.0e+00 2.4e+03  1 0 0 0 4   1 0 0 0 4      0
VecNormalize        8328 1.0 6.8714e-01 1.4 3.60e+06 1.0 0.0e+00 0.0e+00 8.3e+03  0 0 0 0 12  0 0 0 0 12   168
MatMult             8328 1.0 7.4437e+00 1.8 2.76e+09 1.0 8.3e+06 5.8e+02 0.0e+00  2 96100100 0  2 96100100 0 11873
MatSolve            8328 1.0 6.1783e-01 1.0 8.51e+07 1.0 0.0e+00 0.0e+00 0.0e+00  0 3 0 0 0   0 3 0 0 0   4410
MatLUFactorSym         3 1.0 3.3424e-03 1.3 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0      0
MatLUFactorNum      1788 1.0 3.5998e-01 1.2 1.09e+0742.4 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0    496
MatILUFactorSym        3 1.0 1.8849e-03 2.6 0.00e+00 0.0 0.0e+00 0.0e+00 3.0e+00  0 0 0 0 0   0 0 0 0 0      0
MatCopy                3 1.0 2.6360e-03 1.4 0.00e+00 0.0 0.0e+00 0.0e+00 2.4e+01  0 0 0 0 0   0 0 0 0 0      0
MatConvert            14 1.0 2.0933e-01 1.0 0.00e+00 0.0 7.5e+03 1.8e+01 1.4e+02  0 0 0 0 0   0 0 0 0 0      0
MatScale               1 1.0 5.8858e-03 1.1 1.36e+02 0.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0      0
MatAssemblyBegin    1857 1.0 7.4614e+00 1.5 0.00e+00 0.0 2.3e+02 5.9e+01 3.7e+03  2 0 0 0 6   2 0 0 0 6      0
MatAssemblyEnd      1857 1.0 3.0394e-01 1.4 0.00e+00 0.0 1.9e+04 2.4e+01 2.1e+03  0 0 0 0 3   0 0 0 0 3      0
MatGetRow           8542 1.0 2.2396e-02 1.1 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0      0
MatGetRowIJ            6 1.0 1.3118e-03 3.6 0.00e+00 0.0 0.0e+00 0.0e+00 0.0e+00  0 0 0 0 0   0 0 0 0 0      0
MatGetOrdering         6 1.0 3.3140e-03 1.6 0.00e+00 0.0 0.0e+00 0.0e+00 1.8e+01  0 0 0 0 0   0 0 0 0 0      0
MatAXPY                3 1.0 7.2742e-03 1.3 0.00e+00 0.0 1.4e+03 1.1e+01 4.8e+01  0 0 0 0 0   0 0 0 0 0      0
MatTranspose           2 1.0 9.0001e-03 1.0 0.00e+00 0.0 3.1e+02 3.4e+01 3.4e+01  0 0 0 0 0   0 0 0 0 0      0
MatMatMult             9 1.0 2.3718e-02 1.0 6.52e+04 1.0 9.5e+03 9.5e+02 2.2e+02  0 0 0 0 0   0 0 0 0 0     86
MatMatSolve            3 1.0 7.9999e-02 1.0 0.00e+00 0.0 0.0e+00 0.0e+00 2.4e+01  0 0 0 0 0   0 0 0 0 0      0
MatMatMultSym          9 1.0 1.9545e-02 1.1 0.00e+00 0.0 8.8e+03 5.8e+02 2.0e+02  0 0 0 0 0   0 0 0 0 0      0
MatMatMultNum          9 1.0 4.1912e-03 1.2 6.52e+04 1.0 7.4e+02 5.4e+03 1.8e+01  0 0 0 0 0   0 0 0 0 0    489
MatGetLocalMat        18 1.0 1.8110e-03 1.5 0.00e+00 0.0 0.0e+00 0.0e+00 1.8e+01  0 0 0 0 0   0 0 0 0 0      0
MatGetBrAoCol         18 1.0 6.1026e-03 1.6 0.00e+00 0.0 2.9e+03 3.0e+03 1.8e+01  0 0 0 0 0   0 0 0 0 0      0
TSStep               600 1.0 3.5095e+02 1.0 2.88e+09 1.0 8.3e+06 5.8e+02 6.1e+04100100100100 91 100100100100 91  262
TSFunctionEval      2382 1.0 2.3467e+0114.5 0.00e+00 0.0 0.0e+00 0.0e+00 2.4e+04  4 0 0 0 36  4 0 0 0 36     0
TSJacobianEval      1782 1.0 3.1640e+02 1.0 0.00e+00 0.0 2.0e+03 7.4e+01 1.4e+04 90 0 0 0 21 90 0 0 0 21     0
SNESSolve            600 1.0 3.5093e+02 1.0 2.88e+09 1.0 8.3e+06 5.8e+02 6.1e+04100100100100 91 100100100100 91  262
SNESFunctionEval    2382 1.0 2.3485e+0114.4 1.03e+06 1.0 0.0e+00 0.0e+00 2.4e+04  4 0 0 0 36  4 0 0 0 36     1
SNESJacobianEval    1782 1.0 3.1641e+02 1.0 0.00e+00 0.0 2.0e+03 7.4e+01 1.4e+04 90 0 0 0 21 90 0 0 0 21     0
SNESLineSearch      1782 1.0 1.9982e+01 1.0 5.95e+08 1.0 1.8e+06 5.8e+02 2.5e+04  6 21 21 21 37  6 21 21 21 37  952
KSPGMRESOrthog      6546 1.0 3.3187e+00 4.9 8.86e+06 1.0 0.0e+00 0.0e+00 6.5e+03  1 0 0 0 10  1 0 0 0 10    85
KSPSetUp            3564 1.0 3.2435e-02 1.1 0.00e+00 0.0 0.0e+00 0.0e+00 3.0e+01  0 0 0 0 0   0 0 0 0 0      0
KSPSolve            1782 1.0 8.5647e+00 1.0 2.28e+09 1.0 6.5e+06 5.8e+02 1.5e+04  2 79 78 78 22  2 79 78 78 22 8505
PCSetUp             3564 1.0 2.7744e-01 1.3 1.09e+0742.4 0.0e+00 0.0e+00 2.1e+01  0 0 0 0 0   0 0 0 0 0    643
PCSetUpOnBlocks     1782 1.0 2.6474e-01 1.3 1.09e+0742.4 0.0e+00 0.0e+00 1.5e+01  0 0 0 0 0   0 0 0 0 0    674
PCApply             8328 1.0 7.7867e-01 1.0 8.51e+07 1.0 0.0e+00 0.0e+00 0.0e+00  0 3 0 0 0   0 3 0 0 0   3499
------------------------------------------------------------------------------------------------------------------------
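As a rough cross-check of how the Mflop/s column follows from the flop-counting convention stated above: the MatMult row reports a maximum time of 7.4437 s and 2.76e+09 flops per process with a max/min ratio of 1.0, so the 32-process total is about 32 x 2.76e9 ~ 8.8e10 flops; 1e-6 x 8.8e10 / 7.4437 ~ 1.2e4, consistent with the reported 11873 Mflop/s (small differences come from rounding of the displayed values). That same row accounts for roughly 96% of the run's 9.190e+10 total flops (the %f column), while TSJacobianEval dominates the wall clock at 90% of the 352 s run (the %T column) despite performing no logged flops.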
Memory usage is given in bytes:

Object Type          Creations   Destructions     Memory  Descendants' Mem.
Reports information only for process 0.

--- Event Stage 0: Main Stage

              Vector   8474            144       749552     0
      Vector Scatter   7456           7451      4811836     0
              Matrix    160            154      1658100     0
           Index Set   7515           7509      5721944     0
   IS L to G Mapping      1              0            0     0
     Bipartite Graph     16             14        11424     0
    Distributed Mesh      7              6        26304     0
             TSAdapt      6              4         4784     0
                  TS      3              1         1224     0
                DMTS      3              2         1424     0
                SNES      3              1         1316     0
      SNESLineSearch      3              1          864     0
              DMSNES      4              3         2016     0
       Krylov Solver      6              2        35896     0
     DMKSP interface      1              0            0     0
      Preconditioner      6              2         1864     0
              Viewer      1              0            0     0
========================================================================================================================
Average time to get PetscTime(): 1.90735e-07
Average time for MPI_Barrier(): 1.64032e-05
Average time for zero size MPI_Send(): 8.34465e-06
#PETSc Option Table entries:
-i /pic/projects/ds/DS_PETSc/txt/d288gen.txt
-log_summary
#End of PETSc Option Table entries
Compiled without FORTRAN kernels
Compiled with full precision matrices (default)
sizeof(short) 2 sizeof(int) 4 sizeof(long) 8 sizeof(void*) 8 sizeof(PetscScalar) 16 sizeof(PetscInt) 4
Configure run at: Mon Aug 5 13:50:31 2013
Configure options: --with-scalar-type=complex --with-clanguage=C++ PETSC_ARCH=arch-complex-opt --with-fortran-kernels=generic --download-superlu_dist --download-mumps --download-scalapack --download-parmetis --download-metis --download-elemental --with-debugging=0
-----------------------------------------
Libraries compiled on Mon Aug 5 13:50:31 2013 on olympus.local
Machine characteristics: Linux-2.6.32-131.17.1.el6.x86_64-x86_64-with-redhat-5.7-Tikanga
Using PETSc directory: /pic/projects/ds/petsc-dev.6.06.13
Using PETSc arch: arch-complex-opt
-----------------------------------------
Using C compiler: mpicxx -wd1572 -O3 -fPIC ${COPTFLAGS} ${CFLAGS}
Using Fortran compiler: mpif90 -fPIC -O3 ${FOPTFLAGS} ${FFLAGS}
-----------------------------------------
Using include paths: -I/pic/projects/ds/petsc-dev.6.06.13/arch-complex-opt/include -I/pic/projects/ds/petsc-dev.6.06.13/include -I/pic/projects/ds/petsc-dev.6.06.13/include -I/pic/projects/ds/petsc-dev.6.06.13/arch-complex-opt/include -I/share/apps/openmpi/1.5.4/intel/11.1/include
-----------------------------------------
Using C linker: mpicxx
Using Fortran linker: mpif90
Using libraries: -Wl,-rpath,/pic/projects/ds/petsc-dev.6.06.13/arch-complex-opt/lib -L/pic/projects/ds/petsc-dev.6.06.13/arch-complex-opt/lib -lpetsc -Wl,-rpath,/pic/projects/ds/petsc-dev.6.06.13/arch-complex-opt/lib -L/pic/projects/ds/petsc-dev.6.06.13/arch-complex-opt/lib -lcmumps -ldmumps -lsmumps -lzmumps -lmumps_common -lpord -lscalapack -lsuperlu_dist_3.3 -lelemental -lpmrrr -llapack -lblas -lX11 -lparmetis -lmetis -lpthread -Wl,-rpath,/share/apps/openmpi/1.5.4/intel/11.1/lib -L/share/apps/openmpi/1.5.4/intel/11.1/lib -Wl,-rpath,/share/apps/intel/11.1/069/lib/intel64 -L/share/apps/intel/11.1/069/lib/intel64 -Wl,-rpath,/usr/lib/gcc/x86_64-redhat-linux/4.1.2 -L/usr/lib/gcc/x86_64-redhat-linux/4.1.2 -lmpi_f90 -lmpi_f77 -lifport -lifcore -lm -lm -lmpi_cxx -lstdc++ -lmpi_cxx -lstdc++ -ldl -lmpi -lnsl -lutil -limf -lsvml -lipgo -ldecimal -lgcc_s -lirc -lpthread -lirc_s -ldl
-----------------------------------------
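For context on the option table above: -log_summary is what causes PetscFinalize() to emit the performance report shown in this log, and -i points the application at its network input file. A rough, hypothetical sketch of how such options are typically consumed in a PETSc application of this vintage (illustrative only; the actual option handling in dynSim may differ):

  /* Hypothetical sketch; not the dynSim source. Signatures follow the
   * petsc-dev vintage recorded in this log. */
  #include <petscsys.h>

  int main(int argc, char **argv)
  {
    PetscErrorCode ierr;
    char           infile[PETSC_MAX_PATH_LEN];
    PetscBool      found = PETSC_FALSE;

    ierr = PetscInitialize(&argc, &argv, NULL, NULL);CHKERRQ(ierr);

    /* Picks up "-i /pic/projects/ds/DS_PETSc/txt/d288gen.txt" */
    ierr = PetscOptionsGetString(NULL, "-i", infile, sizeof(infile), &found);CHKERRQ(ierr);
    if (!found) SETERRQ(PETSC_COMM_WORLD, PETSC_ERR_USER, "Specify the network input file with -i");

    /* ... read the network, assemble matrices, run the dynamic simulation ... */

    /* Because -log_summary is in the option table, PetscFinalize() prints
     * the performance summary shown above. */
    ierr = PetscFinalize();
    return 0;
  }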