[Swift-commit] r6905 - in branches/release-0.94/tests/stress: . local_cluster

yadunandb at ci.uchicago.edu yadunandb at ci.uchicago.edu
Tue Aug 20 17:12:32 CDT 2013


Author: yadunandb
Date: 2013-08-20 17:12:32 -0500 (Tue, 20 Aug 2013)
New Revision: 6905

Added:
   branches/release-0.94/tests/stress/local_cluster/
   branches/release-0.94/tests/stress/local_cluster/combiner.sh
   branches/release-0.94/tests/stress/local_cluster/run
   branches/release-0.94/tests/stress/local_cluster/simple_MapRed.args
   branches/release-0.94/tests/stress/local_cluster/simple_MapRed.check.sh
   branches/release-0.94/tests/stress/local_cluster/simple_MapRed.setup.sh
   branches/release-0.94/tests/stress/local_cluster/simple_MapRed.stdout
   branches/release-0.94/tests/stress/local_cluster/simple_MapRed.swift
   branches/release-0.94/tests/stress/local_cluster/sites.template.xml
   branches/release-0.94/tests/stress/local_cluster/sites.xml
   branches/release-0.94/tests/stress/local_cluster/swift.properties
   branches/release-0.94/tests/stress/local_cluster/teragen_wrap.sh
   branches/release-0.94/tests/stress/local_cluster/title.txt
Log:

Committing tests for Local cluster style tests



Added: branches/release-0.94/tests/stress/local_cluster/combiner.sh
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/combiner.sh	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/combiner.sh	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+FILES=$*
+SUM=0
+COUNT=0
+
+for file in $*
+do
+    RES=($(awk '{ sum += $1 } END { print sum,NR }' $file))
+    echo "${RES[0]} ${RES[1]}"
+    SUM=$(($SUM+${RES[0]}))
+    COUNT=$(($COUNT+${RES[1]}))
+done
+echo "SUM  : $SUM"
+echo "COUNT: $COUNT"
+exit 0


Property changes on: branches/release-0.94/tests/stress/local_cluster/combiner.sh
___________________________________________________________________
Added: svn:executable
   + *

Added: branches/release-0.94/tests/stress/local_cluster/run
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/run	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/run	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+PATH=/scratch/midway/yadunand/swift-0.94RC2/cog/modules/swift/dist/swift-svn/bin:$PATH
+
+echo "Swift location: "; which swift
+echo "Swift version : "; swift -version
+
+export MIDWAY_USERNAME=yadunand
+export BEAGLE_USERNAME=yadunandb
+export MCS_USERNAME=yadunand
+export UC3_USERNAME=yadunand
+
+SCRIPT=simple_MapRed.swift
+BASE=${SCRIPT%.swift}
+
+rm $BASE.stdout
+cat title.txt
+
+cp sites.template.xml sites.xml
+./$BASE.setup.sh
+
+ARGS=$(cat $BASE.args)
+swift -tc.file tc.data -config swift.properties -sites.file sites.xml $BASE.swift ${ARGS[*]} | tee -a $BASE.stdout
+
+rm -rf *{swiftx,kml} $BASE-* _concurrent* failed* &> /dev/null
+
+./$BASE.check.sh
\ No newline at end of file


Property changes on: branches/release-0.94/tests/stress/local_cluster/run
___________________________________________________________________
Added: svn:executable
   + *

Added: branches/release-0.94/tests/stress/local_cluster/simple_MapRed.args
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/simple_MapRed.args	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/simple_MapRed.args	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1 @@
+-loops=10
\ No newline at end of file

Added: branches/release-0.94/tests/stress/local_cluster/simple_MapRed.check.sh
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/simple_MapRed.check.sh	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/simple_MapRed.check.sh	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+BASE=${0%.check.sh}
+ARGS=`cat $BASE.args | sed 's/-loops=//'`
+
+EXPECTED=$(($ARGS * 10000))
+
+if [ -f "final_result" ];then
+    RESULT=($(tail -n 1 final_result))
+    echo "RESULT line : ${RESULT[*]}"
+    echo "EXPECTED = $EXPECTED"
+    echo "ACTUAL   = ${RESULT[1]}"
+fi
+
+if [[ "${RESULT[1]}" == "$EXPECTED" ]]
+then
+    echo "Result matched"
+else
+    echo "Result does not match expectation" >&2
+    exit 1
+fi


Property changes on: branches/release-0.94/tests/stress/local_cluster/simple_MapRed.check.sh
___________________________________________________________________
Added: svn:executable
   + *

Added: branches/release-0.94/tests/stress/local_cluster/simple_MapRed.setup.sh
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/simple_MapRed.setup.sh	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/simple_MapRed.setup.sh	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+HOST=$(hostname -f)
+
+if   [[ "$HOST" == *midway* ]]; then
+    echo "On Midway"
+    echo "midway bash /bin/bash null null null" > tc.data
+elif [[ "$HOST" == *beagle* ]]; then
+    echo "On Beagle"
+    echo "beagle bash /bin/bash null null null" > tc.data
+elif [[ "$HOST" == *mcs* ]]; then
+    echo "On MCS"
+    echo "mcs bash /bin/bash null null null" > tc.data
+elif [[ "$HOST" == *uc3* ]]; then
+    echo "On UC3"
+    echo "uc3 bash /bin/bash null null null" > tc.data
+else
+    echo "On unidentified machine, using defaults"
+    echo "local bash /bin/bash null null null" > tc.data
+fi
+
+if [[ -z $MIDWAY_USERNAME ]]
+then
+    echo "Remote username not provided. Skipping sites configs"
+else
+    cat sites.xml  | sed "s/{mid.USER}/$MIDWAY_USERNAME/" > tmp && mv tmp\
+ sites.xml
+fi
+if [[ -z $UC3_USERNAME ]]
+then
+    echo "Remote username not provided. Skipping sites configs"
+else
+    cat sites.xml  | sed "s/{uc3.USER}/$UC3_USERNAME/" > tmp && mv tmp si\
+tes.xml
+fi
+if [[ -z $BEAGLE_USERNAME ]]
+then
+    echo "Remote username not provided. Skipping sites configs"
+else
+    cat sites.xml  | sed "s/{beagle.USER}/$BEAGLE_USERNAME/" > tmp && mv \
+tmp sites.xml
+fi
+if [[ -z $MCS_USERNAME ]]
+then
+    echo "Remote username not provided. Skipping sites configs"
+else
+    cat sites.xml  | sed "s/{mcs.USER}/$MCS_USERNAME/" > tmp && mv \
+tmp sites.xml
+fi
+
+cat<<'EOF' > teragen_wrap.sh
+#!/bin/bash
+
+# With ARG1=100 and SLICESIZE=10000, this script would generate
+# 10^6 records; ARG1 defaults to 1 (i.e. 10^4 records).
+ARG1=1
+[ ! -z $1 ] && ARG1=$1
+
+FILE="input_$RANDOM.txt"
+LOWERLIMIT=0
+UPPERLIMIT=1000000 # 10^6
+SLICESIZE=10000     # 10^4 records padded to 100B would result in 1MB file
+#SLICESIZE=1000     # 10^3 records; if padded to 100B, would result in a 100KB file
+
+shuf -i $LOWERLIMIT-$UPPERLIMIT -n $(($SLICESIZE*$ARG1)) | awk '{printf "%-99s\n", $0}'
+exit 0
+EOF
+
+cat <<'EOF' > combiner.sh
+#!/bin/bash
+
+FILES=$*
+SUM=0
+COUNT=0
+
+for file in $*
+do
+    RES=($(awk '{ sum += $1 } END { print sum,NR }' $file))
+    echo "${RES[0]} ${RES[1]}"
+    SUM=$(($SUM+${RES[0]}))
+    COUNT=$(($COUNT+${RES[1]}))
+done
+echo "SUM  : $SUM"
+echo "COUNT: $COUNT"
+exit 0
+EOF


Property changes on: branches/release-0.94/tests/stress/local_cluster/simple_MapRed.setup.sh
___________________________________________________________________
Added: svn:executable
   + *

Added: branches/release-0.94/tests/stress/local_cluster/simple_MapRed.stdout
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/simple_MapRed.stdout	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/simple_MapRed.stdout	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,3 @@
+Swift 0.94.1 RC2 swift-r6895 cog-r3765
+
+RunID: 20130820-2209-384c1ky1

Added: branches/release-0.94/tests/stress/local_cluster/simple_MapRed.swift
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/simple_MapRed.swift	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/simple_MapRed.swift	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,30 @@
+type file;
+type script;
+
+app (file out, file err) gen_data (script run, int recsize)
+{
+    bash @run recsize stdout=@out stderr=@err;
+}
+
+app (file out, file err) comb_data (script comb, file array[])
+{
+    bash @comb @array stdout=@out stderr=@err;
+}
+
+
+file tgen_out[] <simple_mapper; prefix="tgen", suffix=".out">;
+file tgen_err[] <simple_mapper; prefix="tgen", suffix=".err">;
+
+script wrapper <"teragen_wrap.sh">;
+int loop = @toInt(@arg("loops","10"));
+int fsize = @toInt(@arg("recsize","1")); # records per file = SLICESIZE * recsize (default 10^4)
+string dir = @arg("dir", "./");
+
+foreach item,i in [0:loop-1] {
+	(tgen_out[i], tgen_err[i]) = gen_data(wrapper, fsize);
+}
+
+script combine <"combiner.sh">;
+file final <"final_result">;
+file errs <"err_file">;
+(final, errs) = comb_data(combine, tgen_out);

Added: branches/release-0.94/tests/stress/local_cluster/sites.template.xml
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/sites.template.xml	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/sites.template.xml	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,65 @@
+<config>
+
+  <pool handle="uc3">
+    <execution provider="coaster" url="uc3-sub.uchicago.edu" jobmanager="ssh-cl:condor"/>
+    <profile namespace="karajan" key="jobThrottle">10.00</profile>
+    <profile namespace="karajan" key="initialScore">10000</profile>
+    <profile namespace="globus"  key="jobsPerNode">1</profile>
+    <profile namespace="globus"  key="maxtime">3600</profile>
+    <profile namespace="globus"  key="maxWalltime">00:15:00</profile>
+    <profile namespace="globus"  key="highOverAllocation">100</profile>
+    <profile namespace="globus"  key="lowOverAllocation">100</profile>
+    <profile namespace="globus"  key="slots">1000</profile>
+    <profile namespace="globus"  key="maxNodes">1</profile>
+    <profile namespace="globus"  key="nodeGranularity">1</profile>
+    <profile namespace="globus"  key="condor.+AccountingGroup">"group_friends.{uc3.USER}"</profile>
+    <profile namespace="globus"  key="jobType">nonshared</profile>
+    <workdirectory>.</workdirectory>
+  </pool>
+
+  <pool handle="beagle">
+    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="login4.beagle.ci.uchicago.edu"/>
+    <profile namespace="globus" key="jobsPerNode">24</profile>
+    <profile namespace="globus" key="lowOverAllocation">100</profile>
+    <profile namespace="globus" key="highOverAllocation">100</profile>
+    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24;pbs.resource_list=advres=wilde.1768</profile>
+    <profile namespace="globus" key="maxtime">3600</profile>
+    <profile namespace="globus" key="maxWalltime">00:15:00</profile>
+    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/{beagle.USER}/swiftwork</profile>
+    <profile namespace="globus" key="slots">5</profile>
+    <profile namespace="globus" key="maxnodes">1</profile>
+    <profile namespace="globus" key="nodeGranularity">1</profile>
+    <profile namespace="karajan" key="jobThrottle">4.80</profile>
+    <profile namespace="karajan" key="initialScore">10000</profile>
+    <workdirectory>/tmp/{beagle.USER}/swiftwork</workdirectory>
+  </pool>
+
+  <pool handle="midway">
+    <execution provider="coaster" jobmanager="local:slurm"/>
+    <profile namespace="globus" key="queue">sandyb</profile>
+    <profile namespace="globus" key="jobsPerNode">16</profile>
+    <profile namespace="globus" key="maxWalltime">00:15:00</profile>
+    <profile namespace="globus" key="maxTime">3600</profile>
+    <profile namespace="globus" key="highOverAllocation">100</profile>
+    <profile namespace="globus" key="lowOverAllocation">100</profile>
+    <profile namespace="globus" key="slots">4</profile>
+    <profile namespace="globus" key="maxNodes">1</profile>
+    <profile namespace="globus" key="nodeGranularity">1</profile>
+    <profile namespace="karajan" key="jobThrottle">.64</profile>
+    <profile namespace="karajan" key="initialScore">10000</profile>
+    <workdirectory>/tmp/{mid.USER}</workdirectory>
+  </pool>
+
+  <pool handle="mcs">
+    <execution provider="coaster" jobmanager="ssh-cl:local" url="thwomp.mcs.anl.gov"/>
+    <profile namespace="globus" key="jobsPerNode">8</profile>
+    <profile namespace="globus" key="lowOverAllocation">100</profile>
+    <profile namespace="globus" key="highOverAllocation">100</profile>
+    <profile namespace="globus" key="maxtime">3600</profile>
+    <profile namespace="globus" key="maxWalltime">00:15:00</profile>
+    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
+    <profile namespace="karajan" key="initialScore">10000</profile>
+    <workdirectory>/sandbox/{mcs.USER}/swiftwork</workdirectory>
+  </pool>
+
+</config>

Added: branches/release-0.94/tests/stress/local_cluster/sites.xml
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/sites.xml	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/sites.xml	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,65 @@
+<config>
+
+  <pool handle="uc3">
+    <execution provider="coaster" url="uc3-sub.uchicago.edu" jobmanager="ssh-cl:condor"/>
+    <profile namespace="karajan" key="jobThrottle">10.00</profile>
+    <profile namespace="karajan" key="initialScore">10000</profile>
+    <profile namespace="globus"  key="jobsPerNode">1</profile>
+    <profile namespace="globus"  key="maxtime">3600</profile>
+    <profile namespace="globus"  key="maxWalltime">00:15:00</profile>
+    <profile namespace="globus"  key="highOverAllocation">100</profile>
+    <profile namespace="globus"  key="lowOverAllocation">100</profile>
+    <profile namespace="globus"  key="slots">1000</profile>
+    <profile namespace="globus"  key="maxNodes">1</profile>
+    <profile namespace="globus"  key="nodeGranularity">1</profile>
+    <profile namespace="globus"  key="condor.+AccountingGroup">"group_friends.yadunand"</profile>
+    <profile namespace="globus"  key="jobType">nonshared</profile>
+    <workdirectory>.</workdirectory>
+  </pool>
+
+  <pool handle="beagle">
+    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="login4.beagle.ci.uchicago.edu"/>
+    <profile namespace="globus" key="jobsPerNode">24</profile>
+    <profile namespace="globus" key="lowOverAllocation">100</profile>
+    <profile namespace="globus" key="highOverAllocation">100</profile>
+    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24;pbs.resource_list=advres=wilde.1768</profile>
+    <profile namespace="globus" key="maxtime">3600</profile>
+    <profile namespace="globus" key="maxWalltime">00:15:00</profile>
+    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/yadunandb/swiftwork</profile>
+    <profile namespace="globus" key="slots">5</profile>
+    <profile namespace="globus" key="maxnodes">1</profile>
+    <profile namespace="globus" key="nodeGranularity">1</profile>
+    <profile namespace="karajan" key="jobThrottle">4.80</profile>
+    <profile namespace="karajan" key="initialScore">10000</profile>
+    <workdirectory>/tmp/yadunandb/swiftwork</workdirectory>
+  </pool>
+
+  <pool handle="midway">
+    <execution provider="coaster" jobmanager="local:slurm"/>
+    <profile namespace="globus" key="queue">sandyb</profile>
+    <profile namespace="globus" key="jobsPerNode">16</profile>
+    <profile namespace="globus" key="maxWalltime">00:15:00</profile>
+    <profile namespace="globus" key="maxTime">3600</profile>
+    <profile namespace="globus" key="highOverAllocation">100</profile>
+    <profile namespace="globus" key="lowOverAllocation">100</profile>
+    <profile namespace="globus" key="slots">4</profile>
+    <profile namespace="globus" key="maxNodes">1</profile>
+    <profile namespace="globus" key="nodeGranularity">1</profile>
+    <profile namespace="karajan" key="jobThrottle">.64</profile>
+    <profile namespace="karajan" key="initialScore">10000</profile>
+    <workdirectory>/tmp/yadunand</workdirectory>
+  </pool>
+
+  <pool handle="mcs">
+    <execution provider="coaster" jobmanager="ssh-cl:local" url="thwomp.mcs.anl.gov"/>
+    <profile namespace="globus" key="jobsPerNode">8</profile>
+    <profile namespace="globus" key="lowOverAllocation">100</profile>
+    <profile namespace="globus" key="highOverAllocation">100</profile>
+    <profile namespace="globus" key="maxtime">3600</profile>
+    <profile namespace="globus" key="maxWalltime">00:15:00</profile>
+    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
+    <profile namespace="karajan" key="initialScore">10000</profile>
+    <workdirectory>/sandbox/yadunand/swiftwork</workdirectory>
+  </pool>
+
+</config>

Added: branches/release-0.94/tests/stress/local_cluster/swift.properties
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/swift.properties	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/swift.properties	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,8 @@
+use.provider.staging=true
+use.wrapper.staging=false
+wrapperlog.always.transfer=true
+execution.retries=0
+lazy.errors=false
+provider.staging.pin.swiftfiles=false
+sitedir.keep=true
+tcp.port.range=50000,51000
\ No newline at end of file

Added: branches/release-0.94/tests/stress/local_cluster/teragen_wrap.sh
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/teragen_wrap.sh	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/teragen_wrap.sh	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# With ARG1=100 and SLICESIZE=10000, this script would generate
+# 10^6 records; ARG1 defaults to 1 (i.e. 10^4 records).
+ARG1=1
+[ ! -z $1 ] && ARG1=$1
+
+FILE="input_$RANDOM.txt"
+LOWERLIMIT=0
+UPPERLIMIT=1000000 # 10^6
+SLICESIZE=10000     # 10^4 records padded to 100B would result in 1MB file
+#SLICESIZE=1000     # 10^3 records; if padded to 100B, would result in a 100KB file
+
+shuf -i $LOWERLIMIT-$UPPERLIMIT -n $(($SLICESIZE*$ARG1)) | awk '{printf "%-99s\n", $0}'
+exit 0


Property changes on: branches/release-0.94/tests/stress/local_cluster/teragen_wrap.sh
___________________________________________________________________
Added: svn:executable
   + *

Added: branches/release-0.94/tests/stress/local_cluster/title.txt
===================================================================
--- branches/release-0.94/tests/stress/local_cluster/title.txt	                        (rev 0)
+++ branches/release-0.94/tests/stress/local_cluster/title.txt	2013-08-20 22:12:32 UTC (rev 6905)
@@ -0,0 +1,4 @@
+Simple MapReduce style job for Local cluster testing
+| The first map stage generates a large number of random numbers
+| The reduce stage aggregates the results and outputs the count
+| and sum




More information about the Swift-commit mailing list