[Swift-commit] r8006 - in trunk/tests: . bugs cdm/ps cdm/ps/pinned documentation/tutorial functions language/should-not-work language/working language-behaviour/IO language-behaviour/arrays language-behaviour/broken language-behaviour/cleanup language-behaviour/compounds language-behaviour/control_structures language-behaviour/datatypes language-behaviour/logic language-behaviour/mappers language-behaviour/math language-behaviour/params language-behaviour/procedures language-behaviour/procedures/dynamic-output/provider-staging-coasters language-behaviour/procedures/dynamic-output/provider-staging-local language-behaviour/procedures/dynamic-output/swift-staging language-behaviour/strings language-behaviour/variables local mpi/beagle/long mpi/beagle/short mpi/crow/long mpi/crow/short mpi/fusion/long mpi/fusion/short mpi/local multi_remote sites/beagle sites/blues sites/bridled sites/ci sites/communicado sites/fusion sites/local sites/local-coasters sites/mac-frisbee sites/mcs sites/midway sites/multiple_coaster_pools sites/osgconnect sites/raven sites/stampede sites/todo/crow sites/todo/fusion sites/todo/ibicluster sites/todo/intrepid sites/todo/surveyor sites/uc3 stress/IO/bagOnodes stress/IO/beagle stress/IO/multiple stress/IO/uc3 stress/apps/modis_beagle stress/apps/modis_local stress/apps/modis_midway stress/apps/modis_multiple stress/apps/modis_uc3 stress/long_runs stress/remote_sanity/beagle stress/remote_sanity/mac-frisbee stress/remote_sanity/mcs stress/remote_sanity/midway stress/remote_sanity/uc3

hategan at ci.uchicago.edu
Sat Jul 12 03:47:54 CDT 2014


Author: hategan
Date: 2014-07-12 03:50:14 -0500 (Sat, 12 Jul 2014)
New Revision: 8006

Added:
   trunk/tests/bugs/swift.conf
   trunk/tests/cdm/ps/pinned/swift.conf
   trunk/tests/cdm/ps/swift.conf
   trunk/tests/documentation/tutorial/swift.conf
   trunk/tests/functions/swift.conf
   trunk/tests/language-behaviour/IO/swift.conf
   trunk/tests/language-behaviour/arrays/swift.conf
   trunk/tests/language-behaviour/broken/swift.conf
   trunk/tests/language-behaviour/cleanup/swift.conf
   trunk/tests/language-behaviour/compounds/swift.conf
   trunk/tests/language-behaviour/control_structures/swift.conf
   trunk/tests/language-behaviour/datatypes/swift.conf
   trunk/tests/language-behaviour/logic/swift.conf
   trunk/tests/language-behaviour/mappers/swift.conf
   trunk/tests/language-behaviour/math/swift.conf
   trunk/tests/language-behaviour/params/swift.conf
   trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/swift.conf
   trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/swift.conf
   trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/swift.conf
   trunk/tests/language-behaviour/procedures/swift.conf
   trunk/tests/language-behaviour/strings/swift.conf
   trunk/tests/language-behaviour/variables/swift.conf
   trunk/tests/language/should-not-work/swift.conf
   trunk/tests/language/working/swift.conf
   trunk/tests/local/swift.conf
   trunk/tests/mpi/beagle/long/swift.conf
   trunk/tests/mpi/beagle/short/swift.conf
   trunk/tests/mpi/crow/long/swift.conf
   trunk/tests/mpi/crow/short/swift.conf
   trunk/tests/mpi/fusion/long/swift.conf
   trunk/tests/mpi/fusion/short/swift.conf
   trunk/tests/mpi/local/swift.conf
   trunk/tests/multi_remote/swift.conf
   trunk/tests/sites/beagle/swift.conf
   trunk/tests/sites/blues/swift.conf
   trunk/tests/sites/bridled/swift.conf
   trunk/tests/sites/ci/swift.conf
   trunk/tests/sites/communicado/swift.conf
   trunk/tests/sites/fusion/swift.conf
   trunk/tests/sites/local-coasters/swift.conf
   trunk/tests/sites/local/swift.conf
   trunk/tests/sites/mac-frisbee/swift.conf
   trunk/tests/sites/mcs/swift.conf
   trunk/tests/sites/midway/swift.conf
   trunk/tests/sites/multiple_coaster_pools/swift.conf
   trunk/tests/sites/osgconnect/swift.conf
   trunk/tests/sites/raven/swift.conf
   trunk/tests/sites/stampede/swift.conf
   trunk/tests/sites/todo/crow/swift.conf
   trunk/tests/sites/todo/fusion/swift.conf
   trunk/tests/sites/todo/ibicluster/swift.conf
   trunk/tests/sites/todo/intrepid/swift.conf
   trunk/tests/sites/todo/surveyor/swift.conf
   trunk/tests/sites/uc3/swift.conf
   trunk/tests/stress/IO/bagOnodes/swift.conf
   trunk/tests/stress/IO/beagle/swift.conf
   trunk/tests/stress/IO/multiple/swift.conf
   trunk/tests/stress/IO/uc3/swift.conf
   trunk/tests/stress/apps/modis_beagle/swift.conf
   trunk/tests/stress/apps/modis_local/swift.conf
   trunk/tests/stress/apps/modis_midway/swift.conf
   trunk/tests/stress/apps/modis_multiple/swift.conf
   trunk/tests/stress/apps/modis_uc3/swift.conf
   trunk/tests/stress/long_runs/swift.conf
   trunk/tests/stress/remote_sanity/beagle/swift.conf
   trunk/tests/stress/remote_sanity/mac-frisbee/swift.conf
   trunk/tests/stress/remote_sanity/mcs/swift.conf
   trunk/tests/stress/remote_sanity/midway/swift.conf
   trunk/tests/stress/remote_sanity/uc3/swift.conf
Removed:
   trunk/tests/bugs/swift.properties
   trunk/tests/cdm/ps/pinned/sites.template.xml
   trunk/tests/cdm/ps/pinned/swift.properties
   trunk/tests/cdm/ps/pinned/tc.template.data
   trunk/tests/cdm/ps/sites.template.xml
   trunk/tests/cdm/ps/swift.properties
   trunk/tests/cdm/ps/tc.template.data
   trunk/tests/documentation/tutorial/swift.properties
   trunk/tests/functions/swift.properties
   trunk/tests/language-behaviour/IO/swift.properties
   trunk/tests/language-behaviour/arrays/swift.properties
   trunk/tests/language-behaviour/broken/swift.properties
   trunk/tests/language-behaviour/cleanup/swift.properties
   trunk/tests/language-behaviour/compounds/swift.properties
   trunk/tests/language-behaviour/control_structures/swift.properties
   trunk/tests/language-behaviour/datatypes/swift.properties
   trunk/tests/language-behaviour/logic/swift.properties
   trunk/tests/language-behaviour/mappers/swift.properties
   trunk/tests/language-behaviour/math/swift.properties
   trunk/tests/language-behaviour/params/swift.properties
   trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/sites.template.xml
   trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/swift.properties
   trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/tc.template.data
   trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/sites.template.xml
   trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/swift.properties
   trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/tc.template.data
   trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/sites.template.xml
   trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/swift.properties
   trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/tc.template.data
   trunk/tests/language-behaviour/procedures/swift.properties
   trunk/tests/language-behaviour/strings/swift.properties
   trunk/tests/language-behaviour/variables/swift.properties
   trunk/tests/language/should-not-work/swift.properties
   trunk/tests/language/working/swift.properties
   trunk/tests/local/swift.properties
   trunk/tests/mpi/beagle/long/sites.template.xml
   trunk/tests/mpi/beagle/long/tc.template.data
   trunk/tests/mpi/beagle/short/sites.template.xml
   trunk/tests/mpi/beagle/short/tc.template.data
   trunk/tests/mpi/crow/long/sites.template.xml
   trunk/tests/mpi/crow/long/tc.template.data
   trunk/tests/mpi/crow/short/sites.template.xml
   trunk/tests/mpi/crow/short/tc.template.data
   trunk/tests/mpi/fusion/long/sites.template.xml
   trunk/tests/mpi/fusion/long/tc.template.data
   trunk/tests/mpi/fusion/short/sites.template.xml
   trunk/tests/mpi/fusion/short/tc.template.data
   trunk/tests/mpi/local/sites.template.xml
   trunk/tests/mpi/local/tc.template.data
   trunk/tests/multi_remote/sites.template.xml
   trunk/tests/multi_remote/swift.properties
   trunk/tests/multi_remote/tc.template.data
   trunk/tests/sites/beagle/sites.template.xml
   trunk/tests/sites/beagle/swift.properties
   trunk/tests/sites/beagle/tc.template.data
   trunk/tests/sites/blues/sites.template.xml
   trunk/tests/sites/blues/swift.properties
   trunk/tests/sites/blues/tc.template.data
   trunk/tests/sites/bridled/sites.template.xml
   trunk/tests/sites/bridled/swift.properties
   trunk/tests/sites/bridled/tc.template.data
   trunk/tests/sites/ci/sites.template.xml
   trunk/tests/sites/ci/swift.properties
   trunk/tests/sites/ci/tc.template.data
   trunk/tests/sites/communicado/sites.template.xml
   trunk/tests/sites/communicado/swift.properties
   trunk/tests/sites/communicado/tc.template.data
   trunk/tests/sites/fusion/sites.template.xml
   trunk/tests/sites/fusion/swift.properties
   trunk/tests/sites/fusion/tc.template.data
   trunk/tests/sites/local-coasters/sites.template.xml
   trunk/tests/sites/local-coasters/tc.template.data
   trunk/tests/sites/local/sites.template.xml
   trunk/tests/sites/local/tc.template.data
   trunk/tests/sites/mac-frisbee/sites.template.xml
   trunk/tests/sites/mac-frisbee/swift.properties
   trunk/tests/sites/mac-frisbee/tc.template.data
   trunk/tests/sites/mcs/sites.template.xml
   trunk/tests/sites/mcs/swift.properties
   trunk/tests/sites/mcs/tc.template.data
   trunk/tests/sites/midway/sites.template.xml
   trunk/tests/sites/midway/swift.properties
   trunk/tests/sites/midway/tc.template.data
   trunk/tests/sites/multiple_coaster_pools/sites.template.xml
   trunk/tests/sites/multiple_coaster_pools/tc.template.data
   trunk/tests/sites/osgconnect/sites.template.xml
   trunk/tests/sites/osgconnect/swift.properties
   trunk/tests/sites/osgconnect/tc.template.data
   trunk/tests/sites/raven/sites.template.xml
   trunk/tests/sites/raven/swift.properties
   trunk/tests/sites/raven/tc.template.data
   trunk/tests/sites/stampede/sites.template.xml
   trunk/tests/sites/stampede/swift.properties
   trunk/tests/sites/stampede/tc.template.data
   trunk/tests/sites/todo/crow/sites.template.xml
   trunk/tests/sites/todo/crow/tc.template.data
   trunk/tests/sites/todo/fusion/sites.template.xml
   trunk/tests/sites/todo/fusion/tc.template.data
   trunk/tests/sites/todo/ibicluster/sites.template.xml
   trunk/tests/sites/todo/ibicluster/tc.template.data
   trunk/tests/sites/todo/intrepid/sites.template.xml
   trunk/tests/sites/todo/intrepid/tc.template.data
   trunk/tests/sites/todo/surveyor/sites.template.xml
   trunk/tests/sites/todo/surveyor/tc.template.data
   trunk/tests/sites/uc3/sites.template.xml
   trunk/tests/sites/uc3/swift.properties
   trunk/tests/sites/uc3/tc.template.data
   trunk/tests/stress/IO/bagOnodes/sites.template.xml
   trunk/tests/stress/IO/bagOnodes/swift.properties
   trunk/tests/stress/IO/bagOnodes/tc.template.data
   trunk/tests/stress/IO/beagle/sites.template.xml
   trunk/tests/stress/IO/beagle/swift.properties
   trunk/tests/stress/IO/beagle/tc.template.data
   trunk/tests/stress/IO/multiple/sites.template.xml
   trunk/tests/stress/IO/multiple/swift.properties
   trunk/tests/stress/IO/multiple/tc.template.data
   trunk/tests/stress/IO/uc3/sites.template.xml
   trunk/tests/stress/IO/uc3/swift.properties
   trunk/tests/stress/IO/uc3/tc.template.data
   trunk/tests/stress/apps/modis_beagle/sites.template.xml
   trunk/tests/stress/apps/modis_beagle/swift.properties
   trunk/tests/stress/apps/modis_beagle/tc.template.data
   trunk/tests/stress/apps/modis_local/sites.template.xml
   trunk/tests/stress/apps/modis_local/swift.properties
   trunk/tests/stress/apps/modis_local/tc.template.data
   trunk/tests/stress/apps/modis_midway/sites.template.xml
   trunk/tests/stress/apps/modis_midway/swift.properties
   trunk/tests/stress/apps/modis_midway/tc.template.data
   trunk/tests/stress/apps/modis_multiple/sites.template.xml
   trunk/tests/stress/apps/modis_multiple/swift.properties
   trunk/tests/stress/apps/modis_multiple/tc.template.data
   trunk/tests/stress/apps/modis_uc3/sites.template.xml
   trunk/tests/stress/apps/modis_uc3/swift.properties
   trunk/tests/stress/apps/modis_uc3/tc.template.data
   trunk/tests/stress/long_runs/sites.template.xml
   trunk/tests/stress/long_runs/swift.properties
   trunk/tests/stress/long_runs/tc.template.data
   trunk/tests/stress/remote_sanity/beagle/sites.template.xml
   trunk/tests/stress/remote_sanity/beagle/swift.properties
   trunk/tests/stress/remote_sanity/beagle/tc.template.data
   trunk/tests/stress/remote_sanity/mac-frisbee/sites.template.xml
   trunk/tests/stress/remote_sanity/mac-frisbee/swift.properties
   trunk/tests/stress/remote_sanity/mac-frisbee/tc.template.data
   trunk/tests/stress/remote_sanity/mcs/sites.template.xml
   trunk/tests/stress/remote_sanity/mcs/swift.properties
   trunk/tests/stress/remote_sanity/mcs/tc.template.data
   trunk/tests/stress/remote_sanity/midway/sites.template.xml
   trunk/tests/stress/remote_sanity/midway/swift.properties
   trunk/tests/stress/remote_sanity/midway/tc.template.data
   trunk/tests/stress/remote_sanity/uc3/sites.template.xml
   trunk/tests/stress/remote_sanity/uc3/swift.properties
   trunk/tests/stress/remote_sanity/uc3/tc.template.data
Modified:
   trunk/tests/suite.sh
Log:
converted test sites/tc/prop files to .conf files
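
For reference, the swift.properties -> swift.conf key renames applied by this
conversion, read off the hunks below (a representative subset, not an
exhaustive list):

    site=local                       ->  site: local
    sitedir.keep                     ->  keepSiteDir
    lazy.errors                      ->  lazyErrors
    execution.retries                ->  executionRetries
    caching.algorithm                ->  cachingAlgorithm
    throttle.submit                  ->  jobSubmitThrottle
    throttle.host.submit             ->  hostJobSubmitThrottle
    throttle.transfers               ->  fileTransfersThrottle
    throttle.file.operations         ->  fileOperationsThrottle
    throttle.score.job.factor        ->  siteScoreThrottlingFactor
    provenance.log                   ->  logProvenance
    replication.enabled              ->  replicationEnabled
    replication.min.queue.time       ->  replicationMinQueueTime
    replication.limit                ->  replicationLimit
    wrapperlog.always.transfer       ->  alwaysTransferWrapperLog
    foreach.max.threads              ->  maxForeachThreads
    provider.staging.pin.swiftfiles  ->  providerStagingPinSwiftFiles
    use.provider.staging=true        ->  staging: "local"
    taskThrottle                     ->  site.<name>.maxParallelTasks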

Added: trunk/tests/bugs/swift.conf
===================================================================
--- trunk/tests/bugs/swift.conf	                        (rev 0)
+++ trunk/tests/bugs/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/bugs/swift.properties
===================================================================
--- trunk/tests/bugs/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/bugs/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Deleted: trunk/tests/cdm/ps/pinned/sites.template.xml
===================================================================
--- trunk/tests/cdm/ps/pinned/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/cdm/ps/pinned/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,24 +0,0 @@
-<config>
-
-  <pool handle="localhost" sysinfo="INTEL32::LINUX">
-    <gridftp url="local://localhost" />
-    <execution provider="local" url="none" />
-    <workdirectory>_WORK_</workdirectory>
-    <profile namespace="swift" key="stagingMethod">file</profile>
-  </pool>
-
-  <pool handle="coasterslocal">
-    <filesystem provider="local" />
-    <execution provider="coaster" jobmanager="local:local"/>
-    <profile namespace="globus"   key="internalHostname">_HOST_</profile>
-    <profile namespace="karajan"  key="jobthrottle">2.55</profile>
-    <profile namespace="karajan"  key="initialScore">10000</profile>
-    <profile namespace="globus"   key="jobsPerNode">4</profile>
-    <profile namespace="globus"   key="slots">8</profile>
-    <profile namespace="globus"   key="maxTime">1000</profile>
-    <profile namespace="globus"   key="nodeGranularity">1</profile>
-    <profile namespace="globus"   key="maxNodes">4</profile>
-    <workdirectory>_WORK_</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/cdm/ps/pinned/swift.conf
===================================================================
--- trunk/tests/cdm/ps/pinned/swift.conf	                        (rev 0)
+++ trunk/tests/cdm/ps/pinned/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,87 @@
+include "${swift.home}/etc/swift.conf"
+
+site.localhost {
+	execution {
+		type: "local"
+		URL: "none"
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	OS: "INTEL32::LINUX"
+	app.cat {
+		executable: "/bin/cat"
+	}
+
+	app.echo {
+		executable: "/bin/echo"
+	}
+
+	app.grep {
+		executable: "/bin/grep"
+	}
+
+	app.ls {
+		executable: "/bin/ls"
+	}
+
+	app.paste {
+		executable: "/bin/paste"
+	}
+
+	app.sort {
+		executable: "/bin/sort"
+	}
+
+}
+
+site.coasterslocal {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:local"
+		options {
+			internalHostname: "_HOST_"
+			nodeGranularity: 1
+			maxNodesPerJob: 4
+			maxJobs: 8
+			tasksPerNode: 4
+			jobMaxTime: "00:16:40"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	maxParallelTasks: 255
+	initialParallelTasks: 255
+	app.cp {
+		executable: "/bin/cp"
+	}
+
+	app.merge {
+		executable: "_DIR_/merge.sh"
+	}
+
+}
+
+lazyErrors: false
+executionRetries: 2
+cachingAlgorithm: "LRU"
+jobSubmitThrottle: 4
+hostJobSubmitThrottle: 2
+fileTransfersThrottle: 4
+fileOperationsThrottle: 8
+siteScoreThrottlingFactor: 0.2
+keepSiteDir: false
+logProvenance: false
+replicationEnabled: false
+replicationMinQueueTime: 60
+replicationLimit: 3
+providerStagingPinSwiftFiles: true
+alwaysTransferWrapperLog: false
+maxForeachThreads: 16384
+staging: "local"

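Note how the coaster pool profiles from the deleted sites.template.xml (two
hunks above) map into the execution.options block of the new file: globus
jobsPerNode -> tasksPerNode, maxNodes -> maxNodesPerJob, slots -> maxJobs,
nodeGranularity carries over unchanged, and maxTime=1000 (seconds) becomes the
clock string jobMaxTime: "00:16:40". The karajan jobthrottle=2.55 /
initialScore=10000 pair surfaces as maxParallelTasks/initialParallelTasks: 255,
the old throttle apparently having been expressed in hundreds of tasks.
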
Deleted: trunk/tests/cdm/ps/pinned/swift.properties
===================================================================
--- trunk/tests/cdm/ps/pinned/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/cdm/ps/pinned/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,353 +0,0 @@
-sites.file=${swift.home}/etc/sites.xml
-tc.file=${swift.home}/etc/tc.data
-
-#
-# The host name of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the host name of the local machine.
-# However, if the machine host name is improperly configured or if
-# it does not represent a valid DNS entry, certain services (such as
-# GRAM) will not be able to send job status notifications back to
-# the client. The value of this property can be an IP address.
-#
-# Format:
-#    hostname=string
-#
-
-
-#hostname=localhost
-
-#
-# A TCP port range can be specified to restrict the ports on which GRAM
-# callback services are started. This is likely needed if your submit
-# host is behind a firewall, in which case the firewall should be
-# configured to allow incoming connections on ports in the range.
-#
-# Format:
-#     tcp.port.range=start,end
-#
-
-#tcp.port.range=50000,50100
-
-#
-# false	- means an error will be immediately reported and cause the
-# 		workflow to abort. At this time remote jobs that are already
-#		running will not be canceled
-# true	- means that Swift will try to do as much work as possible and
-#		report all errors encountered at the end. However, "errors"
-#		here only applies to job execution errors. Certain errors
-#		that are related to the Swift implementation (should such
-#		errors occur) will still be reported eagerly.
-#
-# Default: false
-#
-lazy.errors=false
-
-#
-# What algorithm to use for caching of remote files. LRU (as in what
-# files to purge) is the only implementation right now. One can set
-# a target size (in bytes) for a host by using the swift:storagesize
-# profile for a host in sites.xml
-#
-# Default: LRU
-#
-caching.algorithm=LRU
-
-#
-# true       - generate a provenance graph in .dot format (Swift will
-#			 choose a random file name)
-# false      - do not generate a provenance graph
-# <filename> - generate a provenance graph in the given file name
-#
-# Default: false
-#
-pgraph=false
-
-
-#
-# graph properties for the provenance graph (.dot specific)
-#
-# Default: splines="compound", rankdir="TB"
-#
-pgraph.graph.options=splines="compound", rankdir="TB"
-
-
-#
-# node properties for the provenance graph (.dot specific)
-#
-# Default: color="seagreen", style="filled"
-#
-pgraph.node.options=color="seagreen", style="filled"
-
-#
-# true	- clustering of small jobs is enabled. Clustering works in the
-#       following way: If a job is clusterable (meaning that it has the
-#       GLOBUS::maxwalltime profile specified in tc.data and its value
-#       is less than the value of the "clustering.min.time" property) it will
-#       be put in a clustering queue. The queue is processed at intervals
-#       specified by the "clustering.queue.delay" property. The processing
-#       of the clustering queue consists of selecting compatible jobs and
-#		grouping them in clusters whose max wall time does not exceed twice
-#       the value of the "clustering.min.time" property. Two or more jobs are
-#       considered compatible if they share the same site and do not have
-#       conflicting profiles (e.g. different values for the same environment
-#       variable).
-# false	- clustering of small jobs is disabled.
-#
-# Default: false
-#
-clustering.enabled=false
-
-
-#
-# <seconds>	- the intervals at which the clustering queue is processed
-#
-# Default: 4
-#
-clustering.queue.delay=4
-
-#
-# <seconds>	- the threshold time for clustering
-#
-# Default: 60
-#
-clustering.min.time=60
-
-#
-# Kickstart is a useful tool that can be used to gather various information
-# about a remote process. Before it can be used it must be installed on the
-# remote site and the corresponding entry be set in the sites file.
-# This option allows controlling of how Swift uses Kickstart. The following
-# values are possible:
-# false - do not use Kickstart
-# true  - use Kickstart. If a job is scheduled on a site that does not have
-#       Kickstart installed, that job will fail.
-# maybe - Use Kickstart if installed (i.e. the entry is present in the sites
-#       file)
-#
-# Default: maybe
-#
-
-kickstart.enabled=maybe
-
-#
-# Indicates when Kickstart records should be fetched from the remote site:
-# true	- always transfer Kickstart records if Kickstart was used (see
-#		kickstart.enabled)
-# false	- only transfer Kickstart records if the job fails
-#
-# Default: false
-#
-
-kickstart.always.transfer=false
-
-#
-# Indicates when wrapper logs should be fetched from the remote site:
-# true	- always transfer wrapper logs
-# false	- only transfer wrapper logs if the job fails
-#
-# Default: false
-#
-
-wrapperlog.always.transfer=false
-
-###########################################################################
-#                          Throttling options                             #
-###########################################################################
-#
-# For the throttling parameters, valid values are either a positive integer
-# or "off" (without the quotes).
-#
-
-#
-# Limits the number of concurrent submissions for a workflow instance. This
-# throttle only limits the number of concurrent tasks (jobs) that are being
-# sent to sites, not the total number of concurrent jobs that can be run.
-# The submission stage in GRAM is one of the most CPU expensive stages (due
-# mostly to the mutual authentication and delegation). Having too many
-# concurrent submissions can overload either or both the submit host CPU
-# and the remote host/head node causing degraded performance.
-#
-# Default: 4
-#
-
-throttle.submit=4
-#throttle.submit=off
-
-#
-# Limits the number of concurrent submissions for any single site that Swift
-# sends jobs to. In other words, it guarantees that, for any given site, no
-# more jobs than the value of this throttle will concurrently be in the
-# process of being submitted.
-#
-# Default: 2
-#
-
-throttle.host.submit=2
-#throttle.host.submit=off
-
-#
-# The Swift scheduler has the ability to limit the number of concurrent jobs
-# allowed on a site based on the performance history of that site. Each site
-# is assigned a score (initially 1), which can increase or decrease based
-# on whether the site yields successful or faulty job runs. The score for a
-# site can take values in the (0.1, 100) interval. The number of allowed jobs
-# is calculated using the following formula:
-# 	2 + score*throttle.score.job.factor
-# This means a site will always be allowed at least two concurrent jobs and
-# at most 2 + 100*throttle.score.job.factor. With a default of 4 this means
-# at least 2 jobs and at most 402.
-#
-# Default: 4
-#
-
-throttle.score.job.factor=0.2
-#throttle.score.job.factor=off
-
-
-#
-# Limits the total number of concurrent file transfers that can happen at any
-# given time. File transfers consume bandwidth. Too many concurrent transfers
-# can cause the network to be overloaded preventing various other signalling
-# traffic from flowing properly.
-#
-# Default: 4
-#
-
-throttle.transfers=4
-#throttle.transfers=off
-
-# Limits the total number of concurrent file operations that can happen at any
-# given time. File operations (like transfers) require an exclusive connection
-# to a site. These connections can be expensive to establish. A large number
-# of concurrent file operations may cause Swift to attempt to establish many
-# such expensive connections to various sites. Limiting the number of concurrent
-# file operations causes Swift to use a small number of cached connections and
-# achieve better overall performance.
-#
-# Default: 8
-#
-
-throttle.file.operations=8
-#throttle.file.operations=off
-
-# Indicates whether the working directory on the remote site should be
-# left intact even when the workflow completes successfully. This can be
-# used to inspect the site working directory for debugging purposes.
-#
-# Default: false
-#
-
-sitedir.keep=false
-
-# number of times a job will be retried if it fails (giving a maximum of
-# 1 + execution.retries attempts at execution)
-#
-
-execution.retries=2
-
-
-# Enables/disables replication. Replication is used to deal with jobs sitting
-# in batch queues for abnormally large amounts of time. If replication is enabled
-# and certain conditions are met, Swift creates and submits replicas of jobs, and
-# allows multiple instances of a job to compete.
-#
-
-replication.enabled=false
-
-# If replication is enabled, this value specifies the minimum time, in seconds,
-# a job needs to be queued in a batch queue in order to be considered for
-# replication
-#
-
-replication.min.queue.time=60
-
-# The maximum number of replicas that Swift should attempt.
-
-replication.limit=3
-
-#
-# WARNING: This option is deprecated. Please use the hostname option.
-#
-# The IP address of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the IP address of the local machine.
-# However, if the machine has more than one network interface, Swift
-# will pick the first one, which may not be the right choice. It is
-# recommended that this property is set properly before attempting to
-# run jobs through GRAM.
-#
-# Format:
-#    ip.address=x.y.z.w
-#
-
-#ip.address=127.0.0.1
-
-
-# Controls how Swift will communicate the result code of running user programs
-# from workers to the submit side. In files mode, a file
-# indicating success or failure will be created on the site shared filesystem.
-# In provider mode, the execution provider job status will
-# be used. Notably, GRAM2 does not return job statuses correctly, and so
-# provider mode will not work with GRAM2. With other
-# providers, it can be used to reduce the amount of filesystem access compared
-# to files mode.
-#
-# status.mode=files
-
-# Controls how swift will supply parameters to the remote wrapper script.
-# 'args' mode will pass parameters on the command line
-# 'files' mode will pass parameters through an additional input file
-#
-# valid values: args, files
-# Default: files
-#
-# wrapper.parameter.mode=args
-
-# Determines if Swift remote wrappers will be executed by specifying an
-# absolute path, or a path relative to the job initial working directory
-#
-# valid values: absolute, relative
-# wrapper.invocation.mode=absolute
-
-#
-# Limits the number of concurrent iterations that each foreach statement
-# can have at one time. This conserves memory for swift programs that
-# have large numbers of iterations (which would otherwise all be executed
-# in parallel).
-#
-# Default: 1024
-#
-
-foreach.max.threads=16384
-
-# controls whether the log file will contain provenance information
-# enabling this will increase the size of log files, sometimes
-# significantly.
-
-provenance.log=false
-
-# Controls whether file staging is done by swift or by the execution
-# provider. If set to false, the standard swift staging mechanism is
-# used. If set to true, swift does not stage files. Instead, the
-# execution provider is instructed to stage files in and out.
-#
-# Provider staging is experimental.
-#
-# When enabled, and when coasters are used as an execution provider,
-# a staging mechanism can be selected for each site
-# using the swift:stagingMethod site profile in sites.xml. The
-# following is a list of accepted mechanisms:
-#
-# * file:  Staging is done from a filesystem accessible to the
-#          coaster service (typically running on the head node)
-# * proxy: Staging is done from a filesystem accessible to the
-#          client machine that swift is running on, and is proxied
-#          through the coaster service
-# * sfs:   (short for "shared filesystem") Staging is done by
-#          copying files to and from a filesystem accessible
-#          by the compute node (such as an NFS or GPFS mount).
-
-
-use.provider.staging=true
-provider.staging.pin.swiftfiles=true

Deleted: trunk/tests/cdm/ps/pinned/tc.template.data
===================================================================
--- trunk/tests/cdm/ps/pinned/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/cdm/ps/pinned/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,20 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-
-coasterslocal 	cp 		/bin/cp         INSTALLED	INTEL32::LINUX	null
-coasterslocal 	merge 		_DIR_/merge.sh      INSTALLED	INTEL32::LINUX	null

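Each tc.template.data row (sitename, transformation, path, INSTALLED,
platform, profiles) becomes an app.<name> block under the corresponding
site.<name> section in swift.conf, with the platform column surfacing as the
site-level OS key. For example, the row

    localhost   echo   /bin/echo   INSTALLED   INTEL32::LINUX   null

becomes, in swift.conf:

    site.localhost {
        OS: "INTEL32::LINUX"
        app.echo {
            executable: "/bin/echo"
        }
    }
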
Deleted: trunk/tests/cdm/ps/sites.template.xml
===================================================================
--- trunk/tests/cdm/ps/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/cdm/ps/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,24 +0,0 @@
-<config>
-
-  <pool handle="localhost" sysinfo="INTEL32::LINUX">
-    <gridftp url="local://localhost" />
-    <execution provider="local" url="none" />
-    <workdirectory>_WORK_</workdirectory>
-    <profile namespace="swift" key="stagingMethod">file</profile>
-  </pool>
-
-  <pool handle="coasterslocal">
-    <filesystem provider="local" />
-    <execution provider="coaster" jobmanager="local:local"/>
-    <profile namespace="globus"   key="internalHostname">_HOST_</profile>
-    <profile namespace="karajan"  key="jobthrottle">2.55</profile>
-    <profile namespace="karajan"  key="initialScore">10000</profile>
-    <profile namespace="globus"   key="jobsPerNode">4</profile>
-    <profile namespace="globus"   key="slots">8</profile>
-    <profile namespace="globus"   key="maxTime">1000</profile>
-    <profile namespace="globus"   key="nodeGranularity">1</profile>
-    <profile namespace="globus"   key="maxNodes">4</profile>
-    <workdirectory>_WORK_</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/cdm/ps/swift.conf
===================================================================
--- trunk/tests/cdm/ps/swift.conf	                        (rev 0)
+++ trunk/tests/cdm/ps/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,97 @@
+include "${swift.home}/etc/swift.conf"
+
+site.localhost {
+	execution {
+		type: "local"
+		URL: "none"
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	OS: "INTEL32::LINUX"
+	app.cat {
+		executable: "/bin/cat"
+	}
+
+	app.echo {
+		executable: "/bin/echo"
+	}
+
+	app.grep {
+		executable: "/bin/grep"
+	}
+
+	app.ls {
+		executable: "/bin/ls"
+	}
+
+	app.paste {
+		executable: "/bin/paste"
+	}
+
+	app.sort {
+		executable: "/bin/sort"
+	}
+
+}
+
+site.coasterslocal {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:local"
+		options {
+			internalHostname: "_HOST_"
+			nodeGranularity: 1
+			maxNodesPerJob: 4
+			maxJobs: 8
+			tasksPerNode: 4
+			jobMaxTime: "00:16:40"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	maxParallelTasks: 255
+	initialParallelTasks: 255
+	app.cp {
+		executable: "/bin/cp"
+	}
+
+	app.merge {
+		executable: "_DIR_/merge.sh"
+	}
+
+}
+
+lazyErrors: false
+executionRetries: 2
+cachingAlgorithm: "LRU"
+jobSubmitThrottle: 4
+hostJobSubmitThrottle: 2
+fileTransfersThrottle: 4
+fileOperationsThrottle: 8
+siteScoreThrottlingFactor: 0.2
+keepSiteDir: false
+logProvenance: false
+replicationEnabled: false
+replicationMinQueueTime: 60
+replicationLimit: 3
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: false
+maxForeachThreads: 16384
+# option removed: pgraph.node.options
+# option removed: clustering.enabled
+# option removed: sites.file
+# option removed: kickstart.enabled
+staging: "local"
+# option removed: clustering.min.time
+# option removed: kickstart.always.transfer
+# option removed: pgraph.graph.options
+# option removed: clustering.queue.delay
+# option removed: pgraph
+# option removed: tc.file

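The trailing "# option removed:" comments in the hunk above mark
swift.properties settings with no swift.conf counterpart (sites.file, tc.file,
pgraph*, clustering.*, kickstart.*); presumably the converter emits these so
that dropped settings stay visible rather than disappearing silently.
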
Deleted: trunk/tests/cdm/ps/swift.properties
===================================================================
--- trunk/tests/cdm/ps/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/cdm/ps/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,353 +0,0 @@
-sites.file=${swift.home}/etc/sites.xml
-tc.file=${swift.home}/etc/tc.data
-
-#
-# The host name of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the host name of the local machine.
-# However, if the machine host name is improperly configured or if
-# it does not represent a valid DNS entry, certain services (such as
-# GRAM) will not be able to send job status notifications back to
-# the client. The value of this property can be an IP address.
-#
-# Format:
-#    hostname=string
-#
-
-
-#hostname=localhost
-
-#
-# A TCP port range can be specified to restrict the ports on which GRAM
-# callback services are started. This is likely needed if your submit
-# host is behind a firewall, in which case the firewall should be
-# configured to allow incoming connections on ports in the range.
-#
-# Format:
-#     tcp.port.range=start,end
-#
-
-#tcp.port.range=50000,50100
-
-#
-# false	- means an error will be immediately reported and cause the
-# 		workflow to abort. At this time remote jobs that are already
-#		running will not be canceled
-# true	- means that Swift will try to do as much work as possible and
-#		report all errors encountered at the end. However, "errors"
-#		here only applies to job execution errors. Certain errors
-#		that are related to the Swift implementation (should such
-#		errors occur) will still be reported eagerly.
-#
-# Default: false
-#
-lazy.errors=false
-
-#
-# What algorithm to use for caching of remote files. LRU (as in what
-# files to purge) is the only implementation right now. One can set
-# a target size (in bytes) for a host by using the swift:storagesize
-# profile for a host in sites.xml
-#
-# Default: LRU
-#
-caching.algorithm=LRU
-
-#
-# true       - generate a provenance graph in .dot format (Swift will
-#			 choose a random file name)
-# false      - do not generate a provenance graph
-# <filename> - generate a provenance graph in the given file name
-#
-# Default: false
-#
-pgraph=false
-
-
-#
-# graph properties for the provenance graph (.dot specific)
-#
-# Default: splines="compound", rankdir="TB"
-#
-pgraph.graph.options=splines="compound", rankdir="TB"
-
-
-#
-# node properties for the provenance graph (.dot specific)
-#
-# Default: color="seagreen", style="filled"
-#
-pgraph.node.options=color="seagreen", style="filled"
-
-#
-# true	- clustering of small jobs is enabled. Clustering works in the
-#       following way: If a job is clusterable (meaning that it has the
-#       GLOBUS::maxwalltime profile specified in tc.data and its value
-#       is less than the value of the "clustering.min.time" property) it will
-#       be put in a clustering queue. The queue is processed at intervals
-#       specified by the "clustering.queue.delay" property. The processing
-#       of the clustering queue consists of selecting compatible jobs and
-#		grouping them in clusters whose max wall time does not exceed twice
-#       the value of the "clustering.min.time" property. Two or more jobs are
-#       considered compatible if they share the same site and do not have
-#       conflicting profiles (e.g. different values for the same environment
-#       variable).
-# false	- clustering of small jobs is disabled.
-#
-# Default: false
-#
-clustering.enabled=false
-
-
-#
-# <seconds>	- the intervals at which the clustering queue is processed
-#
-# Default: 4
-#
-clustering.queue.delay=4
-
-#
-# <seconds>	- the threshold time for clustering
-#
-# Default: 60
-#
-clustering.min.time=60
-
-#
-# Kickstart is a useful tool that can be used to gather various information
-# about a remote process. Before it can be used it must be installed on the
-# remote site and the corresponding entry be set in the sites file.
-# This option allows controlling of how Swift uses Kickstart. The following
-# values are possible:
-# false - do not use Kickstart
-# true  - use Kickstart. If a job is scheduled on a site that does not have
-#       Kickstart installed, that job will fail.
-# maybe - Use Kickstart if installed (i.e. the entry is present in the sites
-#       file)
-#
-# Default: maybe
-#
-
-kickstart.enabled=maybe
-
-#
-# Indicates when Kickstart records should be fetched from the remote site:
-# true	- always transfer Kickstart records if Kickstart was used (see
-#		kickstart.enabled)
-# false	- only transfer Kickstart records if the job fails
-#
-# Default: false
-#
-
-kickstart.always.transfer=false
-
-#
-# Indicates when wrapper logs should be fetched from the remote site:
-# true	- always transfer wrapper logs
-# false	- only transfer wrapper logs if the job fails
-#
-# Default: false
-#
-
-wrapperlog.always.transfer=false
-
-###########################################################################
-#                          Throttling options                             #
-###########################################################################
-#
-# For the throttling parameters, valid values are either a positive integer
-# or "off" (without the quotes).
-#
-
-#
-# Limits the number of concurrent submissions for a workflow instance. This
-# throttle only limits the number of concurrent tasks (jobs) that are being
-# sent to sites, not the total number of concurrent jobs that can be run.
-# The submission stage in GRAM is one of the most CPU expensive stages (due
-# mostly to the mutual authentication and delegation). Having too many
-# concurrent submissions can overload either or both the submit host CPU
-# and the remote host/head node causing degraded performance.
-#
-# Default: 4
-#
-
-throttle.submit=4
-#throttle.submit=off
-
-#
-# Limits the number of concurrent submissions for any single site that Swift
-# sends jobs to. In other words, it guarantees that, for any given site, no
-# more jobs than the value of this throttle will concurrently be in the
-# process of being submitted.
-#
-# Default: 2
-#
-
-throttle.host.submit=2
-#throttle.host.submit=off
-
-#
-# The Swift scheduler has the ability to limit the number of concurrent jobs
-# allowed on a site based on the performance history of that site. Each site
-# is assigned a score (initially 1), which can increase or decrease based
-# on whether the site yields successful or faulty job runs. The score for a
-# site can take values in the (0.1, 100) interval. The number of allowed jobs
-# is calculated using the following formula:
-# 	2 + score*throttle.score.job.factor
-# This means a site will always be allowed at least two concurrent jobs and
-# at most 2 + 100*throttle.score.job.factor. With a default of 4 this means
-# at least 2 jobs and at most 402.
-#
-# Default: 4
-#
-
-throttle.score.job.factor=0.2
-#throttle.score.job.factor=off
-
-
-#
-# Limits the total number of concurrent file transfers that can happen at any
-# given time. File transfers consume bandwidth. Too many concurrent transfers
-# can cause the network to be overloaded preventing various other signalling
-# traffic from flowing properly.
-#
-# Default: 4
-#
-
-throttle.transfers=4
-#throttle.transfers=off
-
-# Limits the total number of concurrent file operations that can happen at any
-# given time. File operations (like transfers) require an exclusive connection
-# to a site. These connections can be expensive to establish. A large number
-# of concurrent file operations may cause Swift to attempt to establish many
-# such expensive connections to various sites. Limiting the number of concurrent
-# file operations causes Swift to use a small number of cached connections and
-# achieve better overall performance.
-#
-# Default: 8
-#
-
-throttle.file.operations=8
-#throttle.file.operations=off
-
-# Indicates whether the working directory on the remote site should be
-# left intact even when the workflow completes successfully. This can be
-# used to inspect the site working directory for debugging purposes.
-#
-# Default: false
-#
-
-sitedir.keep=false
-
-# number of times a job will be retried if it fails (giving a maximum of
-# 1 + execution.retries attempts at execution)
-#
-
-execution.retries=2
-
-
-# Enables/disables replication. Replication is used to deal with jobs sitting
-# in batch queues for abnormally large amounts of time. If replication is enabled
-# and certain conditions are met, Swift creates and submits replicas of jobs, and
-# allows multiple instances of a job to compete.
-#
-
-replication.enabled=false
-
-# If replication is enabled, this value specifies the minimum time, in seconds,
-# a job needs to be queued in a batch queue in order to be considered for
-# replication
-#
-
-replication.min.queue.time=60
-
-# The maximum number of replicas that Swift should attempt.
-
-replication.limit=3
-
-#
-# WARNING: This option is deprecated. Please use the hostname option.
-#
-# The IP address of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the IP address of the local machine.
-# However, if the machine has more than one network interface, Swift
-# will pick the first one, which may not be the right choice. It is
-# recommended that this property is set properly before attempting to
-# run jobs through GRAM.
-#
-# Format:
-#    ip.address=x.y.z.w
-#
-
-#ip.address=127.0.0.1
-
-
-# Controls how Swift will communicate the result code of running user programs
-# from workers to the submit side. In files mode, a file
-# indicating success or failure will be created on the site shared filesystem.
-# In provider mode, the execution provider job status will
-# be used. Notably, GRAM2 does not return job statuses correctly, and so
-# provider mode will not work with GRAM2. With other
-# providers, it can be used to reduce the amount of filesystem access compared
-# to files mode.
-#
-# status.mode=files
-
-# Controls how swift will supply parameters to the remote wrapper script.
-# 'args' mode will pass parameters on the command line
-# 'files' mode will pass parameters through an additional input file
-#
-# valid values: args, files
-# Default: files
-#
-# wrapper.parameter.mode=args
-
-# Determines if Swift remote wrappers will be executed by specifying an
-# absolute path, or a path relative to the job initial working directory
-#
-# valid values: absolute, relative
-# wrapper.invocation.mode=absolute
-
-#
-# Limits the number of concurrent iterations that each foreach statement
-# can have at one time. This conserves memory for swift programs that
-# have large numbers of iterations (which would otherwise all be executed
-# in parallel).
-#
-# Default: 1024
-#
-
-foreach.max.threads=16384
-
-# controls whether the log file will contain provenance information
-# enabling this will increase the size of log files, sometimes
-# significantly.
-
-provenance.log=false
-
-# Controls whether file staging is done by swift or by the execution
-# provider. If set to false, the standard swift staging mechanism is
-# used. If set to true, swift does not stage files. Instead, the
-# execution provider is instructed to stage files in and out.
-#
-# Provider staging is experimental.
-#
-# When enabled, and when coasters are used as an execution provider,
-# a staging mechanism can be selected for each site
-# using the swift:stagingMethod site profile in sites.xml. The
-# following is a list of accepted mechanisms:
-#
-# * file:  Staging is done from a filesystem accessible to the
-#          coaster service (typically running on the head node)
-# * proxy: Staging is done from a filesystem accessible to the
-#          client machine that swift is running on, and is proxied
-#          through the coaster service
-# * sfs:   (short for "shared filesystem") Staging is done by
-#          copying files to and from a filesystem accessible
-#          by the compute node (such as an NFS or GPFS mount).
-
-
-use.provider.staging=true
-provider.staging.pin.swiftfiles=false

Deleted: trunk/tests/cdm/ps/tc.template.data
===================================================================
--- trunk/tests/cdm/ps/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/cdm/ps/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,20 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-
-coasterslocal 	cp 		/bin/cp         INSTALLED	INTEL32::LINUX	null
-coasterslocal 	merge 		_DIR_/merge.sh      INSTALLED	INTEL32::LINUX	null

Added: trunk/tests/documentation/tutorial/swift.conf
===================================================================
--- trunk/tests/documentation/tutorial/swift.conf	                        (rev 0)
+++ trunk/tests/documentation/tutorial/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/documentation/tutorial/swift.properties
===================================================================
--- trunk/tests/documentation/tutorial/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/documentation/tutorial/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/functions/swift.conf
===================================================================
--- trunk/tests/functions/swift.conf	                        (rev 0)
+++ trunk/tests/functions/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,5 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true
+site.local.maxParallelTasks: 12

Deleted: trunk/tests/functions/swift.properties
===================================================================
--- trunk/tests/functions/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/functions/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,3 +0,0 @@
-site=local
-sitedir.keep=true
-taskThrottle=12

Added: trunk/tests/language/should-not-work/swift.conf
===================================================================
--- trunk/tests/language/should-not-work/swift.conf	                        (rev 0)
+++ trunk/tests/language/should-not-work/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language/should-not-work/swift.properties
===================================================================
--- trunk/tests/language/should-not-work/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language/should-not-work/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language/working/swift.conf
===================================================================
--- trunk/tests/language/working/swift.conf	                        (rev 0)
+++ trunk/tests/language/working/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,13 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true
+
+site.local {
+	app.generate { executable: ${env.GROUP}/generate }
+	app.process { executable: ${env.GROUP}/process }
+	app.combine { executable: ${env.GROUP}/combine }
+	app.echo { executable: echo }
+	app.cat { executable: cat }
+	app.wc { executable: wc }
+}

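The block above also shows the app translation pattern: flat
app.local.<name>=<path> properties become nested app blocks under site.local,
and shell-style $GROUP references become the config-level environment
substitution ${env.GROUP}, e.g.:

    app.local.echo=echo                 ->  app.echo { executable: echo }
    app.local.generate=$GROUP/generate  ->  app.generate { executable: ${env.GROUP}/generate }
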
Deleted: trunk/tests/language/working/swift.properties
===================================================================
--- trunk/tests/language/working/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language/working/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-site=local
-sitedir.keep=true
-app.local.generate=$GROUP/generate
-app.local.process=$GROUP/process
-app.local.combine=$GROUP/process
-app.local.echo=echo
-app.local.cat=cat
-app.local.wc=wc

Added: trunk/tests/language-behaviour/IO/swift.conf
===================================================================
--- trunk/tests/language-behaviour/IO/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/IO/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/IO/swift.properties
===================================================================
--- trunk/tests/language-behaviour/IO/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/IO/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/arrays/swift.conf
===================================================================
--- trunk/tests/language-behaviour/arrays/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/arrays/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/arrays/swift.properties
===================================================================
--- trunk/tests/language-behaviour/arrays/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/arrays/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/broken/swift.conf
===================================================================
--- trunk/tests/language-behaviour/broken/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/broken/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/broken/swift.properties
===================================================================
--- trunk/tests/language-behaviour/broken/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/broken/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/cleanup/swift.conf
===================================================================
--- trunk/tests/language-behaviour/cleanup/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/cleanup/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/cleanup/swift.properties
===================================================================
--- trunk/tests/language-behaviour/cleanup/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/cleanup/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/compounds/swift.conf
===================================================================
--- trunk/tests/language-behaviour/compounds/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/compounds/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/compounds/swift.properties
===================================================================
--- trunk/tests/language-behaviour/compounds/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/compounds/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/control_structures/swift.conf
===================================================================
--- trunk/tests/language-behaviour/control_structures/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/control_structures/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/control_structures/swift.properties
===================================================================
--- trunk/tests/language-behaviour/control_structures/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/control_structures/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/datatypes/swift.conf
===================================================================
--- trunk/tests/language-behaviour/datatypes/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/datatypes/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/datatypes/swift.properties
===================================================================
--- trunk/tests/language-behaviour/datatypes/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/datatypes/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/logic/swift.conf
===================================================================
--- trunk/tests/language-behaviour/logic/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/logic/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/logic/swift.properties
===================================================================
--- trunk/tests/language-behaviour/logic/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/logic/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/mappers/swift.conf
===================================================================
--- trunk/tests/language-behaviour/mappers/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/mappers/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,10 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true
+
+site.local {
+	app.echo_sh { executable: ${env.GROUP}"/760-csv-mapper.sh" }
+	app.echo { executable: "echo" }
+	app.touch { executable: "touch" }
+}
\ No newline at end of file

Deleted: trunk/tests/language-behaviour/mappers/swift.properties
===================================================================
--- trunk/tests/language-behaviour/mappers/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/mappers/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-site=local
-sitedir.keep=true
-app.local.echo_sh=$GROUP/760-csv-mapper.sh
-app.local.echo=echo
-app.local.touch=touch
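
Where the old properties carried a per-app catalog (app.<site>.<name>=<path>),
the new files nest app blocks under the site, as in the mappers hunks above.
Note the HOCON value concatenation used for the environment-relative path:

    app.local.echo_sh=$GROUP/760-csv-mapper.sh

  becomes

    site.local {
        app.echo_sh { executable: ${env.GROUP}"/760-csv-mapper.sh" }
    }

HOCON joins the ${env.GROUP} substitution and the adjacent quoted string into
a single path when the config is loaded.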

Added: trunk/tests/language-behaviour/math/swift.conf
===================================================================
--- trunk/tests/language-behaviour/math/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/math/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/math/swift.properties
===================================================================
--- trunk/tests/language-behaviour/math/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/math/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/params/swift.conf
===================================================================
--- trunk/tests/language-behaviour/params/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/params/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/params/swift.properties
===================================================================
--- trunk/tests/language-behaviour/params/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/params/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Deleted: trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/sites.template.xml
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,16 +0,0 @@
-<config>
-
-  <pool handle="localhost">
-    <execution provider="coaster" jobmanager="local:local"/>
-    <profile namespace="globus"   key="internalHostname">_HOST_</profile>
-    <profile namespace="karajan"  key="jobthrottle">2.55</profile>
-    <profile namespace="karajan"  key="initialScore">10000</profile>
-    <profile namespace="globus"   key="jobsPerNode">4</profile>
-    <profile namespace="globus"   key="slots">8</profile>
-    <profile namespace="globus"   key="maxTime">1000</profile>
-    <profile namespace="globus"   key="nodeGranularity">1</profile>
-    <profile namespace="globus"   key="maxNodes">4</profile>
-    <workdirectory>_WORK_</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/swift.conf
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,39 @@
+include "${swift.home}/etc/swift.conf"
+
+site.localhost {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:local"
+		options {
+			internalHostname: "_HOST_"
+			nodeGranularity: 1
+			maxNodesPerJob: 4
+			maxJobs: 8
+			tasksPerNode: 4
+			jobMaxTime: "00:16:40"
+		}
+	}
+	staging: "local"
+	workDirectory: "_WORK_"
+	maxParallelTasks: 255
+	initialParallelTasks: 255
+}
+
+lazyErrors: false
+executionRetries: 2
+cachingAlgorithm: "LRU"
+jobSubmitThrottle: 4
+hostJobSubmitThrottle: 2
+fileTransfersThrottle: 4
+fileOperationsThrottle: 8
+siteScoreThrottlingFactor: 0.2
+keepSiteDir: false
+logProvenance: false
+replicationEnabled: false
+replicationMinQueueTime: 60
+replicationLimit: 3
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: false
+maxForeachThreads: 16384
+staging: "local"

Deleted: trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/swift.properties
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,353 +0,0 @@
-sites.file=${swift.home}/etc/sites.xml
-tc.file=${swift.home}/etc/tc.data
-
-#
-# The host name of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the host name of the local machine.
-# However, if the machine host name is improperly configured or if
-# it does not represent a valid DNS entry, certain services (such as
-# GRAM) will not be able to send job status notifications back to
-# the client. The value of this property can be an IP address.
-#
-# Format:
-#    hostname=string
-#
-
-
-#hostname=localhost
-
-#
-# A TCP port range can be specified to restrict the ports on which GRAM
-# callback services are started. This is likely needed if your submit
-# host is behind a firewall, in which case the firewall should be
-# configured to allow incoming connections on ports in the range.
-#
-# Format:
-#     tcp.port.range=start,end
-#
-
-#tcp.port.range=50000,50100
-
-#
-# false	- means an error will be immediately reported and cause the
-# 		workflow to abort. At this time remote jobs that are already
-#		running will not be canceled
-# true	- means that Swift will try to do as much work as possible and
-#		report all errors encountered at the end. However, "errors"
-#		here only applies to job execution errors. Certain errors
-#		that are related to the Swift implementation (should such
-#		errors occur) will still be reported eagerly.
-#
-# Default: false
-#
-lazy.errors=false
-
-#
-# What algorithm to use for caching of remote files. LRU (as in what
-# files to purge) is the only implementation right now. One can set
-# a target size (in bytes) for a host by using the swift:storagesize
-# profile for a host in sites.xml
-#
-# Default: LRU
-#
-caching.algorithm=LRU
-
-#
-# true       - generate a provenance graph in .dot format (Swift will
-#			 choose a random file name)
-# false      - do not generate a provenance graph
-# <filename> - generate a provenance graph in the given file name
-#
-# Default: false
-#
-pgraph=false
-
-
-#
-# graph properties for the provenance graph (.dot specific)
-#
-# Default: splines="compound", rankdir="TB"
-#
-pgraph.graph.options=splines="compound", rankdir="TB"
-
-
-#
-# node properties for the provenance graph (.dot specific)
-#
-# Default: color="seagreen", style="filled"
-#
-pgraph.node.options=color="seagreen", style="filled"
-
-#
-# true	- clustering of small jobs is enabled. Clustering works in the
-#       following way: If a job is clusterable (meaning that it has the
-#       GLOBUS::maxwalltime profile specified in tc.data and its value
-#       is less than the value of the "clustering.min.time" property) it will
-#       be put in a clustering queue. The queue is processed at intervals
-#       specified by the "clustering.queue.delay" property. The processing
-#       of the clustering queue consists of selecting compatible jobs and
-#		grouping them in clusters whose max wall time does not exceed twice
-#       the value of the "clustering.min.time" property. Two or more jobs are
-#       considered compatible if they share the same site and do not have
-#       conflicting profiles (e.g. different values for the same environment
-#       variable).
-# false	- clustering of small jobs is disabled.
-#
-# Default: false
-#
-clustering.enabled=false
-
-
-#
-# <seconds>	- the intervals at which the clustering queue is processed
-#
-# Default: 4
-#
-clustering.queue.delay=4
-
-#
-# <seconds>	- the threshold time for clustering
-#
-# Default: 60
-#
-clustering.min.time=60
-
-#
-# Kickstart is a useful tool that can be used to gather various information
-# about a remote process. Before it can be used it must be installed on the
-# remote site and the corresponding entry be set in the sites file.
-# This option controls how Swift uses Kickstart. The following
-# values are possible:
-# false - do not use Kickstart
-# true  - use Kickstart. If a job is scheduled on a site that does not have
-#       Kickstart installed, that job will fail.
-# maybe - Use Kickstart if installed (i.e. the entry is present in the sites
-#       file)
-#
-# Default: maybe
-#
-
-kickstart.enabled=maybe
-
-#
-# Indicates when Kickstart records should be fetched from the remote site:
-# true	- always transfer Kickstart records if Kickstart was used (see
-#		kickstart.enabled)
-# false	- only transfer Kickstart records if the job fails
-#
-# Default: false
-#
-
-kickstart.always.transfer=false
-
-#
-# Indicates when wrapper logs should be fetched from the remote site:
-# true	- always transfer wrapper logs
-# false	- only transfer wrapper logs if the job fails
-#
-# Default: false
-#
-
-wrapperlog.always.transfer=false
-
-###########################################################################
-#                          Throttling options                             #
-###########################################################################
-#
-# For the throttling parameters, valid values are either a positive integer
-# or "off" (without the quotes).
-#
-
-#
-# Limits the number of concurrent submissions for a workflow instance. This
-# throttle only limits the number of concurrent tasks (jobs) that are being
-# sent to sites, not the total number of concurrent jobs that can be run.
-# The submission stage in GRAM is one of the most CPU expensive stages (due
-# mostly to the mutual authentication and delegation). Having too many
-# concurrent submissions can overload either or both the submit host CPU
-# and the remote host/head node causing degraded performance.
-#
-# Default: 4
-#
-
-throttle.submit=4
-#throttle.submit=off
-
-#
-# Limits the number of concurrent submissions for any of the sites Swift will
-# try to send jobs to. In other words, it guarantees that, for any given
-# site, no more jobs than the value of this throttle will concurrently be
-# in the process of being submitted.
-#
-# Default: 2
-#
-
-throttle.host.submit=2
-#throttle.host.submit=off
-
-#
-# The Swift scheduler has the ability to limit the number of concurrent jobs
-# allowed on a site based on the performance history of that site. Each site
-# is assigned a score (initially 1), which can increase or decrease based
-# on whether the site yields successful or faulty job runs. The score for a
-# site can take values in the (0.1, 100) interval. The number of allowed jobs
-# is calculated using the following formula:
-# 	2 + score*throttle.score.job.factor
-# This means a site will always be allowed at least two concurrent jobs and
-# at most 2 + 100*throttle.score.job.factor. With a default of 4 this means
-# at least 2 jobs and at most 402.
-#
-# Default: 4
-#
-
-throttle.score.job.factor=0.2
-#throttle.score.job.factor=off
-
-
-#
-# Limits the total number of concurrent file transfers that can happen at any
-# given time. File transfers consume bandwidth. Too many concurrent transfers
-# can cause the network to be overloaded preventing various other signalling
-# traffic from flowing properly.
-#
-# Default: 4
-#
-
-throttle.transfers=4
-#throttle.transfers=off
-
-# Limits the total number of concurrent file operations that can happen at any
-# given time. File operations (like transfers) require an exclusive connection
-# to a site. These connections can be expensive to establish. A large number
-# of concurrent file operations may cause Swift to attempt to establish many
-# such expensive connections to various sites. Limiting the number of concurrent
-# file operations causes Swift to use a small number of cached connections and
-# achieve better overall performance.
-#
-# Default: 8
-#
-
-throttle.file.operations=8
-#throttle.file.operations=off
-
-# Indicates whether the working directory on the remote site should be
-# left intact even when the workflow completes successfully. This can be
-# used to inspect the site working directory for debugging purposes.
-#
-# Default: false
-#
-
-sitedir.keep=false
-
-# number of times a job will be retried if it fails (giving a maximum of
-# 1 + execution.retries attempts at execution)
-#
-
-execution.retries=2
-
-
-# Enables/disables replication. Replication is used to deal with jobs sitting
-# in batch queues for abnormally large amounts of time. If replication is enabled
-# and certain conditions are met, Swift creates and submits replicas of jobs, and
-# allows multiple instances of a job to compete.
-#
-
-replication.enabled=false
-
-# If replication is enabled, this value specifies the minimum time, in seconds,
-# a job needs to be queued in a batch queue in order to be considered for
-# replication
-#
-
-replication.min.queue.time=60
-
-# The maximum number of replicas that Swift should attempt.
-
-replication.limit=3
-
-#
-# WARNING: This option is deprecated. Please use the hostname option.
-#
-# The IP address of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the IP address of the local machine.
-# However, if the machine has more than one network interface, Swift
-# will pick the first one, which may not be the right choice. It is
-# recommended that this property is set properly before attempting to
-# run jobs through GRAM.
-#
-# Format:
-#    ip.address=x.y.z.w
-#
-
-#ip.address=127.0.0.1
-
-
-# Controls how Swift will communicate the result code of running user programs
-# from workers to the submit side. In files mode, a file
-# indicating success or failure will be created on the site shared filesystem.
-# In provider mode, the execution provider job status will
-# be used. Notably, GRAM2 does not return job statuses correctly, and so
-# provider mode will not work with GRAM2. With other
-# providers, it can be used to reduce the amount of filesystem access compared
-# to files mode.
-#
-# status.mode=files
-
-# Controls how swift will supply parameters to the remote wrapper script.
-# 'args' mode will pass parameters on the command line
-# 'files' mode will pass parameters through an additional input file
-#
-# valid values: args, files
-# Default: files
-#
-# wrapper.parameter.mode=args
-
-# Determines if Swift remote wrappers will be executed by specifying an
-# absolute path, or a path relative to the job initial working directory
-#
-# valid values: absolute, relative
-# wrapper.invocation.mode=absolute
-
-#
-# Limits the number of concurrent iterations that each foreach statement
-# can have at one time. This conserves memory for swift programs that
-# have large numbers of iterations (which would otherwise all be executed
-# in parallel).
-#
-# Default: 1024
-#
-
-foreach.max.threads=16384
-
-# controls whether the log file will contain provenance information
-# enabling this will increase the size of log files, sometimes
-# significantly.
-
-provenance.log=false
-
-# Controls whether file staging is done by swift or by the execution
-# provider. If set to false, the standard swift staging mechanism is
-# used. If set to true, swift does not stage files. Instead, the
-# execution provider is instructed to stage files in and out.
-#
-# Provider staging is experimental.
-#
-# When enabled, and when coasters are used as an execution provider,
-# a staging mechanism can be selected for each site
-# using the swift:stagingMethod site profile in sites.xml. The
-# following is a list of accepted mechanisms:
-#
-# * file:  Staging is done from a filesystem accessible to the
-#          coaster service (typically running on the head node)
-# * proxy: Staging is done from a filesystem accessible to the
-#          client machine that swift is running on, and is proxied
-#          through the coaster service
-# * sfs:   (short for "shared filesystem") Staging is done by
-#          copying files to and from a filesystem accessible
-#          by the compute node (such as an NFS or GPFS mount).
-
-
-use.provider.staging=true
-provider.staging.pin.swiftfiles=false

Deleted: trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/tc.template.data
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-coasters/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1 +0,0 @@
-local	gen		_DIR_/900-dynamic-output.gen.sh		INSTALLED	INTEL32::LINUX	null
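
This commit does not show where the gen catalog entry lands; by analogy with
the other conversions here it would presumably become an app block on the
site defined in the new swift.conf (hypothetical sketch, name and path taken
from the tc line above):

    site.localhost {
        app.gen { executable: "_DIR_/900-dynamic-output.gen.sh" }
    }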

Deleted: trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/sites.template.xml
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,10 +0,0 @@
-<config>
-
-  <pool handle="localhost" sysinfo="INTEL32::LINUX">
-    <gridftp url="local://localhost" />
-    <execution provider="local" url="none" />
-    <workdirectory>_WORK_</workdirectory>
-    <profile namespace="swift" key="stagingMethod">file</profile>
-  </pool>
-
-</config>

Added: trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/swift.conf
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,32 @@
+include "${swift.home}/etc/swift.conf"
+
+site.localhost {
+	execution {
+		type: "local"
+		URL: "none"
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	OS: "INTEL32::LINUX"
+}
+
+lazyErrors: false
+executionRetries: 2
+cachingAlgorithm: "LRU"
+jobSubmitThrottle: 4
+hostJobSubmitThrottle: 2
+fileTransfersThrottle: 4
+fileOperationsThrottle: 8
+siteScoreThrottlingFactor: 0.2
+keepSiteDir: false
+logProvenance: false
+replicationEnabled: false
+replicationMinQueueTime: 60
+replicationLimit: 3
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: false
+maxForeachThreads: 16384
+staging: "local"

Deleted: trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/swift.properties
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,353 +0,0 @@
-sites.file=${swift.home}/etc/sites.xml
-tc.file=${swift.home}/etc/tc.data
-
-#
-# The host name of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the host name of the local machine.
-# However, if the machine host name is improperly configured or if
-# it does not represent a valid DNS entry, certain services (such as
-# GRAM) will not be able to send job status notifications back to
-# the client. The value of this property can be an IP address.
-#
-# Format:
-#    hostname=string
-#
-
-
-#hostname=localhost
-
-#
-# A TCP port range can be specified to restrict the ports on which GRAM
-# callback services are started. This is likely needed if your submit
-# host is behind a firewall, in which case the firewall should be
-# configured to allow incoming connections on ports in the range.
-#
-# Format:
-#     tcp.port.range=start,end
-#
-
-#tcp.port.range=50000,50100
-
-#
-# false	- means an error will be immediately reported and cause the
-# 		workflow to abort. At this time remote jobs that are already
-#		running will not be canceled
-# true	- means that Swift will try to do as much work as possible and
-#		report all errors encountered at the end. However, "errors"
-#		here only applies to job execution errors. Certain errors
-#		that are related to the Swift implementation (should such
-#		errors occur) will still be reported eagerly.
-#
-# Default: false
-#
-lazy.errors=false
-
-#
-# What algorithm to use for caching of remote files. LRU (as in what
-# files to purge) is the only implementation right now. One can set
-# a target size (in bytes) for a host by using the swift:storagesize
-# profile for a host in sites.xml
-#
-# Default: LRU
-#
-caching.algorithm=LRU
-
-#
-# true       - generate a provenance graph in .dot format (Swift will
-#			 choose a random file name)
-# false      - do not generate a provenance graph
-# <filename> - generate a provenance graph in the given file name
-#
-# Default: false
-#
-pgraph=false
-
-
-#
-# graph properties for the provenance graph (.dot specific)
-#
-# Default: splines="compound", rankdir="TB"
-#
-pgraph.graph.options=splines="compound", rankdir="TB"
-
-
-#
-# node properties for the provenance graph (.dot specific)
-#
-# Default: color="seagreen", style="filled"
-#
-pgraph.node.options=color="seagreen", style="filled"
-
-#
-# true	- clustering of small jobs is enabled. Clustering works in the
-#       following way: If a job is clusterable (meaning that it has the
-#       GLOBUS::maxwalltime profile specified in tc.data and its value
-#       is less than the value of the "clustering.min.time" property) it will
-#       be put in a clustering queue. The queue is processed at intervals
-#       specified by the "clustering.queue.delay" property. The processing
-#       of the clustering queue consists of selecting compatible jobs and
-#		grouping them in clusters whose max wall time does not exceed twice
-#       the value of the "clustering.min.time" property. Two or more jobs are
-#       considered compatible if they share the same site and do not have
-#       conflicting profiles (e.g. different values for the same environment
-#       variable).
-# false	- clustering of small jobs is disabled.
-#
-# Default: false
-#
-clustering.enabled=false
-
-
-#
-# <seconds>	- the intervals at which the clustering queue is processed
-#
-# Default: 4
-#
-clustering.queue.delay=4
-
-#
-# <seconds>	- the threshold time for clustering
-#
-# Default: 60
-#
-clustering.min.time=60
-
-#
-# Kickstart is a useful tool that can be used to gather various information
-# about a remote process. Before it can be used it must be installed on the
-# remote site and the corresponding entry be set in the sites file.
-# This option controls how Swift uses Kickstart. The following
-# values are possible:
-# false - do not use Kickstart
-# true  - use Kickstart. If a job is scheduled on a site that does not have
-#       Kickstart installed, that job will fail.
-# maybe - Use Kickstart if installed (i.e. the entry is present in the sites
-#       file)
-#
-# Default: maybe
-#
-
-kickstart.enabled=maybe
-
-#
-# Indicates when Kickstart records should be fetched from the remote site:
-# true	- always transfer Kickstart records if Kickstart was used (see
-#		kickstart.enabled)
-# false	- only transfer Kickstart records if the job fails
-#
-# Default: false
-#
-
-kickstart.always.transfer=false
-
-#
-# Indicates when wrapper logs should be fetched from the remote site:
-# true	- always transfer wrapper logs
-# false	- only transfer wrapper logs if the job fails
-#
-# Default: false
-#
-
-wrapperlog.always.transfer=false
-
-###########################################################################
-#                          Throttling options                             #
-###########################################################################
-#
-# For the throttling parameters, valid values are either a positive integer
-# or "off" (without the quotes).
-#
-
-#
-# Limits the number of concurrent submissions for a workflow instance. This
-# throttle only limits the number of concurrent tasks (jobs) that are being
-# sent to sites, not the total number of concurrent jobs that can be run.
-# The submission stage in GRAM is one of the most CPU expensive stages (due
-# mostly to the mutual authentication and delegation). Having too many
-# concurrent submissions can overload either or both the submit host CPU
-# and the remote host/head node causing degraded performance.
-#
-# Default: 4
-#
-
-throttle.submit=4
-#throttle.submit=off
-
-#
-# Limits the number of concurrent submissions for any of the sites Swift will
-# try to send jobs to. In other words, it guarantees that, for any given
-# site, no more jobs than the value of this throttle will concurrently be
-# in the process of being submitted.
-#
-# Default: 2
-#
-
-throttle.host.submit=2
-#throttle.host.submit=off
-
-#
-# The Swift scheduler has the ability to limit the number of concurrent jobs
-# allowed on a site based on the performance history of that site. Each site
-# is assigned a score (initially 1), which can increase or decrease based
-# on whether the site yields successful or faulty job runs. The score for a
-# site can take values in the (0.1, 100) interval. The number of allowed jobs
-# is calculated using the following formula:
-# 	2 + score*throttle.score.job.factor
-# This means a site will always be allowed at least two concurrent jobs and
-# at most 2 + 100*throttle.score.job.factor. With a default of 4 this means
-# at least 2 jobs and at most 402.
-#
-# Default: 4
-#
-
-throttle.score.job.factor=0.2
-#throttle.score.job.factor=off
-
-
-#
-# Limits the total number of concurrent file transfers that can happen at any
-# given time. File transfers consume bandwidth. Too many concurrent transfers
-# can cause the network to be overloaded preventing various other signalling
-# traffic from flowing properly.
-#
-# Default: 4
-#
-
-throttle.transfers=4
-#throttle.transfers=off
-
-# Limits the total number of concurrent file operations that can happen at any
-# given time. File operations (like transfers) require an exclusive connection
-# to a site. These connections can be expensive to establish. A large number
-# of concurrent file operations may cause Swift to attempt to establish many
-# such expensive connections to various sites. Limiting the number of concurrent
-# file operations causes Swift to use a small number of cached connections and
-# achieve better overall performance.
-#
-# Default: 8
-#
-
-throttle.file.operations=8
-#throttle.file.operations=off
-
-# Indicates whether the working directory on the remote site should be
-# left intact even when the workflow completes successfully. This can be
-# used to inspect the site working directory for debugging purposes.
-#
-# Default: false
-#
-
-sitedir.keep=false
-
-# number of times a job will be retried if it fails (giving a maximum of
-# 1 + execution.retries attempts at execution)
-#
-
-execution.retries=2
-
-
-# Enables/disables replication. Replication is used to deal with jobs sitting
-# in batch queues for abnormally large amounts of time. If replication is enabled
-# and certain conditions are met, Swift creates and submits replicas of jobs, and
-# allows multiple instances of a job to compete.
-#
-
-replication.enabled=false
-
-# If replication is enabled, this value specifies the minimum time, in seconds,
-# a job needs to be queued in a batch queue in order to be considered for
-# replication
-#
-
-replication.min.queue.time=60
-
-# The maximum number of replicas that Swift should attempt.
-
-replication.limit=3
-
-#
-# WARNING: This option is deprecated. Please use the hostname option.
-#
-# The IP address of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the IP address of the local machine.
-# However, if the machine has more than one network interface, Swift
-# will pick the first one, which may not be the right choice. It is
-# recommended that this property is set properly before attempting to
-# run jobs through GRAM.
-#
-# Format:
-#    ip.address=x.y.z.w
-#
-
-#ip.address=127.0.0.1
-
-
-# Controls how Swift will communicate the result code of running user programs
-# from workers to the submit side. In files mode, a file
-# indicating success or failure will be created on the site shared filesystem.
-# In provider mode, the execution provider job status will
-# be used. Notably, GRAM2 does not return job statuses correctly, and so
-# provider mode will not work with GRAM2. With other
-# providers, it can be used to reduce the amount of filesystem access compared
-# to files mode.
-#
-# status.mode=files
-
-# Controls how swift will supply parameters to the remote wrapper script.
-# 'args' mode will pass parameters on the command line
-# 'files' mode will pass parameters through an additional input file
-#
-# valid values: args, files
-# Default: files
-#
-# wrapper.parameter.mode=args
-
-# Determines if Swift remote wrappers will be executed by specifying an
-# absolute path, or a path relative to the job initial working directory
-#
-# valid values: absolute, relative
-# wrapper.invocation.mode=absolute
-
-#
-# Limits the number of concurrent iterations that each foreach statement
-# can have at one time. This conserves memory for swift programs that
-# have large numbers of iterations (which would otherwise all be executed
-# in parallel).
-#
-# Default: 1024
-#
-
-foreach.max.threads=16384
-
-# controls whether the log file will contain provenance information
-# enabling this will increase the size of log files, sometimes
-# significantly.
-
-provenance.log=false
-
-# Controls whether file staging is done by swift or by the execution
-# provider. If set to false, the standard swift staging mechanism is
-# used. If set to true, swift does not stage files. Instead, the
-# execution provider is instructed to stage files in and out.
-#
-# Provider staging is experimental.
-#
-# When enabled, and when coasters are used as an execution provider,
-# a staging mechanism can be selected for each site
-# using the swift:stagingMethod site profile in sites.xml. The
-# following is a list of accepted mechanisms:
-#
-# * file:  Staging is done from a filesystem accessible to the
-#          coaster service (typically running on the head node)
-# * proxy: Staging is done from a filesystem accessible to the
-#          client machine that swift is running on, and is proxied
-#          through the coaster service
-# * sfs:   (short for "shared filesystem") Staging is done by
-#          copying files to and from a filesystem accessible
-#          by the compute node (such as an NFS or GPFS mount).
-
-
-use.provider.staging=true
-provider.staging.pin.swiftfiles=false

Deleted: trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/tc.template.data
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/provider-staging-local/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1 +0,0 @@
-local	gen		_DIR_/900-dynamic-output.gen.sh		INSTALLED	INTEL32::LINUX	null

Deleted: trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/sites.template.xml
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,10 +0,0 @@
-<config>
-
-  <pool handle="localhost" sysinfo="INTEL32::LINUX">
-    <gridftp url="local://localhost" />
-    <execution provider="local" url="none" />
-    <workdirectory>_WORK_</workdirectory>
-    <profile namespace="swift" key="stagingMethod">file</profile>
-  </pool>
-
-</config>

Added: trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/swift.conf
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,31 @@
+include "${swift.home}/etc/swift.conf"
+
+site.localhost {
+	execution {
+		type: "local"
+		URL: "none"
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	OS: "INTEL32::LINUX"
+}
+
+lazyErrors: false
+executionRetries: 2
+cachingAlgorithm: "LRU"
+jobSubmitThrottle: 4
+hostJobSubmitThrottle: 2
+fileTransfersThrottle: 4
+fileOperationsThrottle: 8
+siteScoreThrottlingFactor: 0.2
+keepSiteDir: false
+logProvenance: false
+replicationEnabled: false
+replicationMinQueueTime: 60
+replicationLimit: 3
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: false
+maxForeachThreads: 16384
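
This file matches the two provider-staging variants except that it omits the
trailing staging key; with no staging mode set, the default from the included
etc/swift.conf presumably applies (Swift-side staging), consistent with
use.provider.staging=false in the properties file removed below:

    staging: "local"    # provider-staging-coasters, provider-staging-local
                        # (key omitted here: swift-staging)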

Deleted: trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/swift.properties
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,353 +0,0 @@
-sites.file=${swift.home}/etc/sites.xml
-tc.file=${swift.home}/etc/tc.data
-
-#
-# The host name of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the host name of the local machine.
-# However, if the machine host name is improperly configured or if
-# it does not represent a valid DNS entry, certain services (such as
-# GRAM) will not be able to send job status notifications back to
-# the client. The value of this property can be an IP address.
-#
-# Format:
-#    hostname=string
-#
-
-
-#hostname=localhost
-
-#
-# A TCP port range can be specified to restrict the ports on which GRAM
-# callback services are started. This is likely needed if your submit
-# host is behind a firewall, in which case the firewall should be
-# configured to allow incoming connections on ports in the range.
-#
-# Format:
-#     tcp.port.range=start,end
-#
-
-#tcp.port.range=50000,50100
-
-#
-# false	- means an error will be immediately reported and cause the
-# 		workflow to abort. At this time remote jobs that are already
-#		running will not be canceled
-# true	- means that Swift will try to do as much work as possible and
-#		report all errors encountered at the end. However, "errors"
-#		here only applies to job execution errors. Certain errors
-#		that are related to the Swift implementation (should such
-#		errors occur) will still be reported eagerly.
-#
-# Default: false
-#
-lazy.errors=false
-
-#
-# What algorithm to use for caching of remote files. LRU (as in what
-# files to purge) is the only implementation right now. One can set
-# a target size (in bytes) for a host by using the swift:storagesize
-# profile for a host in sites.xml
-#
-# Default: LRU
-#
-caching.algorithm=LRU
-
-#
-# true       - generate a provenance graph in .dot format (Swift will
-#			 choose a random file name)
-# false      - do not generate a provenance graph
-# <filename> - generate a provenance graph in the given file name
-#
-# Default: false
-#
-pgraph=false
-
-
-#
-# graph properties for the provenance graph (.dot specific)
-#
-# Default: splines="compound", rankdir="TB"
-#
-pgraph.graph.options=splines="compound", rankdir="TB"
-
-
-#
-# node properties for the provenance graph (.dot specific)
-#
-# Default: color="seagreen", style="filled"
-#
-pgraph.node.options=color="seagreen", style="filled"
-
-#
-# true	- clustering of small jobs is enabled. Clustering works in the
-#       following way: If a job is clusterable (meaning that it has the
-#       GLOBUS::maxwalltime profile specified in tc.data and its value
-#       is less than the value of the "clustering.min.time" property) it will
-#       be put in a clustering queue. The queue is processed at intervals
-#       specified by the "clustering.queue.delay" property. The processing
-#       of the clustering queue consists of selecting compatible jobs and
-#		grouping them in clusters whose max wall time does not exceed twice
-#       the value of the "clustering.min.time" property. Two or more jobs are
-#       considered compatible if they share the same site and do not have
-#       conflicting profiles (e.g. different values for the same environment
-#       variable).
-# false	- clustering of small jobs is disabled.
-#
-# Default: false
-#
-clustering.enabled=false
-
-
-#
-# <seconds>	- the intervals at which the clustering queue is processed
-#
-# Default: 4
-#
-clustering.queue.delay=4
-
-#
-# <seconds>	- the threshold time for clustering
-#
-# Default: 60
-#
-clustering.min.time=60
-
-#
-# Kickstart is a useful tool that can be used to gather various information
-# about a remote process. Before it can be used it must be installed on the
-# remote site and the corresponding entry be set in the sites file.
-# This option controls how Swift uses Kickstart. The following
-# values are possible:
-# false - do not use Kickstart
-# true  - use Kickstart. If a job is scheduled on a site that does not have
-#       Kickstart installed, that job will fail.
-# maybe - Use Kickstart if installed (i.e. the entry is present in the sites
-#       file)
-#
-# Default: maybe
-#
-
-kickstart.enabled=maybe
-
-#
-# Indicates when Kickstart records should be fetched from the remote site:
-# true	- always transfer Kickstart records if Kickstart was used (see
-#		kickstart.enabled)
-# false	- only transfer Kickstart records if the job fails
-#
-# Default: false
-#
-
-kickstart.always.transfer=false
-
-#
-# Indicates when wrapper logs should be fetched from the remote site:
-# true	- always transfer wrapper logs
-# false	- only transfer wrapper logs if the job fails
-#
-# Default: false
-#
-
-wrapperlog.always.transfer=false
-
-###########################################################################
-#                          Throttling options                             #
-###########################################################################
-#
-# For the throttling parameters, valid values are either a positive integer
-# or "off" (without the quotes).
-#
-
-#
-# Limits the number of concurrent submissions for a workflow instance. This
-# throttle only limits the number of concurrent tasks (jobs) that are being
-# sent to sites, not the total number of concurrent jobs that can be run.
-# The submission stage in GRAM is one of the most CPU expensive stages (due
-# mostly to the mutual authentication and delegation). Having too many
-# concurrent submissions can overload either or both the submit host CPU
-# and the remote host/head node causing degraded performance.
-#
-# Default: 4
-#
-
-throttle.submit=4
-#throttle.submit=off
-
-#
-# Limits the number of concurrent submissions for any of the sites Swift will
-# try to send jobs to. In other words, it guarantees that, for any given
-# site, no more jobs than the value of this throttle will concurrently be
-# in the process of being submitted.
-#
-# Default: 2
-#
-
-throttle.host.submit=2
-#throttle.host.submit=off
-
-#
-# The Swift scheduler has the ability to limit the number of concurrent jobs
-# allowed on a site based on the performance history of that site. Each site
-# is assigned a score (initially 1), which can increase or decrease based
-# on whether the site yields successful or faulty job runs. The score for a
-# site can take values in the (0.1, 100) interval. The number of allowed jobs
-# is calculated using the following formula:
-# 	2 + score*throttle.score.job.factor
-# This means a site will always be allowed at least two concurrent jobs and
-# at most 2 + 100*throttle.score.job.factor. With a default of 4 this means
-# at least 2 jobs and at most 402.
-#
-# Default: 4
-#
-
-throttle.score.job.factor=0.2
-#throttle.score.job.factor=off
-
-
-#
-# Limits the total number of concurrent file transfers that can happen at any
-# given time. File transfers consume bandwidth. Too many concurrent transfers
-# can cause the network to be overloaded preventing various other signalling
-# traffic from flowing properly.
-#
-# Default: 4
-#
-
-throttle.transfers=4
-#throttle.transfers=off
-
-# Limits the total number of concurrent file operations that can happen at any
-# given time. File operations (like transfers) require an exclusive connection
-# to a site. These connections can be expensive to establish. A large number
-# of concurrent file operations may cause Swift to attempt to establish many
-# such expensive connections to various sites. Limiting the number of concurrent
-# file operations causes Swift to use a small number of cached connections and
-# achieve better overall performance.
-#
-# Default: 8
-#
-
-throttle.file.operations=8
-#throttle.file.operations=off
-
-# Indicates whether the working directory on the remote site should be
-# left intact even when the workflow completes successfully. This can be
-# used to inspect the site working directory for debugging purposes.
-#
-# Default: false
-#
-
-sitedir.keep=false
-
-# number of times a job will be retried if it fails (giving a maximum of
-# 1 + execution.retries attempts at execution)
-#
-
-execution.retries=2
-
-
-# Enables/disables replication. Replication is used to deal with jobs sitting
-# in batch queues for abnormally large amounts of time. If replication is enabled
-# and certain conditions are met, Swift creates and submits replicas of jobs, and
-# allows multiple instances of a job to compete.
-#
-
-replication.enabled=false
-
-# If replication is enabled, this value specifies the minimum time, in seconds,
-# a job needs to be queued in a batch queue in order to be considered for
-# replication
-#
-
-replication.min.queue.time=60
-
-# The maximum number of replicas that Swift should attempt.
-
-replication.limit=3
-
-#
-# WARNING: This option is deprecated. Please use the hostname option.
-#
-# The IP address of the submit machine is used by GRAM as a callback
-# address to report the status of submitted jobs. In general, Swift
-# can automatically detect the IP address of the local machine.
-# However, if the machine has more than one network interface, Swift
-# will pick the first one, which may not be the right choice. It is
-# recommended that this property is set properly before attempting to
-# run jobs through GRAM.
-#
-# Format:
-#    ip.address=x.y.z.w
-#
-
-#ip.address=127.0.0.1
-
-
-# Controls how Swift will communicate the result code of running user programs
-# from workers to the submit side. In files mode, a file
-# indicating success or failure will be created on the site shared filesystem.
-# In provider mode, the execution provider job status will
-# be used. Notably, GRAM2 does not return job statuses correctly, and so
-# provider mode will not work with GRAM2. With other
-# providers, it can be used to reduce the amount of filesystem access compared
-# to files mode.
-#
-# status.mode=files
-
-# Controls how swift will supply parameters to the remote wrapper script.
-# 'args' mode will pass parameters on the command line
-# 'files' mode will pass parameters through an additional input file
-#
-# valid values: args, files
-# Default: files
-#
-# wrapper.parameter.mode=args
-
-# Determines if Swift remote wrappers will be executed by specifying an
-# absolute path, or a path relative to the job initial working directory
-#
-# valid values: absolute, relative
-# wrapper.invocation.mode=absolute
-
-#
-# Limits the number of concurrent iterations that each foreach statement
-# can have at one time. This conserves memory for swift programs that
-# have large numbers of iterations (which would otherwise all be executed
-# in parallel).
-#
-# Default: 1024
-#
-
-foreach.max.threads=16384
-
-# controls whether the log file will contain provenance information
-# enabling this will increase the size of log files, sometimes
-# significantly.
-
-provenance.log=false
-
-# Controls whether file staging is done by swift or by the execution
-# provider. If set to false, the standard swift staging mechanism is
-# used. If set to true, swift does not stage files. Instead, the
-# execution provider is instructed to stage files in and out.
-#
-# Provider staging is experimental.
-#
-# When enabled, and when coasters are used as an execution provider,
-# a staging mechanism can be selected for each site
-# using the swift:stagingMethod site profile in sites.xml. The
-# following is a list of accepted mechanisms:
-#
-# * file:  Staging is done from a filesystem accessible to the
-#          coaster service (typically running on the head node)
-# * proxy: Staging is done from a filesystem accessible to the
-#          client machine that swift is running on, and is proxied
-#          through the coaster service
-# * sfs:   (short for "shared filesystem") Staging is done by
-#          copying files to and from a filesystem accessible
-#          by the compute node (such as an NFS or GPFS mount).
-
-
-use.provider.staging=false
-provider.staging.pin.swiftfiles=false

Deleted: trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/tc.template.data
===================================================================
--- trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/dynamic-output/swift-staging/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1 +0,0 @@
-local	gen		_DIR_/gen.sh		INSTALLED	INTEL32::LINUX	null

Added: trunk/tests/language-behaviour/procedures/swift.conf
===================================================================
--- trunk/tests/language-behaviour/procedures/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/procedures/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/procedures/swift.properties
===================================================================
--- trunk/tests/language-behaviour/procedures/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/procedures/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/strings/swift.conf
===================================================================
--- trunk/tests/language-behaviour/strings/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/strings/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/strings/swift.properties
===================================================================
--- trunk/tests/language-behaviour/strings/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/strings/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/language-behaviour/variables/swift.conf
===================================================================
--- trunk/tests/language-behaviour/variables/swift.conf	                        (rev 0)
+++ trunk/tests/language-behaviour/variables/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,4 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true

Deleted: trunk/tests/language-behaviour/variables/swift.properties
===================================================================
--- trunk/tests/language-behaviour/variables/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/language-behaviour/variables/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-site=local
-sitedir.keep=true

Added: trunk/tests/local/swift.conf
===================================================================
--- trunk/tests/local/swift.conf	                        (rev 0)
+++ trunk/tests/local/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,10 @@
+include "${swift.home}/etc/swift.conf"
+
+site: local
+keepSiteDir: true
+
+site.local {
+	app.append { executable: ${env.GROUP}"/append.sh" }
+	app.echo { executable: "echo" }
+	app.cat { executable: "cat" }
+}

Deleted: trunk/tests/local/swift.properties
===================================================================
--- trunk/tests/local/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/local/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-site=local
-sitedir.keep=true
-app.local.append=$GROUP/append.sh
-app.local.echo=echo
-app.local.cat=cat

Deleted: trunk/tests/mpi/beagle/long/sites.template.xml
===================================================================
--- trunk/tests/mpi/beagle/long/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/beagle/long/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,31 +0,0 @@
-<config>
-
-<import file="sys.xml"/>
-<set name="pwd"><sys:getenv name="PWD"/></set>
-<set name="wdir" value="{pwd}/work"/>
-<echo message="setting workDirectory to: {wdir}"/>
-
-<pool handle="beagle">
-  <execution jobmanager="local:pbs" provider="coaster" url="none"/>
-  <filesystem provider="local" url="none" />
-  <profile namespace="globus" key="maxWallTime">1</profile>
-  <profile namespace="globus" key="maxTime">7200</profile>
-
-  <profile namespace="globus" key="providerAttributes">
-    pbs.aprun;pbs.mpp;depth=1
-  </profile>
-  <profile key="jobsPerNode" namespace="globus">1</profile>
-  <profile key="slots" namespace="globus">1</profile>
-  <profile key="nodeGranularity" namespace="globus">10</profile>
-  <profile key="maxNodes" namespace="globus">20</profile>
-  <profile key="workerLoggingLevel" namespace="globus">DEBUG</profile>
-  <profile key="workerLoggingDirectory" namespace="globus">{wdir}</profile>
-  <profile key="jobThrottle" namespace="karajan">5.99</profile>
-  <profile key="initialScore" namespace="karajan">10000</profile>
-  <workdirectory>{wdir}</workdirectory>
-</pool>
-</config>
-
-<!-- RESERVATIONS:
-Add something like this to your providerAttributes:
-pbs.resources=advres=modFTDock.47 -->

Added: trunk/tests/mpi/beagle/long/swift.conf
===================================================================
--- trunk/tests/mpi/beagle/long/swift.conf	                        (rev 0)
+++ trunk/tests/mpi/beagle/long/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,43 @@
+include "${swift.home}/etc/swift.conf"
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "none"
+		jobManager: "local:pbs"
+		options {
+			nodeGranularity: 10
+			maxNodesPerJob: 20
+			workerLoggingLevel: "DEBUG"
+			maxJobs: 1
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "1"
+			}
+			workerLoggingDirectory: "{wdir}"
+			tasksPerNode: 1
+			jobMaxTime: "02:00:00"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "{wdir}"
+	maxParallelTasks: 600
+	initialParallelTasks: 599
+	app.mpi_cp {
+		executable: "_DIR_/mpi-cp"
+		options {hostCount: "2"}
+		maxWallTime: "00:01:00"
+	}
+
+	app.mpi_sleep {
+		executable: "_DIR_/mpi-sleep"
+		options {hostCount: "2"}
+		maxWallTime: "00:01:00"
+	}
+
+}
+

Deleted: trunk/tests/mpi/beagle/long/tc.template.data
===================================================================
--- trunk/tests/mpi/beagle/long/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/beagle/long/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,23 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-localhost 	cp 		/bin/cp	        INSTALLED	INTEL32::LINUX	null
-
-# hydra-tests-2
-
-beagle 	mpi_cp 		_DIR_/mpi-cp       INSTALLED	INTEL32::LINUX	globus::hostCount=2
-beagle 	mpi_sleep 	_DIR_/mpi-sleep    INSTALLED	INTEL32::LINUX	globus::hostCount=2
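
The per-app tc.data profiles removed above migrate into the app blocks of the
new site definition: globus::hostCount=2 becomes options {hostCount: "2"},
the site-wide maxWallTime of 1 (minute) becomes an explicit per-app
maxWallTime: "00:01:00", and maxTime=7200 s becomes jobMaxTime: "02:00:00".
The {wdir} placeholder is carried over literally; the <set>/<sys:getenv>
logic that computed it in the XML template is gone, so it is presumably
substituted by the test harness:

    beagle  mpi_cp  _DIR_/mpi-cp  INSTALLED  INTEL32::LINUX  globus::hostCount=2

  becomes

    app.mpi_cp {
        executable: "_DIR_/mpi-cp"
        options {hostCount: "2"}
        maxWallTime: "00:01:00"
    }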

Deleted: trunk/tests/mpi/beagle/short/sites.template.xml
===================================================================
--- trunk/tests/mpi/beagle/short/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/beagle/short/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,31 +0,0 @@
-<config>
-
-<import file="sys.xml"/>
-<set name="pwd"><sys:getenv name="PWD"/></set>
-<set name="wdir" value="{pwd}/work"/>
-<echo message="setting workDirectory to: {wdir}"/>
-
-<pool handle="beagle">
-  <execution jobmanager="local:pbs" provider="coaster" url="none"/>
-  <filesystem provider="local" url="none" />
-  <profile namespace="globus" key="maxWallTime">1</profile>
-  <profile namespace="globus" key="maxTime">7200</profile>
-
-  <profile namespace="globus" key="providerAttributes">
-    pbs.aprun;pbs.mpp;depth=6
-  </profile>
-  <profile key="jobsPerNode" namespace="globus">1</profile>
-  <profile key="slots" namespace="globus">1</profile>
-  <profile key="nodeGranularity" namespace="globus">2</profile>
-  <profile key="maxNodes" namespace="globus">4</profile>
-  <profile key="workerLoggingLevel" namespace="globus">DEBUG</profile>
-  <profile key="workerLoggingDirectory" namespace="globus">{wdir}</profile>
-  <profile key="jobThrottle" namespace="karajan">5.99</profile>
-  <profile key="initialScore" namespace="karajan">10000</profile>
-  <workdirectory>{wdir}</workdirectory>
-</pool>
-</config>
-
-<!-- RESERVATIONS:
-Add something like this to your providerAttributes:
-pbs.resources=advres=modFTDock.47 -->

Added: trunk/tests/mpi/beagle/short/swift.conf
===================================================================
--- trunk/tests/mpi/beagle/short/swift.conf	                        (rev 0)
+++ trunk/tests/mpi/beagle/short/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,37 @@
+include "${swift.home}/etc/swift.conf"
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "none"
+		jobManager: "local:pbs"
+		options {
+			nodeGranularity: 2
+			maxNodesPerJob: 4
+			workerLoggingLevel: "DEBUG"
+			maxJobs: 1
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "6"
+			}
+			workerLoggingDirectory: "{wdir}"
+			tasksPerNode: 1
+			jobMaxTime: "02:00:00"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "{wdir}"
+	maxParallelTasks: 600
+	initialParallelTasks: 599
+	app.mpi_cp {
+		executable: "_DIR_/mpi-cp"
+		options {hostCount: "2"}
+		maxWallTime: "00:01:00"
+	}
+
+}
+

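The swift.conf files introduced in this revision fold each sites.template.xml/tc.template.data/swift.properties triple into one file. Judging from the hunk pairs above, the old globus profile keys map onto coaster options as follows: jobsPerNode becomes tasksPerNode, slots becomes maxJobs, maxNodes becomes maxNodesPerJob, maxTime in seconds becomes jobMaxTime in HH:MM:SS (7200 -> "02:00:00"), and the semicolon-separated providerAttributes string becomes a structured jobOptions block. The karajan throttles turn into task limits: maxParallelTasks appears to be roughly jobThrottle x 100 + 1, with initialParallelTasks one below it. A minimal sketch of the pattern using the beagle/short values (the handle "example" is hypothetical; the real files use the old pool handle):

    include "${swift.home}/etc/swift.conf"

    site.example {                      # hypothetical handle
        execution {
            type: "coaster"             # was: provider="coaster"
            jobManager: "local:pbs"     # was: jobmanager="local:pbs"
            options {
                tasksPerNode: 1         # was: globus jobsPerNode = 1
                maxJobs: 1              # was: globus slots = 1
                nodeGranularity: 2
                maxNodesPerJob: 4       # was: globus maxNodes = 4
                jobMaxTime: "02:00:00"  # was: globus maxTime = 7200 (seconds)
            }
        }
        workDirectory: "{wdir}"
        maxParallelTasks: 600           # was: karajan jobThrottle = 5.99
        initialParallelTasks: 599       # was: karajan initialScore = 10000
    }
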
Deleted: trunk/tests/mpi/beagle/short/tc.template.data
===================================================================
--- trunk/tests/mpi/beagle/short/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/beagle/short/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,22 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-localhost 	cp 		/bin/cp	        INSTALLED	INTEL32::LINUX	null
-
-# hydra-tests-2
-
-beagle 	mpi_cp 		_DIR_/mpi-cp    INSTALLED	INTEL32::LINUX	globus::hostCount=2

Deleted: trunk/tests/mpi/crow/long/sites.template.xml
===================================================================
--- trunk/tests/mpi/crow/long/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/crow/long/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,31 +0,0 @@
-<config>
-
-<import file="sys.xml"/>
-<set name="pwd"><sys:getenv name="PWD"/></set>
-<set name="wdir" value="{pwd}/work"/>
-<echo message="setting workDirectory to: {wdir}"/>
-
-<pool handle="crow">
-  <execution jobmanager="local:pbs" provider="coaster" url="none"/>
-  <filesystem provider="local" url="none" />
-  <profile namespace="globus" key="maxWallTime">1</profile>
-  <profile namespace="globus" key="maxTime">7200</profile>
-
-  <profile namespace="globus" key="providerAttributes">
-    pbs.aprun;pbs.mpp;depth=6
-  </profile>
-  <profile key="jobsPerNode" namespace="globus">1</profile>
-  <profile key="slots" namespace="globus">1</profile>
-  <profile key="nodeGranularity" namespace="globus">10</profile>
-  <profile key="maxNodes" namespace="globus">20</profile>
-  <profile key="workerLoggingLevel" namespace="globus">DEBUG</profile>
-  <profile key="workerLoggingDirectory" namespace="globus">{wdir}</profile>
-  <profile key="jobThrottle" namespace="karajan">5.99</profile>
-  <profile key="initialScore" namespace="karajan">10000</profile>
-  <workdirectory>{wdir}</workdirectory>
-</pool>
-</config>
-
-<!-- RESERVATIONS:
-Add something like this to your providerAttributes:
-pbs.resources=advres=modFTDock.47 -->

Added: trunk/tests/mpi/crow/long/swift.conf
===================================================================
--- trunk/tests/mpi/crow/long/swift.conf	                        (rev 0)
+++ trunk/tests/mpi/crow/long/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,43 @@
+include "${swift.home}/etc/swift.conf"
+
+site.crow {
+	execution {
+		type: "coaster"
+		URL: "none"
+		jobManager: "local:pbs"
+		options {
+			nodeGranularity: 10
+			maxNodesPerJob: 20
+			workerLoggingLevel: "DEBUG"
+			maxJobs: 1
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "6"
+			}
+			workerLoggingDirectory: "{wdir}"
+			tasksPerNode: 1
+			jobMaxTime: "02:00:00"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "{wdir}"
+	maxParallelTasks: 600
+	initialParallelTasks: 599
+	app.mpi_cp {
+		executable: "_DIR_/mpi-cp"
+		options {hostCount: "2"}
+		maxWallTime: "00:01:00"
+	}
+
+	app.mpi_sleep {
+		executable: "_DIR_/mpi-sleep"
+		options {hostCount: "2"}
+		maxWallTime: "00:01:00"
+	}
+
+}
+

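Each row of the old transformation catalog becomes an app block nested inside the site entry, with any globus:: profile suffix on the row turned into that app's options map. For example, the crow/long mpi_sleep row converts as sketched here (values taken from the hunks above):

    # tc row:  crow  mpi_sleep  _DIR_/mpi-sleep  INSTALLED  INTEL32::LINUX  globus::hostCount=2
    app.mpi_sleep {
        executable: "_DIR_/mpi-sleep"
        options {hostCount: "2"}     # was: globus::hostCount=2
        maxWallTime: "00:01:00"      # was: the site-wide globus maxWallTime of 1 minute
    }
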
Deleted: trunk/tests/mpi/crow/long/tc.template.data
===================================================================
--- trunk/tests/mpi/crow/long/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/crow/long/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,23 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-localhost 	cp 		/bin/cp	        INSTALLED	INTEL32::LINUX	null
-
-# hydra-tests-2
-
-crow 	mpi_cp 		_DIR_/mpi-cp    INSTALLED	INTEL32::LINUX	globus::hostCount=2
-crow 	mpi_sleep 	_DIR_/mpi-sleep    INSTALLED	INTEL32::LINUX	globus::hostCount=2

Deleted: trunk/tests/mpi/crow/short/sites.template.xml
===================================================================
--- trunk/tests/mpi/crow/short/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/crow/short/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,31 +0,0 @@
-<config>
-
-<import file="sys.xml"/>
-<set name="pwd"><sys:getenv name="PWD"/></set>
-<set name="wdir" value="{pwd}/work"/>
-<echo message="setting workDirectory to: {wdir}"/>
-
-<pool handle="crow">
-  <execution jobmanager="local:pbs" provider="coaster" url="none"/>
-  <filesystem provider="local" url="none" />
-  <profile namespace="globus" key="maxWallTime">1</profile>
-  <profile namespace="globus" key="maxTime">7200</profile>
-
-  <profile namespace="globus" key="providerAttributes">
-    pbs.aprun;pbs.mpp;depth=6
-  </profile>
-  <profile key="jobsPerNode" namespace="globus">1</profile>
-  <profile key="slots" namespace="globus">1</profile>
-  <profile key="nodeGranularity" namespace="globus">2</profile>
-  <profile key="maxNodes" namespace="globus">4</profile>
-  <profile key="workerLoggingLevel" namespace="globus">DEBUG</profile>
-  <profile key="workerLoggingDirectory" namespace="globus">{wdir}</profile>
-  <profile key="jobThrottle" namespace="karajan">5.99</profile>
-  <profile key="initialScore" namespace="karajan">10000</profile>
-  <workdirectory>{wdir}</workdirectory>
-</pool>
-</config>
-
-<!-- RESERVATIONS:
-Add something like this to your providerAttributes:
-pbs.resources=advres=modFTDock.47 -->

Added: trunk/tests/mpi/crow/short/swift.conf
===================================================================
--- trunk/tests/mpi/crow/short/swift.conf	                        (rev 0)
+++ trunk/tests/mpi/crow/short/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,37 @@
+include "${swift.home}/etc/swift.conf"
+
+site.crow {
+	execution {
+		type: "coaster"
+		URL: "none"
+		jobManager: "local:pbs"
+		options {
+			nodeGranularity: 2
+			maxNodesPerJob: 4
+			workerLoggingLevel: "DEBUG"
+			maxJobs: 1
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "6"
+			}
+			workerLoggingDirectory: "{wdir}"
+			tasksPerNode: 1
+			jobMaxTime: "02:00:00"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "{wdir}"
+	maxParallelTasks: 600
+	initialParallelTasks: 599
+	app.mpi_cp {
+		executable: "_DIR_/mpi-cp"
+		options {hostCount: "2"}
+		maxWallTime: "00:01:00"
+	}
+
+}
+

Deleted: trunk/tests/mpi/crow/short/tc.template.data
===================================================================
--- trunk/tests/mpi/crow/short/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/crow/short/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,22 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-localhost 	cp 		/bin/cp	        INSTALLED	INTEL32::LINUX	null
-
-# hydra-tests-2
-
-crow 	mpi_cp 		_DIR_/mpi-cp    INSTALLED	INTEL32::LINUX	globus::hostCount=2

Deleted: trunk/tests/mpi/fusion/long/sites.template.xml
===================================================================
--- trunk/tests/mpi/fusion/long/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/fusion/long/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,26 +0,0 @@
-<config>
-
-<import file="sys.xml"/>
-<set name="pwd"><sys:getenv name="PWD"/></set>
-<set name="wdir" value="{pwd}/work"/>
-<echo message="setting workDirectory to: {wdir}"/>
-
-<pool handle="fusion">
-  <execution jobmanager="local:pbs" provider="coaster" url="none"/>
-  <filesystem provider="local" url="none" />
-  <profile namespace="globus" key="maxWallTime">1</profile> <!-- minutes -->
-  <!-- <profile namespace="globus" key="maxTime">500</profile> -->
-
-  <profile key="jobsPerNode" namespace="globus">1</profile>
-  <profile key="slots" namespace="globus">1</profile>
-  <profile key="nodeGranularity" namespace="globus">2</profile>
-  <profile key="maxNodes" namespace="globus">2</profile>
-  <profile key="workerLoggingLevel" namespace="globus">DEBUG</profile>
-  <profile key="workerLoggingDirectory" namespace="globus">{wdir}</profile>
-  <profile key="queue" namespace="globus">shared</profile>
-  <profile key="jobThrottle" namespace="karajan">5.99</profile>
-  <profile key="initialScore" namespace="karajan">10000</profile>
-
-  <workdirectory>{wdir}</workdirectory>
-</pool>
-</config>

Added: trunk/tests/mpi/fusion/long/swift.conf
===================================================================
--- trunk/tests/mpi/fusion/long/swift.conf	                        (rev 0)
+++ trunk/tests/mpi/fusion/long/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,32 @@
+include "${swift.home}/etc/swift.conf"
+
+site.fusion {
+	execution {
+		type: "coaster"
+		URL: "none"
+		jobManager: "local:pbs"
+		options {
+			nodeGranularity: 2
+			jobQueue: "shared"
+			maxNodesPerJob: 2
+			workerLoggingLevel: "DEBUG"
+			maxJobs: 1
+			workerLoggingDirectory: "{wdir}"
+			tasksPerNode: 1
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "{wdir}"
+	maxParallelTasks: 600
+	initialParallelTasks: 599
+	app.mpi_sleep {
+		executable: "_DIR_/mpi-sleep"
+		options {mpi.processes: "4", mpi.ppn: "2"}
+		maxWallTime: "00:01:00"
+	}
+
+}
+

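Dotted globus profile keys carry over verbatim as dotted option names, and the queue profile is renamed jobQueue. Both are visible in the fusion/long hunks above:

    # tc profile:  globus::mpi.processes=4;globus::mpi.ppn=2
    options {mpi.processes: "4", mpi.ppn: "2"}

    # sites.template.xml:  <profile key="queue" namespace="globus">shared</profile>
    jobQueue: "shared"
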
Deleted: trunk/tests/mpi/fusion/long/tc.template.data
===================================================================
--- trunk/tests/mpi/fusion/long/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/fusion/long/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,20 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-localhost 	cp 		/bin/cp	        INSTALLED	INTEL32::LINUX	null
-
-fusion 	mpi_sleep _DIR_/mpi-sleep    INSTALLED	INTEL32::LINUX	globus::mpi.processes=4;globus::mpi.ppn=2

Deleted: trunk/tests/mpi/fusion/short/sites.template.xml
===================================================================
--- trunk/tests/mpi/fusion/short/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/fusion/short/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,28 +0,0 @@
-<config>
-
-<import file="sys.xml"/>
-<set name="pwd"><sys:getenv name="PWD"/></set>
-<set name="wdir" value="{pwd}/work"/>
-<echo message="setting workDirectory to: {wdir}"/>
-
-<pool handle="fusion">
-  <execution jobmanager="local:pbs" provider="coaster" url="none"/>
-  <filesystem provider="local" url="none" />
-  <profile namespace="globus" key="maxWallTime">1</profile>
-  <profile namespace="globus" key="maxTime">7200</profile>
-
-  <profile key="jobsPerNode" namespace="globus">1</profile>
-  <profile key="slots" namespace="globus">1</profile>
-  <profile key="nodeGranularity" namespace="globus">2</profile>
-  <profile key="maxNodes" namespace="globus">4</profile>
-  <profile key="workerLoggingLevel" namespace="globus">DEBUG</profile>
-  <profile key="workerLoggingDirectory" namespace="globus">{wdir}</profile>
-  <profile key="jobThrottle" namespace="karajan">5.99</profile>
-  <profile key="initialScore" namespace="karajan">10000</profile>
-  <workdirectory>{wdir}</workdirectory>
-</pool>
-</config>
-
-<!-- RESERVATIONS:
-Add something like this to your providerAttributes:
-pbs.resources=advres=modFTDock.47 -->

Added: trunk/tests/mpi/fusion/short/swift.conf
===================================================================
--- trunk/tests/mpi/fusion/short/swift.conf	                        (rev 0)
+++ trunk/tests/mpi/fusion/short/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,32 @@
+include "${swift.home}/etc/swift.conf"
+
+site.fusion {
+	execution {
+		type: "coaster"
+		URL: "none"
+		jobManager: "local:pbs"
+		options {
+			nodeGranularity: 2
+			maxNodesPerJob: 4
+			workerLoggingLevel: "DEBUG"
+			maxJobs: 1
+			workerLoggingDirectory: "{wdir}"
+			tasksPerNode: 1
+			jobMaxTime: "02:00:00"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "{wdir}"
+	maxParallelTasks: 600
+	initialParallelTasks: 599
+	app.mpi_cp {
+		executable: "_DIR_/mpi-cp"
+		options {mpi.processes: "2"}
+		maxWallTime: "00:01:00"
+	}
+
+}
+

Deleted: trunk/tests/mpi/fusion/short/tc.template.data
===================================================================
--- trunk/tests/mpi/fusion/short/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/fusion/short/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,22 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-localhost 	cp 		/bin/cp	        INSTALLED	INTEL32::LINUX	null
-
-# hydra-tests-2
-
-fusion 	mpi_cp 		_DIR_/mpi-cp    INSTALLED	INTEL32::LINUX	globus::mpi.processes=2

Deleted: trunk/tests/mpi/local/sites.template.xml
===================================================================
--- trunk/tests/mpi/local/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/local/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,24 +0,0 @@
-<config>
-
-  <pool handle="localhost" sysinfo="INTEL32::LINUX">
-    <gridftp url="local://localhost" />
-    <execution provider="local" url="none" />
-    <workdirectory>_WORK_</workdirectory>
-    <profile namespace="swift" key="stagingMethod">file</profile>
-  </pool>
-
-  <pool handle="coasterslocal">
-    <filesystem provider="local" />
-    <execution provider="coaster" jobmanager="local:local"/>
-    <profile namespace="globus"   key="internalHostname">_HOST_</profile>
-    <profile namespace="karajan"  key="jobthrottle">2.55</profile>
-    <profile namespace="karajan"  key="initialScore">10000</profile>
-    <profile namespace="globus"   key="jobsPerNode">4</profile>
-    <profile namespace="globus"   key="slots">8</profile>
-    <profile namespace="globus"   key="maxTime">1000</profile>
-    <profile namespace="globus"   key="nodeGranularity">1</profile>
-    <profile namespace="globus"   key="maxNodes">4</profile>
-    <workdirectory>_WORK_</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/mpi/local/swift.conf
===================================================================
--- trunk/tests/mpi/local/swift.conf	                        (rev 0)
+++ trunk/tests/mpi/local/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,71 @@
+include "${swift.home}/etc/swift.conf"
+
+site.localhost {
+	execution {
+		type: "local"
+		URL: "none"
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	OS: "INTEL32::LINUX"
+	app.cat {
+		executable: "/bin/cat"
+	}
+
+	app.cp {
+		executable: "/bin/cp"
+	}
+
+	app.echo {
+		executable: "/bin/echo"
+	}
+
+	app.grep {
+		executable: "/bin/grep"
+	}
+
+	app.ls {
+		executable: "/bin/ls"
+	}
+
+	app.paste {
+		executable: "/bin/paste"
+	}
+
+	app.sort {
+		executable: "/bin/sort"
+	}
+
+}
+
+site.coasterslocal {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:local"
+		options {
+			internalHostname: "_HOST_"
+			nodeGranularity: 1
+			maxNodesPerJob: 4
+			maxJobs: 8
+			tasksPerNode: 4
+			jobMaxTime: "00:16:40"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	maxParallelTasks: 255
+	initialParallelTasks: 255
+	app.mpi_cp {
+		executable: "_DIR_/mpi-cp"
+		options {hostCount: "2"}
+	}
+
+}
+

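For plain local execution the conversion also absorbs the pool's sysinfo attribute and the localhost catalog rows: sysinfo="INTEL32::LINUX" becomes OS: "INTEL32::LINUX", and each localhost utility gets its own app block, with the trailing "null" profile column simply dropping away. Note that the throttle arithmetic is not exact everywhere: coasterslocal's jobThrottle of 2.55 becomes maxParallelTasks: 255 with initialParallelTasks also 255, rather than the 256/255 the pattern elsewhere would suggest. A sketch of one converted row:

    # tc row:  localhost  cat  /bin/cat  INSTALLED  INTEL32::LINUX  null
    site.localhost {
        OS: "INTEL32::LINUX"        # was: pool attribute sysinfo="INTEL32::LINUX"
        app.cat {
            executable: "/bin/cat"
        }
    }
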
Deleted: trunk/tests/mpi/local/tc.template.data
===================================================================
--- trunk/tests/mpi/local/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/mpi/local/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,22 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-localhost 	cp 		/bin/cp	        INSTALLED	INTEL32::LINUX	null
-
-# hydra-tests-2
-
-coasterslocal 	mpi_cp 		_DIR_/mpi-cp    INSTALLED	INTEL32::LINUX	globus::hostCount=2

Deleted: trunk/tests/multi_remote/sites.template.xml
===================================================================
--- trunk/tests/multi_remote/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/multi_remote/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,172 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="local">
-    <execution provider="coaster" jobmanager="local:local"/>
-    <profile namespace="globus" key="jobsPerNode">4</profile>
-    <profile namespace="globus" key="maxWalltime">00:15:00</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">0.03</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <filesystem provider="local"/>
-    <workdirectory>.</workdirectory>
-  </pool>
-
-  <pool handle="ci">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="login.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">120</profile>
-    <profile namespace="globus" key="maxWalltime">00:02:00</profile>
-    <profile namespace="globus" key="slots">5</profile>
-    <profile namespace="globus" key="maxnodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">4.80</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>PUBLISH_FOLDER</workdirectory>
-  </pool>
-
-  <pool handle="beagle">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="login4.beagle.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24;</profile>
-    <profile namespace="globus" key="maxtime">36000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/BEAGLE_USERNAME/swiftwork</profile>
-    <profile namespace="globus" key="slots">5</profile>
-    <profile namespace="globus" key="maxnodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">4.80</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/BEAGLE_USERNAME/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="blues">
-    <execution jobmanager="ssh-cl:local" provider="coaster" url="blogin1.lcrc.anl.gov"/>
-    <filesystem provider="local" url="none" />
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxtime">36000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="globus" key="jobsPerNode">2</profile>
-    <profile namespace="globus" key="ppn">8</profile>
-    <profile namespace="globus" key="queue">route</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="nodeGranularity">2</profile>
-    <profile namespace="globus" key="maxNodes">2</profile>
-    <profile namespace="karajan" key="jobThrottle">2.20</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="HighOverAllocation">1000</profile>
-    <profile namespace="globus" key="LowOverAllocation">1000</profile>
-    <workdirectory>/tmp/BLUES_USERNAME/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="fusion">
-    <execution jobmanager="ssh-cl:local" provider="coaster" url="flogin1.lcrc.anl.gov"/>
-    <filesystem provider="local" url="none" />
-    <profile namespace="globus" key="maxtime">4000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="globus" key="jobsPerNode">2</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="nodeGranularity">2</profile>
-    <profile namespace="globus" key="maxNodes">2</profile>
-    <profile namespace="globus" key="queue">shared</profile>
-    <profile namespace="karajan" key="jobThrottle">5.99</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="HighOverAllocation">1000</profile>
-    <profile namespace="globus" key="LowOverAllocation">1000</profile>
-    <workdirectory>/homes/FUSION_USERNAME/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="midway">
-    <execution provider="coaster" jobmanager="local:local"/>
-    <profile namespace="globus" key="queue">sandyb</profile>
-    <profile namespace="globus" key="jobsPerNode">16</profile>
-    <profile namespace="globus" key="maxtime">36000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="slots">4</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">.64</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/MIDWAY_USERNAME</workdirectory>
-  </pool>
-
-
-  <pool handle="uc3">
-    <execution provider="coaster" url="uc3-sub.uchicago.edu" jobmanager="ssh-cl:local"/>
-    <profile namespace="karajan" key="jobThrottle">10.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="maxtime">36000</profile>
-    <profile namespace="globus"  key="maxWalltime">01:05:00</profile>
-    <profile namespace="globus"  key="highOverAllocation">100</profile>
-    <profile namespace="globus"  key="lowOverAllocation">100</profile>
-    <profile namespace="globus"  key="slots">2</profile>
-    <profile namespace="globus"  key="maxNodes">1</profile>
-    <profile namespace="globus"  key="nodeGranularity">1</profile>
-    <!-- <profile namespace="globus"  key="condor.+AccountingGroup">"group_friends.yadunand"</profile> -->
-    <profile namespace="globus"  key="jobType">nonshared</profile>
-    <workdirectory>/home/UC3_USERNAME/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="mcs">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="thwomp.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">36000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="userHomeOverride">/sandbox/MCS_USERNAME/</profile>
-    <workdirectory>/sandbox/MCS_USERNAME/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="frisbee">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="frisbee.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">4000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/MCS_USERNAME/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="bridled">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="bridled.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">2</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">36000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="userHomeOverride">/home/BRID_USERNAME/swiftwork</profile>
-    <workdirectory>/home/BRID_USERNAME/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="communicado">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="communicado.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">2</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">36000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="userHomeOverride">/home/COMM_USERNAME/swiftwork</profile>
-    <workdirectory>/home/COMM_USERNAME/swiftwork</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/multi_remote/swift.conf
===================================================================
--- trunk/tests/multi_remote/swift.conf	                        (rev 0)
+++ trunk/tests/multi_remote/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,306 @@
+include "${swift.home}/etc/swift.conf"
+
+site.local {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:local"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			maxJobs: 1
+			highOverallocation: 100
+			tasksPerNode: 4
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "."
+	maxParallelTasks: 4
+	initialParallelTasks: 3
+	app.l_bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:15:00"
+	}
+
+}
+
+site.ci {
+	execution {
+		type: "coaster"
+		URL: "login.ci.uchicago.edu"
+		jobManager: "ssh-cl:local"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			maxJobs: 5
+			highOverallocation: 100
+			tasksPerNode: 24
+			jobMaxTime: "00:02:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "PUBLISH_FOLDER"
+	maxParallelTasks: 481
+	initialParallelTasks: 480
+	app.pub {
+		executable: "/bin/bash"
+		maxWallTime: "00:02:00"
+	}
+
+}
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "login4.beagle.ci.uchicago.edu"
+		jobManager: "ssh-cl:local"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			userHomeOverride: "/lustre/beagle/BEAGLE_USERNAME/swiftwork"
+			maxJobs: 5
+			highOverallocation: 100
+			jobMaxTime: "10:00:00"
+			lowOverallocation: 100
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "24"
+			}
+			tasksPerNode: 24
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/BEAGLE_USERNAME/swiftwork"
+	maxParallelTasks: 481
+	initialParallelTasks: 480
+	app.bgl {
+		executable: "/bin/bash"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+site.blues {
+	execution {
+		type: "coaster"
+		URL: "blogin1.lcrc.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			nodeGranularity: 2
+			jobQueue: "route"
+			maxNodesPerJob: 2
+			maxJobs: 1
+			jobOptions.ppn: 8
+			highOverallocation: 1000
+			jobMaxTime: "10:00:00"
+			lowOverallocation: 1000
+			tasksPerNode: 2
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "/tmp/BLUES_USERNAME/swiftwork"
+	maxParallelTasks: 221
+	initialParallelTasks: 220
+	app.blu {
+		executable: "/bin/bash"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+site.fusion {
+	execution {
+		type: "coaster"
+		URL: "flogin1.lcrc.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 1000
+			nodeGranularity: 2
+			jobQueue: "shared"
+			maxNodesPerJob: 2
+			maxJobs: 1
+			highOverallocation: 1000
+			tasksPerNode: 2
+			jobMaxTime: "01:06:40"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "/homes/FUSION_USERNAME/swiftwork"
+	maxParallelTasks: 600
+	initialParallelTasks: 599
+	app.fus {
+		executable: "/bin/bash"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+site.midway {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:local"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			jobQueue: "sandyb"
+			maxJobs: 4
+			highOverallocation: 100
+			tasksPerNode: 16
+			jobMaxTime: "10:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/MIDWAY_USERNAME"
+	maxParallelTasks: 65
+	initialParallelTasks: 64
+	app.mid {
+		executable: "/bin/bash"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+site.uc3 {
+	execution {
+		type: "coaster"
+		URL: "uc3-sub.uchicago.edu"
+		jobManager: "ssh-cl:local"
+		options {
+			# Option ignored: globus:jobtype = nonshared
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			maxJobs: 2
+			highOverallocation: 100
+			tasksPerNode: 1
+			jobMaxTime: "10:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/home/UC3_USERNAME/swiftwork"
+	maxParallelTasks: 1001
+	initialParallelTasks: 999
+	app.uc3 {
+		executable: "/bin/bash"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+site.mcs {
+	execution {
+		type: "coaster"
+		URL: "thwomp.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			userHomeOverride: "/sandbox/MCS_USERNAME/"
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "10:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/MCS_USERNAME/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.mcs {
+		executable: "/bin/bash"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+site.frisbee {
+	execution {
+		type: "coaster"
+		URL: "frisbee.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:06:40"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/MCS_USERNAME/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.fsb {
+		executable: "/bin/bash"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+site.bridled {
+	execution {
+		type: "coaster"
+		URL: "bridled.ci.uchicago.edu"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			userHomeOverride: "/home/BRID_USERNAME/swiftwork"
+			highOverallocation: 100
+			tasksPerNode: 2
+			jobMaxTime: "10:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/home/BRID_USERNAME/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bri {
+		executable: "/bin/bash"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+site.communicado {
+	execution {
+		type: "coaster"
+		URL: "communicado.ci.uchicago.edu"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			userHomeOverride: "/home/COMM_USERNAME/swiftwork"
+			highOverallocation: 100
+			tasksPerNode: 2
+			jobMaxTime: "10:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/home/COMM_USERNAME/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.com {
+		executable: "/bin/bash"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: true
+executionRetries: 0
+keepSiteDir: true
+statusMode: "provider"
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

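The per-test swift.properties files are retired in favor of top-level keys at the end of each swift.conf; the renames visible in this hunk pair line up one-for-one, sketched here with the old property names as comments:

    TCPPortRange: "50000,51000"            # was: tcp.port.range=50000,51000
    lazyErrors: true                       # was: lazy.errors=true
    executionRetries: 0                    # was: execution.retries=0
    keepSiteDir: true                      # was: sitedir.keep=true
    statusMode: "provider"                 # was: status.mode=provider
    providerStagingPinSwiftFiles: false    # was: provider.staging.pin.swiftfiles=false
    alwaysTransferWrapperLog: true         # was: wrapperlog.always.transfer=true
    staging: "local"                       # was: use.provider.staging=true

Also worth noting: the deleted blues pool carried two conflicting maxtime profiles (3600 and 36000); the converted jobMaxTime of "10:00:00" corresponds to the latter.
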
Deleted: trunk/tests/multi_remote/swift.properties
===================================================================
--- trunk/tests/multi_remote/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/multi_remote/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,9 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=true
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
-status.mode=provider
\ No newline at end of file

Deleted: trunk/tests/multi_remote/tc.template.data
===================================================================
--- trunk/tests/multi_remote/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/multi_remote/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,11 +0,0 @@
-beagle       bgl /bin/bash null null null
-uc3          uc3 /bin/bash null null null
-midway       mid /bin/bash null null null
-mcs	         mcs /bin/bash null null null
-local     l_bash /bin/bash null null null
-ci           pub /bin/bash null null null
-fusion       fus /bin/bash null null null
-blues        blu /bin/bash null null null
-frisbee      fsb /bin/bash null null null
-communicado  com /bin/bash null null null
-bridled      bri /bin/bash null null null
\ No newline at end of file

Deleted: trunk/tests/sites/beagle/sites.template.xml
===================================================================
--- trunk/tests/sites/beagle/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/beagle/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="beagle">
-    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="login4.beagle.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24</profile>
-    <profile namespace="globus" key="maxtime">3700</profile>
-    <profile namespace="globus" key="maxWalltime">01:00:00</profile>
-    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/{env.USER}/swiftwork</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="maxnodes">2</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">1.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="karajan" key="workerLoggingLevel">trace</profile>
-    <!-- <workdirectory>/lustre/beagle/yadunandb/swiftwork</workdirectory> -->
-    <workdirectory>/tmp/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>
\ No newline at end of file

Added: trunk/tests/sites/beagle/swift.conf
===================================================================
--- trunk/tests/sites/beagle/swift.conf	                        (rev 0)
+++ trunk/tests/sites/beagle/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,41 @@
+include "${swift.home}/etc/swift.conf"
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "login4.beagle.ci.uchicago.edu"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 2
+			userHomeOverride: "/lustre/beagle/"${env.USER}"/swiftwork"
+			maxJobs: 1
+			highOverallocation: 100
+			jobMaxTime: "01:01:40"
+			lowOverallocation: 100
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "24"
+			}
+			tasksPerNode: 24
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${env.USER}"/swiftwork"
+	maxParallelTasks: 101
+	initialParallelTasks: 100
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "01:00:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

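Where the XML templates interpolated {env.USER}, the new files use HOCON substitutions spliced between quoted segments, as in the beagle hunks above:

    # XML:   <workdirectory>/tmp/{env.USER}/swiftwork</workdirectory>
    workDirectory: "/tmp/"${env.USER}"/swiftwork"   # adjacent strings and ${...} concatenate
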
Deleted: trunk/tests/sites/beagle/swift.properties
===================================================================
--- trunk/tests/sites/beagle/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/beagle/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/beagle/tc.template.data
===================================================================
--- trunk/tests/sites/beagle/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/beagle/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null

Deleted: trunk/tests/sites/blues/sites.template.xml
===================================================================
--- trunk/tests/sites/blues/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/blues/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-    <pool handle="blues">
-        <execution jobmanager="ssh-cl:pbs" provider="coaster" url="blues.lcrc.anl.gov"/>
-        <filesystem provider="local" url="none" />
-        <profile namespace="globus" key="maxtime">3600</profile>
-        <profile namespace="globus" key="jobsPerNode">2</profile>
-        <profile namespace="globus" key="ppn">8</profile>
-        <profile namespace="globus" key="queue">route</profile>
-        <profile namespace="globus" key="slots">1</profile>
-        <profile namespace="globus" key="nodeGranularity">2</profile>
-        <profile namespace="globus" key="maxNodes">2</profile>
-        <profile namespace="karajan" key="jobThrottle">1.00</profile>
-        <profile namespace="karajan" key="initialScore">10000</profile>
-        <workdirectory>/home/{env.USER}/swiftwork</workdirectory>
-    </pool>
-</config>

Added: trunk/tests/sites/blues/swift.conf
===================================================================
--- trunk/tests/sites/blues/swift.conf	                        (rev 0)
+++ trunk/tests/sites/blues/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,37 @@
+include "${swift.home}/etc/swift.conf"
+
+site.blues {
+	execution {
+		type: "coaster"
+		URL: "blues.lcrc.anl.gov"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 2
+			maxNodesPerJob: 2
+			jobQueue: "route"
+			maxJobs: 1
+			jobOptions.ppn: 8
+			tasksPerNode: 2
+			jobMaxTime: "01:00:00"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "/home/"${env.USER}"/swiftwork"
+	maxParallelTasks: 101
+	initialParallelTasks: 100
+	app.date {
+		executable: "/bin/date"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/blues/swift.properties
===================================================================
--- trunk/tests/sites/blues/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/blues/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/blues/tc.template.data
===================================================================
--- trunk/tests/sites/blues/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/blues/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,7 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null
-blues  date /bin/date null null null
-

Deleted: trunk/tests/sites/bridled/sites.template.xml
===================================================================
--- trunk/tests/sites/bridled/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/bridled/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="bridled">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="bridled.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">2</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">36000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="userHomeOverride">/home/{env.USER}/swiftwork</profile>
-    <workdirectory>/home/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-</config>
\ No newline at end of file

Added: trunk/tests/sites/bridled/swift.conf
===================================================================
--- trunk/tests/sites/bridled/swift.conf	                        (rev 0)
+++ trunk/tests/sites/bridled/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,32 @@
+include "${swift.home}/etc/swift.conf"
+
+site.bridled {
+	execution {
+		type: "coaster"
+		URL: "bridled.ci.uchicago.edu"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			userHomeOverride: "/home/"${env.USER}"/swiftwork"
+			highOverallocation: 100
+			tasksPerNode: 2
+			jobMaxTime: "10:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/home/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/bridled/swift.properties
===================================================================
--- trunk/tests/sites/bridled/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/bridled/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-#tcp.port.range=60000,61000
\ No newline at end of file

Deleted: trunk/tests/sites/bridled/tc.template.data
===================================================================
--- trunk/tests/sites/bridled/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/bridled/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,7 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null
-communicado date /bin/date null null null
-bridled date /bin/date null null null
\ No newline at end of file

Deleted: trunk/tests/sites/ci/sites.template.xml
===================================================================
--- trunk/tests/sites/ci/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/ci/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="ci">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="login.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3700</profile>
-    <profile namespace="globus" key="maxWalltime">01:00:00</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="maxnodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">4.80</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="karajan" key="workerLoggingLevel">trace</profile>
-    <workdirectory>/home/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>
\ No newline at end of file

Added: trunk/tests/sites/ci/swift.conf
===================================================================
--- trunk/tests/sites/ci/swift.conf	                        (rev 0)
+++ trunk/tests/sites/ci/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,35 @@
+include "${swift.home}/etc/swift.conf"
+
+site.ci {
+	execution {
+		type: "coaster"
+		URL: "login.ci.uchicago.edu"
+		jobManager: "ssh-cl:local"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			maxJobs: 1
+			highOverallocation: 100
+			tasksPerNode: 24
+			jobMaxTime: "01:01:40"
+		}
+	}
+	staging: "local"
+	workDirectory: "/home/"${env.USER}"/swiftwork"
+	maxParallelTasks: 481
+	initialParallelTasks: 480
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "01:00:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/ci/swift.properties
===================================================================
--- trunk/tests/sites/ci/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/ci/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/ci/tc.template.data
===================================================================
--- trunk/tests/sites/ci/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/ci/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,6 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-ci     date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null

Deleted: trunk/tests/sites/communicado/sites.template.xml
===================================================================
--- trunk/tests/sites/communicado/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/communicado/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,16 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="communicado">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="communicado.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">2</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">36000</profile>
-    <profile namespace="globus" key="maxWalltime">01:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="userHomeOverride">/home/{env.USER}/swiftwork</profile>
-    <workdirectory>/home/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>
\ No newline at end of file

Added: trunk/tests/sites/communicado/swift.conf
===================================================================
--- trunk/tests/sites/communicado/swift.conf	                        (rev 0)
+++ trunk/tests/sites/communicado/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,32 @@
+include "${swift.home}/etc/swift.conf"
+
+site.communicado {
+	execution {
+		type: "coaster"
+		URL: "communicado.ci.uchicago.edu"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			userHomeOverride: "/home/"${env.USER}"/swiftwork"
+			highOverallocation: 100
+			tasksPerNode: 2
+			jobMaxTime: "10:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/home/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "01:05:00"
+	}
+
+}
+
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/communicado/swift.properties
===================================================================
--- trunk/tests/sites/communicado/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/communicado/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-#tcp.port.range=60000,61000
\ No newline at end of file

Deleted: trunk/tests/sites/communicado/tc.template.data
===================================================================
--- trunk/tests/sites/communicado/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/communicado/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,7 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null
-communicado date /bin/date null null null
-bridled date /bin/date null null null
\ No newline at end of file

Deleted: trunk/tests/sites/fusion/sites.template.xml
===================================================================
--- trunk/tests/sites/fusion/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/fusion/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,18 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-   <pool handle="fusion">
-      <execution jobmanager="ssh-cl:pbs" provider="coaster" url="fusion.lcrc.anl.gov"/>
-      <filesystem provider="local" url="none" />
-      <profile namespace="globus" key="maxWallTime">00:00:30</profile>
-      <profile namespace="globus" key="jobsPerNode">2</profile>
-      <profile namespace="globus" key="slots">1</profile>
-      <profile namespace="globus" key="nodeGranularity">2</profile>
-      <profile namespace="globus" key="maxNodes">2</profile>
-      <profile namespace="globus" key="queue">shared</profile>
-      <profile namespace="karajan" key="jobThrottle">1.00</profile>
-      <profile namespace="karajan" key="initialScore">10000</profile>
-      <workdirectory>/homes/{env.USER}/swiftwork</workdirectory>
-   </pool>
-</config>
-

Added: trunk/tests/sites/fusion/swift.conf
===================================================================
--- trunk/tests/sites/fusion/swift.conf	                        (rev 0)
+++ trunk/tests/sites/fusion/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,36 @@
+include "${swift.home}/etc/swift.conf"
+
+site.fusion {
+	execution {
+		type: "coaster"
+		URL: "fusion.lcrc.anl.gov"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 2
+			jobQueue: "shared"
+			maxNodesPerJob: 2
+			maxJobs: 1
+			tasksPerNode: 2
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "/homes/"${env.USER}"/swiftwork"
+	maxParallelTasks: 101
+	initialParallelTasks: 100
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:00:30"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/fusion/swift.properties
===================================================================
--- trunk/tests/sites/fusion/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/fusion/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/fusion/tc.template.data
===================================================================
--- trunk/tests/sites/fusion/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/fusion/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,7 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null
-blues  date /bin/date null null null
-fusion date /bin/date null null null

Deleted: trunk/tests/sites/local/sites.template.xml
===================================================================
--- trunk/tests/sites/local/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/local/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,10 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-<pool handle="localhost">
-  <execution provider="local"/>
-  <filesystem provider="local"/>
-  <profile key="initialScore" namespace="karajan">10000</profile>
-  <profile key="jobThrottle" namespace="karajan">.31</profile>
-  <workdirectory>_WORK_</workdirectory>
-</pool>
-</config>

Added: trunk/tests/sites/local/swift.conf
===================================================================
--- trunk/tests/sites/local/swift.conf	                        (rev 0)
+++ trunk/tests/sites/local/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,44 @@
+include "${swift.home}/etc/swift.conf"
+
+site.localhost {
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	execution {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	maxParallelTasks: 32
+	initialParallelTasks: 31
+	app.cat {
+		executable: "/bin/cat"
+	}
+
+	app.echo {
+		executable: "/bin/echo"
+	}
+
+	app.grep {
+		executable: "/bin/grep"
+	}
+
+	app.ls {
+		executable: "/bin/ls"
+	}
+
+	app.paste {
+		executable: "/bin/paste"
+	}
+
+	app.sort {
+		executable: "/bin/sort"
+	}
+
+	app.wc {
+		executable: "/usr/bin/wc"
+	}
+
+}
+

Deleted: trunk/tests/sites/local/tc.template.data
===================================================================
--- trunk/tests/sites/local/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/local/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX
-localhost	wc		/usr/bin/wc	INSTALLED	INTEL32::LINUX
-

Deleted: trunk/tests/sites/local-coasters/sites.template.xml
===================================================================
--- trunk/tests/sites/local-coasters/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/local-coasters/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,24 +0,0 @@
-<config>
-
-  <pool handle="localhost" sysinfo="INTEL32::LINUX">
-    <gridftp url="local://localhost" />
-    <execution provider="local" url="none" />
-    <workdirectory>_WORK_</workdirectory>
-    <profile namespace="swift" key="stagingMethod">file</profile>
-  </pool>
-
-  <pool handle="coasterslocal">
-    <filesystem provider="local" />
-    <execution provider="coaster" jobmanager="local:local"/>
-    <profile namespace="globus"   key="internalHostname">_HOST_</profile>
-    <profile namespace="karajan"  key="jobthrottle">2.55</profile>
-    <profile namespace="karajan"  key="initialScore">10000</profile>
-    <profile namespace="globus"   key="jobsPerNode">4</profile>
-    <profile namespace="globus"   key="slots">8</profile>
-    <profile namespace="globus"   key="maxTime">1000</profile>
-    <profile namespace="globus"   key="nodeGranularity">1</profile>
-    <profile namespace="globus"   key="maxNodes">4</profile>
-    <workdirectory>_WORK_</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/sites/local-coasters/swift.conf
===================================================================
--- trunk/tests/sites/local-coasters/swift.conf	                        (rev 0)
+++ trunk/tests/sites/local-coasters/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,66 @@
+include "${swift.home}/etc/swift.conf"
+
+site.localhost {
+	execution {
+		type: "local"
+		URL: "none"
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	OS: "INTEL32::LINUX"
+	app.cat {
+		executable: "/bin/cat"
+	}
+
+	app.echo {
+		executable: "/bin/echo"
+	}
+
+	app.grep {
+		executable: "/bin/grep"
+	}
+
+	app.ls {
+		executable: "/bin/ls"
+	}
+
+	app.paste {
+		executable: "/bin/paste"
+	}
+
+	app.sort {
+		executable: "/bin/sort"
+	}
+
+}
+
+site.coasterslocal {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:local"
+		options {
+			internalHostname: "_HOST_"
+			nodeGranularity: 1
+			maxNodesPerJob: 4
+			maxJobs: 8
+			tasksPerNode: 4
+			jobMaxTime: "00:16:40"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	maxParallelTasks: 255
+	initialParallelTasks: 255
+	app.cp {
+		executable: "/bin/cp"
+	}
+
+}
+
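
The karajan jobThrottle values from the deleted XML translate to
maxParallelTasks as roughly throttle x 100 (+1), matching Swift's old
throttle-to-concurrency rule of thumb: 2.55 -> 255 here, and elsewhere in
this commit 10.00 -> 1001, .31 -> 32 and 0.0799 -> 8. The coaster maxTime of
1000 seconds likewise becomes jobMaxTime "00:16:40".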

Deleted: trunk/tests/sites/local-coasters/tc.template.data
===================================================================
--- trunk/tests/sites/local-coasters/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/local-coasters/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,19 +0,0 @@
-#This is the transformation catalog.
-#
-#It comes pre-configured with a number of simple transformations with
-#paths that are likely to work on a linux box. However, on some systems,
-#the paths to these executables will be different (for example, sometimes
-#some of these programs are found in /usr/bin rather than in /bin)
-#
-#NOTE WELL: fields in this file must be separated by tabs, not spaces; and
-#there must be no trailing whitespace at the end of each line.
-#
-# sitename  transformation  path   INSTALLED  platform  profiles
-localhost 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX	null
-localhost 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX	null
-localhost 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX	null
-localhost 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX	null
-localhost 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX	null
-localhost 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX	null
-
-coasterslocal 	cp 		/bin/cp         INSTALLED	INTEL32::LINUX	null

Deleted: trunk/tests/sites/mac-frisbee/sites.template.xml
===================================================================
--- trunk/tests/sites/mac-frisbee/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/mac-frisbee/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,15 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="frisbee">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="frisbee.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/homes/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/sites/mac-frisbee/swift.conf
===================================================================
--- trunk/tests/sites/mac-frisbee/swift.conf	                        (rev 0)
+++ trunk/tests/sites/mac-frisbee/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,32 @@
+include "${swift.home}/etc/swift.conf"
+
+site.frisbee {
+	execution {
+		type: "coaster"
+		URL: "frisbee.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/homes/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/mac-frisbee/swift.properties
===================================================================
--- trunk/tests/sites/mac-frisbee/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/mac-frisbee/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/mac-frisbee/tc.template.data
===================================================================
--- trunk/tests/sites/mac-frisbee/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/mac-frisbee/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,6 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null
-frisbee date /bin/date null null null
\ No newline at end of file

Deleted: trunk/tests/sites/mcs/sites.template.xml
===================================================================
--- trunk/tests/sites/mcs/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/mcs/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,15 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-  <pool handle="crush">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="crush.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="userHomeOverride">/sandbox/{env.USER}/</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/sites/mcs/swift.conf
===================================================================
--- trunk/tests/sites/mcs/swift.conf	                        (rev 0)
+++ trunk/tests/sites/mcs/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,33 @@
+include "${swift.home}/etc/swift.conf"
+
+site.crush {
+	execution {
+		type: "coaster"
+		URL: "crush.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			userHomeOverride: "/sandbox/"${env.USER}"/"
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/mcs/swift.properties
===================================================================
--- trunk/tests/sites/mcs/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/mcs/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/mcs/tc.template.data
===================================================================
--- trunk/tests/sites/mcs/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/mcs/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-crush  date /bin/date null null null
-midway date /bin/date null null null

Deleted: trunk/tests/sites/midway/sites.template.xml
===================================================================
--- trunk/tests/sites/midway/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/midway/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,18 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="midway">
-    <execution provider="coaster" jobmanager="ssh-cl:slurm" url="swift.rcc.uchicago.edu"/>
-    <profile namespace="globus" key="queue">sandyb</profile>
-    <profile namespace="globus" key="jobsPerNode">1</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus" key="maxTime">500</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="globus" key="slurm.exclusive">false</profile>
-    <profile namespace="karajan" key="jobThrottle">.64</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{env.USER}</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/sites/midway/swift.conf
===================================================================
--- trunk/tests/sites/midway/swift.conf	                        (rev 0)
+++ trunk/tests/sites/midway/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,35 @@
+include "${swift.home}/etc/swift.conf"
+
+site.midway {
+	execution {
+		type: "coaster"
+		URL: "swift.rcc.uchicago.edu"
+		jobManager: "ssh-cl:slurm"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			jobQueue: "sandyb"
+			maxJobs: 1
+			# Option ignored: globus:slurm.exclusive = false
+			tasksPerNode: 1
+			jobMaxTime: "00:08:20"
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${env.USER}
+	maxParallelTasks: 65
+	initialParallelTasks: 64
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/midway/swift.properties
===================================================================
--- trunk/tests/sites/midway/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/midway/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/midway/tc.template.data
===================================================================
--- trunk/tests/sites/midway/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/midway/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null

Deleted: trunk/tests/sites/multiple_coaster_pools/sites.template.xml
===================================================================
--- trunk/tests/sites/multiple_coaster_pools/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/multiple_coaster_pools/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,32 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="midway_single">
-    <execution provider="coaster" jobmanager="local:slurm" url="localhost:1"/>
-    <profile namespace="globus" key="jobsPerNode">1</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="queue">sandyb</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <filesystem provider="local"/>
-    <workdirectory>/scratch/midway/{env.USER}</workdirectory>
-  </pool>
-
-  <pool handle="midway_multiple">
-    <execution provider="coaster" jobmanager="local:slurm" url="localhost:2"/>
-    <profile namespace="globus" key="jobsPerNode">16</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="queue">sandyb</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <filesystem provider="local"/>
-    <workdirectory>/scratch/midway/{env.USER}</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/sites/multiple_coaster_pools/swift.conf
===================================================================
--- trunk/tests/sites/multiple_coaster_pools/swift.conf	                        (rev 0)
+++ trunk/tests/sites/multiple_coaster_pools/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,58 @@
+include "${swift.home}/etc/swift.conf"
+
+site.midway_single {
+	execution {
+		type: "coaster"
+		URL: "localhost:1"
+		jobManager: "local:slurm"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			jobQueue: "sandyb"
+			maxJobs: 1
+			highOverallocation: 100
+			tasksPerNode: 1
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "/scratch/midway/"${env.USER}
+	maxParallelTasks: 201
+	initialParallelTasks: 200
+	app.bash_single {
+		executable: "/bin/bash"
+	}
+
+}
+
+site.midway_multiple {
+	execution {
+		type: "coaster"
+		URL: "localhost:2"
+		jobManager: "local:slurm"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			jobQueue: "sandyb"
+			maxJobs: 1
+			highOverallocation: 100
+			tasksPerNode: 16
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "/scratch/midway/"${env.USER}
+	maxParallelTasks: 201
+	initialParallelTasks: 200
+	app.bash_multiple {
+		executable: "/bin/bash"
+	}
+
+}
+

Deleted: trunk/tests/sites/multiple_coaster_pools/tc.template.data
===================================================================
--- trunk/tests/sites/multiple_coaster_pools/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/multiple_coaster_pools/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-midway_single bash_single /bin/bash
-midway_multiple bash_multiple /bin/bash
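
Each transformation catalog line above (site, transformation name, path)
becomes an app block under the matching site in the new swift.conf, as in:

    site.midway_single {
        app.bash_single {
            executable: "/bin/bash"
        }
    }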

Deleted: trunk/tests/sites/osgconnect/sites.template.xml
===================================================================
--- trunk/tests/sites/osgconnect/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/osgconnect/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,20 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="osgc">
-    <execution provider="coaster" url="login01.osgconnect.net" jobmanager="ssh-cl:condor"/>
-    <profile namespace="karajan" key="jobThrottle">10.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="maxtime">3600</profile>
-    <profile namespace="globus"  key="maxWalltime">00:30:00</profile>
-    <profile namespace="globus"  key="highOverAllocation">100</profile>
-    <profile namespace="globus"  key="lowOverAllocation">100</profile>
-    <profile namespace="globus"  key="slots">1</profile>
-    <profile namespace="globus"  key="maxNodes">1</profile>
-    <profile namespace="globus"  key="nodeGranularity">1</profile>
-    <profile namespace="globus"  key="jobType">nonshared</profile>
-    <profile namespace="globus"  key="condor.+ProjectName">"Swift"</profile>
-    <workdirectory>.</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/sites/osgconnect/swift.conf
===================================================================
--- trunk/tests/sites/osgconnect/swift.conf	                        (rev 0)
+++ trunk/tests/sites/osgconnect/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,37 @@
+include "${swift.home}/etc/swift.conf"
+
+site.osgc {
+	execution {
+		type: "coaster"
+		URL: "login01.osgconnect.net"
+		jobManager: "ssh-cl:condor"
+		options {
+			# Option ignored: globus:jobtype = nonshared
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			maxJobs: 1
+			highOverallocation: 100
+			jobMaxTime: "01:00:00"
+			lowOverallocation: 100
+			# Option ignored: globus:condor.+projectname = "Swift"
+			tasksPerNode: 1
+		}
+	}
+	staging: "local"
+	workDirectory: "."
+	maxParallelTasks: 1001
+	initialParallelTasks: 999
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:30:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/osgconnect/swift.properties
===================================================================
--- trunk/tests/sites/osgconnect/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/osgconnect/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/osgconnect/tc.template.data
===================================================================
--- trunk/tests/sites/osgconnect/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/osgconnect/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,6 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null
-osgc   date /bin/date null null null
\ No newline at end of file

Deleted: trunk/tests/sites/raven/sites.template.xml
===================================================================
--- trunk/tests/sites/raven/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/raven/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-  <pool handle="raven">
-    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="raven.cray.com"/>
-    <profile namespace="globus" key="project">CI-SES000031</profile>
-    <profile namespace="env" key="SWIFT_GEN_SCRIPTS">KEEP</profile>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24</profile>
-    <profile namespace="globus" key="maxWallTime">00:01:00</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="karajan" key="jobThrottle">1.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <filesystem provider="local"/>
-    <workdirectory>/home/users/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>
-

Added: trunk/tests/sites/raven/swift.conf
===================================================================
--- trunk/tests/sites/raven/swift.conf	                        (rev 0)
+++ trunk/tests/sites/raven/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,41 @@
+include "${swift.home}/etc/swift.conf"
+
+site.raven {
+	execution {
+		type: "coaster"
+		URL: "raven.cray.com"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			jobProject: "CI-SES000031"
+			maxJobs: 1
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "24"
+			}
+			tasksPerNode: 24
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "/home/users/"${env.USER}"/swiftwork"
+	maxParallelTasks: 101
+	initialParallelTasks: 100
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:01:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/raven/swift.properties
===================================================================
--- trunk/tests/sites/raven/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/raven/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/raven/tc.template.data
===================================================================
--- trunk/tests/sites/raven/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/raven/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null
-blues  date /bin/date null null null
-fusion date /bin/date null null null
-raven  date /bin/date null null null

Deleted: trunk/tests/sites/stampede/sites.template.xml
===================================================================
--- trunk/tests/sites/stampede/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/stampede/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,41 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="stampede">
-    <execution provider="coaster"  jobmanager="local:slurm"/>
-    <filesystem provider="local"/>
-    <profile namespace="globus"  key="jobsPerNode">16</profile>
-    <profile namespace="globus"  key="ppn">16</profile>
-    <profile namespace="globus"  key="maxwalltime">00:01:00</profile>
-    <profile namespace="globus"  key="queue">development</profile>
-    <profile namespace="globus"  key="nodeGranularity">2</profile>
-    <profile namespace="globus"  key="maxNodes">2</profile>
-    <profile namespace="globus"  key="project">TG-MCA94P017</profile>
-    <profile namespace="karajan" key="jobThrottle">2</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>{env.SCRATCH}</workdirectory>
-  </pool>
-</config>
-
-
-<!--
-<config>
-  <pool handle="stampede">
-    <execution provider="coaster" jobmanager="gt2:gt2:slurm" url="login5.stampede.tacc.utexas.edu:2119/jobmanager-slurm"/>
-    <filesystem provider="gsiftp" url="gsiftp://gridftp.stampede.tacc.utexas.edu:2811"/>
-    <profile namespace="globus"  key="jobsPerNode">16</profile>
-    <profile namespace="globus"  key="ppn">16</profile>
-    <profile namespace="globus"  key="maxTime">3600</profile>
-    <profile namespace="globus"  key="maxwalltime">00:05:00</profile>
-    <profile namespace="globus"  key="lowOverallocation">100</profile>
-    <profile namespace="globus"  key="highOverallocation">100</profile>
-    <profile namespace="globus"  key="queue">normal</profile>
-    <profile namespace="globus"  key="nodeGranularity">1</profile>
-    <profile namespace="globus"  key="maxNodes">1</profile>
-    <profile namespace="globus"  key="project">TG-EAR130015</profile>
-    <profile namespace="karajan" key="jobThrottle">.3199</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/scratch/01739/ketan/swift.work</workdirectory>
-  </pool>
-</config>
--->
\ No newline at end of file

Added: trunk/tests/sites/stampede/swift.conf
===================================================================
--- trunk/tests/sites/stampede/swift.conf	                        (rev 0)
+++ trunk/tests/sites/stampede/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,37 @@
+include "${swift.home}/etc/swift.conf"
+
+site.stampede {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:slurm"
+		options {
+			nodeGranularity: 2
+			maxNodesPerJob: 2
+			jobQueue: "development"
+			jobProject: "TG-MCA94P017"
+			jobOptions.ppn: 16
+			tasksPerNode: 16
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: ${env.SCRATCH}
+	maxParallelTasks: 201
+	initialParallelTasks: 200
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:01:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/stampede/swift.properties
===================================================================
--- trunk/tests/sites/stampede/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/stampede/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/stampede/tc.template.data
===================================================================
--- trunk/tests/sites/stampede/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/stampede/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null
-blues  date /bin/date null null null
-fusion date /bin/date null null null
-stampede date /bin/date null null null

Deleted: trunk/tests/sites/todo/crow/sites.template.xml
===================================================================
--- trunk/tests/sites/todo/crow/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/crow/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,31 +0,0 @@
-<config>
-
-<import file="sys.xml"/>
-<set name="pwd"><sys:getenv name="PWD"/></set>
-<set name="wdir" value="{pwd}/work"/>
-<echo message="setting workDirectory to: {wdir}"/>
-
-<pool handle="crow">
-  <execution jobmanager="local:pbs" provider="coaster" url="none"/>
-  <filesystem provider="local" url="none" />
-  <profile namespace="globus" key="maxWallTime">1</profile>
-  <profile namespace="globus" key="maxTime">7200</profile>
-
-  <profile namespace="globus" key="providerAttributes">
-    pbs.aprun;pbs.mpp;depth=6
-  </profile>
-  <profile key="jobsPerNode" namespace="globus">6</profile>
-  <profile key="slots" namespace="globus">1</profile>
-  <profile key="nodeGranularity" namespace="globus">1</profile>
-  <profile key="maxNodes" namespace="globus">1</profile>
-  <profile key="workerLoggingLevel" namespace="globus">DEBUG</profile>
-  <profile key="workerLoggingDirectory" namespace="globus">{wdir}</profile>
-  <profile key="jobThrottle" namespace="karajan">5.99</profile>
-  <profile key="initialScore" namespace="karajan">10000</profile>
-  <workdirectory>{wdir}</workdirectory>
-</pool>
-</config>
-
-<!-- RESERVATIONS:
-Add something like this to your providerAttributes:
-pbs.resources=advres=modFTDock.47 -->

Added: trunk/tests/sites/todo/crow/swift.conf
===================================================================
--- trunk/tests/sites/todo/crow/swift.conf	                        (rev 0)
+++ trunk/tests/sites/todo/crow/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,66 @@
+include "${swift.home}/etc/swift.conf"
+
+site.crow {
+	execution {
+		type: "coaster"
+		URL: "none"
+		jobManager: "local:pbs"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			workerLoggingLevel: "DEBUG"
+			maxJobs: 1
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "6"
+			}
+			workerLoggingDirectory: ${env.PWD}/work
+			tasksPerNode: 6
+			jobMaxTime: "02:00:00"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: ${env.PWD}/work
+	maxParallelTasks: 600
+	initialParallelTasks: 599
+	app.cat {
+		executable: "/bin/cat"
+		maxWallTime: "00:01:00"
+	}
+
+	app.echo {
+		executable: "/bin/echo"
+		maxWallTime: "00:01:00"
+	}
+
+	app.grep {
+		executable: "/bin/grep"
+		maxWallTime: "00:01:00"
+	}
+
+	app.ls {
+		executable: "/bin/ls"
+		maxWallTime: "00:01:00"
+	}
+
+	app.paste {
+		executable: "/bin/paste"
+		maxWallTime: "00:01:00"
+	}
+
+	app.sort {
+		executable: "/bin/sort"
+		maxWallTime: "00:01:00"
+	}
+
+	app.wc {
+		executable: "/usr/bin/wc"
+		maxWallTime: "00:01:00"
+	}
+
+}
+

Deleted: trunk/tests/sites/todo/crow/tc.template.data
===================================================================
--- trunk/tests/sites/todo/crow/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/crow/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,7 +0,0 @@
-crow 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX
-crow 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX
-crow 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX
-crow 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX
-crow 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX
-crow 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX
-crow	wc		/usr/bin/wc	INSTALLED	INTEL32::LINUX

Deleted: trunk/tests/sites/todo/fusion/sites.template.xml
===================================================================
--- trunk/tests/sites/todo/fusion/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/fusion/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,16 +0,0 @@
-<config>
-<pool handle="fusion">
-  <execution jobmanager="local:pbs" provider="coaster" url="none"/>
-  <filesystem provider="local" url="none" />
-  <profile namespace="globus" key="maxtime">750</profile>
-  <profile namespace="globus" key="jobsPerNode">1</profile>
-  <profile namespace="globus" key="slots">1</profile>
-  <profile namespace="globus" key="nodeGranularity">1</profile>
-  <profile namespace="globus" key="maxNodes">2</profile>
-  <profile namespace="globus" key="queue">shared</profile>
-  <profile namespace="karajan" key="jobThrottle">5.99</profile>
-  <profile namespace="karajan" key="initialScore">10000</profile>
-  <workdirectory>_WORK_</workdirectory>
-</pool>
-</config>
-

Added: trunk/tests/sites/todo/fusion/swift.conf
===================================================================
--- trunk/tests/sites/todo/fusion/swift.conf	                        (rev 0)
+++ trunk/tests/sites/todo/fusion/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,53 @@
+include "${swift.home}/etc/swift.conf"
+
+site.fusion {
+	execution {
+		type: "coaster"
+		URL: "none"
+		jobManager: "local:pbs"
+		options {
+			nodeGranularity: 1
+			jobQueue: "shared"
+			maxNodesPerJob: 2
+			maxJobs: 1
+			tasksPerNode: 1
+			jobMaxTime: "00:12:30"
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "_WORK_"
+	maxParallelTasks: 600
+	initialParallelTasks: 599
+	app.cat {
+		executable: "/bin/cat"
+	}
+
+	app.echo {
+		executable: "/bin/echo"
+	}
+
+	app.grep {
+		executable: "/bin/grep"
+	}
+
+	app.ls {
+		executable: "/bin/ls"
+	}
+
+	app.paste {
+		executable: "/bin/paste"
+	}
+
+	app.sort {
+		executable: "/bin/sort"
+	}
+
+	app.wc {
+		executable: "/usr/bin/wc"
+	}
+
+}
+

Deleted: trunk/tests/sites/todo/fusion/tc.template.data
===================================================================
--- trunk/tests/sites/todo/fusion/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/fusion/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,7 +0,0 @@
-fusion 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX
-fusion 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX
-fusion 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX
-fusion 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX
-fusion 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX
-fusion 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX
-fusion	wc		/usr/bin/wc	INSTALLED	INTEL32::LINUX

Deleted: trunk/tests/sites/todo/ibicluster/sites.template.xml
===================================================================
--- trunk/tests/sites/todo/ibicluster/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/ibicluster/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,14 +0,0 @@
-<config>
- <pool handle="ibicluster">
-  <execution provider="coaster" url="none" jobmanager="local:sge"/>
-  <profile namespace="globus" key="pe">threaded</profile>
-  <profile namespace="globus" key="queue">all.q</profile>
-  <profile namespace="globus" key="jobsPerNode">8</profile>
-  <profile namespace="globus" key="nodeGranularity">1</profile>
-  <profile namespace="globus" key="maxnodes">2</profile>
-  <profile namespace="karajan" key="jobThrottle">0.159</profile>
-  <profile namespace="karajan" key="initialScore">10000</profile>
-  <filesystem provider="local" url="none"/>
-  <workdirectory>_WORK_</workdirectory>
- </pool>
-</config>

Added: trunk/tests/sites/todo/ibicluster/swift.conf
===================================================================
--- trunk/tests/sites/todo/ibicluster/swift.conf	                        (rev 0)
+++ trunk/tests/sites/todo/ibicluster/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,52 @@
+include "${swift.home}/etc/swift.conf"
+
+site.ibicluster {
+	execution {
+		type: "coaster"
+		URL: "none"
+		jobManager: "local:sge"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 2
+			jobQueue: "all.q"
+			# Option ignored: globus:pe = threaded
+			tasksPerNode: 8
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "none"
+	}
+	workDirectory: "_WORK_"
+	maxParallelTasks: 16
+	initialParallelTasks: 16
+	app.cat {
+		executable: "/bin/cat"
+	}
+
+	app.echo {
+		executable: "/bin/echo"
+	}
+
+	app.grep {
+		executable: "/bin/grep"
+	}
+
+	app.ls {
+		executable: "/bin/ls"
+	}
+
+	app.paste {
+		executable: "/bin/paste"
+	}
+
+	app.sort {
+		executable: "/bin/sort"
+	}
+
+	app.wc {
+		executable: "/usr/bin/wc"
+	}
+
+}
+

Deleted: trunk/tests/sites/todo/ibicluster/tc.template.data
===================================================================
--- trunk/tests/sites/todo/ibicluster/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/ibicluster/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-ibicluster 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX
-ibicluster 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX
-ibicluster 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX
-ibicluster 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX
-ibicluster 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX
-ibicluster 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX
-ibicluster	wc		/usr/bin/wc	INSTALLED	INTEL32::LINUX
-

Deleted: trunk/tests/sites/todo/intrepid/sites.template.xml
===================================================================
--- trunk/tests/sites/todo/intrepid/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/intrepid/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,32 +0,0 @@
-<config>
-
-  <pool handle="localhost" sysinfo="INTEL32::LINUX">
-    <gridftp url="local://localhost" />
-    <execution provider="local" url="none" />
-    <workdirectory>/scratch/wozniak/work</workdirectory>
-    <!-- <profile namespace="karajan" key="maxSubmitRate">1</profile> -->
-    <profile namespace="karajan" key="jobThrottle">0.04</profile>
-    <profile namespace="swift"   key="stagingMethod">file</profile>
-  </pool>
-
-  <pool handle="coasters_alcfbgp">
-    <filesystem provider="local" />
-    <execution provider="coaster" jobmanager="local:cobalt"/>
-    <!-- <profile namespace="swift"   key="stagingMethod">local</profile> -->
-    <profile namespace="globus"  key="internalHostname">_HOST_</profile>
-    <profile namespace="globus"  key="project">_PROJECT_</profile>
-    <profile namespace="globus"  key="queue">_QUEUE_</profile>
-    <profile namespace="globus"  key="kernelprofile">zeptoos</profile>
-    <profile namespace="globus"  key="alcfbgpnat">true</profile>
-    <profile namespace="karajan" key="jobthrottle">5.11</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="workerLoggingLevel">DEBUG</profile>
-    <profile namespace="globus"  key="slots">1</profile>
-    <profile namespace="globus"  key="maxTime">900</profile> <!-- seconds -->
-    <profile namespace="globus"  key="nodeGranularity">512</profile>
-    <profile namespace="globus"  key="maxNodes">512</profile>
-    <workdirectory>_WORK_</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/sites/todo/intrepid/swift.conf
===================================================================
--- trunk/tests/sites/todo/intrepid/swift.conf	                        (rev 0)
+++ trunk/tests/sites/todo/intrepid/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,47 @@
+include "${swift.home}/etc/swift.conf"
+
+site.localhost {
+	execution {
+		type: "local"
+		URL: "none"
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "/scratch/wozniak/work"
+	OS: "INTEL32::LINUX"
+}
+
+site.coasters_alcfbgp {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:cobalt"
+		options {
+			internalHostname: "_HOST_"
+			nodeGranularity: 512
+			maxNodesPerJob: 512
+			jobQueue: "_QUEUE_"
+			workerLoggingLevel: "DEBUG"
+			maxJobs: 1
+			# Option ignored: globus:alcfbgpnat = true
+			jobMaxTime: "00:15:00"
+			# Option ignored: globus:kernelprofile = zeptoos
+			jobProject: "_PROJECT_"
+			tasksPerNode: 1
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	maxParallelTasks: 512
+	initialParallelTasks: 511
+	app.cp {
+		executable: "/bin/cp"
+	}
+
+}
+

Deleted: trunk/tests/sites/todo/intrepid/tc.template.data
===================================================================
--- trunk/tests/sites/todo/intrepid/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/intrepid/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1 +0,0 @@
-coasters_alcfbgp	cp 		/bin/cp         INSTALLED	INTEL32::LINUX	null

Deleted: trunk/tests/sites/todo/surveyor/sites.template.xml
===================================================================
--- trunk/tests/sites/todo/surveyor/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/surveyor/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,22 +0,0 @@
-<config>
-  <pool handle="surveyor">
-    <filesystem provider="local" />
-    <execution provider="coaster" jobmanager="local:cobalt"/>
-    <!-- <profile namespace="swift"   key="stagingMethod">local</profile> -->
-    <profile namespace="globus"  key="internalHostname">_HOST_</profile>
-    <profile namespace="globus"  key="project">_PROJECT_</profile>
-    <profile namespace="globus"  key="queue">_QUEUE_</profile>
-    <profile namespace="globus"  key="kernelprofile">zeptoos</profile>
-    <profile namespace="globus"  key="alcfbgpnat">true</profile>
-    <profile namespace="karajan" key="jobthrottle">21</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="workerLoggingLevel">DEBUG</profile>
-    <profile namespace="globus"  key="slots">1</profile>
-    <profile namespace="globus"  key="maxTime">900</profile> <!-- seconds -->
-    <profile namespace="globus"  key="nodeGranularity">64</profile>
-    <profile namespace="globus"  key="maxNodes">64</profile>
-    <workdirectory>_WORK_</workdirectory>
-  </pool>
-</config>
-

Added: trunk/tests/sites/todo/surveyor/swift.conf
===================================================================
--- trunk/tests/sites/todo/surveyor/swift.conf	                        (rev 0)
+++ trunk/tests/sites/todo/surveyor/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,58 @@
+include "${swift.home}/etc/swift.conf"
+
+site.surveyor {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:cobalt"
+		options {
+			internalHostname: "_HOST_"
+			nodeGranularity: 64
+			maxNodesPerJob: 64
+			jobQueue: "_QUEUE_"
+			workerLoggingLevel: "DEBUG"
+			maxJobs: 1
+			# Option ignored: globus:alcfbgpnat = true
+			jobMaxTime: "00:15:00"
+			# Option ignored: globus:kernelprofile = zeptoos
+			jobProject: "_PROJECT_"
+			tasksPerNode: 1
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "_WORK_"
+	maxParallelTasks: 2101
+	initialParallelTasks: 2097
+	app.cat {
+		executable: "/bin/cat"
+	}
+
+	app.echo {
+		executable: "/bin/echo"
+	}
+
+	app.grep {
+		executable: "/bin/grep"
+	}
+
+	app.ls {
+		executable: "/bin/ls"
+	}
+
+	app.paste {
+		executable: "/bin/paste"
+	}
+
+	app.sort {
+		executable: "/bin/sort"
+	}
+
+	app.wc {
+		executable: "/usr/bin/wc"
+	}
+
+}
+

Deleted: trunk/tests/sites/todo/surveyor/tc.template.data
===================================================================
--- trunk/tests/sites/todo/surveyor/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/todo/surveyor/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,7 +0,0 @@
-surveyor 	echo 		/bin/echo	INSTALLED	INTEL32::LINUX
-surveyor 	cat 		/bin/cat	INSTALLED	INTEL32::LINUX
-surveyor 	ls 		/bin/ls		INSTALLED	INTEL32::LINUX
-surveyor 	grep 		/bin/grep	INSTALLED	INTEL32::LINUX
-surveyor 	sort 		/bin/sort	INSTALLED	INTEL32::LINUX
-surveyor 	paste 		/bin/paste	INSTALLED	INTEL32::LINUX
-surveyor	wc		/usr/bin/wc	INSTALLED	INTEL32::LINUX

Deleted: trunk/tests/sites/uc3/sites.template.xml
===================================================================
--- trunk/tests/sites/uc3/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/uc3/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="uc3">
-    <execution provider="coaster" url="uc3-sub.uchicago.edu" jobmanager="ssh-cl:condor"/>
-    <profile namespace="karajan" key="jobThrottle">10.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="maxtime">3600</profile>
-    <profile namespace="globus"  key="maxWalltime">00:30:00</profile>
-    <profile namespace="globus"  key="highOverAllocation">100</profile>
-    <profile namespace="globus"  key="lowOverAllocation">100</profile>
-    <profile namespace="globus"  key="slots">1</profile>
-    <profile namespace="globus"  key="maxNodes">1</profile>
-    <profile namespace="globus"  key="nodeGranularity">1</profile>
-    <profile namespace="globus"  key="jobType">nonshared</profile>
-    <workdirectory>.</workdirectory>
-  </pool>
-</config>
\ No newline at end of file

Added: trunk/tests/sites/uc3/swift.conf
===================================================================
--- trunk/tests/sites/uc3/swift.conf	                        (rev 0)
+++ trunk/tests/sites/uc3/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,36 @@
+include "${swift.home}/etc/swift.conf"
+
+site.uc3 {
+	execution {
+		type: "coaster"
+		URL: "uc3-sub.uchicago.edu"
+		jobManager: "ssh-cl:condor"
+		options {
+			# Option ignored: globus:jobtype = nonshared
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			maxJobs: 1
+			highOverallocation: 100
+			tasksPerNode: 1
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "."
+	maxParallelTasks: 1001
+	initialParallelTasks: 999
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:30:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/sites/uc3/swift.properties
===================================================================
--- trunk/tests/sites/uc3/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/uc3/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/sites/uc3/tc.template.data
===================================================================
--- trunk/tests/sites/uc3/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/sites/uc3/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null

Deleted: trunk/tests/stress/IO/bagOnodes/sites.template.xml
===================================================================
--- trunk/tests/stress/IO/bagOnodes/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/bagOnodes/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,124 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="crank">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="crank.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="churn">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="churn.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="crush">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="crush.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="grind">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="grind.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="steamroller">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="steamroller.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="stomp">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="stomp.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="thrash">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="thrash.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="thwomp">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="thwomp.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="trounce">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="trounce.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="vanquish">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="vanquish.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/stress/IO/bagOnodes/swift.conf
===================================================================
--- trunk/tests/stress/IO/bagOnodes/swift.conf	                        (rev 0)
+++ trunk/tests/stress/IO/bagOnodes/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,239 @@
+include "${swift.home}/etc/swift.conf"
+
+site.crank {
+	execution {
+		type: "coaster"
+		URL: "crank.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.churn {
+	execution {
+		type: "coaster"
+		URL: "churn.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.crush {
+	execution {
+		type: "coaster"
+		URL: "crush.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.grind {
+	execution {
+		type: "coaster"
+		URL: "grind.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.steamroller {
+	execution {
+		type: "coaster"
+		URL: "steamroller.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.stomp {
+	execution {
+		type: "coaster"
+		URL: "stomp.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.thrash {
+	execution {
+		type: "coaster"
+		URL: "thrash.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.thwomp {
+	execution {
+		type: "coaster"
+		URL: "thwomp.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.trounce {
+	execution {
+		type: "coaster"
+		URL: "trounce.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.vanquish {
+	execution {
+		type: "coaster"
+		URL: "vanquish.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/IO/bagOnodes/swift.properties
===================================================================
--- trunk/tests/stress/IO/bagOnodes/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/bagOnodes/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/IO/bagOnodes/tc.template.data
===================================================================
--- trunk/tests/stress/IO/bagOnodes/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/bagOnodes/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,10 +0,0 @@
-crank  bash /bin/bash null null null
-churn  bash /bin/bash null null null
-crush  bash /bin/bash null null null
-grind  bash /bin/bash null null null
-steamroller bash /bin/bash null null null
-stomp  bash /bin/bash null null null
-thrash bash /bin/bash null null null
-thwomp bash /bin/bash null null null
-trounce bash /bin/bash null null null
-vanquish bash /bin/bash null null null

Deleted: trunk/tests/stress/IO/beagle/sites.template.xml
===================================================================
--- trunk/tests/stress/IO/beagle/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/beagle/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-  <pool handle="beagle">
-    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="login4.beagle.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24</profile>
-    <profile namespace="globus" key="maxtime">3700</profile>
-    <profile namespace="globus" key="maxWalltime">00:25:00</profile>
-    <!-- <profile namespace="globus" key="queue">development</profile> -->
-    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/{env.USER}/swiftwork</profile>
-    <profile namespace="globus" key="slots">2</profile>
-    <profile namespace="globus" key="maxnodes">4</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">4.80</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="karajan" key="workerLoggingLevel">trace</profile>
-    <!-- <workdirectory>/lustre/beagle/yadunandb/swiftwork</workdirectory> -->
-    <workdirectory>/tmp/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/stress/IO/beagle/swift.conf
===================================================================
--- trunk/tests/stress/IO/beagle/swift.conf	                        (rev 0)
+++ trunk/tests/stress/IO/beagle/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,41 @@
+include "${swift.home}/etc/swift.conf"
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "login4.beagle.ci.uchicago.edu"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 4
+			userHomeOverride: "/lustre/beagle/"${env.USER}"/swiftwork"
+			maxJobs: 2
+			highOverallocation: 100
+			jobMaxTime: "01:01:40"
+			lowOverallocation: 100
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "24"
+			}
+			tasksPerNode: 24
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${env.USER}"/swiftwork"
+	maxParallelTasks: 481
+	initialParallelTasks: 480
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:25:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/IO/beagle/swift.properties
===================================================================
--- trunk/tests/stress/IO/beagle/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/beagle/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/IO/beagle/tc.template.data
===================================================================
--- trunk/tests/stress/IO/beagle/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/beagle/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-local  perl /usr/bin/perl null null null
-beagle bash /bin/bash null null null

Deleted: trunk/tests/stress/IO/multiple/sites.template.xml
===================================================================
--- trunk/tests/stress/IO/multiple/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/multiple/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,69 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-  <pool handle="uc3">
-    <execution provider="coaster" url="uc3-sub.uchicago.edu" jobmanager="ssh-cl:condor"/>
-    <profile namespace="karajan" key="jobThrottle">10.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="maxtime">3600</profile>
-    <profile namespace="globus"  key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus"  key="highOverAllocation">100</profile>
-    <profile namespace="globus"  key="lowOverAllocation">100</profile>
-    <profile namespace="globus"  key="slots">1000</profile>
-    <profile namespace="globus"  key="maxNodes">1</profile>
-    <profile namespace="globus"  key="nodeGranularity">1</profile>
-    <profile namespace="globus"  key="condor.+AccountingGroup">"group_friends.{uc3.USER}"</profile>
-    <profile namespace="globus"  key="jobType">nonshared</profile>
-    <!-- <profile namespace="globus"  key="condor.+Requirements">isUndefined(GLIDECLIENT_Name) == FALSE</profile> -->
-    <workdirectory>.</workdirectory>
-  </pool>
-
-  <pool handle="beagle">
-    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="login4.beagle.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <!-- <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24</profile> -->
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/{beagle.USER}/swiftwork</profile>
-    <profile namespace="globus" key="slots">5</profile>
-    <profile namespace="globus" key="maxnodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">4.80</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{beagle.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="sandyb">
-    <execution provider="coaster" jobmanager="local:slurm"/>
-    <profile namespace="globus" key="queue">sandyb</profile>
-    <profile namespace="globus" key="jobsPerNode">16</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus" key="maxTime">3600</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="slots">4</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">.64</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{mid.USER}</workdirectory>
-  </pool>
-
-  <pool handle="westmere">
-    <execution provider="coaster" jobmanager="local:slurm"/>
-    <profile namespace="globus" key="queue">westmere</profile>
-    <profile namespace="globus" key="jobsPerNode">12</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus" key="maxTime">3600</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="slots">4</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">.48</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{mid.USER}</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/stress/IO/multiple/swift.conf
===================================================================
--- trunk/tests/stress/IO/multiple/swift.conf	                        (rev 0)
+++ trunk/tests/stress/IO/multiple/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,103 @@
+include "${swift.home}/etc/swift.conf"
+
+site.uc3 {
+	execution {
+		type: "coaster"
+		URL: "uc3-sub.uchicago.edu"
+		jobManager: "ssh-cl:condor"
+		options {
+			# Option ignored: globus:jobtype = nonshared
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			maxJobs: 1000
+			highOverallocation: 100
+			# Option ignored: globus:condor.+accountinggroup = "group_friends.{uc3.USER}"
+			jobMaxTime: "01:00:00"
+			lowOverallocation: 100
+			tasksPerNode: 1
+		}
+	}
+	staging: "local"
+	workDirectory: "."
+	maxParallelTasks: 1001
+	initialParallelTasks: 999
+}
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "login4.beagle.ci.uchicago.edu"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			userHomeOverride: "/lustre/beagle/"${beagle.USER}"/swiftwork"
+			maxJobs: 5
+			highOverallocation: 100
+			tasksPerNode: 24
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${beagle.USER}"/swiftwork"
+	maxParallelTasks: 481
+	initialParallelTasks: 480
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+site.sandyb {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:slurm"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			jobQueue: "sandyb"
+			maxJobs: 4
+			highOverallocation: 100
+			tasksPerNode: 16
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${mid.USER}
+	maxParallelTasks: 65
+	initialParallelTasks: 64
+}
+
+site.westmere {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:slurm"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			jobQueue: "westmere"
+			maxJobs: 4
+			highOverallocation: 100
+			tasksPerNode: 12
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${mid.USER}
+	maxParallelTasks: 49
+	initialParallelTasks: 48
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/IO/multiple/swift.properties
===================================================================
--- trunk/tests/stress/IO/multiple/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/multiple/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/IO/multiple/tc.template.data
===================================================================
--- trunk/tests/stress/IO/multiple/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/multiple/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-local  perl /usr/bin/perl null null null
-beagle bash /bin/bash null null null

Deleted: trunk/tests/stress/IO/uc3/sites.template.xml
===================================================================
--- trunk/tests/stress/IO/uc3/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/uc3/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,17 +0,0 @@
-<config>
-  <pool handle="uc3">
-    <execution provider="coaster" url="uc3-sub.uchicago.edu" jobmanager="ssh-cl:condor"/>
-    <profile namespace="karajan" key="jobThrottle">10.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="maxtime">3600</profile>
-    <profile namespace="globus"  key="maxWalltime">00:30:00</profile>
-    <profile namespace="globus"  key="highOverAllocation">100</profile>
-    <profile namespace="globus"  key="lowOverAllocation">100</profile>
-    <profile namespace="globus"  key="slots">1000</profile>
-    <profile namespace="globus"  key="maxNodes">1</profile>
-    <profile namespace="globus"  key="nodeGranularity">1</profile>
-    <profile namespace="globus"  key="jobType">nonshared</profile>
-    <workdirectory>.</workdirectory>
-  </pool>
-</config>
\ No newline at end of file

Added: trunk/tests/stress/IO/uc3/swift.conf
===================================================================
--- trunk/tests/stress/IO/uc3/swift.conf	                        (rev 0)
+++ trunk/tests/stress/IO/uc3/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,36 @@
+include "${swift.home}/etc/swift.conf"
+
+site.uc3 {
+	execution {
+		type: "coaster"
+		URL: "uc3-sub.uchicago.edu"
+		jobManager: "ssh-cl:condor"
+		options {
+			# Option ignored: globus:jobtype = nonshared
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			maxJobs: 1000
+			highOverallocation: 100
+			tasksPerNode: 1
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "."
+	maxParallelTasks: 1001
+	initialParallelTasks: 999
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "00:30:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/IO/uc3/swift.properties
===================================================================
--- trunk/tests/stress/IO/uc3/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/uc3/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/IO/uc3/tc.template.data
===================================================================
--- trunk/tests/stress/IO/uc3/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/IO/uc3/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-beagle bash /bin/bash null null null
-uc3    bash /bin/bash null null null

Deleted: trunk/tests/stress/apps/modis_beagle/sites.template.xml
===================================================================
--- trunk/tests/stress/apps/modis_beagle/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_beagle/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,20 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="beagle">
-    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="login4.beagle.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/{env.USER}/swiftwork</profile>
-    <profile namespace="globus" key="slots">2</profile>
-    <profile namespace="globus" key="maxnodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">4.80</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/stress/apps/modis_beagle/swift.conf
===================================================================
--- trunk/tests/stress/apps/modis_beagle/swift.conf	                        (rev 0)
+++ trunk/tests/stress/apps/modis_beagle/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,41 @@
+include "${swift.home}/etc/swift.conf"
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "login4.beagle.ci.uchicago.edu"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			userHomeOverride: "/lustre/beagle/"${env.USER}"/swiftwork"
+			maxJobs: 2
+			highOverallocation: 100
+			jobMaxTime: "01:00:00"
+			lowOverallocation: 100
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "24"
+			}
+			tasksPerNode: 24
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${env.USER}"/swiftwork"
+	maxParallelTasks: 481
+	initialParallelTasks: 480
+	app.perl {
+		executable: "/usr/bin/perl"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+statusMode: "provider"
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/apps/modis_beagle/swift.properties
===================================================================
--- trunk/tests/stress/apps/modis_beagle/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_beagle/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-
-wrapperlog.always.transfer=true
-sitedir.keep=true
-execution.retries=0
-lazy.errors=false
-status.mode=provider
-use.provider.staging=true
-provider.staging.pin.swiftfiles=false

Deleted: trunk/tests/stress/apps/modis_beagle/tc.template.data
===================================================================
--- trunk/tests/stress/apps/modis_beagle/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_beagle/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1 +0,0 @@
-beagle perl /usr/bin/perl null null null

Deleted: trunk/tests/stress/apps/modis_local/sites.template.xml
===================================================================
--- trunk/tests/stress/apps/modis_local/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_local/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-  <pool handle="local">
-    <execution provider="coaster" jobmanager="local:local"/>
-    <profile namespace="globus" key="jobsPerNode">4</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">0.03</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <filesystem provider="local"/>
-    <workdirectory>/scratch/midway/{env.USER}</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/stress/apps/modis_local/swift.conf
===================================================================
--- trunk/tests/stress/apps/modis_local/swift.conf	                        (rev 0)
+++ trunk/tests/stress/apps/modis_local/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,36 @@
+include "${swift.home}/etc/swift.conf"
+
+site.local {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:local"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			maxJobs: 1
+			highOverallocation: 100
+			tasksPerNode: 4
+		}
+	}
+	filesystem {
+		type: "local"
+		URL: "localhost"
+	}
+	workDirectory: "/scratch/midway/"${env.USER}
+	maxParallelTasks: 4
+	initialParallelTasks: 3
+	app.perl {
+		executable: "/usr/bin/perl"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+statusMode: "provider"
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true

Deleted: trunk/tests/stress/apps/modis_local/swift.properties
===================================================================
--- trunk/tests/stress/apps/modis_local/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_local/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-
-wrapperlog.always.transfer=true
-sitedir.keep=true
-execution.retries=0
-lazy.errors=false
-status.mode=provider
-use.provider.staging=false
-provider.staging.pin.swiftfiles=false

Deleted: trunk/tests/stress/apps/modis_local/tc.template.data
===================================================================
--- trunk/tests/stress/apps/modis_local/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_local/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1 +0,0 @@
-local perl /usr/bin/perl null null null

Deleted: trunk/tests/stress/apps/modis_midway/sites.template.xml
===================================================================
--- trunk/tests/stress/apps/modis_midway/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_midway/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="midway">
-    <execution provider="coaster" jobmanager="local:slurm"/>
-    <profile namespace="globus" key="queue">westmere</profile>
-    <profile namespace="globus" key="jobsPerNode">16</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus" key="maxTime">3600</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="slots">2</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">.64</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{env.USER}</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/stress/apps/modis_midway/swift.conf
===================================================================
--- trunk/tests/stress/apps/modis_midway/swift.conf	                        (rev 0)
+++ trunk/tests/stress/apps/modis_midway/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,36 @@
+include "${swift.home}/etc/swift.conf"
+
+site.midway {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:slurm"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			jobQueue: "westmere"
+			maxJobs: 2
+			highOverallocation: 100
+			tasksPerNode: 16
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${env.USER}
+	maxParallelTasks: 65
+	initialParallelTasks: 64
+	app.perl {
+		executable: "/usr/bin/perl"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+statusMode: "provider"
+providerStagingPinSwiftFiles: true
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/apps/modis_midway/swift.properties
===================================================================
--- trunk/tests/stress/apps/modis_midway/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_midway/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,9 +0,0 @@
-
-wrapperlog.always.transfer=true
-sitedir.keep=true
-execution.retries=0
-lazy.errors=false
-status.mode=provider
-use.provider.staging=true
-provider.staging.pin.swiftfiles=true
-

Deleted: trunk/tests/stress/apps/modis_midway/tc.template.data
===================================================================
--- trunk/tests/stress/apps/modis_midway/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_midway/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1 +0,0 @@
-midway perl /usr/bin/perl null null null

Deleted: trunk/tests/stress/apps/modis_multiple/sites.template.xml
===================================================================
--- trunk/tests/stress/apps/modis_multiple/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_multiple/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,72 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="uc3">
-    <execution provider="coaster" url="uc3-sub.uchicago.edu" jobmanager="ssh-cl:condor"/>
-    <profile namespace="karajan" key="jobThrottle">10.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="maxtime">3600</profile>
-    <profile namespace="globus"  key="maxWalltime">00:10:00</profile>
-    <profile namespace="globus"  key="highOverAllocation">100</profile>
-    <profile namespace="globus"  key="lowOverAllocation">100</profile>
-    <profile namespace="globus"  key="slots">1</profile>
-    <profile namespace="globus"  key="maxNodes">1</profile>
-    <profile namespace="globus"  key="nodeGranularity">1</profile>
-    <profile namespace="globus"  key="condor.+AccountingGroup">"group_friends.{uc3.USER}"</profile>
-    <profile namespace="globus"  key="jobType">nonshared</profile>l
-    <!-- <profile namespace="globus"  key="condor.+Requirements">isUndefined(GLIDECLIENT_Name) == FALSE</profile> -->
-    <workdirectory>.</workdirectory>
-  </pool>
-
-  <pool handle="beagle">
-    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="login4.beagle.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <!-- <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24</profile> -->
-    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24;pbs.resource_list=advres=wilde.1768</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:10:00</profile>
-    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/{beagle.USER}/swiftwork</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="maxnodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">4.80</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{beagle.USER}/swiftwork</workdirectory>
-  </pool>
-
-  <pool handle="sandyb">
-    <execution provider="coaster" jobmanager="local:slurm"/>
-    <profile namespace="globus" key="queue">sandyb</profile>
-    <profile namespace="globus" key="jobsPerNode">16</profile>
-    <profile namespace="globus" key="maxWalltime">00:10:00</profile>
-    <profile namespace="globus" key="maxTime">3600</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">.64</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{mid.USER}</workdirectory>
-  </pool>
-
-  <pool handle="westmere">
-    <execution provider="coaster" jobmanager="local:slurm"/>
-    <profile namespace="globus" key="queue">westmere</profile>
-    <profile namespace="globus" key="jobsPerNode">12</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus" key="maxTime">3600</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="slots">1</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">.48</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{mid.USER}</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/stress/apps/modis_multiple/swift.conf
===================================================================
--- trunk/tests/stress/apps/modis_multiple/swift.conf	                        (rev 0)
+++ trunk/tests/stress/apps/modis_multiple/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,119 @@
+include "${swift.home}/etc/swift.conf"
+
+site.uc3 {
+	execution {
+		type: "coaster"
+		URL: "uc3-sub.uchicago.edu"
+		jobManager: "ssh-cl:condor"
+		options {
+			# Option ignored: globus:jobtype = nonshared
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			maxJobs: 1
+			highOverallocation: 100
+			# Option ignored: globus:condor.+accountinggroup = "group_friends.{uc3.USER}"
+			jobMaxTime: "01:00:00"
+			lowOverallocation: 100
+			tasksPerNode: 1
+		}
+	}
+	staging: "local"
+	workDirectory: "."
+	maxParallelTasks: 1001
+	initialParallelTasks: 999
+	app.perl {
+		executable: "/usr/bin/perl"
+		maxWallTime: "00:10:00"
+	}
+
+}
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "login4.beagle.ci.uchicago.edu"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			userHomeOverride: "/lustre/beagle/"${beagle.USER}"/swiftwork"
+			maxJobs: 1
+			highOverallocation: 100
+			jobMaxTime: "01:00:00"
+			lowOverallocation: 100
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "24"
+				pbs.resource_list: "advres=wilde.1768"
+			}
+			tasksPerNode: 24
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${beagle.USER}"/swiftwork"
+	maxParallelTasks: 481
+	initialParallelTasks: 480
+	app.perl {
+		executable: "/usr/bin/perl"
+		maxWallTime: "00:10:00"
+	}
+
+}
+
+site.sandyb {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:slurm"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			jobQueue: "sandyb"
+			maxJobs: 1
+			highOverallocation: 100
+			tasksPerNode: 16
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${mid.USER}
+	maxParallelTasks: 65
+	initialParallelTasks: 64
+}
+
+site.westmere {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:slurm"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			jobQueue: "westmere"
+			maxJobs: 1
+			highOverallocation: 100
+			tasksPerNode: 12
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${mid.USER}
+	maxParallelTasks: 49
+	initialParallelTasks: 48
+	app.perl {
+		executable: "/usr/bin/perl"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+statusMode: "provider"
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/apps/modis_multiple/swift.properties
===================================================================
--- trunk/tests/stress/apps/modis_multiple/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_multiple/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-
-wrapperlog.always.transfer=true
-sitedir.keep=true
-execution.retries=0
-lazy.errors=false
-status.mode=provider
-use.provider.staging=true
-provider.staging.pin.swiftfiles=false

Deleted: trunk/tests/stress/apps/modis_multiple/tc.template.data
===================================================================
--- trunk/tests/stress/apps/modis_multiple/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_multiple/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,4 +0,0 @@
-uc3      perl /usr/bin/perl null null null
-beagle   perl /usr/bin/perl null null null
-#sandy    perl /usr/bin/perl null null null
-westmere perl /usr/bin/perl null null null

Deleted: trunk/tests/stress/apps/modis_uc3/sites.template.xml
===================================================================
--- trunk/tests/stress/apps/modis_uc3/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_uc3/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,21 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<config xmlns="http://www.ci.uchicago.edu/swift/SwiftSites">
-
-  <pool handle="uc3">
-    <execution provider="coaster" url="uc3-sub.uchicago.edu" jobmanager="ssh-cl:condor"/>
-    <profile namespace="karajan" key="jobThrottle">10.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="maxtime">3600</profile>
-    <profile namespace="globus"  key="maxWalltime">00:10:00</profile>
-    <profile namespace="globus"  key="highOverAllocation">100</profile>
-    <profile namespace="globus"  key="lowOverAllocation">100</profile>
-    <profile namespace="globus"  key="slots">1</profile>
-    <profile namespace="globus"  key="maxNodes">1</profile>
-    <profile namespace="globus"  key="nodeGranularity">1</profile>
-    <profile namespace="globus"  key="condor.+AccountingGroup">"group_friends.{env.USER}"</profile>
-    <profile namespace="globus"  key="jobType">nonshared</profile>
-    <workdirectory>.</workdirectory>
-  </pool>
-
-</config>

Added: trunk/tests/stress/apps/modis_uc3/swift.conf
===================================================================
--- trunk/tests/stress/apps/modis_uc3/swift.conf	                        (rev 0)
+++ trunk/tests/stress/apps/modis_uc3/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,37 @@
+include "${swift.home}/etc/swift.conf"
+
+site.uc3 {
+	execution {
+		type: "coaster"
+		URL: "uc3-sub.uchicago.edu"
+		jobManager: "ssh-cl:condor"
+		options {
+			# Option ignored: globus:jobtype = nonshared
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			maxJobs: 1
+			highOverallocation: 100
+			# Option ignored: globus:condor.+accountinggroup = "group_friends.{env.USER}"
+			jobMaxTime: "01:00:00"
+			lowOverallocation: 100
+			tasksPerNode: 1
+		}
+	}
+	staging: "local"
+	workDirectory: "."
+	maxParallelTasks: 1001
+	initialParallelTasks: 999
+	app.perl {
+		executable: "/usr/bin/perl"
+		maxWallTime: "00:10:00"
+	}
+
+}
+
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+statusMode: "provider"
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/apps/modis_uc3/swift.properties
===================================================================
--- trunk/tests/stress/apps/modis_uc3/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_uc3/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-
-wrapperlog.always.transfer=true
-sitedir.keep=true
-execution.retries=0
-lazy.errors=false
-status.mode=provider
-use.provider.staging=true
-provider.staging.pin.swiftfiles=false

Deleted: trunk/tests/stress/apps/modis_uc3/tc.template.data
===================================================================
--- trunk/tests/stress/apps/modis_uc3/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/apps/modis_uc3/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1 +0,0 @@
-uc3 perl /usr/bin/perl null null null

Deleted: trunk/tests/stress/long_runs/sites.template.xml
===================================================================
--- trunk/tests/stress/long_runs/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/long_runs/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,20 +0,0 @@
-<config>
-  <pool handle="beagle">
-    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="login4.beagle.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24</profile>
-    <profile namespace="globus" key="maxtime">36000</profile>
-    <profile namespace="globus" key="maxWalltime">09:00:00</profile>
-    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/yadunandb/swiftwork</profile>
-    <profile namespace="globus" key="slots">20</profile>
-    <profile namespace="globus" key="maxnodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">4.80</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="karajan" key="workerLoggingLevel">trace</profile>
-    <!-- <workdirectory>/lustre/beagle/yadunandb/swiftwork</workdirectory> -->
-    <workdirectory>/tmp/yadunandb/swiftwork</workdirectory>
-  </pool>
-</config>
\ No newline at end of file

Added: trunk/tests/stress/long_runs/swift.conf
===================================================================
--- trunk/tests/stress/long_runs/swift.conf	                        (rev 0)
+++ trunk/tests/stress/long_runs/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,41 @@
+include "${swift.home}/etc/swift.conf"
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "login4.beagle.ci.uchicago.edu"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 1
+			userHomeOverride: "/lustre/beagle/yadunandb/swiftwork"
+			maxJobs: 20
+			highOverallocation: 100
+			jobMaxTime: "10:00:00"
+			lowOverallocation: 100
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "24"
+			}
+			tasksPerNode: 24
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/yadunandb/swiftwork"
+	maxParallelTasks: 481
+	initialParallelTasks: 480
+	app.bash {
+		executable: "/bin/bash"
+		maxWallTime: "09:00:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/long_runs/swift.properties
===================================================================
--- trunk/tests/stress/long_runs/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/long_runs/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/long_runs/tc.template.data
===================================================================
--- trunk/tests/stress/long_runs/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/long_runs/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,2 +0,0 @@
-local  perl /usr/bin/perl null null null
-beagle bash /bin/bash null null null

Deleted: trunk/tests/stress/remote_sanity/beagle/sites.template.xml
===================================================================
--- trunk/tests/stress/remote_sanity/beagle/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/beagle/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,20 +0,0 @@
-<config>
-  <pool handle="beagle">
-    <execution provider="coaster" jobmanager="ssh-cl:pbs" url="login4.beagle.ci.uchicago.edu"/>
-    <profile namespace="globus" key="jobsPerNode">24</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="providerAttributes">pbs.aprun;pbs.mpp;depth=24</profile>
-    <profile namespace="globus" key="maxtime">3700</profile>
-    <profile namespace="globus" key="maxWalltime">01:00:00</profile>
-    <profile namespace="globus" key="userHomeOverride">/lustre/beagle/{env.USER}/swiftwork</profile>
-    <profile namespace="globus" key="slots">20</profile>
-    <profile namespace="globus" key="maxnodes">10</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">4.80</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="karajan" key="workerLoggingLevel">trace</profile>
-    <!-- <workdirectory>/lustre/beagle/yadunandb/swiftwork</workdirectory> -->
-    <workdirectory>/tmp/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>
\ No newline at end of file

Added: trunk/tests/stress/remote_sanity/beagle/swift.conf
===================================================================
--- trunk/tests/stress/remote_sanity/beagle/swift.conf	                        (rev 0)
+++ trunk/tests/stress/remote_sanity/beagle/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,41 @@
+include "${swift.home}/etc/swift.conf"
+
+site.beagle {
+	execution {
+		type: "coaster"
+		URL: "login4.beagle.ci.uchicago.edu"
+		jobManager: "ssh-cl:pbs"
+		options {
+			nodeGranularity: 1
+			maxNodesPerJob: 10
+			userHomeOverride: "/lustre/beagle/"${env.USER}"/swiftwork"
+			maxJobs: 20
+			highOverallocation: 100
+			jobMaxTime: "01:01:40"
+			lowOverallocation: 100
+			jobOptions {
+				pbs.aprun: true
+				pbs.mpp: true
+				depth: "24"
+			}
+			tasksPerNode: 24
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${env.USER}"/swiftwork"
+	maxParallelTasks: 481
+	initialParallelTasks: 480
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "01:00:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/remote_sanity/beagle/swift.properties
===================================================================
--- trunk/tests/stress/remote_sanity/beagle/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/beagle/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/remote_sanity/beagle/tc.template.data
===================================================================
--- trunk/tests/stress/remote_sanity/beagle/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/beagle/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null

Deleted: trunk/tests/stress/remote_sanity/mac-frisbee/sites.template.xml
===================================================================
--- trunk/tests/stress/remote_sanity/mac-frisbee/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/mac-frisbee/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,13 +0,0 @@
-<config>
-  <pool handle="frisbee">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="frisbee.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/scratch/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/stress/remote_sanity/mac-frisbee/swift.conf
===================================================================
--- trunk/tests/stress/remote_sanity/mac-frisbee/swift.conf	                        (rev 0)
+++ trunk/tests/stress/remote_sanity/mac-frisbee/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,32 @@
+include "${swift.home}/etc/swift.conf"
+
+site.frisbee {
+	execution {
+		type: "coaster"
+		URL: "frisbee.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/scratch/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/remote_sanity/mac-frisbee/swift.properties
===================================================================
--- trunk/tests/stress/remote_sanity/mac-frisbee/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/mac-frisbee/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/remote_sanity/mac-frisbee/tc.template.data
===================================================================
--- trunk/tests/stress/remote_sanity/mac-frisbee/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/mac-frisbee/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,6 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null
-frisbee date /bin/date null null null
\ No newline at end of file

Deleted: trunk/tests/stress/remote_sanity/mcs/sites.template.xml
===================================================================
--- trunk/tests/stress/remote_sanity/mcs/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/mcs/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,13 +0,0 @@
-<config>
-  <pool handle="crush">
-    <execution provider="coaster" jobmanager="ssh-cl:local" url="crush.mcs.anl.gov"/>
-    <profile namespace="globus" key="jobsPerNode">8</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="maxtime">3600</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="karajan" key="jobThrottle">0.0799</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/sandbox/{env.USER}/swiftwork</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/stress/remote_sanity/mcs/swift.conf
===================================================================
--- trunk/tests/stress/remote_sanity/mcs/swift.conf	                        (rev 0)
+++ trunk/tests/stress/remote_sanity/mcs/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,32 @@
+include "${swift.home}/etc/swift.conf"
+
+site.crush {
+	execution {
+		type: "coaster"
+		URL: "crush.mcs.anl.gov"
+		jobManager: "ssh-cl:local"
+		options {
+			lowOverallocation: 100
+			highOverallocation: 100
+			tasksPerNode: 8
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/sandbox/"${env.USER}"/swiftwork"
+	maxParallelTasks: 8
+	initialParallelTasks: 8
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/remote_sanity/mcs/swift.properties
===================================================================
--- trunk/tests/stress/remote_sanity/mcs/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/mcs/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/remote_sanity/mcs/tc.template.data
===================================================================
--- trunk/tests/stress/remote_sanity/mcs/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/mcs/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-crush  date /bin/date null null null
-midway date /bin/date null null null

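The deleted tc.template.data rows (site, transformation name, executable, plus three unused null fields) are subsumed by app blocks inside the matching site definition, as the swift.conf files added above show. A hypothetical rewrite of one row; note that maxWallTime, which completes the real app blocks, came from the old maxWalltime profile in sites.template.xml rather than from tc, so it cannot be derived from the row alone:

    # tc2app.py -- hypothetical rewrite of one tc.template.data row into the
    # app block form used by the swift.conf files added in this commit.
    def tc_to_app(line):
        site, name, path = line.split()[:3]  # the trailing null fields are unused
        return site, 'app.%s {\n    executable: "%s"\n}' % (name, path)

    site, block = tc_to_app('crush  date /bin/date null null null')
    print('site.%s gains:\n%s' % (site, block))
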
Deleted: trunk/tests/stress/remote_sanity/midway/sites.template.xml
===================================================================
--- trunk/tests/stress/remote_sanity/midway/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/midway/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,17 +0,0 @@
-<config>
-  <pool handle="midway">
-    <execution provider="coaster" jobmanager="local:slurm"/>
-    <profile namespace="globus" key="queue">sandyb</profile>
-    <profile namespace="globus" key="jobsPerNode">16</profile>
-    <profile namespace="globus" key="maxWalltime">00:05:00</profile>
-    <profile namespace="globus" key="maxTime">3600</profile>
-    <profile namespace="globus" key="highOverAllocation">100</profile>
-    <profile namespace="globus" key="lowOverAllocation">100</profile>
-    <profile namespace="globus" key="slots">4</profile>
-    <profile namespace="globus" key="maxNodes">1</profile>
-    <profile namespace="globus" key="nodeGranularity">1</profile>
-    <profile namespace="karajan" key="jobThrottle">.64</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <workdirectory>/tmp/{env.USER}</workdirectory>
-  </pool>
-</config>

Added: trunk/tests/stress/remote_sanity/midway/swift.conf
===================================================================
--- trunk/tests/stress/remote_sanity/midway/swift.conf	                        (rev 0)
+++ trunk/tests/stress/remote_sanity/midway/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,36 @@
+include "${swift.home}/etc/swift.conf"
+
+site.midway {
+	execution {
+		type: "coaster"
+		URL: "localhost"
+		jobManager: "local:slurm"
+		options {
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			jobQueue: "sandyb"
+			maxJobs: 4
+			highOverallocation: 100
+			tasksPerNode: 16
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "/tmp/"${env.USER}
+	maxParallelTasks: 65
+	initialParallelTasks: 64
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:05:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

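The remaining globus profile keys from the deleted midway pool map onto the options block by rename, with one unit change: the old maxtime was given in seconds, while jobMaxTime is an "HH:MM:SS" string (maxWalltime, by contrast, moves to the per-app maxWallTime). A hypothetical rename table and the time rewrite, both read off the before/after pair in this diff:

    # globus2options.py -- hypothetical rename table for the globus profile
    # keys visible in this diff, plus the unit change that turns the old
    # maxtime (seconds) into the new jobMaxTime "HH:MM:SS" string.
    GLOBUS_TO_OPTIONS = {
        'jobsPerNode':        'tasksPerNode',
        'maxNodes':           'maxNodesPerJob',
        'nodeGranularity':    'nodeGranularity',
        'slots':              'maxJobs',
        'queue':              'jobQueue',
        'lowOverAllocation':  'lowOverallocation',
        'highOverAllocation': 'highOverallocation',
    }

    def job_max_time(seconds):
        """Render the old maxtime value as the new jobMaxTime string."""
        hours, rem = divmod(int(seconds), 3600)
        minutes, secs = divmod(rem, 60)
        return '%02d:%02d:%02d' % (hours, minutes, secs)

    assert job_max_time(3600) == '01:00:00'
    print('slots ->', GLOBUS_TO_OPTIONS['slots'], '; maxtime 3600 ->', job_max_time(3600))
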
Deleted: trunk/tests/stress/remote_sanity/midway/swift.properties
===================================================================
--- trunk/tests/stress/remote_sanity/midway/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/midway/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/remote_sanity/midway/tc.template.data
===================================================================
--- trunk/tests/stress/remote_sanity/midway/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/midway/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null

Deleted: trunk/tests/stress/remote_sanity/uc3/sites.template.xml
===================================================================
--- trunk/tests/stress/remote_sanity/uc3/sites.template.xml	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/uc3/sites.template.xml	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,17 +0,0 @@
-<config>
-  <pool handle="uc3">
-    <execution provider="coaster" url="uc3-sub.uchicago.edu" jobmanager="ssh-cl:condor"/>
-    <profile namespace="karajan" key="jobThrottle">10.00</profile>
-    <profile namespace="karajan" key="initialScore">10000</profile>
-    <profile namespace="globus"  key="jobsPerNode">1</profile>
-    <profile namespace="globus"  key="maxtime">3600</profile>
-    <profile namespace="globus"  key="maxWalltime">00:30:00</profile>
-    <profile namespace="globus"  key="highOverAllocation">100</profile>
-    <profile namespace="globus"  key="lowOverAllocation">100</profile>
-    <profile namespace="globus"  key="slots">1000</profile>
-    <profile namespace="globus"  key="maxNodes">1</profile>
-    <profile namespace="globus"  key="nodeGranularity">1</profile>
-    <profile namespace="globus"  key="jobType">nonshared</profile>
-    <workdirectory>.</workdirectory>
-  </pool>
-</config>
\ No newline at end of file

Added: trunk/tests/stress/remote_sanity/uc3/swift.conf
===================================================================
--- trunk/tests/stress/remote_sanity/uc3/swift.conf	                        (rev 0)
+++ trunk/tests/stress/remote_sanity/uc3/swift.conf	2014-07-12 08:50:14 UTC (rev 8006)
@@ -0,0 +1,36 @@
+include "${swift.home}/etc/swift.conf"
+
+site.uc3 {
+	execution {
+		type: "coaster"
+		URL: "uc3-sub.uchicago.edu"
+		jobManager: "ssh-cl:condor"
+		options {
+			# Option ignored: globus:jobtype = nonshared
+			nodeGranularity: 1
+			lowOverallocation: 100
+			maxNodesPerJob: 1
+			maxJobs: 1000
+			highOverallocation: 100
+			tasksPerNode: 1
+			jobMaxTime: "01:00:00"
+		}
+	}
+	staging: "local"
+	workDirectory: "."
+	maxParallelTasks: 1001
+	initialParallelTasks: 999
+	app.date {
+		executable: "/bin/date"
+		maxWallTime: "00:30:00"
+	}
+
+}
+
+TCPPortRange: "50000,51000"
+lazyErrors: false
+executionRetries: 0
+keepSiteDir: true
+providerStagingPinSwiftFiles: false
+alwaysTransferWrapperLog: true
+staging: "local"

Deleted: trunk/tests/stress/remote_sanity/uc3/swift.properties
===================================================================
--- trunk/tests/stress/remote_sanity/uc3/swift.properties	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/uc3/swift.properties	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,8 +0,0 @@
-use.provider.staging=true
-use.wrapper.staging=false
-wrapperlog.always.transfer=true
-execution.retries=0
-lazy.errors=false
-provider.staging.pin.swiftfiles=false
-sitedir.keep=true
-tcp.port.range=50000,51000
\ No newline at end of file

Deleted: trunk/tests/stress/remote_sanity/uc3/tc.template.data
===================================================================
--- trunk/tests/stress/remote_sanity/uc3/tc.template.data	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/stress/remote_sanity/uc3/tc.template.data	2014-07-12 08:50:14 UTC (rev 8006)
@@ -1,5 +0,0 @@
-local  date /bin/date null null null
-beagle date /bin/date null null null
-uc3    date /bin/date null null null
-mcs    date /bin/date null null null
-midway date /bin/date null null null

Modified: trunk/tests/suite.sh
===================================================================
--- trunk/tests/suite.sh	2014-07-12 08:39:53 UTC (rev 8005)
+++ trunk/tests/suite.sh	2014-07-12 08:50:14 UTC (rev 8006)
@@ -986,8 +986,8 @@
 
-# Generate swift.properties
+# Copy the group's swift.conf into the run directory
 group_swift_properties() {
-  if [ -f $GROUP/swift.properties ]; then
-    cp -v $GROUP/swift.properties .
-    [ $? != 0 ] && crash "Could not copy swift.properties!"
+  if [ -f $GROUP/swift.conf ]; then
+    cp -v $GROUP/swift.conf .
+    [ $? != 0 ] && crash "Could not copy swift.conf!"
   fi
 }

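With this change the harness stages a per-group swift.conf into the run directory instead of swift.properties. A hypothetical audit script to list test groups that still carry only the legacy file after a migration like this one; the trunk/tests layout is assumed from this commit:

    # find_stragglers.py -- hypothetical audit: list test groups that still
    # carry a legacy swift.properties without a swift.conf.
    import os
    import sys

    root = sys.argv[1] if len(sys.argv) > 1 else 'trunk/tests'
    for dirpath, _, filenames in os.walk(root):
        if 'swift.properties' in filenames and 'swift.conf' not in filenames:
            print('not yet migrated:', dirpath)
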