sites.file=${swift.home}/etc/sites.xml
tc.file=${swift.home}/etc/tc.data

#
# The IP address of the submit machine is used by GRAM as a callback
# address to report the status of submitted jobs. In general, Swift
# can automatically detect the IP address of the local machine.
# However, if the machine has more than one network interface, Swift
# will pick the first one, which may not be the right choice. It is
# recommended that this property is set properly before attempting to
# run jobs through GRAM.
#
# Format:
#   ip.address=x.y.z.w
#
#ip.address=127.0.0.1

#
# false - means an error will be immediately reported and cause the
#         workflow to abort. At this time remote jobs that are already
#         running will not be canceled
# true  - means that Swift will try to do as much work as possible and
#         report all errors encountered at the end. However, "errors"
#         here only applies to job execution errors. Certain errors
#         that are related to the Swift implementation (should such
#         errors occur) will still be reported eagerly.
#
# Default: true
#
lazy.errors=true

#
# What algorithm to use for caching of remote files. LRU (as in what
# files to purge) is the only implementation right now. One can set
# a target size (in bytes) for a host by using the swift:storagesize
# profile for a host in sites.xml
#
# Default: LRU
#
caching.algorithm=LRU

#
# true  - generate a provenance graph in .dot format (Swift will
#         choose a random file name)
# false - do not generate a provenance graph
# <filename> - generate a provenance graph in the given file name
#
# Default: false
#
pgraph=false

#
# graph properties for the provenance graph (.dot specific)
#
# Default: splines="compound", rankdir="TB"
#
pgraph.graph.options=splines="compound", rankdir="TB"

#
# node properties for the provenance graph (.dot specific)
#
# Default: color="seagreen", style="filled"
#
pgraph.node.options=color="seagreen", style="filled"

#
# true  - clustering of small jobs is enabled. Clustering works in the
#         following way: If a job is clusterable (meaning that it has the
#         GLOBUS::maxwalltime profile specified in tc.data and its value
#         is less than the value of the "clustering.min.time" property) it will
#         be put in a clustering queue. The queue is processed at intervals
#         specified by the "clustering.queue.delay" property. The processing
#         of the clustering queue consists of selecting compatible jobs and
#         grouping them in clusters whose max wall time does not exceed twice
#         the value of the "clustering.min.time" property. Two or more jobs are
#         considered compatible if they share the same site and do not have
#         conflicting profiles (e.g. different values for the same environment
#         variable).
# false - clustering of small jobs is disabled.
#
# Default: true
#
clustering.enabled=true

#
# - the intervals at which the clustering queue is processed
#
# Default: 4
#
clustering.queue.delay=4

#
# - the threshold time for clustering
#
# Default: 60
#
clustering.min.time=60

#
# Kickstart is a useful tool that can be used to gather various information
# about a remote process. Before it can be used it must be installed on the
# remote site and the corresponding entry be set in the sites file.
# This option allows controlling of how Swift uses Kickstart. The following
# values are possible:
# false - do not use Kickstart
# true  - use Kickstart. If a job is scheduled on a site that does not have
#         Kickstart installed, that job will fail.
# maybe - Use Kickstart if installed (i.e. the entry is present in the sites
#         file)
#
# Default: maybe
#
kickstart.enabled=false

#
# Indicates when Kickstart records should be fetched from the remote site:
# true  - always transfer Kickstart records if Kickstart was used (see
#         kickstart.enabled)
# false - only transfer Kickstart records if the job fails
#
# Default: false
#
kickstart.always.transfer=false

###########################################################################
# Throttling options                                                      #
###########################################################################

#
# Limits the number of concurrent submissions for a workflow instance. This
# throttle only limits the number of concurrent tasks (jobs) that are being
# sent to sites, not the total number of concurrent jobs that can be run.
# The submission stage in GRAM is one of the most CPU expensive stages (due
# mostly to the mutual authentication and delegation). Having too many
# concurrent submissions can overload either or both the submit host CPU
# and the remote host/head node causing degraded performance.
#
# Default: 4
#
throttle.submit=4

#
# Limits the number of concurrent submissions for any of the sites Swift will
# try to send jobs to. In other words it guarantees that no more than the
# value of this throttle jobs sent to any site will be concurrently in a state
# of being submitted.
#
# Default: 2
#
throttle.host.submit=2

#
# Limits the total number of concurrent file transfers that can happen at any
# given time. File transfers consume bandwidth. Too many concurrent transfers
# can cause the network to be overloaded preventing various other signalling
# traffic from flowing properly.
#
# Default: 4
#
throttle.transfers=4

# Limits the total number of concurrent file operations that can happen at any
# given time. File operations (like transfers) require an exclusive connection
# to a site. These connections can be expensive to establish. A large number
# of concurrent file operations may cause Swift to attempt to establish many
# such expensive connections to various sites. Limiting the number of concurrent
# file operations causes Swift to use a small number of cached connections and
# achieve better overall performance.
#
# Default: 8
#
throttle.file.operations=8