sites: [stampede]

# Instructions for Stampede
# 1. If you are running on the stampede login nodes, set jobManager: "local:slurm"
# 2. Set workDirectory to /tmp/your_username_on_stampede

site.stampede {
    execution {
        type      : "coaster"                          # Use coasters to run on remote sites
        URL       : "login4.stampede.tacc.utexas.edu"  # Stampede login nodes login[1..4].stampede.tacc.utexas.edu
        jobManager: "ssh-cl:slurm"                     # Use ssh-cl to connect, slurm is the Local Resource Manager (LRM)
        options {
            maxJobs         : 1                        # Max jobs submitted to LRM
            nodeGranularity : 1                        # Nodes per job
            maxNodesPerJob  : 1                        # Max nodes per job
            tasksPerNode    : 4                        # Tasks per node
            jobQueue        : "development"            # Select queue from (development, normal, large)
            maxJobTime      : "00:25:00"               # Time requested per job
        }
    }
    staging             : "local"                      # Stage files from "local" system to Stampede
    workDirectory       : "/tmp/"${env.USER}"/swiftwork"  # Location for intermediate files
    maxParallelTasks    : 101                          # Maximum number of parallel tasks
    initialParallelTasks: 100                          # Maximum number of tasks at start
    app.ALL { executable: "*" }                        # All tasks to be found from commandline
}

# Instructions for Blacklight
# 1. If you are running on the blacklight login nodes, set jobManager: "local:pbs"
# 2. Set userHomeOverride : "/lustre/blacklight2/YOUR_USERNAME_ON_BLACKLIGHT/swiftwork"
# 3. Set workDirectory : "/tmp/YOUR_USERNAME_ON_BLACKLIGHT/swiftwork"

site.blacklight {
    execution {
        type      : "coaster"                          # Use coasters to run on remote sites
        URL       : "blacklight.psc.xsede.org"         # Blacklight login URL
        jobManager: "ssh-cl:pbs"                       # Use ssh-cl to connect, pbs is the Local Resource Manager (LRM)
        options {
            maxJobs         : 1                        # Max jobs submitted to LRM
            nodeGranularity : 1                        # Nodes per job
            maxNodesPerJob  : 1                        # Max nodes per job
            tasksPerNode    : 4                        # Tasks per node
            maxJobTime      : "00:25:00"               # Time requested per job
            jobQueue        : "debug"
            jobOptions {
                ppn : "16"                             # Virtual processors per node per job
            }
        }
    }
    staging             : "local"                      # Stage files from "local" system to Blacklight
    workDirectory       : "/tmp/"${env.USER}"/swiftwork"  # Location for intermediate files
    maxParallelTasks    : 101                          # Maximum number of parallel tasks
    initialParallelTasks: 100                          # Maximum number of tasks at start
    app.ALL { executable: "*" }                        # All tasks to be found from commandline
}

# Instructions for Gordon
# 1. Do *NOT* run on the Gordon login nodes. There are memory limits which prevent
#    swift from running properly on these machines.

site.gordon {
    execution {
        type      : "coaster"                          # Use coasters to run on remote sites
        URL       : "gordon.sdsc.edu"                  # Gordon login URL
        jobManager: "ssh-cl:pbs"                       # Use ssh-cl to connect, pbs is the Local Resource Manager (LRM)
        options {
            maxJobs         : 1                        # Max jobs submitted to LRM
            nodeGranularity : 1                        # Nodes per job
            maxNodesPerJob  : 1                        # Max nodes per job
            tasksPerNode    : 4                        # Tasks per node
            maxJobTime      : "00:25:00"               # Time requested per job
            jobOptions {
                ppn : "16"                             # Virtual processors per node per job
            }
        }
    }
    staging             : "local"                      # Stage files from "local" system to Gordon
    workDirectory       : "/tmp/"${env.USER}"/swiftwork"  # Location for intermediate files
    maxParallelTasks    : 101                          # Maximum number of parallel tasks
    initialParallelTasks: 100                          # Maximum number of tasks at start
    app.ALL { executable: "*" }                        # All tasks to be found from commandline
}
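# The sites list at the top of this file selects which of the site.* blocks below
# Swift will actually use. A hedged example (the site name is illustrative and must
# match one of the site.<name> blocks defined in this file): to run on Gordon
# instead of Stampede, change the first line to
#
# sites: [gordon]
#
# Listing several names, e.g. sites: [stampede, gordon], should let Swift schedule
# tasks across both sites, though that behaviour has not been verified here.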
# Instructions for Trestles
# 1. Do *NOT* run on the Trestles login nodes. There are memory limits which prevent
#    swift from running properly on these machines.

site.trestles {
    execution {
        type      : "coaster"                          # Use coasters to run on remote sites
        URL       : "trestles.sdsc.edu"                # Trestles login URL
        jobManager: "ssh-cl:pbs"                       # Use ssh-cl to connect, pbs is the Local Resource Manager (LRM)
        options {
            maxJobs         : 1                        # Max jobs submitted to LRM
            nodeGranularity : 1                        # Nodes per job
            maxNodesPerJob  : 1                        # Max nodes per job
            tasksPerNode    : 4                        # Tasks per node
            maxJobTime      : "00:25:00"               # Time requested per job
            jobOptions {
                ppn : "16"                             # Virtual processors per node per job
            }
        }
    }
    staging             : "local"                      # Stage files from "local" system to Trestles
    workDirectory       : "/tmp/"${env.USER}"/swiftwork"  # Location for intermediate files
    maxParallelTasks    : 101                          # Maximum number of parallel tasks
    initialParallelTasks: 100                          # Maximum number of tasks at start
    app.ALL { executable: "*" }                        # All tasks to be found from commandline
}

site.stampede-mpi {
    execution {
        type      : "coaster"                          # Use coasters to run on remote sites
        URL       : "login4.stampede.tacc.utexas.edu"  # Stampede login nodes login[1..4].stampede.tacc.utexas.edu
        jobManager: "local:slurm"                      # Submit directly to slurm; run this from a Stampede login node
        options {
            maxJobs         : 2                        # Max jobs submitted to LRM
            nodeGranularity : 1                        # Nodes per job
            maxNodesPerJob  : 1                        # Max nodes per job
            tasksPerNode    : 4                        # Tasks per node
            jobQueue        : "normal"                 # Select queue from (development, normal, large)
            maxJobTime      : "00:25:00"               # Time requested per job
        }
    }
    staging             : "direct"                     # Use direct staging; data is accessed in place on the shared filesystem
    workDirectory       : "/tmp/"${env.USER}"/swiftwork"  # Location for intermediate files
    maxParallelTasks    : 101                          # Maximum number of parallel tasks
    initialParallelTasks: 100                          # Maximum number of tasks at start
    app.ALL { executable: "*" }                        # All tasks to be found from commandline
}

TCPPortRange                : "50000,51000"            # TCP port range used by swift to communicate with remote sites
lazyErrors                  : false                    # Swift fails immediately upon encountering an error
executionRetries            : 0                        # Number of retries upon task failures
keepSiteDir                 : true                     # Keep site dir (useful for debug)
providerStagingPinSwiftFiles: false                    # Pin staging files (useful for debug)
alwaysTransferWrapperLog    : true                     # Transfer wrapper logs (useful for debug)
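# The app.ALL { executable: "*" } entries above tell Swift to resolve every app
# called from a script on the remote command line / PATH. As a sketch (the app name
# and path below are illustrative, not part of the original configuration), a single
# app can instead be mapped to an explicit path inside a site block:
#
# site.stampede {
#     ...
#     app.echo {
#         executable: "/bin/echo"                      # Absolute path to the program on the remote site
#     }
# }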