<?xml version="1.0"?>
<!--
This file is maintained by Ansible - CHANGES WILL BE OVERWRITTEN
-->
<job_conf>
<plugins workers="8">
<plugin id="dynamic" type="runner">
<!-- These live in the virtualenv -->
<param id="rules_module">usegalaxy.jobs.rules</param>
</plugin>
<plugin id="local" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner"/>
<plugin id="slurm" type="runner" load="galaxy.jobs.runners.slurm:SlurmJobRunner">
<param id="drmaa_library_path">/usr/lib64/libdrmaa.so</param>
<param id="invalidjobexception_retries">5</param>
<param id="internalexception_retries">5</param>
</plugin>
<expand macro="pulsar_plugin" id="jetstream_iu" />
<expand macro="pulsar_plugin" id="jetstream_iu_nagios" />
<expand macro="pulsar_plugin" id="jetstream_tacc" />
<expand macro="pulsar_plugin" id="jetstream_tacc_nagios" />
<expand macro="pulsar_plugin" id="stampede" />
<expand macro="pulsar_plugin" id="stampede_nagios" />
<expand macro="pulsar_plugin" id="bridges" />
<expand macro="pulsar_plugin" id="bridges_nagios" />
</plugins>
<handlers default="handlers" assign_with="db-skip-locked" max_grab="8">
<expand macro="normal_handler" hostid="3" id="0" />
<expand macro="normal_handler" hostid="3" id="1" />
<expand macro="multi_handler" hostid="3" id="2" />
<expand macro="normal_handler" hostid="4" id="0" />
<expand macro="normal_handler" hostid="4" id="1" />
<expand macro="multi_handler" hostid="4" id="2" />
</handlers>
<destinations default="dynamic_normal_reserved">
<!-- dynamic function that sets the roundup multi walltime and also handles the explicit stampede resource selector -->
<expand macro="dynamic_destination" dest="local_stampede_select_dynamic_walltime" />
<!-- dynamic function for the multi-or-bridges selector -->
<expand macro="dynamic_destination" dest="multi_bridges_select" />
<!-- dynamic method for the stampede-only selector -->
<expand macro="dynamic_destination" dest="stampede_select" />
<!-- dynamic method for the bridges-only selector -->
<expand macro="dynamic_destination" dest="bridges_select" />
<!-- dynamic memory rule for the naive variant caller -->
<expand macro="dynamic_destination" dest="nvc_dynamic_memory" />
<!-- dynamic local/reserved destinations-->
<expand macro="slurm_reserved_destination" dest="normal" partition="normal" native_specification="--cpus-per-task=1 --time=36:00:00" />
<expand macro="slurm_reserved_destination" dest="normal_16gb" partition="normal" native_specification="--cpus-per-task=1 --time=36:00:00 --mem-per-cpu=15360" java_mem="15g" />
<expand macro="slurm_reserved_destination" dest="normal_32gb" partition="normal" native_specification="--cpus-per-task=1 --time=36:00:00 --mem-per-cpu=30720" java_mem="30g" />
<expand macro="slurm_reserved_destination" dest="normal_64gb" partition="normal" native_specification="--cpus-per-task=1 --time=36:00:00 --mem-per-cpu=61440" java_mem="60g" />
<expand macro="slurm_reserved_destination" dest="multi" partition="multi" native_specification="--cpus-per-task=6 --time=36:00:00" java_mem="30g" />
<destination id="slurm_multi_mpi" runner="slurm">
<param id="nativeSpecification">--clusters=roundup --partition=multi --nodes=1 --cpus-per-task=1 --ntasks=6 --time=36:00:00</param>
<!-- <expand macro="dest_local_env" java_mem="30g" /> -->
</destination>
<!-- local destinations -->
<expand macro="slurm_destination" dest="multi_development" partition="normal" native_specification="--cpus-per-task=2 --time=00:30:00 --mem-per-cpu=5120" java_mem="10g" />
<!-- temporary concurrency limit increase for du novo: align families -->
<expand macro="slurm_destination" dest="multi_align_families" partition="multi" native_specification="--cpus-per-task=6 --time=96:00:00" />
<!-- non-conda python 2 legacy tools -->
<destination id="slurm_normal_legacy" runner="slurm">
<param id="nativeSpecification">--partition=normal --nodes=1 --cpus-per-task=1 --time=36:00:00</param>
<expand macro="dest_local_env" java_mem="7g"/>
<param id="use_metadata_binary">true</param>
<env file="/cvmfs/main.galaxyproject.org/venv/bin/activate"/>
<env id="GALAXY_VIRTUAL_ENV">None</env>
<env id="PATH">/cvmfs/main.galaxyproject.org/deps/_py2/bin:$PATH</env>
</destination>
<destination id="slurm_multi_legacy" runner="slurm">
<param id="nativeSpecification">--partition=multi --nodes=1 --cpus-per-task=4 --time=36:00:00</param>
<expand macro="dest_local_env" java_mem="15g"/>
<param id="use_metadata_binary">true</param>
<env file="/cvmfs/main.galaxyproject.org/venv/bin/activate"/>
<env id="GALAXY_VIRTUAL_ENV">None</env>
<env id="PATH">/cvmfs/main.galaxyproject.org/deps/_py2/bin:$PATH</env>
</destination>
<!-- dynamic modified destinations: do not use directly -->
<expand macro="slurm_destination" dest="multi_dynamic_walltime" partition="multi" native_specification="--cpus-per-task=6" />
<!-- <resubmit condition="walltime_reached" destination="stampede_normal" handler="multi_handlers"/> -->
<expand macro="slurm_destination" dest="normal_dynamic_mem" partition="normal" native_specification="--cpus-per-task=1 --time=24:00:00" />
<!-- jetstream destinations -->
<destination id="jetstream_multi" runner="dynamic">
<!-- this destination exists for the dynamic runner to read the native spec for testing -->
<param id="nativeSpecification">--time=36:00:00 --nodes=1 --partition=multi</param>
</destination>
<expand macro="jetstream_destination" id="small" site="iu" native_specification="--partition=small --time=36:00:00 --mem=3584" />
<expand macro="jetstream_destination" id="normal" site="iu" native_specification="--partition=normal --time=36:00:00 --mem=7168" />
<expand macro="jetstream_destination" id="multi" site="iu" native_specification="--partition=multi --time=36:00:00 --mem=28672" />
<expand macro="jetstream_nagios_destination" site="iu" />
<expand macro="jetstream_destination" id="multi" site="tacc" native_specification="--partition=multi --nodes=1 --time=36:00:00" />
<expand macro="jetstream_nagios_destination" site="tacc" />
<!-- test -->
<expand macro="jetstream_destination" id="reserved" site="iu" native_specification="--partition=reserved --nodes=1 --time=36:00:00" />
<!-- stampede destinations -->
<!-- memory_mb values are the node's specified memory minus 4 GB -->
<expand macro="stampede_destination" id="normal" native_specification="--partition=normal --nodes=1 --ntasks=64 --time=48:00:00 --account=TG-MCB140147" memory_mb="94208" />
<expand macro="stampede_destination" id="development" native_specification="--partition=development --nodes=1 --ntasks=64 --time=00:30:00 --account=TG-MCB140147" memory_mb="94208" />
<expand macro="stampede_destination" id="skx_normal" native_specification="--partition=skx-normal --nodes=1 --ntasks=48 --time=48:00:00 --account=TG-MCB140147" memory_mb="192512" />
<expand macro="stampede_destination" id="skx_development" native_specification="--partition=skx-dev --nodes=1 --ntasks=48 --time=00:30:00 --account=TG-MCB140147" memory_mb="192512" />
<expand macro="stampede_nagios_destination" />
<!-- MPI destinations -->
<expand macro="dynamic_destination" dest="mpi" />
<!-- ntasks and time are set by the dynamic rule -->
<expand macro="slurm_destination" dest="mpi_multi" partition="multi" native_specification="" java_mem="4g" />
<expand macro="stampede_mpi_destination" id="normal" native_specification="--account=TG-MCB140147 --partition=normal --nodes=1" />
<expand macro="stampede_mpi_destination" id="skx_normal" native_specification="--account=TG-MCB140147 --partition=skx-normal --nodes=1" />
<expand macro="slurm_destination" dest="mpi_development" partition="normal" native_specification="" java_mem="4g" />
<expand macro="stampede_mpi_destination" id="development" native_specification="--account=TG-MCB140147 --partition=development --nodes=1" />
<expand macro="stampede_mpi_destination" id="skx_development" native_specification="--account=TG-MCB140147 --partition=skx-dev --nodes=1" />
<!-- bridges destinations -->
<!-- walltime and mem are set dynamically -->
<!-- use constraint to avoid running on xl nodes, which do not mount /pylon5 -->
<expand macro="bridges_destination" id="normal" native_specification="-p LM --constraint=LM&EGRESS" />
<!-- 147456 MB == 144 GB (3 cores) (128GB is the minimum for LM) -->
<expand macro="bridges_destination" id="development" native_specification="-p LM --constraint=LM&EGRESS -t 00:30:00 --mem=147456" />
<expand macro="bridges_nagios_destination" />
<!-- local destination for nagios handler checks -->
<destination id="local" runner="local"/>
</destinations>
<tools>
<!-- non-conda python 2 legacy tools -->
{% for tool_id in galaxy_python2_legacy_tools %}
{% set short_tool_id = tool_id.split('/')[-2] %}
{% if short_tool_id in galaxy_multicore_tools %}
<tool id="{{ tool_id }}" destination="slurm_multi_legacy" />
{% else %}
<tool id="{{ tool_id }}" destination="slurm_normal_legacy" />
{% endif %}
{% endfor %}
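{# Illustrative only (hypothetical inventory entry): for a tool id such as
   "toolshed.g2.bx.psu.edu/repos/devteam/example_tool/example_tool/1.0.0" in galaxy_python2_legacy_tools,
   short_tool_id resolves to "example_tool" and the loop above renders
   <tool id="toolshed.g2.bx.psu.edu/repos/devteam/example_tool/example_tool/1.0.0" destination="slurm_normal_legacy" />
   or destination="slurm_multi_legacy" if "example_tool" also appears in galaxy_multicore_tools. #}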
<!-- multi/stampede jobs -->
{% for tool_id in galaxy_multicore_tools | difference(galaxy_multicore_tools_exclude) %}
<expand macro="stampede_resubmit_tool" id="{{ tool_id }}" />
{% endfor %}
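{# Illustrative only: each remaining multicore tool id is rendered as an
   <expand macro="stampede_resubmit_tool" id="..." /> line, i.e. routed through
   dynamic_local_stampede_select_dynamic_walltime with the local_or_stampede resource group
   (see the stampede_resubmit_tool macro below). #}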
<!-- mpi/stampede tools -->
<tool id="hyphy_absrel" destination="dynamic_mpi" handler="multi" resources="mpi" />
<tool id="hyphy_fel" destination="dynamic_mpi" handler="multi" resources="mpi" />
<tool id="hyphy_gard" destination="dynamic_mpi" handler="multi" resources="mpi" />
<tool id="hyphy_meme" destination="dynamic_mpi" handler="multi" resources="mpi" />
<!-- explicit stampede jobs -->
<expand macro="stampede_tool" id="ncbi_blastn_wrapper" />
<expand macro="stampede_tool" id="ncbi_blastp_wrapper" />
<expand macro="stampede_tool" id="ncbi_blastx_wrapper" />
<expand macro="stampede_tool" id="ncbi_rpsblast_wrapper" />
<expand macro="stampede_tool" id="ncbi_tblastn_wrapper" />
<expand macro="stampede_tool" id="ncbi_tblastx_wrapper" />
<expand macro="stampede_tool" id="megablast_wrapper" />
<!-- new LASTZ wrapper is not multicore, but it may need more memory
<expand macro="stampede_tool" id="lastz_wrapper_2" /> -->
<expand macro="stampede_tool" id="bwa_color_wrapper" />
<expand macro="stampede_tool" id="bowtie_color_wrapper" />
<!-- bridges jobs -->
<expand macro="bridges_tool" id="trinity_psc" />
<expand macro="bridges_tool" id="trinity" />
<expand macro="bridges_tool" id="spades" />
<expand macro="bridges_tool" id="unicycler" />
<expand macro="bridges_tool" id="star_fusion" />
<expand macro="bridges_tool" id="abyss-pe" />
<!-- trackster jobs -->
<tool id="cufflinks" destination="slurm_trackster_multi">
<param id="source">trackster</param>
</tool>
<!-- dynamic memory tools -->
<tool id="naive_variant_caller" destination="dynamic_nvc_dynamic_memory"/>
<!-- local/jetstream tools -->
<!--
<tool id="align_families" destination="dynamic_local_stampede_select_dynamic_walltime" handler="multi_handlers" resources="local_or_stampede"/>
-->
<tool id="align_families" destination="slurm_multi_align_families" handler="multi_handlers"/>
<!-- roundup multi/jetstream/bridges jobs -->
<tool id="rna_star" destination="dynamic_multi_bridges_select" handler="multi_handlers" resources="multi_or_bridges"/>
<!-- 16GB tools -->
<tool id="join1" destination="dynamic_normal_16gb_reserved"/>
<tool id="gops_join_1" destination="dynamic_normal_16gb_reserved"/>
<tool id="gatk_indel_realigner" destination="dynamic_normal_16gb_reserved"/>
<tool id="gatk_depth_of_coverage" destination="dynamic_normal_16gb_reserved"/>
<tool id="gatk_table_recalibration" destination="dynamic_normal_16gb_reserved"/>
<tool id="fastq_paired_end_joiner" destination="dynamic_normal_16gb_reserved"/>
<tool id="bamtools" destination="dynamic_normal_16gb_reserved"/>
<tool id="varscan" destination="dynamic_normal_16gb_reserved"/>
<tool id="scatterplot_rpy" destination="dynamic_normal_16gb_reserved"/>
<tool id="htseq_count" destination="dynamic_normal_16gb_reserved"/>
<tool id="flanking_features_1" destination="dynamic_normal_16gb_reserved"/>
<tool id="cummeRbund" destination="dynamic_normal_16gb_reserved"/>
<tool id="collection_column_join" destination="dynamic_normal_16gb_reserved"/>
<tool id="rseqc_read_duplication" destination="dynamic_normal_16gb_reserved"/>
<tool id="rseqc_RPKM_saturation" destination="dynamic_normal_16gb_reserved"/>
<tool id="rseqc_bam2wig" destination="dynamic_normal_16gb_reserved"/>
<tool id="seqtk_sample" destination="dynamic_normal_16gb_reserved"/>
<tool id="ggplot2_heatmap2" destination="dynamic_normal_16gb_reserved"/>
<!-- 32GB tools -->
<tool id="Interval2Maf1" destination="dynamic_normal_32gb_reserved"/>
<!-- 64GB tools -->
<tool id="wig_to_bigWig" destination="dynamic_normal_64gb_reserved"/>
<tool id="CONVERTER_bedgraph_to_bigwig" destination="dynamic_normal_64gb_reserved"/>
<!-- jetstream autoscale test -->
<tool id="toolshed.g2.bx.psu.edu/repos/bgruening/text_processing/tp_sed_tool/1.1.0" destination="jetstream_iu_small" handler="multi_handlers" />
<tool id="toolshed.g2.bx.psu.edu/repos/bgruening/text_processing/tp_awk_tool/1.1.0" destination="jetstream_iu_small" handler="multi_handlers" />
<!-- nagios checks -->
<!-- run wherever it lands
<tool id="echo_main_cluster"/>
-->
<tool id="echo_main_w3_handler0" destination="local" handler="main_w3_handler0"/>
<tool id="echo_main_w3_handler1" destination="local" handler="main_w3_handler1"/>
<tool id="echo_main_w3_handler2" destination="local" handler="main_w3_handler2"/>
<tool id="echo_main_w4_handler0" destination="local" handler="main_w4_handler0"/>
<tool id="echo_main_w4_handler1" destination="local" handler="main_w4_handler1"/>
<tool id="echo_main_w4_handler2" destination="local" handler="main_w4_handler2"/>
<tool id="echo_main_jetstream_iu" destination="jetstream_iu_nagios" handler="multi_handlers"/>
<tool id="echo_main_jetstream_tacc" destination="jetstream_tacc_nagios" handler="multi_handlers"/>
<tool id="echo_main_stampede" destination="stampede_nagios" handler="multi_handlers"/>
<tool id="echo_main_bridges" destination="bridges_nagios" handler="multi_handlers"/>
</tools>
<resources default="default">
<group id="default"></group>
<group id="local_or_stampede">tacc_compute_resource</group>
<group id="multi_or_bridges">multi_bridges_compute_resource</group>
<group id="stampede">stampede_compute_resource</group>
<group id="bridges">bridges_compute_resource</group>
<group id="mpi">tacc_compute_resource_advanced,cores,time</group>
</resources>
<limits>
<!--
<limit type="registered_user_concurrent_jobs">6</limit>
-->
<limit type="anonymous_user_concurrent_jobs">1</limit>
<limit type="job_walltime">49:00:00</limit>
<limit type="output_size">200G</limit>
<!-- per-destination per-user limits -->
<limit type="destination_user_concurrent_jobs" id="slurm_normal">4</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_normal_legacy">1</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_normal_16gb">1</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_normal_64gb">1</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_normal_dynamic_mem">1</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_multi">2</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_multi_legacy">1</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_multi_dynamic_walltime">2</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_multi_development">1</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_multi_trackster">1</limit>
<limit type="destination_user_concurrent_jobs" id="slurm_multi_align_families">4</limit>
<limit type="destination_user_concurrent_jobs" id="reserved_normal">16</limit>
<limit type="destination_user_concurrent_jobs" id="reserved_normal_16gb">8</limit>
<limit type="destination_user_concurrent_jobs" id="reserved_normal_32gb">4</limit>
<limit type="destination_user_concurrent_jobs" id="reserved_normal_64gb">2</limit>
<limit type="destination_user_concurrent_jobs" id="reserved_multi">8</limit>
<limit type="destination_user_concurrent_jobs" id="stampede_normal">4</limit>
<limit type="destination_user_concurrent_jobs" id="stampede_skx_normal">4</limit>
<limit type="destination_user_concurrent_jobs" id="stampede_development">1</limit>
<limit type="destination_user_concurrent_jobs" id="stampede_skx_development">1</limit>
<!--
<limit type="destination_user_concurrent_jobs" id="bridges_normal">1</limit>
<limit type="destination_user_concurrent_jobs" id="bridges_development">1</limit>
-->
<limit type="destination_user_concurrent_jobs" id="jetstream">4</limit>
<!-- per-destination total limits -->
<limit type="destination_total_concurrent_jobs" id="stampede_normal">48</limit>
<limit type="destination_total_concurrent_jobs" id="stampede_skx_normal">23</limit>
<limit type="destination_total_concurrent_jobs" id="stampede_mpi_normal">4</limit>
<limit type="destination_total_concurrent_jobs" id="stampede_mpi_skx_normal">4</limit>
<limit type="destination_total_concurrent_jobs" id="stampede_development">1</limit>
<limit type="destination_total_concurrent_jobs" id="stampede_skx_development">1</limit>
<limit type="destination_total_concurrent_jobs" id="stampede_mpi_development">1</limit>
<limit type="destination_total_concurrent_jobs" id="stampede_mpi_skx_development">1</limit>
<!--
<limit type="destination_total_concurrent_jobs" id="bridges_normal">10</limit>
<limit type="destination_total_concurrent_jobs" id="bridges_development">1</limit>
-->
</limits>
<macros>
<xml name="pulsar_plugin" tokens="id">
<plugin id="pulsar_@ID@" type="runner" load="galaxy.jobs.runners.pulsar:PulsarMQJobRunner">
<param id="amqp_url">{{ galaxy_job_conf_amqp_url }}</param>
<param id="galaxy_url">https://{{ inventory_hostname_short }}.galaxyproject.org</param>
<param id="manager">@ID@</param>
<param id="persistence_directory">/srv/galaxy/{{ galaxy_instance_codename }}/var/pulsar_amqp_ack</param>
<param id="amqp_acknowledge">True</param>
<param id="amqp_ack_republish_time">1200</param>
<param id="amqp_consumer_timeout">2.0</param>
<param id="amqp_publish_retry">True</param>
<param id="amqp_publish_retry_max_retries">60</param>
</plugin>
</xml>
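{# Illustrative only: <expand macro="pulsar_plugin" id="stampede" /> in the <plugins> section above
   resolves to <plugin id="pulsar_stampede" type="runner" load="galaxy.jobs.runners.pulsar:PulsarMQJobRunner">
   with its "manager" param set to "stampede" and the AMQP params defined here. #}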
<xml name="normal_handler" tokens="id,hostid">
<handler id="main_w@HOSTID@_handler@ID@" tags="handlers">
<plugin id="local"/>
<plugin id="slurm"/>
</handler>
</xml>
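{# Illustrative only: <expand macro="normal_handler" hostid="3" id="0" /> produces
   <handler id="main_w3_handler0" tags="handlers"> restricted to the local and slurm plugins;
   these generated ids are what the echo_main_w3_handler* nagios tools above bind to. #}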
<xml name="multi_handler" tokens="id,hostid">
<handler id="main_w@HOSTID@_handler@ID@" tags="multi_handlers">
<plugin id="local"/>
<plugin id="slurm"/>
<plugin id="pulsar_jetstream_iu"/>
<plugin id="pulsar_jetstream_iu_nagios"/>
<plugin id="pulsar_jetstream_tacc"/>
<plugin id="pulsar_jetstream_tacc_nagios"/>
<plugin id="pulsar_stampede"/>
<plugin id="pulsar_stampede_nagios"/>
<plugin id="pulsar_bridges"/>
<plugin id="pulsar_bridges_nagios"/>
</handler>
</xml>
<xml name="dynamic_destination" tokens="dest">
<destination id="dynamic_@DEST@" runner="dynamic">
<param id="type">python</param>
<param id="function">dynamic_@DEST@</param>
</destination>
</xml>
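{# Illustrative only: <expand macro="dynamic_destination" dest="stampede_select" /> resolves to
   <destination id="dynamic_stampede_select" runner="dynamic"> with type "python" and
   function "dynamic_stampede_select", so each dynamic_* destination is expected to have a
   same-named rule function in the usegalaxy.jobs.rules module configured on the dynamic plugin. #}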
<xml name="slurm_destination" tokens="dest,partition,native_specification,java_mem" token_java_mem="7g">
<destination id="slurm_@DEST@" runner="slurm">
<param id="nativeSpecification">--partition=@PARTITION@ --nodes=1 @NATIVE_SPECIFICATION@</param>
<expand macro="dest_local_env" java_mem="@JAVA_MEM@ "/>
</destination>
</xml>
<xml name="slurm_reserved_destination" tokens="dest,partition,native_specification,java_mem" token_java_mem="7g">
<expand macro="dynamic_destination" dest="@DEST@_reserved" />
<expand macro="slurm_destination" dest="@DEST@" partition="@PARTITION@" native_specification="@NATIVE_SPECIFICATION@" java_mem="@JAVA_MEM@" />
<destination id="reserved_@DEST@" runner="slurm">
<param id="nativeSpecification">--clusters=roundup --partition=reserved --nodes=1 @NATIVE_SPECIFICATION@</param>
<expand macro="dest_local_env" java_mem="@JAVA_MEM@" />
</destination>
</xml>
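{# Illustrative only: a single <expand macro="slurm_reserved_destination" dest="normal" ... /> therefore
   yields three destinations: dynamic_normal_reserved (the dynamic rule entry point and the
   <destinations> default), slurm_normal (the regular partition), and reserved_normal (the reserved
   partition), which is why the <limits> section references both slurm_normal and reserved_normal ids. #}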
<!-- some unfortunate duplication here because macro expansion inside the macro attribute of an expand tag is not supported -->
<xml name="jetstream_destination" tokens="id,site,native_specification">
<!-- the "jetstream" tag is for limits -->
<destination id="jetstream_@SITE@_@ID@" runner="pulsar_jetstream_@SITE@" tags="jetstream">
<param id="remote_metadata">true</param>
<param id="submit_native_specification">@NATIVE_SPECIFICATION@</param>
<expand macro="dest_pulsar_common_params" />
<expand macro="dest_pulsar_jetstream_params" />
</destination>
</xml>
<xml name="jetstream_nagios_destination" tokens="site">
<destination id="jetstream_@SITE@_nagios" runner="pulsar_jetstream_@SITE@_nagios">
<expand macro="dest_pulsar_common_params" />
<expand macro="dest_pulsar_jetstream_params" />
</destination>
</xml>
{# TODO: Now that we use --ntasks for regular (non-MPI) jobs, these destinations can be merged, but it will
require changes to the mpi dynamic rule that I don't have time to make at the moment #}
<xml name="stampede_destination" tokens="id,native_specification,memory_mb">
<destination id="stampede_@ID@" runner="pulsar_stampede">
<param id="remote_metadata">true</param>
<param id="submit_native_specification">@NATIVE_SPECIFICATION@</param>
<expand macro="dest_pulsar_common_params" />
<expand macro="dest_pulsar_stampede_params" />
<!-- Stampede assigns whole nodes, so $SLURM_CPUS_ON_NODE is not the same as the requested number of tasks -->
<env exec="GALAXY_SLOTS=$SLURM_NTASKS" />
<!-- Slurm reports mem=0 when no explicit memory request is made, and requesting memory is not allowed on Stampede2, so GALAXY_MEMORY_MB is set explicitly below -->
<env exec="GALAXY_MEMORY_MB=@MEMORY_MB@" />
</destination>
</xml>
<xml name="stampede_mpi_destination" tokens="id,native_specification">
<destination id="stampede_mpi_@ID@" runner="pulsar_stampede">
<param id="remote_metadata">true</param>
<param id="submit_native_specification">@NATIVE_SPECIFICATION@</param>
<expand macro="dest_pulsar_common_params" />
<expand macro="dest_pulsar_stampede_params" />
<!-- Stampede assigns whole nodes, so $SLURM_CPUS_ON_NODE is not the same as the requested number of tasks -->
<env exec="GALAXY_SLOTS=$SLURM_NTASKS" />
</destination>
</xml>
<xml name="stampede_nagios_destination">
<destination id="stampede_nagios" runner="pulsar_stampede_nagios">
<expand macro="dest_pulsar_common_params" />
<expand macro="dest_pulsar_stampede_params" />
</destination>
</xml>
<xml name="bridges_destination" tokens="id,native_specification">
<destination id="bridges_@ID@" runner="pulsar_bridges">
<param id="remote_metadata">true</param>
<param id="submit_native_specification">@NATIVE_SPECIFICATION@</param>
<expand macro="dest_pulsar_common_params" />
<expand macro="dest_pulsar_bridges_params" />
</destination>
</xml>
<xml name="bridges_nagios_destination">
<destination id="bridges_nagios" runner="pulsar_bridges_nagios">
<expand macro="dest_pulsar_common_params" />
<expand macro="dest_pulsar_bridges_params" />
</destination>
</xml>
<xml name="dest_local_env" tokens="java_mem" token_java_mem="7g">
<!-- the cloudmap tools still use R 2.11(!) from here, and the genome diversity tools use things in /galaxy/software -->
<env id="PATH">/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/deps/_manual/bin:/galaxy/{{ galaxy_instance_codename }}/linux-x86_64/bin:/galaxy/software/linux-x86_64/bin:/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin</env>
<env id="LD_LIBRARY_PATH">/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/venv/lib</env>
<expand macro="dest_tmp_env" />
<env id="_JAVA_OPTIONS">$_JAVA_OPTIONS -Xmx@JAVA_MEM@ -Xms256m</env>
<!-- explicit venv activation is necessary until we upgrade to 17.09 (slurm is passing $VIRTUAL_ENV but we reset $PATH) -->
<env file="/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/venv/bin/activate" />
</xml>
<xml name="dest_tmp_env">
<env id="XDG_DATA_HOME">/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/xdg/data</env>
<env id="TEMP">$(dirname ${BASH_SOURCE[0]})/_job_tmp</env>
<env id="TMPDIR">$TEMP</env>
<env id="_JAVA_OPTIONS">-Djava.io.tmpdir=$TEMP</env>
<env exec="mkdir -p $TEMP" />
</xml>
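{# Illustrative note (assumes Galaxy writes these env lines into the generated job script): because
   $TEMP is derived from ${BASH_SOURCE[0]} at run time, each job gets its own _job_tmp directory next
   to that script, created by the mkdir exec above and used for TMPDIR and java.io.tmpdir. #}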
<xml name="dest_pulsar_common_params">
<param id="transport">curl</param>
<param id="default_file_action">remote_transfer</param>
<param id="dependency_resolution">remote</param>
<param id="rewrite_parameters">true</param>
<param id="submit_user_email">$__user_email__</param>
<env id="LC_ALL">C</env>
</xml>
<xml name="dest_pulsar_jetstream_params">
<param id="use_remote_datatypes">true</param>
<param id="remote_property_galaxy_datatypes_config_file">/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/galaxy/config/datatypes_conf.xml.sample</param>
<param id="jobs_directory">/jetstream/scratch0/{{ galaxy_instance_codename }}/jobs</param>
<!-- this doesn't work; set in the supervisor environment instead
<param id="remote_property_galaxy_virtual_env">/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/venv</param>
-->
<param id="remote_property_galaxy_home">/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/galaxy</param>
<param id="file_action_config">{{ galaxy_config_dir }}/pulsar_jetstream_actions.yml</param>
<env id="PATH">/jetstream/scratch0/main/conda/envs/set_metadata@20171114/bin:$PATH</env>
<expand macro="dest_tmp_env" />
</xml>
<xml name="dest_pulsar_stampede_params">
<param id="use_remote_datatypes">true</param>
<param id="remote_property_galaxy_datatypes_config_file">/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/galaxy/config/datatypes_conf.xml.sample</param>
<param id="jobs_directory">/scratch/03166/xcgalaxy/{{ galaxy_instance_codename }}/staging/</param>
<!-- this doesn't work; set in the supervisor environment instead
<param id="remote_property_galaxy_virtual_env">/work/galaxy/{{ galaxy_instance_codename }}/galaxy/venv</param>
-->
<!-- this used to work but no longer does either; set in the supervisor environment instead. However, the Pulsar
client still requires it to be set when remote_metadata is enabled -->
<param id="remote_property_galaxy_home">/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/galaxy</param>
<param id="file_action_config">{{ galaxy_config_dir }}/pulsar_stampede_actions.yml</param>
<env exec="eval `/opt/apps/lmod/lmod/libexec/lmod bash purge`" />
<env exec="ulimit -c 0" />
</xml>
<xml name="dest_pulsar_bridges_params">
<param id="use_remote_datatypes">true</param>
<param id="remote_property_galaxy_datatypes_config_file">/pylon5/mc48nsp/xcgalaxy/{{ galaxy_instance_codename }}/galaxy/server/config/datatypes_conf.xml.sample</param>
<param id="jobs_directory">/pylon5/mc48nsp/xcgalaxy/{{ galaxy_instance_codename }}/staging/</param>
<!-- see stampede comments
<param id="remote_property_galaxy_virtual_env">/pylon5/mc48nsp/xcgalaxy/{{ galaxy_instance_codename }}/galaxy/venv</param>
-->
<param id="remote_property_galaxy_home">/cvmfs/{{ galaxy_instance_codename }}.galaxyproject.org/galaxy</param>
<param id="file_action_config">{{ galaxy_config_dir }}/pulsar_bridges_actions.yml</param>
<env exec="eval `modulecmd sh purge`" />
<!-- https://bugs.openjdk.java.net/browse/JDK-7085890 -->
<env id="PATH">/pylon5/mc48nsp/xcgalaxy/openjdk8/bin:$PATH</env>
<env id="_JAVA_OPTIONS">-Dsun.zip.disableMemoryMapping=true</env>
<!-- SPAdes, as run by Unicycler, leaves core dumps in Pulsar's site-packages dir -->
<env exec="ulimit -c 0" />
</xml>
<xml name="multi_tool" tokens="id">
<tool id="@ID@" destination="dynamic_multi_reserved" handler="multi_handlers" />
</xml>
<xml name="stampede_tool" tokens="id">
<tool id="@ID@" destination="dynamic_stampede_select" handler="multi_handlers" resources="stampede"/>
</xml>
<xml name="stampede_resubmit_tool" tokens="id">
<tool id="@ID@" destination="dynamic_local_stampede_select_dynamic_walltime" handler="multi_handlers" resources="local_or_stampede" />
</xml>
<xml name="bridges_tool" tokens="id">
<tool id="@ID@" destination="dynamic_bridges_select" handler="multi_handlers" resources="bridges"/>
</xml>
</macros>
</job_conf>