cryoSPARC Cluster Integration Script Examples
Example cluster_info.json and cluster_script.sh files for several cluster workload managers
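
Each example below is a pair of files that together define one cluster lane: cluster_info.json holds the submission, status, and deletion command templates, and cluster_script.sh is the Jinja2 job-script template that those commands submit. A typical way to register a lane from such a pair (the directory path below is a placeholder) is:

cd /path/to/cluster_config_dir    # directory containing cluster_info.json and cluster_script.sh
cryosparcm cluster connect        # create or update the lane named in cluster_info.json
cryosparcm cluster remove <name>  # remove a lane that is no longer needed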

SLURM

Example A

cluster_info.json
{
    "qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
    "worker_bin_path": "/home/cryosparcuser/cryosparc_worker/bin/cryosparcw",
    "title": "debug_cluster",
    "cache_path": "/ssd/tmp",
    "qinfo_cmd_tpl": "sinfo --format='%.8N %.6D %.10P %.6T %.14C %.5c %.6z %.7m %.7G %.9d %20E'",
    "qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
    "qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
    "cache_quota_mb": null,
    "send_cmd_tpl": "{{ command }}",
    "cache_reserve_mb": 10000,
    "name": "debug_cluster"
}
cluster_script.sh
#!/bin/bash
#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --partition=debug
#SBATCH --output={{ job_log_path_abs }}
#SBATCH --error={{ job_log_path_abs }}
#SBATCH --nodes=1
#SBATCH --mem={{ (ram_gb*1000)|int }}M
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task={{ num_cpu }}
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --gres-flags=enforce-binding

# Build CUDA_VISIBLE_DEVICES from the GPUs (indices 0-15) that currently have no compute processes running.
available_devs=""
for devidx in $(seq 0 15);
do
    if [[ -z $(nvidia-smi -i $devidx --query-compute-apps=pid --format=csv,noheader) ]] ; then
        if [[ -z "$available_devs" ]] ; then
            available_devs=$devidx
        else
            available_devs=$available_devs,$devidx
        fi
    fi
done
export CUDA_VISIBLE_DEVICES=$available_devs

srun {{ run_cmd }}

Example B

cluster_info.json
{
    "qdel_cmd_tpl": "scancel {{ cluster_job_id }}",
    "worker_bin_path": "/home/cryosparcuser/cryosparc_worker/bin/cryosparcw",
    "title": "test",
    "cache_path": "",
    "qinfo_cmd_tpl": "sinfo",
    "qsub_cmd_tpl": "sbatch {{ script_path_abs }}",
    "qstat_cmd_tpl": "squeue -j {{ cluster_job_id }}",
    "send_cmd_tpl": "{{ command }}",
    "name": "test"
}
cluster_script.sh
#!/bin/bash
#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --output={{ job_log_path_abs }}
#SBATCH --error={{ job_log_path_abs }}
#SBATCH --ntasks={{ num_cpu }}
#SBATCH --mem={{ (ram_gb*1000)|int }}M
#SBATCH --cpus-per-task=1
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --gres-flags=enforce-binding

# Build CUDA_VISIBLE_DEVICES from the GPUs (indices 0-15) that currently have no compute processes running.
available_devs=""
for devidx in $(seq 0 15);
do
    if [[ -z $(nvidia-smi -i $devidx --query-compute-apps=pid --format=csv,noheader) ]] ; then
        if [[ -z "$available_devs" ]] ; then
            available_devs=$devidx
        else
            available_devs=$available_devs,$devidx
        fi
    fi
done
export CUDA_VISIBLE_DEVICES=$available_devs

srun {{ run_cmd }}

Example C

cluster_script.sh
#!/bin/bash
#SBATCH --partition=gpu
#SBATCH --nodes=1
#SBATCH --ntasks={{ num_cpu }}
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --time=48:00:00
#SBATCH --mem={{ (ram_gb)|int }}GB
#SBATCH --exclusive
#SBATCH --job-name cspark_{{ project_uid }}_{{ job_uid }}
#SBATCH --output={{ job_dir_abs }}/output.txt
#SBATCH --error={{ job_dir_abs }}/error.txt

{{ run_cmd }}

Example D

cluster_script.sh
#!/bin/bash
#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --partition=q2
#SBATCH --output={{ job_log_path_abs }}
#SBATCH --error={{ job_log_path_abs }}
{%- if num_gpu == 0 %}
#SBATCH --ntasks={{ num_cpu }}
#SBATCH --cpus-per-task=1
#SBATCH --threads-per-core=1
{%- else %}
#SBATCH --nodes=1
#SBATCH --ntasks-per-node={{ num_cpu }}
#SBATCH --cpus-per-task=1
#SBATCH --threads-per-core=1
#SBATCH --gres=gpu:{{ num_gpu }}
#SBATCH --gres-flags=enforce-binding
{%- endif %}

# Build CUDA_VISIBLE_DEVICES from the GPUs (indices 0-15) that currently have no compute processes running.
available_devs=""
for devidx in $(seq 0 15);
do
    if [[ -z $(nvidia-smi -i $devidx --query-compute-apps=pid --format=csv,noheader) ]] ; then
        if [[ -z "$available_devs" ]] ; then
            available_devs=$devidx
        else
            available_devs=$available_devs,$devidx
        fi
    fi
done
export CUDA_VISIBLE_DEVICES=$available_devs

{{ run_cmd }}
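Because cluster_script.sh is rendered as a Jinja2 template at submission time, the {%- if num_gpu == 0 %} block above lets a single lane serve both CPU-only and GPU jobs. For illustration only (hypothetical values: project P3, job J42, 4 CPUs, 2 GPUs; output/error directives omitted), the GPU branch would render to directives along these lines:

#SBATCH --job-name=cryosparc_P3_J42
#SBATCH --partition=q2
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=1
#SBATCH --threads-per-core=1
#SBATCH --gres=gpu:2
#SBATCH --gres-flags=enforce-binding

A CPU-only job (num_gpu == 0) would instead receive the plain --ntasks/--cpus-per-task directives and no --gres request.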

Example E

cluster_script.sh
{%- macro _min(a, b) -%}
{%- if a <= b %}{{a}}{% else %}{{b}}{% endif -%}
{%- endmacro -%}
#!/bin/bash
#SBATCH --job-name=cryosparc_{{ project_uid }}_{{ job_uid }}
#SBATCH --output={{ job_log_path_abs }}.out
#SBATCH --error={{ job_log_path_abs }}.err
#SBATCH --cpus-per-task=1
#SBATCH --threads-per-core=1
#SBATCH --partition=gpu
#SBATCH --exclusive
#SBATCH --mem=100000

{%- if num_gpu == 0 %}
# Use CPU cluster
#SBATCH --constraint=mc
#SBATCH --ntasks={{ num_cpu }}
{%- endif %}

# Build CUDA_VISIBLE_DEVICES from the GPUs (indices 0-15) that currently have no compute processes running.
available_devs=""
for devidx in $(seq 0 15);
do
    if [[ -z $(nvidia-smi -i $devidx --query-compute-apps=pid --format=csv,noheader) ]] ; then
        if [[ -z "$available_devs" ]] ; then
            available_devs=$devidx
        else
            available_devs=$available_devs,$devidx
        fi
    fi
done
export CUDA_VISIBLE_DEVICES=$available_devs

{{ run_cmd }}
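The _min macro defined at the top of this example is not used anywhere in the directives shown; a macro like this is typically used to cap a requested resource at a site limit. For example (the 40-task limit is hypothetical):

## Hypothetical use of the _min macro: request at most 40 tasks
#SBATCH --ntasks={{ _min(num_cpu, 40) }}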

PBS

Example A

cluster_info.json
{
    "name" : "pbscluster",
    "worker_bin_path" : "/path/to/cryosparc_worker/bin/cryosparcw",
    "cache_path" : "/path/to/local/SSD/on/cluster/nodes",
    "send_cmd_tpl" : "ssh loginnode {{ command }}",
    "qsub_cmd_tpl" : "qsub {{ script_path_abs }}",
    "qstat_cmd_tpl" : "qstat -as {{ cluster_job_id }}",
    "qdel_cmd_tpl" : "qdel {{ cluster_job_id }}",
    "qinfo_cmd_tpl" : "qstat -q",
    "transfer_cmd_tpl" : "scp {{ src_path }} loginnode:{{ dest_path }}"
}
cluster_script.sh
#!/bin/bash
#### cryoSPARC cluster submission script template for PBS
## Available variables:
## {{ run_cmd }}            - the complete command string to run the job
## {{ num_cpu }}            - the number of CPUs needed
## {{ num_gpu }}            - the number of GPUs needed.
##                            Note: the code will use this many GPUs starting from dev id 0.
##                            The cluster scheduler or this script is responsible for setting
##                            CUDA_VISIBLE_DEVICES so that the job code ends up using the
##                            correct cluster-allocated GPUs.
## {{ ram_gb }}             - the amount of RAM needed in GB
## {{ job_dir_abs }}        - absolute path to the job directory
## {{ project_dir_abs }}    - absolute path to the project dir
## {{ job_log_path_abs }}   - absolute path to the log file for the job
## {{ worker_bin_path }}    - absolute path to the cryosparc worker command
## {{ run_args }}           - arguments to be passed to cryosparcw run
## {{ project_uid }}        - uid of the project
## {{ job_uid }}            - uid of the job
## {{ job_creator }}        - name of the user that created the job (may contain spaces)
## {{ cryosparc_username }} - cryosparc username of the user that created the job (usually an email)
##
## What follows is a simple PBS script:

#PBS -N cryosparc_{{ project_uid }}_{{ job_uid }}
#PBS -l select=1:ncpus={{ num_cpu }}:ngpus={{ num_gpu }}:mem={{ (ram_gb*1000)|int }}mb:gputype=P100
#PBS -o {{ job_dir_abs }}
#PBS -e {{ job_dir_abs }}

# Build CUDA_VISIBLE_DEVICES from the GPUs (indices 0-15) that currently have no compute processes running.
available_devs=""
for devidx in $(seq 0 15);
do
    if [[ -z $(nvidia-smi -i $devidx --query-compute-apps=pid --format=csv,noheader) ]] ; then
        if [[ -z "$available_devs" ]] ; then
            available_devs=$devidx
        else
            available_devs=$available_devs,$devidx
        fi
    fi
done
export CUDA_VISIBLE_DEVICES=$available_devs

{{ run_cmd }}

UGE

Example A

cluster_info.json
{
    "name" : "ugecluster",
    "worker_bin_path" : "/u/cryosparcuser/cryosparc/cryosparc_worker/bin/cryosparcw",
    "cache_path" : "/scratch/cryosparc_cache",
    "send_cmd_tpl" : "{{ command }}",
    "qsub_cmd_tpl" : "qsub {{ script_path_abs }}",
    "qstat_cmd_tpl" : "qstat -j {{ cluster_job_id }}",
    "qdel_cmd_tpl" : "qdel {{ cluster_job_id }}",
    "qinfo_cmd_tpl" : "qstat -q default.q",
    "transfer_cmd_tpl" : "scp {{ src_path }} uoft:{{ dest_path }}"
}
cluster_script.sh
#!/bin/bash

## What follows is a simple UGE script:

## Job Name
#$ -N cryosparc_{{ project_uid }}_{{ job_uid }}

## Number of CPUs (always request 1 CPU slot and oversubscribe, since the GPU request is a per-core value)
##$ -pe smp {{ num_cpu }}
#$ -pe smp 1

## Memory per CPU core
#$ -l m_mem_free={{ (ram_gb)|int }}G

## Number of GPUs
#$ -l gpu_card={{ num_gpu }}

## Time limit: 4 days
#$ -l h_rt=345600

## STDOUT/STDERR
#$ -o {{ job_dir_abs }}/uge.log
#$ -e {{ job_dir_abs }}/uge.log
#$ -j y

## Number of threads
export OMP_NUM_THREADS={{ num_cpu }}

echo "HOSTNAME: $HOSTNAME"

# Build CUDA_VISIBLE_DEVICES from the GPUs (indices 0-15) that currently have no compute processes running.
available_devs=""
for devidx in $(seq 0 15);
do
    if [[ -z $(nvidia-smi -i $devidx --query-compute-apps=pid --format=csv,noheader) ]] ; then
        if [[ -z "$available_devs" ]] ; then
            available_devs=$devidx
        else
            available_devs=$available_devs,$devidx
        fi
    fi
done
export CUDA_VISIBLE_DEVICES=$available_devs

{{ run_cmd }}