parallel-slurm/parallel_opts.sh
#!/usr/bin/env bash
# GNU Parallel setup for SLURM
#
# Author: Pariksheet Nanda <hpc@uconn.edu> 2016-2017,2019
#
# License: Public Domain / CC0
#
# To the extent possible under law, Pariksheet Nanda has waived all
# copyright and related or neighboring rights to GNU Parallel setup
# for SLURM.

# This directive applies to the entire script.
# shellcheck disable=2039
true

is_slurm_env () {
    if [[ -n "$SLURM_JOB_ID" ]]
    then # yes
        return 0
    else
        return 1
    fi
}

# Check for ClusterShell, the helper used below to expand hostnames
has_clustershell () {
    if python -m ClusterShell.CLI.Nodeset -h &> /dev/null
    then
        return 0
    else
        return 1
    fi
}

install_clustershell () {
    # Send pip's progress output to stderr so that callers capturing
    # this script's stdout (the parallel options) are not polluted
    python -m pip install --user clustershell 1>&2
}

setup_on_cluster () {
    # Allow export of environment using `--env` option
    if [[ ! -e ~/.parallel/ignored_vars ]]; then
        # Create an empty ignored_vars file to pass all the environment
        # variables to the SSH instance
        mkdir -p ~/.parallel
        touch ~/.parallel/ignored_vars
    fi
}

# Expand tasks from "2,5(x1),3(x2)" to "2 5 3 3 "
expand_slurm_tasks_per_node () {
    [[ -z "${SLURM_TASKS_PER_NODE}" ]] && return
    local tasks
    # shellcheck disable=2207
    tasks=( $(echo "${SLURM_TASKS_PER_NODE}" | tr ',' ' ') )
    local num count
    for val in ${tasks[*]}; do
        # Drop any "(xN)" suffix to get the per-node task count
        num="${val/(*)/}"
        # A value ending in ")" carries a repeat count; extract N
        if [[ -z "${val%%*)}" ]]; then
            count=$(echo "$val" | sed -E 's#[0-9]+\(x([0-9]+)\)#\1#')
        else
            count=1
        fi
        # Print the task count once per repeat
        # shellcheck disable=2046
        printf "$num%.0s " $(seq $count)
    done
}

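# A quick check of the expansion above, with hypothetical values, from
# an interactive shell (sourcing the file does not run main):
#
#   $ source parallel_opts.sh
#   $ SLURM_TASKS_PER_NODE="2,5(x1),3(x2)" expand_slurm_tasks_per_node
#   2 5 3 3
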
# Make list in the form of "cpu/host"
cpu_host_array () {
    local nodeset hosts cpus
    nodeset="python -m ClusterShell.CLI.Nodeset"
    # shellcheck disable=2207
    hosts=( $($nodeset -e "${SLURM_NODELIST}") )
    # shellcheck disable=2207
    cpus=( $(expand_slurm_tasks_per_node) )
    for ((i=0; i<${#hosts[*]}; ++i)); do
        echo "${cpus[i]}/${hosts[i]}"
    done
}

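# For example, with a hypothetical allocation of SLURM_NODELIST="node[01-02]"
# and SLURM_TASKS_PER_NODE="4(x2)", cpu_host_array prints:
#
#   4/node01
#   4/node02
#
# which is the "ncpus/hostname" form GNU Parallel accepts in an
# --sshloginfile.
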
prefix () {
    echo "${SLURM_JOB_NAME%.*}"
}

machinefile () {
    echo "$(prefix).sshloginfile"
}

write_machinefile () {
    cpu_host_array > "$(machinefile)"
}

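# The machinefile name is derived from the job name with its extension
# stripped, e.g. a job submitted as "blast.sh" (a hypothetical name)
# writes its host list to "blast.sshloginfile".
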
parallel_opts () {
    local machinefile
    machinefile=$(machinefile)
    echo "
--env _
--sshdelay 0.1
--sshloginfile $machinefile
--workdir .
"
}

main () {
    is_slurm_env && setup_on_cluster
    ! has_clustershell && install_clustershell
    write_machinefile
    parallel_opts
}

# Run main only when executed directly, not when sourced
[[ "$0" != "${BASH_SOURCE[0]}" ]] || main "$@"