#!/bin/bash

# Copyright (C) 2007-2017 University of Oxford
# Authors: Dave Flitney, Stephen Smith, Matthew Webster and Duncan Mortimer
# Part of FSL - FMRIB's Software Library
# http://www.fmrib.ox.ac.uk/fsl
# fsl@fmrib.ox.ac.uk
#
# Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance
# Imaging of the Brain), Department of Clinical Neurology, Oxford
# University, Oxford, UK
#
#
# LICENCE
#
# FMRIB Software Library, Release 6.0 (c) 2018, The University of
# Oxford (the "Software")
#
# The Software remains the property of the Oxford University Innovation
# ("the University").
#
# The Software is distributed "AS IS" under this Licence solely for
# non-commercial use in the hope that it will be useful, but in order
# that the University as a charitable foundation protects its assets for
# the benefit of its educational and research purposes, the University
# makes clear that no condition is made or to be implied, nor is any
# warranty given or to be implied, as to the accuracy of the Software,
# or that it will be suitable for any particular purpose or for use
# under any specific conditions. Furthermore, the University disclaims
# all responsibility for the use which is made of the Software. It
# further disclaims any liability for the outcomes arising from using
# the Software.
#
# The Licensee agrees to indemnify the University and hold the
# University harmless from and against any and all claims, damages and
# liabilities asserted by third parties (including claims for
# negligence) which arise directly or indirectly from the use of the
# Software or the sale of any products based on the Software.
#
# No part of the Software may be reproduced, modified, transmitted or
# transferred in any form or by any means, electronic or mechanical,
# without the express permission of the University. The permission of
# the University is not required if the said reproduction, modification,
# transmission or transference is done without financial return, the
# conditions of this Licence are imposed upon the receiver of the
# product, and all original and amended source code is included in any
# transmitted product. You may be held legally responsible for any
# copyright infringement that is caused or encouraged by your failure to
# abide by these terms and conditions.
#
# You are not permitted under this Licence to use this Software
# commercially. Use for which any financial return is received shall be
# defined as commercial use, and includes (1) integration of all or part
# of the source code or the Software into a product for sale or license
# by or on behalf of Licensee to third parties or (2) use of the
# Software or any derivative of it for research with the final aim of
# developing software products for sale or license to a third party or
# (3) use of the Software or any derivative of it for research with the
# final aim of developing non-software products for sale or license to a
# third party, or (4) use of the Software to provide any service to an
# external organisation for which payment is received. If you are
# interested in using the Software commercially, please contact Oxford
# University Innovation ("OUI"), the technology transfer company of the
# University, to negotiate a licence. Contact details are:
# fsl@innovation.ox.ac.uk quoting Reference Project 9564, FSL.

export LC_ALL=C

###########################################################################
# Edit this file in order to setup FSL to use your local compute
# cluster.
###########################################################################

set +o errexit

###########################################################################
# The following section determines what to do when fsl_sub is called
# by an FSL program. If SGE_ROOT is set it will attempt to pass the
# commands onto the cluster, otherwise it will run the commands
# itself. There are two values for the METHOD variable, "SGE" and
# "NONE". Note that a user can unset SGE_ROOT if they don't want the
# cluster to be used.
###########################################################################

METHOD=SGE
unset module

if [[ "x$SGE_ROOT" = "x" ]] ; then
    METHOD=NONE
else
    QCONF=$(which qconf)
    if [[ "x$QCONF" = "x" ]]; then
        METHOD=NONE
        echo "Warning: SGE_ROOT environment variable is set but Grid Engine software not found, will run locally" >&2
    fi
fi

# stop submitted scripts from submitting jobs themselves
if [[ "X$FSLSUBALREADYRUN" = "Xtrue" ]] ; then
    METHOD=NONE
    echo "Warning: job on queue attempted to submit parallel jobs - running jobs serially instead" >&2
fi

if [[ "X$METHOD" = "XNONE" ]]; then
    QCONF=echo
fi

FSLSUBALREADYRUN=true
export FSLSUBALREADYRUN
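###########################################################################
# Example (illustrative only, not executed by this script): on a machine
# where Grid Engine is installed, a user who wants their jobs run locally
# can unset SGE_ROOT in their shell before invoking an FSL tool, e.g.
#
#   unset SGE_ROOT
#   fsl_sub -T 10 bet rawdata brain
#
# Here "bet rawdata brain" is just an assumed example command; any
# program submitted through fsl_sub behaves the same way.
###########################################################################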
###########################################################################
# The following auto-decides what cluster queue to use. The calling
# FSL program will probably use the -T option when calling fsl_sub,
# which tells fsl_sub how long (in minutes) the process is expected to
# take (in the case of the -t option, how long each line in the
# supplied file is expected to take). You need to setup the following
# list to map ranges of timings into your cluster queues - it doesn't
# matter how many you setup, that's up to you.
###########################################################################

map_qname () {
    if [[ "$1" -le 20 ]] ; then
        queue=veryshort.q
    elif [[ "$1" -le 120 ]] ; then
        queue=short.q
    elif [[ "$1" -le 1440 ]] ; then
        queue=long.q
    else
        queue=verylong.q
    fi
    queueCmd=" -q $queue "
    #echo "Estimated time was $1 mins: queue name is $queue"
}
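###########################################################################
# Customisation sketch (commented out; "batch.q" and "week.q" are
# hypothetical queue names, not part of any standard configuration):
# a site with only two queues could replace the mapping above with
# something like
#
#   map_qname () {
#       if [[ "$1" -le 240 ]] ; then
#           queue=batch.q
#       else
#           queue=week.q
#       fi
#       queueCmd=" -q $queue "
#   }
###########################################################################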
###########################################################################
# Don't change the following (but keep scrolling down!)
###########################################################################

if [[ ! -z "${POSIXLY_CORRECT}" ]]; then
    OLD_POSIXLY_CORRECT=${POSIXLY_CORRECT}
fi
POSIXLY_CORRECT=1
export POSIXLY_CORRECT

command=$(basename "$0")

usage () {
    cat <<EOF

Usage: $command [options] <command>

$command gzip *.img *.hdr
$command -q short.q gzip *.img *.hdr
$command -a darwin regscript rawdata outputdir ...

  -T <minutes>          Estimated job length in minutes, used to auto-set queue name
  -q <queuename>        Possible values for <queuename> are "verylong.q", "long.q"
                        and "short.q". See below for details.
                        Default is "long.q".
  -a <arch-name>        Architecture [e.g., darwin or lx24-amd64]
  -p <job-priority>     Lower priority [0:-1024] default = 0
  -M <email-address>    Who to email, default = $(whoami)@$(hostname -f | cut -d . -f 2-)
  -j <jid>              Place a hold on this task until job jid has completed
  -t <filename>         Specify a task file of commands to execute in parallel
  -N <jobname>          Specify jobname as it will appear on queue
  -R <size>             Max total RAM to use for job (integer in MB)
  -l <logdirname>       Where to output logfiles
  -m <mailoptions>      Change the SGE mail options, see qsub for details
  -z <output>           If <output> image or file already exists, do nothing and exit
  -F                    Use flags embedded in scripts to set SGE queuing options
  -s <pename>,<threads> Submit a multi-threaded task - requires a PE (<pename>)
                        to be configured for the requested queues.
                        <threads> specifies the number of threads to run
  -v                    Verbose mode.

Queues:

There are several batch queues configured on the cluster, each with
defined CPU time limits. All queues, except bigmem.q, have an 8GB
memory limit.

veryshort.q: This queue is for jobs which last under 30 mins.
short.q:     This queue is for jobs which last up to 4h.
long.q:      This queue is for jobs which last less than 24h. Jobs run
             with a nice value of 10.
verylong.q:  This queue is for jobs which will take longer than 24h CPU
             time. There is one slot per node, and jobs on this queue
             have a nice value of 15.
bigmem.q:    This queue is like the verylong.q but has no memory limits.

EOF
    exit 1
}

nargs=$#
if [[ "$nargs" -eq 0 ]] ; then
    usage
fi

# if the newer whitespace-safe getopt format is available, use it
if [[ $(getopt -T >/dev/null 2>&1; echo $?) == 4 ]]; then
    args=$(getopt -s bash T:q:a:p:M:j:t:z:N:R:Fvm:l:s: "$@")
    result=$?
    eval set -- "$args"
else
    args=$(getopt T:q:a:p:M:j:t:z:N:R:Fvm:l:s: "$@")
    result=$?
    set -- $args
fi

if [[ "$result" != 0 ]] ; then
    echo "What? Your arguments make no sense!"
fi

if [[ "$nargs" -eq 0 ]] || [[ $result != 0 ]] ; then
    usage
fi

if [[ -z "${OLD_POSIXLY_CORRECT}" ]]; then
    unset POSIXLY_CORRECT
else
    POSIXLY_CORRECT=${OLD_POSIXLY_CORRECT}
    export POSIXLY_CORRECT
fi

###########################################################################
# If you have a Parallel Environment configured for OpenMP tasks then
# the variable omp_pe should be set to the name you have defined for that
# PE. The script will work out which queues have that PE setup on them.
# Note, we support openmp tasks even when Grid Engine is not in use.
###########################################################################

omp_pe='openmp'
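###########################################################################
# Example (illustrative only; assumes the PE really is called "openmp"
# and that "mycommand" stands in for a real program): a caller can
# request a four-slot reservation for a multi-threaded job with
#
#   fsl_sub -s openmp,4 -T 60 mycommand
#
# You can check that the PE exists with "qconf -sp openmp"; the SGE
# branch below performs the same test before submitting.
###########################################################################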
###########################################################################
# If you wish to disable processor affinities under Grid Engine then
# comment the following line.
# This instructs Grid Engine to bind the task to the number of job slots
# allocated to the job (or PE)
###########################################################################

proc_affinities="-binding linear:slots"

###########################################################################
# The following sets up the default queue name, which you may want to
# change. It also sets up the basic emailing control.
###########################################################################

queue=long.q
queueCmd=" -q long.q "
mailto=$(whoami)@$(hostname -f | cut -d . -f 2-)
MailOpts="a"

###########################################################################
# In the following, you might want to change the behaviour of some
# flags so that they prepare the right arguments for the actual
# cluster queue submission program, in our case "qsub".
#
# -a sets the cluster submission flag for controlling the required
# hardware architecture (normally not set by the calling program)
#
# -p sets the priority of the job - ignore this if your cluster
# environment doesn't have priority control in this way.
#
# -j tells the cluster not to start this job until cluster job ID $jid
# has completed. You will need this feature.
#
# -t will pass on to the cluster software the name of a text file
# containing a set of commands to run in parallel; one command per
# line.
#
# -N option determines what the command will be called when you list
# running processes.
#
# -l tells the cluster what to call the standard output and standard
# error logfiles for the submitted program.
###########################################################################
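###########################################################################
# Worked example (illustrative only; "mycommand" and the job ID are
# placeholders): with the mappings implemented below, a call such as
#
#   fsl_sub -T 90 -R 4000 -j 12345 -N myjob mycommand
#
# translates roughly into these qsub options:
#
#   -T 90    ->  -q short.q          (via map_qname)
#   -R 4000  ->  -l mem_free=4000M
#   -j 12345 ->  -hold_jid 12345
#   -N myjob ->  -N myjob
###########################################################################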
if [[ -z "$FSLSUBVERBOSE" ]] ; then
    verbose=0
else
    verbose=$FSLSUBVERBOSE;
    echo "METHOD=$METHOD : args=$*" >&2
fi

scriptmode=0

while [[ "$1" != -- ]] ; do
    case "$1" in
        -z)
            if [[ -e "$2" || $("${FSLDIR}/bin/imtest" "$2") = 1 ]] ; then
                exit 0
            fi
            shift;;
        -T)
            map_qname "$2"
            shift;;
        -q)
            queue="$2"
            queueCmd=" -q $queue "
            "$QCONF" -sq "$queue" >/dev/null 2>&1
            if [[ $? -eq 1 ]]; then
                echo "Invalid queue specified!"
                exit 127
            fi
            shift;;
        -a)
            acceptable_arch=no
            available_archs=$(qhost | tail -n +4 | awk '{print $2}' | sort | uniq)
            for a in $available_archs; do
                if [[ "$2" = "$a" ]] ; then
                    acceptable_arch="yes"
                fi
            done
            if [[ "$acceptable_arch" = "yes" ]]; then
                sge_arch="-l arch=$2"
            else
                echo "Sorry arch of $2 is not supported on this SGE configuration!"
                echo "Should be one of: $available_archs"
                exit 127
            fi
            shift;;
        -p)
            # Not implemented
            shift;;
        -M)
            mailto=$2
            shift;;
        -j)
            jid=$2
            sge_hold="-hold_jid $jid"
            shift;;
        -t)
            taskfile=$2
            if [[ -f "$taskfile" ]] ; then
                tasks=$(wc -l "$taskfile" | awk '{print $1}')
                if [[ "$tasks" -ne 0 ]]; then
                    sge_tasks="-t 1-$tasks"
                else
                    echo "Task file ${taskfile} is empty"
                    echo "Should be a text file listing all the commands to run!"
                    exit -1
                fi
            else
                echo "Task file (${taskfile}) does not exist"
                exit -1
            fi
            shift;;
        -N)
            JobName=$2;
            shift;;
        -R)
            RAM="-l mem_free=${2}M"
            shift;;
        -m)
            MailOpts=$2;
            shift;;
        -l)
            LogOpts="-o $2 -e $2";
            LogDir="${2}/";
            if [[ ! -e "${2}" ]]; then
                mkdir -p "$2"
            else
                echo "${2}" | grep '/dev/null' >/dev/null 2>&1
                if [[ $? -eq 1 ]] && [[ -f "${2}" ]]; then
                    echo "Log destination is a file (should be a folder)"
                    exit -1
                fi
            fi
            shift;;
        -F)
            scriptmode=1;
            ;;
        -v)
            verbose=1
            ;;
        -s)
            pe_string=$2;
            peName=$(echo "$pe_string" | cut -d',' -f 1)
            peThreads=$(echo "$pe_string" | cut -d',' -f 2)
            shift;;
    esac
    shift  # next flag
done
shift

###########################################################################
# Don't change the following (but keep scrolling down!)
###########################################################################

commandline=("$@")
command="${commandline[0]}"

if [[ -z "$taskfile" ]] && [[ -z "$command" ]]; then
    echo "Either supply a command to run or a parallel task file"
    exit -1
fi

if [[ -z "$taskfile" ]] && [[ ! -x "$command" ]]; then
    which "$command" >/dev/null 2>&1
    if [[ $? -ne 0 ]]; then
        echo "The command you have requested cannot be found or is not executable"
        exit -1
    fi
fi

if [[ "x$JobName" = x ]] ; then
    if [[ "x$taskfile" != x ]] ; then
        JobName=$(basename "$taskfile")
    else
        JobName=$(basename "$command")
    fi
fi

if [[ -n "$tasks" ]] && [[ -n "${commandline[*]}" ]] ; then
    echo "Spurious input after parsing command line: \"${commandline[*]}\"!"
    echo "You appear to have specified both a task file and a command to run"
    exit -1
fi

if [[ -n "$peName" ]]; then
    # If the PE name is 'openmp' then limit the number of threads to those specified
    if [[ "X$peName" = "X$omp_pe" ]]; then
        OMP_NUM_THREADS=$peThreads
        export OMP_NUM_THREADS
    fi
fi

case "$METHOD" in

###########################################################################
# The following is the main call to the cluster, using the "qsub" SGE
# program. If $tasks has not been set then qsub is running a single
# command, otherwise qsub is processing a text file of parallel
# commands.
###########################################################################

    SGE)
        ###################################################################
        # Test Parallel environment options
        ###################################################################
        if [[ -n "$peName" ]]; then
            # Is this a configured PE?
            "$QCONF" -sp "$peName" >/dev/null 2>&1
            if [[ $? -eq 1 ]]; then
                echo "${commandline[*]}"
                echo "$peName is not a valid PE"
                exit -1
            fi

            # Get a list of queues configured for this PE and confirm that the queue
            # we have submitted to has that PE set up.
            qstat -g c -pe "$peName" >/dev/null 2>&1
            if [[ $? -eq 1 ]]; then
                echo "No parallel environments configured!"
                exit -1
            fi

            qstat -g c -pe "$peName" | sed '1,2d' | awk '{ print $1 }' | grep "^$queue" >/dev/null 2>&1
            if [[ $? -eq 1 ]]; then
                echo "${commandline[*]}"
                echo "PE $peName is not configured on $queue"
                exit -1
            fi

            # The -w e option will result in the job failing if there are insufficient slots
            # on any of the cluster nodes
            pe_options="-pe $peName $peThreads -w e"
        fi

        if [[ -z "$tasks" ]] ; then
            if [[ "$scriptmode" -ne 1 ]] ; then
                sge_command=(qsub -V -cwd -shell n -b y -r y $queueCmd $proc_affinities $pe_options -M $mailto -N "$JobName" -m $MailOpts $LogOpts $sge_arch $RAM $sge_hold)
            else
                sge_command=(qsub $proc_affinities $LogOpts $sge_arch $sge_hold)
            fi
            if [[ "$verbose" -eq 1 ]] ; then
                echo "sge_command: ${sge_command[*]}" >&2
                echo "executing: ${commandline[*]}" >&2
            fi
            exec "${sge_command[@]}" "${commandline[@]}" | awk '{print $3}'
        else
            sge_command=(qsub -V -cwd $queueCmd $proc_affinities $pe_options -M $mailto -N "$JobName" -m $MailOpts $LogOpts $sge_arch $RAM $sge_hold $sge_tasks)
            if [[ "$verbose" -eq 1 ]] ; then
                echo "sge_command: ${sge_command[*]}" >&2
                echo "control file: $taskfile" >&2
            fi
            exec "${sge_command[@]}" <<EOF1 | awk '{print $3}' | awk -F. '{print $1}'
#!/bin/sh

#$ -S /bin/sh

the_command=\$(sed -n -e "\${SGE_TASK_ID}p" $taskfile)

exec /bin/sh -c "\$the_command"

EOF1
        fi
        ;;

###########################################################################
# The following runs the commands directly if a cluster is not being
# used.
###########################################################################

    NONE)
        if [[ -z "$tasks" ]] ; then
            if [[ "$verbose" -eq 1 ]] ; then
                echo "executing: ${commandline[*]}" >&2
            fi

            "${commandline[@]}" > "${LogDir}${JobName}.o$$" 2> "${LogDir}${JobName}.e$$"
            ERR=$?
            if [[ "$ERR" -ne 0 ]] ; then
                cat "${LogDir}${JobName}.e$$" >&2
                exit $ERR
            fi
        else
            if [[ "$verbose" -eq 1 ]] ; then
                echo "Running commands in: $taskfile" >&2
            fi

            n=1
            while [[ "$n" -le "$tasks" ]] ; do
                line=$(sed -n -e ''${n}'p' "$taskfile")
                if [[ "$verbose" -eq 1 ]] ; then
                    echo "executing: $line" >&2
                fi
                /bin/sh <<EOF2 > "${LogDir}${JobName}.o$$.$n" 2> "${LogDir}${JobName}.e$$.$n"
$line
EOF2
                n=$((n+1))
            done
        fi
        echo $$
        ;;

esac

###########################################################################
# Done.
###########################################################################
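###########################################################################
# Usage sketch (illustrative only; "mytasks.txt", "logs" and
# "collate_results" are placeholder names): to run every line of a task
# file in parallel and then run a follow-up job once all tasks have
# finished, a pipeline might do
#
#   jid=$(fsl_sub -T 30 -t mytasks.txt -N stage1 -l logs)
#   fsl_sub -j $jid -N stage2 collate_results
#
# Under SGE the first call submits an array job (one task per line of
# mytasks.txt) and prints its job ID; with METHOD=NONE the same commands
# are simply run one after another on the local machine.
###########################################################################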