The MSE Flames cluster uses Rocks to manage jobs.
This is an example submission script for Hipergator which uses SLURM to manage jobs.
#!/bin/sh
# SLURM submission script for HiPerGator.
# Replace the <...> placeholders before submitting with: sbatch <this_file>
#SBATCH --job-name=<put_a_job_name_here> # Job name (replace placeholder)
#SBATCH --mail-type=NONE # Mail events (NONE, BEGIN, END, FAIL)
#SBATCH --ntasks=16 # Number of MPI ranks
#SBATCH --cpus-per-task=1 # Number of cores per MPI rank
#SBATCH --distribution=cyclic:cyclic # Distribute tasks cyclically on nodes
#SBATCH --mem-per-cpu=3000mb # Memory per processor
#SBATCH --time=8:00:00 # Time limit hrs:min:sec
#SBATCH --output=job.out # Standard output and error log
#SBATCH --qos=<put_a_queue_name_here> # Queue you are submitting to
echo "start_time: $(date)"
# Set up the compile/runtime chain (Intel compilers + OpenMPI).
module load intel/2016.0.109
module load openmpi/1.10.2
# --mpi=pmi2 selects the PMI-2 process-management interface for this OpenMPI build.
srun --mpi=pmi2 python Ni__eam__morse_exp_fs.py > log.out
echo "stop_time: $(date)"
COMMANDS FOR PBS
The following is a brief cheatsheet of common directives rather than an exhaustive reference.
This assumes that the VASP_STD_BIN environment variable is set in your shell environment (e.g. in your ~/.bashrc).
# PBS/Torque submission script for a standard VASP run.
# Requires the VASP_STD_BIN environment variable to point at the vasp_std binary.
#PBS -A PAA0028
#PBS -l walltime=01:00:00
#PBS -l nodes=1:ppn=40 -q debug
#PBS -N si_job_name
#PBS -e job.err
#PBS -o job.out
#PBS -S /bin/bash
# Change to the directory the job was submitted from; abort if it is unreachable
# so mpiexec never runs from the wrong working directory.
cd "$PBS_O_WORKDIR" || { echo "ERROR: cannot cd to $PBS_O_WORKDIR" >&2; exit 1; }
# Load the compiler/MPI toolchain (output suppressed to keep job logs clean).
module load intel/19.0.5 > /dev/null 2>&1
module load intelmpi/2019.3 > /dev/null 2>&1
# Fail fast if the VASP binary path was never exported.
if [ -z "$VASP_STD_BIN" ]; then
    echo "ERROR: VASP_STD_BIN is not set" >&2
    exit 1
fi
echo "VASP_STD_BIN=$VASP_STD_BIN"
mpiexec "$VASP_STD_BIN" > vasp.out