Version 16Mar2018
submission script:
#!/bin/bash
#SBATCH --job-name=<job-name>
#SBATCH --output LAMMPS_%j.out
#SBATCH --error LAMMPS-%j.err
##SBATCH --mail-type=ALL
#SBATCH --qos=<queue>
#SBATCH --mail-user=<mail-user>
#SBATCH --ntasks=<# of MPI processes> # Number of MPI processes (CPUs)
#SBATCH --mem-per-cpu=2G # Per processor memory request
#SBATCH --time=96:00:00 # Walltime in hh:mm:ss or d-hh:mm:ss
########################Optional Part####################################
# Create the ./data and ./sav subdirectories if they do not already exist
if [ ! -d "./data" ]; then
    mkdir ./data   # reached only when ./data does not yet exist
fi
if [ ! -d "./sav" ]; then
    mkdir ./sav    # reached only when ./sav does not yet exist
fi
##########################################################################
module purge
module load intel/2018 openmpi/3.0.0 lammps/13Mar18
mpiexec lmp_ufhpc -in in.* > job.log
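To submit and monitor this job on a SLURM cluster such as HiPerGator, the standard SLURM commands apply (the script filename below is only an example):
sbatch lammps_job.sh          # submit the script above
squeue -u <username>          # check whether the job is pending or running
tail -f job.log               # follow the LAMMPS output while the job runs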
Version 16Mar2018
submission script:
#!/bin/bash
# For LAMMPS: the working directory must contain an input file matching in.*
#$ -N <job_name>
#$ -cwd
#$ -pe mpi <no._of_processors> (e.g. 8, 16)
#$ -S /bin/bash
#$ -q <queue_name> (e.g. all.q or single.q)
#$ -e lammps.err
#$ -o lammps.out
module load compilers/intel_2018
LAMMPS=/share/apps/lammps/lammps-16Mar18/src/lmp_intel_cpu_intelmpi
MPIRUN=mpiexec.hydra
$MPIRUN $LAMMPS < in.*
Version 11Aug2017
submission script:
#!/bin/bash
# For LAMMPS: the working directory must contain an input file matching in.*
#$ -N <job_name>
#$ -cwd
#$ -pe mpi <no._of_processors> (e.g. 8, 16)
#$ -S /bin/bash
#$ -q <queue_name> (e.g. all.q or single.q)
#$ -e lammps.err
#$ -o lammps.out
module load compilers/intel_16.0.3
export LAMMPS=/share/apps/lammps/lammps-11Aug17/src/lmp_mpi
MPIRUN=mpiexec.hydra
$MPIRUN $LAMMPS < in.*
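Both SGE scripts above are submitted the same way; assuming the script is saved as ‘qlammps’:
qsub qlammps                  # submit to the queue named by -q
qstat -u <username>           # check whether the job is queued or running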
LAMMPS Kokkos version submission script
#!/bin/bash
#SBATCH --job-name=3000k_10mol_100ps_NVT_oxygen_gas
#SBATCH --output=LAMMPS_%j.out
#SBATCH --error LAMMPS-%j.err
#SBATCH --account=michael.tonks
#SBATCH --qos=michael.tonks
#SBATCH --mail-type=END,FAIL
#SBATCH --mail-user=sly1993@ufl.edu
#SBATCH --ntasks=128
#SBATCH --cpus-per-task=1
#SBATCH --distribution=cyclic:cyclic
#SBATCH --mem-per-cpu=2048mb
#SBATCH --time=15-00:00:00
#SBATCH --partition=hpg2-compute
#SBATCH --nodes=6
########################Optional Part####################################
# Create the ./data and ./sav subdirectories if they do not already exist
if [ ! -d "./data" ]; then
    mkdir ./data   # reached only when ./data does not yet exist
fi
if [ ! -d "./sav" ]; then
    mkdir ./sav    # reached only when ./sav does not yet exist
fi
##########################################################################
export OMP_PROC_BIND=spread
module purge
module load intel/2019.1.144 openmpi/4.0.0 cmake/3.6.3
LAMMPS=/home/sly1993/bin/lmp_12_Dec_kk_mpi   # path to the KOKKOS-enabled LAMMPS executable
srun --mpi=pmix_v2 $LAMMPS -k on -sf kk -pk kokkos newton on neigh half comm no -in in.relax > job.log
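The srun line above runs one MPI task per core with the KOKKOS package using a single thread per task (--cpus-per-task=1). If you prefer OpenMP threads inside each MPI task, a minimal sketch is shown below, assuming --cpus-per-task in the job header is raised to match (the value 2 is only an example):
#SBATCH --cpus-per-task=2                    # in the header: reserve 2 cores per MPI task
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmix_v2 $LAMMPS -k on t $OMP_NUM_THREADS -sf kk -pk kokkos newton on neigh half comm no -in in.relax > job.log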
You must install LAMMPS yourself on Hermes; follow the steps below (a consolidated shell recap of steps 1-8 appears after step 13):
1) Download LAMMPS from http://lammps.sandia.gov/download.html
2) Create a software folder in your home directory (e.g. ‘mkdir ~/soft’)
3) Upload the tarball to the ‘soft’ directory using WinSCP (https://winscp.net/eng/download.php) or MobaXterm (http://mobaxterm.mobatek.net/)
4) Untar the LAMMPS tarball (e.g. ‘tar -xvf lammps-stable.tar.gz’)
5) Go to the LAMMPS src folder (e.g. ‘cd lammps-*/src’)
6) Compile LAMMPS (e.g. ‘make mpi’)
7) Create a bin folder in your home directory (e.g. ‘mkdir ~/bin’)
8) Copy the LAMMPS executable to the bin folder (e.g. ‘cp lmp_mpi ~/bin’)
9) Place your LAMMPS input file (e.g. ‘in.lammps’) in your working directory
10) Create a file named ‘qlammps’ in your working directory
11) Inside ‘qlammps’ enter the following:
Submission script:
#!/bin/sh
# For LAMMPS: the working directory must contain an input file matching in.*
#$ -S /bin/bash
#$ -q <queue_name> (e.g. all2.q, all.q, eight.q or single.q)
#$ -pe mpi <no._of_processors> (e.g. 8, 24, etc.)
#$ -cwd
#$ -e err.lammps
#$ -N <job_name>
export LAMMPS=/home/<username>/bin/lmp_mpi
export MPIRUN=/opt/intel/openmpi-1.4.4/bin/mpirun
$MPIRUN -machinefile $TMPDIR/machines -np $NSLOTS $LAMMPS < in.*
12) To run simply enter ‘qsub qlammps’
13) To check if your job is running use ‘qstat -u <username>’
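For reference, build steps 1-8 above correspond roughly to the following shell session (the tarball name is the stable-release name from the download page and may differ for the version you pick):
cd ~/soft
tar -xvf lammps-stable.tar.gz     # step 4: unpack the source
cd lammps-*/src                   # step 5: enter the src folder
make mpi                          # step 6: build the lmp_mpi executable
mkdir -p ~/bin                    # step 7: create ~/bin if it does not exist
cp lmp_mpi ~/bin/                 # step 8: install the executable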