The Lemaitre2 cluster

Specifications

This cluster is dedicated to massively parallel jobs (several dozen cores) with heavy inter-process communication and/or intensive parallel disk I/O.

  • 1344 Intel Xeon cores (112×12)
  • Memory: 48 GB/12 cores
  • Time limit: 5 days, soon to be reduced to 24 hours (see the Slurm query sketch below to check the current value)
  • Infiniband
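
To check the current partitions and their time limits yourself, the standard Slurm query commands can be used; a minimal sketch (the output format options are just one possible choice):

 # List partitions with node count, time limit, CPUs per node and memory
 sinfo -o "%P %D %l %c %m"
 # Show the full configuration of all partitions
 scontrol show partition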

Configuration .ac file for ifort 13

Here is an example of a configuration .ac file:

First, issue the following commands in your shell prompt:

 module purge
 export PATH=/usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/bin/intel64:$PATH
 export LD_LIBRARY_PATH=/usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/compiler/lib/intel64:$LD_LIBRARY_PATH
 export PATH=/usr/local/openmpi/1.6.4/intel-13.0.1.117/bin:$PATH
 export LD_LIBRARY_PATH=/usr/local/openmpi/1.6.4/intel-13.0.1.117/lib64:$LD_LIBRARY_PATH
 source /usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/mkl/bin/mklvars.sh intel64

with the associated .ac file:

enable_mpi="yes"
enable_mpi_io="yes"
with_mpi_prefix='/usr/local/openmpi/1.6.4/intel-13.0.1.117'
enable_64bit_flags="yes"

AR=ar
FC="mpif90"
CC="mpicc"
CXX="mpicxx"

enable_clib="yes"
with_fft_flavor="fftw3"
with_fft_libs="-L/usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/mkl/lib/intel64  -Wl,--start-group -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -Wl,--end-group -lpthread -lm"

with_linalg_flavor="mkl"
with_linalg_libs="-L/usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/mkl/lib/intel64 -Wl,--start-group -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -Wl,--end-group -lpthread -lm"

with_dft_flavor="libxc"

enable_gw_dpc="yes"
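
To build ABINIT with this file, one option is to rely on the usual ABINIT convention of placing it as ~/.abinit/build/<hostname>.ac so that configure picks it up automatically; the sketch below instead passes it explicitly with --with-config-file (the file name, source directory and -j value are assumptions, adapt them to your own tree):

 # Hypothetical paths; adapt to your own source tree and .ac file location
 cd abinit-6.12.3-public
 mkdir build && cd build
 ../configure --with-config-file=$HOME/lemaitre2.ac
 make -j 12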

Configuration .ac file for clusterstudio

First, issue the following commands in your shell prompt:

module purge
module load intel/clusterstudio/impi/4.1.0p-024
module load intel/clusterstudio/compiler/13.0.1.117
source /usr/local/intel/ics_2013.0.028/impi/4.1.0.024/bin64/mpivars.sh intel64

with the associated .ac file:

enable_mpi="yes"
enable_mpi_io="yes"
with_mpi_incs='-I/usr/local/intel/ics_2013.0.028/impi/4.1.0.024/include64/'
with_mpi_libs='-L/usr/local/intel/ics_2013.0.028/impi/4.1.0.024/lib64 -lmpi'
enable_64bit_flags="yes"

AR=ar
FC="mpiifort"
CC="mpiicc"
CXX="mpicxx"

enable_clib="yes"
with_fft_flavor="fftw3"
with_fft_libs="-L/usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/mkl/lib/intel64  -Wl,--start-group -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -Wl,--end-group -lpthread -lm"

with_linalg_flavor="mkl"
with_linalg_libs="-L/usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/mkl/lib/intel64 -Wl,--start-group -lmkl_intel_lp64 -lmkl_sequential -lmkl_core -Wl,--end-group -lpthread -lm"

with_dft_flavor="libxc"

enable_gw_dpc="yes"
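
Before running configure with this environment, a quick sanity check that the Intel MPI wrappers from Cluster Studio are the ones found in the PATH can save some debugging (nothing here is ABINIT-specific):

 # The wrappers should resolve to the impi 4.1.0.024 installation
 which mpiifort mpiicc
 # Print the underlying compiler and link line each wrapper will use
 mpiifort -show
 mpiicc -show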

Submission scripts

Lemaitre2 uses the Slurm batch system.

Here is an example submission script:

#!/bin/bash
#SBATCH --job-name=your_job_name
#SBATCH --mail-user=your_e_mail@blabla.com
#SBATCH --mail-type=ALL
#SBATCH --time=90:00:00
#SBATCH --ntasks=48
###SBATCH --ntasks-per-node=16
#SBATCH --cpus-per-task=1
####SBATCH --partition=High
#SBATCH --mem-per-cpu=4000

module purge
export PATH=/usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/bin/intel64:$PATH
export LD_LIBRARY_PATH=/usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/compiler/lib/intel64:$LD_LIBRARY_PATH
export PATH=/usr/local/openmpi/1.6.4/intel-13.0.1.117/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/openmpi/1.6.4/intel-13.0.1.117/lib64:$LD_LIBRARY_PATH
source /usr/local/intel/ics_2013.0.028/composer_xe_2013.1.117/mkl/bin/mklvars.sh intel64

#export OMP_NUM_THREADS=1
#unset SLURM_CPUS_PER_TASK

MPIRUN="mpirun"
MPIOPT="-n ${SLURM_NTASKS}"   # match the --ntasks request above
ABINIT="/home/ucl/naps/sponce/NAPS/abinit-6.12.3-public/build2/src/98_main/abinit"

${MPIRUN} ${MPIOPT} ${ABINIT} < input.files >& log
echo "--" 
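
Save the script under any name (job.sh below is just an example) and drive it with the usual Slurm commands:

 sbatch job.sh        # submit the job; Slurm prints the job ID
 squeue -u $USER      # check your pending and running jobs
 scancel <jobid>      # cancel the job if needed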