#!/bin/bash
#
#SBATCH --job-name=treecorr
#SBATCH --output=treecorr.o
#SBATCH --error=treecorr.e
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --mem=3G
#SBATCH --time=15:00:00
##SBATCH   --exclude=worker001,worker007,worker004 
#SBATCH   --constraint=datadisk 
#SBATCH --array=1-10

# ----------------------------------------------------------------
# File Name:   submit_MOCKS_script.sh
#
# Description: SLURM array-job submission script,
#              called by launch_MOCKS_script.sh
#==============================================================

# Environment set-up: initialise the module system, load the compiler
# and Python stacks, then move to the directory the job was submitted
# from (every later relative path assumes we are sitting in it).
source /usr/local/Modules/init/bash

module load intel
module load anaconda/3.9
# Abort early if the submit directory is unreachable; silently running
# in $HOME (what an unquoted failed cd would do) corrupts the run.
cd "$SLURM_SUBMIT_DIR" || exit 1
pwd

# -------------------------------------------------------------
# Positional arguments, passed through by launch_MOCKS_script.sh:
#   $1 - directory holding the first galaxy catalogue
#   $2 - directory holding the second galaxy catalogue
#   $3 - first galaxy catalogue file name
#   $4 - second galaxy catalogue file name
#   $5 - TreeCorr configuration file name
# NOTE(review): dir_in/dir_in2 are concatenated directly with the
# catalogue names later (rsync step), so they are expected to carry a
# trailing slash -- confirm against the launcher.
dir_in="$1"
dir_in2="$2"
GalCat="$3"
GalCat2="$4"
my_config="$5"
# Simulation suite this run belongs to.
model="cosmoSLICS"
#==============================================================

#Set-up where all the log files are sent
USER="jharno"
LOGDIR="/home/${USER}/cuillin_logs/Peaks/"
mkdir -p "$LOGDIR"

#Record in the main log where this mock was run
#NOTE(review): the original commented code used $hostname and $date,
#which expand to empty strings -- command substitution, i.e.
#$(hostname) / $(date), is what is needed if these are re-enabled.
#WORKERID=$(hostname)
#echo ' RUNNING MOCK scripts on worker' $WORKERID

#Also record which mock is running where and the start time
#STARTTIME=$(date)
#echo "$NBINS $WORKERID $STARTTIME" > $LOGDIR/MOCK_${LOS}_worker_ID

#==============================================================

#Set-up relevant paths
#code=/home/$USER/Galaxy_Bias/python_scripts/    #Path to the python code

# All the data will be stored locally on the worker here
DD="/data/jharno/KiDS/${model}/"
mkdir -p "$DD"

OUTPUT_DIR="/home/jharno/Projects/Codes/TreeCorr/"
mkdir -p "$OUTPUT_DIR"

#Path to input Catalogues. Quoting $my_config matters: if it were
#empty, an unquoted bare `cat` would block reading stdin instead of
#failing loudly.
echo 'Running treecorr on ' "$GalCat" '  with config ' "$my_config" ':'
cat "$my_config"
#==============================================================

#And then copy the data from the RAID to the local worker
#Use lockfiles so we don't have an I/O jam when lots of workers
#are working simultaneously

#==========
#Stagger the moment at which different array tasks try to take the
#lock: sleep a random 0.00-2.99 s so they do not all hammer the
#filesystem simultaneously.  ($[ ... ] arithmetic is deprecated;
#$(( ... )) is the supported form.)
ran=$(echo "$((RANDOM % 300))" | awk '{print $1/100}')
sleep "$ran"

# Take the I/O lock: mkdir is atomic, so whichever job creates the
# directory first owns the lock; everyone else polls once a second.
# mkdir's stderr is suppressed so a busy lock does not spam the log.
WORKERID=$(hostname)
LOCKFILEDIR="/home/${USER}/IO_lockfile"
until mkdir "$LOCKFILEDIR" 2>/dev/null; do
    sleep 1
done
# NOTE(review): the original wrote to locked_$WORKERID_$FIELD, but
# $WORKERID_ parses as the (unset) variable "WORKERID_", so the
# hostname was silently dropped from the marker name.  Braces fix the
# expansion.  $FIELD is never defined in this script -- confirm where
# it is meant to come from (environment of the launcher?).
echo "Locking succeeded" > "$LOCKFILEDIR/locked_${WORKERID}_${FIELD}"
# Unlocked, now time to transfer the data
#============

#Only copy over the data that we need
#Sync peaks
#(dir_in/dir_in2 are concatenated directly with the catalogue names,
# so they must carry a trailing slash)
rsync -a -W --no-compress "${dir_in}${GalCat}" "$DD"
rsync -a -W --no-compress "${dir_in2}${GalCat2}" "$DD"
#rsync -a -W --no-compress $halo_path/0.*$halo_file $DD/.
#rsync -a -W --no-compress $halo_path/0.221$halo_file $DD/.

echo "Done rsync"
ls "$DD"

# Once copying is completed delete the lockfile so another
# process can begin.  ${LOCKFILEDIR:?} aborts instead of letting an
# empty variable turn this into an argument-less rm -rf.
rm -rf "${LOCKFILEDIR:?}"
echo "removed lockfile"

#===============================================================
# All the data is now in place on the worker, so time to use it!

#rm KiDSLenS
#rm GalDir

#ln -s $DD ./HaloDir
#ln -s $DD ./KiDSLenS
#ln -s $DD ./GalDir

echo "2PCF:"

# Run Treecorr:

#cd $dir_in
# Work inside the local data directory; bail out if it is missing,
# otherwise corr2 would run -- and `rsync wtheta*` would harvest --
# from whatever directory we happened to be left in.
cd "$DD" || exit 1
# Config is suffixed per array task ("_it<N>") -- presumably written
# that way by launch_MOCKS_script.sh; confirm against the launcher.
# Copy the resulting wtheta files back home (glob left unquoted on
# purpose so it expands to all outputs).
corr2 "${SLURM_SUBMIT_DIR}/${my_config}_it${SLURM_ARRAY_TASK_ID}" ; wait ; rsync -rutv wtheta* "$OUTPUT_DIR"
#corr2 $SLURM_SUBMIT_DIR'/'$my_config ; wait ; rsync -rutv wtheta*  $OUTPUT_DIR
#corr2 $SLURM_SUBMIT_DIR'/'$my_config ; wait ; rsync -rutv xipm*  $OUTPUT_DIR

# Then erase the config file
#rm config_SLICS_xipm_peaks_DES_sbatch$SLURM_JOBID.yaml
#rm config_SLICS_xipm_peaks_DES_sbatch$SLURM_JOBID'_1.yaml'
#rm config_SLICS_xipm_peaks_DES_sbatch$SLURM_JOBID'_2.yaml'

echo "Done"
wait

rm -v "${SLURM_SUBMIT_DIR}/${my_config}"

#Remove data on the worker.  ${DD:?} aborts if DD is somehow empty, so
#the recursive delete can never expand to a path relative to / or the
#current directory; `--` protects against catalogue names starting
#with a dash.
rm -rf -- "${DD:?}${GalCat}"
rm -rf -- "${DD:?}${GalCat2}"











