#*  **************************************************************************
#*
#*  CDTK, Chemical Dynamics Toolkit
#*  A modular system for chemical dynamics applications and more
#*
#*  Copyright (C) 2011, 2012, 2013, 2014, 2015, 2016
#*  Oriol Vendrell, DESY, <oriol.vendrell@desy.de>
#*
#*  Copyright (C) 2017, 2018, 2019
#*  Ralph Welsch, DESY, <ralph.welsch@desy.de>
#*
#*  Copyright (C) 2020, 2021, 2022, 2023
#*  Ludger Inhester, DESY, ludger.inhester@cfel.de
#*
#*  This file is part of CDTK.
#*
#*  CDTK is free software: you can redistribute it and/or modify
#*  it under the terms of the GNU General Public License as published by
#*  the Free Software Foundation, either version 3 of the License, or
#*  (at your option) any later version.
#*
#*  This program is distributed in the hope that it will be useful,
#*  but WITHOUT ANY WARRANTY; without even the implied warranty of
#*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#*  GNU General Public License for more details.
#*
#*  You should have received a copy of the GNU General Public License
#*  along with this program.  If not, see <http://www.gnu.org/licenses/>.
#*
#*  **************************************************************************
import datetime
import uuid
import math
import random
import numpy as np
import CDTK.Dynamics.SimulationBox as sbo
import CDTK.Dynamics.Tools as ntool
import CDTK.Tools.Mathematics as mth
import CDTK.Tools.Conversion as con
import CDTK.Tools.Geometry as geo
import CDTK.Tools.NormalModeAnalysis as nmo
import scipy.fftpack as fft
KBAU = con.FPC_K_B_EV * con.ev2au # Boltzmann constant in au.
TINY = 1.0e-6
class IniConSampler(object):
    """
    Samples initial conditions according to a statistical distribution
    (Boltzmann or Wigner). The object can produce a set of files, each
    containing a SimulationBox object ready for propagation.
    """
    def __init__(self,x0,funcE,**opts):
        """
        Start sampler
        Input arguments
            x0 -- 3N array with initial geometry
            funcE -- function returning potential energy
        Options
            T -- temperature (Kelvin)
                default: 300
            M -- array, mass of every coordinate
                default: array of ones
            DIST -- string, statistical distribution for sampling positions
                default: boltzmann
                options: boltzmann, wigner
            REC -- bool, whether to collect samples, set to False for a dry run
                default: True
            NSAMPLES -- int, number of samples to collect
                default: 10
            RefEnergy -- reference energy to subtract from funcE during sampling.
                Important when the energy function returns energies of very large
                absolute value; otherwise the Boltzmann exponential may overflow.
                default: 0.0
            internals -- bool, whether internal coordinates are used; avoids
                assuming that the number of coordinates is a multiple of 3
                default: False
            ndim -- integer, spatial dimension
                default: 3
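
        Example (illustrative sketch; the energy function here is a toy harmonic
        potential standing in for a real PES, and the masses are made up):

            import numpy as np

            def toyPES(x):
                return 0.5 * np.dot(x, x)        # placeholder potential energy (hartree)

            x0 = np.zeros(6)                     # two atoms, 3N = 6 coordinates
            masses = 1836.0 * np.ones(6)         # per-coordinate masses in a.u.
            sampler = IniConSampler(x0, toyPES, T=300.0, M=masses,
                                    DIST='boltzmann', NSAMPLES=5, stepnorm=0.1)
            sampler.burnin(steps=1000)           # tune the Monte Carlo step norm
            boxes = sampler.sample(savefiles=False)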
        """
        self.X0 = x0.copy()
        self.funcE = funcE
        self.P0 = opts.get('P0',np.zeros(x0.size,float)) # Initial momenta
        self.QP0 = np.concatenate((self.X0,self.P0))
        self.TEMP = opts.get('T',300.0)  # Temperature for Boltzmann and Wigner sampling (Kelvin)
        self.M = opts.get('M',np.ones(x0.size,float)) # Masses (3N)
        self.DIST = opts.get('DIST','boltzmann')  # Distribution to be sampled
        self.REC = opts.get('REC',True)  # Set to False if run without recording samples
        self.NSAMPLES = opts.get('NSAMPLES',10)  # Number of samples to collect
        self.RefEnergy = opts.get('RefEnergy',0.0) # reference energy to subtract from funcE during sampling
        self.internals = opts.get('internals',False) # set true if internal coordinates
                                                      # are being used vs Cartesians
        self.ndim = opts.get('ndim',3) # spatial dimensions
        self._optimalStepNorm = opts.get('stepnorm',[1.0, 1.0])
        self.modes = opts.get('modes', range(len(self.X0)))
        self.qnum = opts.get('qnum',[]) # quantum number of harmonic modes
        self.HMode = opts.get('HMode',[]) # harmonic modes [energy, mass, normal coordinates]
        self.RefGeom = opts.get('RefGeom',[]) # reference geometry for Wigner sampling
        self.has_plusrotation = opts.get('plus_rotation',True) # additional rotation to quantum initial condition
        self.has_purevibration = opts.get('pure_vibration',False) # pure quantum vibration
        self._log = {} # information log of the sampling
        self._log['ncol'] = 0 # N collected samples
        self._log['ntot'] = 0 # N total calls during record
        self._log['NABS'] = 0 # N absolute total calls
        self._log['ratio'] = 0.0 # acceptance ratio ncol/ntot
        self._log['lastQP'] = self.QP0 # last accepted phase space point
        self._log['lastP'] = 0.0 # probability of last accepted geometry
        self._log['energy'] = [] # energy of the nth sample
        self._log['dist'] = [] # one distance for testing purpose
        self._ID = uuid.uuid4().hex # unique ID
        self._CTIME = datetime.datetime.today() # creation date/time
        self._BNAME = 'ic_%4i%02i%02i' % (self._CTIME.year,self._CTIME.month,self._CTIME.day)
    def sample(self,**opts):
        """
        Sample initial conditions and produce a list of files ready for use
        Options
            stepnorm -- norm of the Monte Carlo steps to be taken
                default: the value optimized in the burn-in phase
            NSAMPLES -- int, number of samples to collect
            savefiles -- bool, save the new SimulationBox objects to disk
                default: True
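
        Example (sketch, assuming the sampler was constructed with DIST='boltzmann'
        and has been burned in):

            boxes = sampler.sample(NSAMPLES=10, savefiles=False)
            for sb in boxes:
                print(sb.X)   # sampled positions
                print(sb.V)   # sampled velocities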
        """
        if self.DIST == 'wigner' or self.DIST == 'wigner0K':
            raise ValueError("sample is Boltzmann distribution. use sampleW for Wigner distribution.") 
        # Do some initialization
        stepnorm = opts.get('stepnorm',self._optimalStepNorm) # optimized in the burn-in phase
        self.NSAMPLES = opts.get('NSAMPLES',self.NSAMPLES)  # Number of samples to collect
        self.savefiles = opts.get('savefiles',True)
        ncoords = self.X0.size
        sboxes = []
        # Initialize sampler
        pfunc = self._getProbFunc()
        qp = self._log['lastQP']
        prob = pfunc(self._log['lastQP'])
        # Start sampling
        ncol = self._log['ncol']
        ntot = self._log['ntot']
        nabs = self._log['NABS']
        ratio = self._log['ratio']
        #
        while ncol < self.NSAMPLES:
            nabs += 1
            if self.REC: ntot += 1
            accepted,qp,prob = mth.metropolisStep(qp,prob,pfunc,stepnorm=stepnorm)
            if accepted:
                ncol += 1
                ratio = float(ncol)/float(ntot)
                self._log['lastQP'] = qp
                self._log['lastP'] = prob
                if self.REC:
                    sb = sbo.SimulationBox(ndim=self.ndim)
                    sb.X = qp[0:ncoords].copy()
                    sb.V = qp[ncoords:]/self.M
                    sb.M = self.M
                    sb.internals = self.internals
                    sb.removeCenterMassMotion()
                    sboxes.append(sb)
                    if self.savefiles:
                        sb.save()
        self._log['ncol'] = ncol
        self._log['ntot'] = ntot
        self._log['NABS'] = nabs
        self._log['ratio'] = ratio
        return sboxes 
    def burnin(self,**opts):
        """
        Run the sampler without collecting data and adjust the step norm to reach
        the target acceptance rate
        Options
            steps -- number of steps to be taken in this burn-in phase
                default: 1000
            increment -- number of steps used to compute the acceptance rate
                default: 50
            targetRate -- target acceptance rate
                default: 0.4
            startnorm -- initial norm of the Monte Carlo steps
                default: the stepnorm value passed to the constructor
        """
#        if self.DIST == 'wigner' or self.DIST == 'wigner0K':
#            raise ValueError("burnin is for Boltzmann distribution. use burninW for Wigner distribution.")
        if self.DIST == 'wigner' or self.DIST == 'wigner0K':
            hessian = opts.get('hessian', None)
            is_linear = opts.get('is_linear', None)
            modes = opts.get('modes', None)
            self.init_samplewigner(hessian, self.M, is_linear, modes)            
        steps = opts.get('steps',1000) # steps to be taken in this burn-in phase
        increment = opts.get('increment',50) # number of steps used to compute rate
        targetRate = opts.get('targetRate',0.4) # target acceptance rate
        startnorm = opts.get('startnorm',self._optimalStepNorm) # initial step norm; the optimized value is stored in self._optimalStepNorm on return
        pfunc = self._getProbFunc()
        qp = self._log['lastQP']
        if self.DIST == 'wigner' or self.DIST == 'wigner0K':
            qp = np.zeros(2*len(self.w_normalModes))
        prob = pfunc(qp)
        ncol = 0 # collected during whole burnin
        ntot = 0 # counter for total steps
        ninccol = 0 # collected during last increment
        ninctot = 0 # counter for steps during increment
        stepnorm = startnorm
        ratio = 0.0
        rate = 0.0  # keep rate defined even if steps < increment
        #
        while ntot < steps:
            ntot += 1
            ninctot += 1
            accepted,qp,prob = mth.metropolisStep(qp,prob,pfunc,stepnorm=stepnorm)
            if accepted:
                ncol += 1
                ninccol += 1
                ratio = float(ncol)/float(ntot)
                self._log['lastQP'] = qp
                self._log['lastP'] = prob
            if ninctot == increment:
                rate = float(ninccol)/float(ninctot) + TINY
                if rate/targetRate > 1.25:
                    stepnorm = stepnorm*1.2
                if targetRate/rate > 1.25:
                    stepnorm = stepnorm/1.2
                ninctot = 0
                ninccol = 0
        self._optimalStepNorm = stepnorm
        return rate, ratio 
    def sampleW(self,hessian, is_linear, modes, **opts):
        """
        Sample Wigner initial conditions and return lists of sampled positions
        and velocities
        Arguments
            hessian -- Cartesian Hessian matrix used to construct the harmonic modes
            is_linear -- bool, True if the molecule is linear
            modes -- list of mode indices to sample, or None for the default selection
        Options
            stepnorm -- norm of the Monte Carlo steps to be taken
            NSAMPLES -- int, number of samples to collect
            savefiles -- bool, save new SimulationBox objects to disk (default: True)
            NumRotationPresample -- int, number of rotational pre-samples (default: 30)
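
        Example (illustrative sketch; myPES, hessian, masses, and x0 are
        placeholders for a real potential, Cartesian Hessian, per-coordinate
        masses, and reference geometry, all in atomic units):

            sampler = IniConSampler(x0, myPES, T=300.0, M=masses, DIST='wigner0K',
                                    RefGeom=x0, NSAMPLES=20)
            X, V = sampler.sampleW(hessian, is_linear=False, modes=None)
            # a list of per-mode quantum numbers may be supplied through the
            # qnum option of the constructor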
        """
        X = []
        V = []
        if self.DIST == 'wigner' or self.DIST == 'wigner0K' or self.DIST == 'wignerHM':
            self.init_samplewigner(hessian, self.M, is_linear, modes)
        # Do some initialization
        stepnorm = opts.get('stepnorm',self._optimalStepNorm) # optimized in the burn-in phase
        Nr = opts.get('NumRotationPresample',30) # Number of rotational pre-samples
        self.NSAMPLES = opts.get('NSAMPLES',self.NSAMPLES)  # Number of samples to collect
        self.savefiles = opts.get('savefiles',True)
        ncoords = self.RefGeom.size
        sboxes = []
        # Initialize sampler
        qp = self._log['lastQP']
        # Start sampling
        ncol = self._log['ncol']
        ntot = self._log['ntot']
        nabs = self._log['NABS']
        ratio = self._log['ratio']
        #
        M_nmode    = self.M_normalModes
        w_nmode    = self.w_normalModes
        qnum_nmode = self.qnum
        print(ncol, self.NSAMPLES)
        print("M_nmode", M_nmode)
        print("w_normalModes", w_nmode)
        print(qnum_nmode)
        print(self.REC)
        while ncol < self.NSAMPLES:
            nabs += 1
            if self.REC: ntot += 1
            qp_nmode = mth.WignerQuantumStep(M_nmode,w_nmode,qnum_nmode)
#            print nabs
#            print qp_nmode
#            print "+++++"
            # transform backwards to cartesian coordinates
            # harmonic motion initial conditions
            qp_hm = self.nm2x_coord(qp_nmode) # reference geometry for rotational initial condition func
#            print nabs
#            print qp_hm
#            print "+++++"
            # rotational initial conditions
            m = self.M.copy()
            natoms = ncoords // self.ndim
            m.shape = (natoms, self.ndim)
            masses = m[:,0]
            x0_hm = qp_hm[0:ncoords]
            x0_hm.shape = (natoms, self.ndim)
            # 
            # Get moments of inertia and principal axes
            xref = np.asarray(x0_hm)
            mass = np.asarray(masses)
            xyz = geo.XYZGeometry()
            xyz.atomPositions = xref
            xyz.masses = mass
            xyz.centerGeometry()
            im,iv = xyz.momentsOfInertia()
            ix = iv[:,0]
            iy = iv[:,1]
            iz = iv[:,2]
            temp = self.TEMP
            # Define the rotational probability function
            def _probabilityFunctionRotation(w):
                """
                w -- angular velocities in au
                """
                tmp = 0.5 * im * w**2
                erot = tmp.sum()                       # rotational energy
                return math.exp( -erot/KBAU/temp )     # Boltzmann factor
            # Burn-in to find the step norm giving the desired acceptance ratio (~0.4 usually OK)
            w0 = np.array([0,0,0],float)
            nburn = 6000
            guessStepNorm = 0.0001
            targetRatio = 0.4
            incr = 200
            w,stepNorm,logB = _burner(w0,
                                     _probabilityFunctionRotation,
                                     nburn,
                                     guessStepNorm,
                                     targetRatio,
                                     incr)
            #
            xl, vl, logs = sampleRotation(x0_hm, masses, self.TEMP, Nr, burnerForWigner=True, angular_velocity=w, stepNorm=stepNorm)
            idx = int((Nr - 1) * random.random())
            xr = np.array(xl[idx])
            vr = np.array(vl[idx])
            xr = np.reshape(xr, (natoms*self.ndim))
            vr = np.reshape(vr, (natoms*self.ndim))
            # complete initial conditions
            if self.has_plusrotation: 
                qp = np.zeros(2*ncoords, dtype=float)
                for i in range(ncoords):
                    qp[i]         = xr[i]
                    qp[ncoords+i] = qp_hm[ncoords+i] + self.M[i] * vr[i]
            elif self.has_purevibration:
                qp = qp_hm.copy() 
            else:
                qp = np.zeros(2*ncoords, dtype=float)
                for i in range(ncoords):
                    qp[i]         = xr[i]
                    qp[ncoords+i] = qp_hm[ncoords+i]
            #    
            self._log['lastQP'] = qp
            if self.REC:
                ncol += 1
                ratio = float(ncol)/float(ntot)
                sb = sbo.SimulationBox(ndim=self.ndim)
                sb.X = qp[0:ncoords].copy()
                sb.V = qp[ncoords:]/self.M
                sb.M = self.M
                sb.internals = self.internals
#                sb.removeCenterMassMotion()
                X.append(sb.X)
                V.append(sb.V)
#                sboxes.append(sb)
#                if self.savefiles:
#                    sb.save()
        self._log['ncol'] = ncol
        self._log['ntot'] = ntot
        self._log['NABS'] = nabs
        self._log['ratio'] = ratio
#        return sboxes
        return X, V 
    def sampleWigner(self,hessian,nmfreq,nmcoord,is_linear,m,is_1d=False,**opts):
        """
        Sample Wigner initial conditions and return lists of positions and velocities.
        WARNING: THIS ONLY WORKS FOR 1D AND NEEDS TO BE UPDATED TO WORK FOR GENERAL CASES!!!
        Options.
            NSAMPLES -- int, number of samples to collect
        """
        
        X = []
        V = []
        if hessian is not None:
            self.init_samplewigner(hessian,m,is_linear)
        else :
            if is_linear:
                Ne = 5
            else:
                Ne = 6
            self.M_normalModes = np.asarray(m[Ne:])
            self.w_normalModes = np.asarray(nmfreq[Ne:])
            if is_1d:
                Ne = 0
                self.M_normalModes = np.asarray(m[:1])
                self.w_normalModes = np.asarray(nmfreq[:1])
            nm = np.asarray(nmcoord)
            Lx = np.dot(nm, np.diag(1.0/np.sqrt(np.asarray(m))))
            for i,mode in enumerate(Lx):
                nfactor = 1.0/np.linalg.norm(mode)
                Lx[i] = mode*nfactor
            self.x_normalModes = np.asarray(Lx[Ne:])
            if is_1d:
                self.x_normalModes = np.asarray(Lx[:1])
        # Do some initialization
        self.NSAMPLES = opts.get('NSAMPLES',self.NSAMPLES)  # Number of samples to collect
        Pm_ini  = np.zeros(6,dtype=float)
        x = np.array(self.RefGeom)
        Pm = Pm_ini
        ncoords = self.RefGeom.size
        M_nmode    = self.M_normalModes
        w_nmode    = self.w_normalModes
        qnum_nmode = self.qnum
        E = []
        i = 0
        while i < self.NSAMPLES:
### WARNING: THIS ONLY WORKS FOR 1D AND NEEDS TO BE UPDATED TO WORK FOR GENERAL CASES!!!
            P = np.random.uniform(-3.0,3.0,1)
            Q = np.random.uniform(-3.0,3.0,1)
            W = np.exp(-(P**2)) * np.exp(-(Q**2))
            r = np.random.uniform(0.0,1.0,1)
            if W>r:
                QQ = Q /  np.sqrt( w_nmode[0] * M_nmode[0] )
                VV = P * np.sqrt( w_nmode[0] / M_nmode[0])
#                E.append(0.5 * PP**2 / M_nmode[0] + 0.5 * M_nmode[0] * w_nmode**2 * QQ**2)
#                print 0.5 * PP**2 / M_nmode[0]
#                print 0.5 * M_nmode[0] * w_nmode**2 * QQ**2
#                exit(-1)
#                x += np.dot(self.x_normalModes.transpose(), QQ)
#                Pm += np.dot(self.x_normalModes.transpose(), PP)
                x = np.asarray(self.RefGeom) + np.dot(self.x_normalModes.transpose(), QQ)
                v = np.dot(self.x_normalModes.transpose(), VV)
#                x  = np.asarray(self.RefGeom) + ( Q * self.x_normalModes ) / np.sqrt( 2.0  * M_nmode[0] * (2.0 * np.pi * w_nmode[0])**2 )
#                Pm = Pm_ini + (P * self.x_normalModes * np.sqrt( M_nmode[0] * (2.0 * np.pi * w_nmode[0])**2 ) ) / np.sqrt(2)
                X.append( x )
                V.append( v )
#                print (np.linalg.norm(X[-1][0:3] - X[-1][3:6]) - np.linalg.norm(self.RefGeom[0:3] - self.RefGeom[3:6]))
#                print V[-1]
#                if i > 10:
#                    exit(-1)
#                print 0.5 * M_nmode[0] * np.dot(V[-1], V[-1])
#                print 0.5 * (M_nmode[0]/2.0) * w_nmode[0]**2 * (np.linalg.norm(X[-1][0:3] - X[-1][3:6]) - np.linalg.norm(self.RefGeom[0:3] - self.RefGeom[3:6]))**2
#                print np.linalg.norm(X[-1][0:3] - X[-1][3:6]),  np.linalg.norm(self.RefGeom[0:3] - self.RefGeom[3:6])
#                E.append( 0.5 * M_nmode[0] * np.dot(V[-1], V[-1]) + 0.5 * (M_nmode[0]/2.0) * w_nmode[0]**2 * (np.linalg.norm(X[-1][0:3] - X[-1][3:6]) - np.linalg.norm(self.RefGeom[0:3] - self.RefGeom[3:6]))**2)
#                E.append(0.5 * (M_nmode[0]/2.0) * w_nmode[0]**2 * (np.linalg.norm(X[-1][0:3] - X[-1][3:6]) - np.linalg.norm(self.RefGeom[0:3] - self.RefGeom[3:6]))**2)
#                print E
                i += 1
 #       E = np.array(E)
 #       print np.mean(E)
 #       print np.std(E)
 #       exit(-1)
        return X, V 
    def nm2x_coord(self,qp_nmode):
        """
        Transform from normal mode coordinates to Cartesian coordinates
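
        The loop below implements
            x = x0 + sum_i Q_i * L_i
            p = sum_i P_i * L_i
        where x0 is the reference geometry, L_i are the Cartesian normal-mode
        vectors stored in self.x_normalModes, and (Q_i, P_i) = qp_nmode is the
        normal-mode phase-space point.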
        """
        x_nmode = self.x_normalModes # harmonic coordinates
        w_nmode = self.w_normalModes # harmonic frequency
        M_nmode = self.M_normalModes # mass of harmonic modes
        #
        x0 = self.RefGeom # reference geometry
        ncoords = len(x0)
        nharmmodes = len(w_nmode)
#        print "x0=", x0
        x  = x0.copy()    # cartesian geometry
        p  = np.zeros(ncoords, dtype=float) # cartesian momentum
        for i in range(nharmmodes):
            M = M_nmode[i] * con.au2am
            x += qp_nmode[i] * x_nmode[i]            #/ np.sqrt(M)  
#            print i, M, x, qp_nmode[i], x_nmode[i]
            p += qp_nmode[nharmmodes+i] * x_nmode[i] #* np.sqrt(M) 
        qp = np.zeros(2*ncoords, dtype=float)
        for i in range(ncoords):
            qp[i]         = x[i]
            qp[ncoords+i] = p[i]
        return qp 
    def _getProbFunc(self,**opts):
        """
        Return the probability function f(x) used by the Metropolis sampler.
        Built as a closure over the current distribution settings.
        """
        if self.DIST == 'boltzmann':
            def pfunc(x):
                ## Remove center of mass motions; missing rotation in positions
                self._removal(x)
                xx = x[0:len(x)//2]
                pp = x[len(x)//2:]
                epot = self.funcE(xx)
                ekin = 0.5*np.dot(pp,pp/self.M)
                h = ekin + epot
                val = -(h-self.RefEnergy)/KBAU/self.TEMP
                return np.exp(val)
            return pfunc
        elif self.DIST == 'wigner0K':
            m = self.M_normalModes
            w = self.w_normalModes
            def pfunc(x):
                xx = x[0:len(x)//2]
                pp = x[len(x)//2:]
                return mth.wignerHarmonicND(xx,pp,m,w,hbar=1.0)
            return pfunc
        elif self.DIST == 'wigner':
            if self.TEMP == 0:
                raise ValueError("Use [wigner0K] for 0 Kelvin Wigner distribution.")
            m = self.M_normalModes
            w = self.w_normalModes
            def pfunc(x):
                xx = x[0:len(x)//2]
                pp = x[len(x)//2:]
                return mth.wignerHarmonicFTND(xx,pp,m,w,1.0/(self.TEMP*KBAU),hbar=1.0)
            return pfunc
        else:
            raise ValueError('Unknown or undefined probability distribution')
    def _removal(self, x):
        """
        Remove center of mass motion and rotation.
        """
        n = len(x)//6
        if n < 1:
            return 
        ## COM removal
        xx = x[0:len(x)//2]
        com = np.sum(xx.reshape(n,3) * self.M.reshape(n,3), axis=0) / np.sum(self.M.reshape(n,3), axis=0)
        xx -= np.tile(com, n)
        ## Overall rotation removal
        pp = x[len(x)//2:]
        pcom = np.sum(pp.reshape(n,3) * self.M.reshape(n,3), axis=0) / np.sum(self.M.reshape(n,3), axis=0)
        pp -= np.tile(pcom, n)
        vv = pp / self.M
        # L is the overall angular momentum
        L = np.zeros(3)
        # Theta is the inertia tensor
        Theta = np.zeros((3,3))
        for i in range(n):
            # particle position relative to the center of mass (xx is already COM-centered)
            r_com = xx[i*3:(i+1)*3]
            # accumulate overall angular momentum
            L += (self.M[i*3] * np.cross(r_com, vv[i*3:(i+1)*3]))
            # accumulate inertia tensor
            Theta += self.M[i*3] * (np.dot(r_com, r_com) * np.identity(3) - np.outer(r_com, r_com))
        # regularize Theta
        Theta += 1e-10 * np.identity(3)
        # v_p = v_p - Th^-1 * L (X) r_p
        for i in range(n):
            r_com = xx[i*3:(i+1)*3]
            try:
                vv[i*3:(i+1)*3] -= np.cross(np.dot(np.linalg.inv(Theta), L), r_com)
            except np.linalg.LinAlgError:
                print("Warning: singular inertia tensor in _removal; skipping rotation removal")
        x[0:len(x)//2] = xx
        x[len(x)//2:] = self.M * vv
    def init_wigner(self):
        """
        Set up harmonic mode frequencies, masses, and coordinates from self.HMode
        """
        energy_nmode = []
        mass_nmode   = []
        coor_nmode   = []
        for imode in range(len(self.HMode)):
            energy_nmode.append(self.HMode[imode][0])
            mass_nmode.append(self.HMode[imode][1])
            coor_nmode.append(self.HMode[imode][2])
        #
        self.w_normalModes = np.array(energy_nmode, dtype=float)
        self.M_normalModes = np.array(mass_nmode,   dtype=float)
        self.x_normalModes = np.array(coor_nmode,   dtype=float) 
    def init_samplewigner(self,hessian,m,is_linear,modes=None):
        """
        Compute harmonic normal modes from the Hessian and store their frequencies,
        reduced masses, and Cartesian displacement vectors on the sampler
        """
        if modes is not None:
            modlist = modes
        elif len(m) < 6:
            modlist = range(len(m))
        elif is_linear:
            modlist = range(5,len(m))
        else:
            modlist = range(6,len(m))
        w,L = nmo.normal_modes_from_HessianCart(hessian,m)
        Lx,mu = nmo.q_mw_to_x(m,L)
        self.w_normalModes = np.array(w[modlist], dtype=float)
        self.M_normalModes = np.array(mu[modlist],   dtype=float)
        self.x_normalModes = np.array(Lx[modlist],   dtype=float) 
    def sampleR(self,**opts):
        """
        Sample pure rotational initial conditions and produce a list of files ready for use
        Options
            stepnorm -- norm of the Monte Carlo steps to be taken
            NSAMPLES -- int, number of samples to collect
            savefiles -- bool, save new SimulationBox objects to disk (default: True)
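
        Example (sketch; myPES and masses are placeholders, and only rigid-body
        rotation of the initial geometry X0 is sampled):

            sampler = IniConSampler(x0, myPES, T=100.0, M=masses)
            boxes = sampler.sampleR(NSAMPLES=10, savefiles=False)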
        """
        # Do some initialization
        stepnorm = opts.get('stepnorm',self._optimalStepNorm) # optimized in the burn-in phase
        self.NSAMPLES = opts.get('NSAMPLES',self.NSAMPLES)  # Number of samples to collect
        self.savefiles = opts.get('savefiles',True)
        # Start sampling
        ncol = self._log['ncol']
        ntot = self._log['ntot']
        nabs = self._log['NABS']
        #
        ncoords = self.X0.size
        sboxes = []
        # Pure rotational initial conditions
        m = self.M.copy()
        natoms = ncoords // self.ndim
        m.shape = (natoms, self.ndim)
        masses = m[:,0]
        Nr = self.NSAMPLES 
        x0 = self.X0.copy()
        x0.shape = (natoms, self.ndim)
        # sampleRotation performs its own burn-in below to reach the desired
        # acceptance ratio (~0.4 usually OK)
        xl, vl, logs = sampleRotation(x0, masses, self.TEMP, Nr, nburn=10000, guessStepNorm=0.0001)
        #
        while ncol < self.NSAMPLES:
            xr = np.array(xl[ncol])
            vr = np.array(vl[ncol])
            xr = np.reshape(xr, (natoms*self.ndim))
            vr = np.reshape(vr, (natoms*self.ndim))
            nabs += 1
            if self.REC: ntot += 1
            # complete initial conditions
            qp = np.zeros(2*ncoords, dtype=float)
            for i in range(ncoords):
                qp[i]         = xr[i]
                qp[ncoords+i] = vr[i] * self.M[i]
            #    
            self._log['lastQP'] = qp
            if self.REC:
                ncol += 1
                ratio = float(ncol)/float(ntot)
                sb = sbo.SimulationBox(ndim=self.ndim)
                sb.X = qp[0:ncoords].copy()
                sb.V = qp[ncoords:]/self.M
                sb.M = self.M
                sb.internals = self.internals
                sb.removeCenterMassMotion()
                sboxes.append(sb)
                if self.savefiles:
                    sb.save()
        self._log['ncol'] = ncol
        self._log['ntot'] = ntot
        self._log['NABS'] = nabs
        self._log['ratio'] = ratio
        return sboxes 
    def sampler_boltzmann(self,hessian=None,is_linear=None,modes=None,**opts):
        """
        Sample initial conditions according to exp(-beta*H) (Boltzmann) or a
        harmonic Wigner distribution, depending on DIST, and return lists of
        sampled positions and velocities

        Arguments
        hessian, is_linear, modes -- harmonic-mode input, used for the Wigner distributions

        Options
        stepnorm -- norm of the Monte Carlo steps to be taken
        NSAMPLES -- int, number of samples to collect
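
        Example (sketch; myPES, x0, and masses are placeholders for a real
        potential energy function, geometry, and per-coordinate masses):

            sampler = IniConSampler(x0, myPES, T=300.0, M=masses,
                                    DIST='boltzmann', stepnorm=0.1)
            sampler.burnin(steps=1000)
            X, V = sampler.sampler_boltzmann(NSAMPLES=50)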
        """
        
        X = []
        V = []
    
#        if self.DIST == 'wigner' or self.DIST == 'wigner0K':
#            raise ValueError("sample is Boltzmann distribution. use sampleW for Wigner distribution.") 
        if self.DIST == 'wigner' or self.DIST == 'wigner0K' or self.DIST == 'wignerHM':
            self.init_samplewigner(hessian, self.M, is_linear, modes)
        # Do some initialization
        stepnorm = opts.get('stepnorm',self._optimalStepNorm) # optimized in the burn-in phase
        self.NSAMPLES = opts.get('NSAMPLES',self.NSAMPLES)  # Number of samples to collect
        ncoords = self.X0.size
        # Initialize sampler
        pfunc = self._getProbFunc()
        qp = self._log['lastQP']
        prob = pfunc(self._log['lastQP'])
        # Start sampling
        ncol = self._log['ncol'] 
        ntot = self._log['ntot'] 
        nabs = self._log['NABS'] 
        ratio = self._log['ratio']
        # Iterate until all samples are produced
        while ncol < self.NSAMPLES:
            nabs += 1
            if self.REC: ntot += 1
            if self.DIST == 'boltzmann':
                # perform MC step
                accepted,qp,prob = mth.metropolisStep(qp,prob,pfunc,stepnorm=stepnorm)
                xx = qp[0:len(qp)//2]
                pp = qp[len(qp)//2:]
            elif self.DIST == 'wigner0K':
                m = self.M_normalModes
                w = self.w_normalModes
                nm = self.x_normalModes
                qnum_nmode = self.qnum
                accepted,qp,prob = mth.metropolisStep(qp,prob,pfunc,stepnorm=stepnorm)
                xx = np.array(self.RefGeom) + np.dot(nm.transpose(), qp[0:len(qp)//2])
                pp = np.dot(nm.transpose(), qp[len(qp)//2:])
            elif self.DIST == 'wigner':
                m = self.M_normalModes
                w = self.w_normalModes
                nm = self.x_normalModes
                qnum_nmode = self.qnum
                accepted,qp,prob = mth.metropolisStep(qp,prob,pfunc,stepnorm=stepnorm)
                xx = np.array(self.RefGeom) + np.dot(nm.transpose(), qp[0:len(qp)//2])
                pp = np.dot(nm.transpose(), qp[len(qp)//2:])
            # if successful, then save data
            if accepted:
                ncol += 1
                ratio = float(ncol)/float(ntot)
                self._log['lastQP'] = qp
                self._log['lastP'] = prob
                if self.DIST == 'boltzmann':
                    self._log['energy'].append(-np.log(prob) * KBAU * self.TEMP)
                elif self.DIST == 'wigner0K':
                    e = np.dot(pp, pp) / (2.0 * self.M[0])
                    e += self.funcE(xx)
                    self._log['energy'].append(e) #mth.wignerE(xx,pp,m,w))
                elif self.DIST == 'wigner':
                    e = np.dot(pp, pp) / (2.0 * self.M[0])
                    e += self.funcE(xx)
                    self._log['energy'].append(e) #mth.wignerEFT(xx,pp,m,w))
#                self._log['dist'].append( np.linalg.norm(qp[0:3] - qp[3:6]) )
                X.append(np.array(xx))
                V.append(np.array(pp/self.M))
        # set some logging variables and return positions/velocities
        self._log['ncol'] = ncol
        self._log['ntot'] = ntot
        self._log['NABS'] = nabs
        self._log['ratio'] = ratio
        return X,V 
 
#
def sampleRotation(xref,mass,temp,nsample,**opts):
    """
    xref -- reference geometry to sample rotations, [[x1,y1,z1],[x2,y2,z2],...], a.u.
    mass -- atom masses [m1,m2,...], a.u.
    temp -- rotational temperature in Kelvin
    nsample -- number of initial conditions to generate
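
    Example (sketch with a made-up bent triatomic; coordinates and masses in a.u.):

        import numpy as np
        xref = np.array([[ 0.0, 0.0, 0.0],
                         [ 1.8, 0.0, 0.0],
                         [-0.5, 1.7, 0.0]])
        mass = np.array([29166.0, 1837.0, 1837.0])
        xlist, vlist, logs = sampleRotation(xref, mass, temp=50.0, nsample=100)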
    """
    nburn = opts.get('nburn',5000)
    targetRatio = opts.get('targetRatio',0.4)
    guessStepNorm = opts.get('guessStepNorm',0.0001)
    incr = opts.get('burnIncrement',200)
    has_burnerForWigner = opts.get('burnerForWigner',False)
    w = opts.get('angular_velocity',[])
    stepNorm = opts.get('stepNorm',0.0001)
    # Get moments of inertia and principal axes
    xref = np.asarray(xref)
    mass = np.asarray(mass)
    xyz = geo.XYZGeometry()
    xyz.atomPositions = xref
    xyz.masses = mass
    xyz.centerGeometry()
    im,iv = xyz.momentsOfInertia()
    ix = iv[:,0]
    iy = iv[:,1]
    iz = iv[:,2]
    # Define the probability function
    def _probabilityFunctionRotation(w):
        """
        w -- angular velocities in au
        """
        tmp = 0.5 * im * w**2
        erot = tmp.sum()                       # rotational energy
        return math.exp( -erot/KBAU/temp )     # Boltzmann factor
    def _erot(w):
        tmp = 0.5 * im * w**2
        return tmp.sum()
    logB = []
    if not has_burnerForWigner:
        # Burn-in to find the step norm giving the desired acceptance ratio (~0.4 usually OK)
        w0 = np.array([0,0,0],float)
        w,stepNorm,logB = _burner(w0,
                                 _probabilityFunctionRotation,
                                 nburn,
                                 guessStepNorm,
                                 targetRatio,
                                 incr)
    # Sample rotational velocities around principal axes
    wlist,logS = _sampler(w,
                          _probabilityFunctionRotation,
                          nsample,
                          stepNorm)
    x0 = xyz.atomPositions
    # Rotate ref. geometry such that p. axes are aligned with x,y,z
    xr = np.dot(iv.transpose(),x0.transpose()).transpose()
    # Transform angular velocities around principal axes to Cartesian velocities
    vlist1 = []
    for wvel in wlist:
        v = np.cross(xr,wvel)  # v = r x w
        vlist1.append(v)
    # Generate a random orientations and rotate velocities and ref. geometry
    xlist = []
    vlist = []
    for v1 in vlist1:
        u = 2*np.random.rand(3)-1 # random 3D vector
        phi = np.random.rand()*2*np.pi # random rotation angle
        R = mth.rodriguesMatrix(u,phi)
        x = np.dot(R,xr.transpose()).transpose()
        v = np.dot(R,v1.transpose()).transpose()
        xlist.append(x)
        vlist.append(v)
    logS['rotationalEnergies'] = list(map(_erot, wlist))
    return xlist,vlist,[logB,logS] 
def _burner(p0,pfunc,nburn,istep,targetRatio,incr,**opts):
    """
    Do burn-in steps to bring the sampling to the correct acceptance ratio
    p0 -- initial set of parameters
    pfunc -- probability function
    nburn -- number of burn-in steps
    istep -- initial step length in parameter space
    targetRatio -- target acceptance ratio
    incr -- increment at which the ratio is checked and readjusted (~nburn/10)
    Returns:
    p -- final set of parameters
    stepNorm -- final step size
    log -- dictionary with logging info
    """
    if nburn <= incr: raise ValueError
    log = {}
    log['ratio'] = []
    log['stepnorm'] = []
    prob = pfunc(p0)
    ncol = 0    # collected during whole burnin
    ntot = 0    # counter for total steps
    ninccol = 0 # collected during last increment
    ninctot = 0 # counter for steps during increment
    p = p0
    stepNorm = istep
    while ntot < nburn:
        ntot += 1
        ninctot += 1
        accepted,p,prob = mth.metropolisStep(p,prob,pfunc,stepnorm=stepNorm)
        if accepted:
            ncol += 1
            ninccol += 1
            ratio = float(ncol)/float(ntot)
            log['lastParam'] = p
            log['lastProb'] = prob
        if ninctot == incr:
            ratio = float(ninccol)/float(ninctot) + TINY
            log['ratio'].append(ratio)
            log['stepnorm'].append(stepNorm)
            if ratio/targetRatio > 1.15: stepNorm = stepNorm*1.05
            if targetRatio/ratio > 1.15: stepNorm = stepNorm/1.05
            ninctot = 0
            ninccol = 0
    return p,stepNorm,log
def _sampler(p0,pfunc,nsample,stepNorm):
    """
    Sample the parameter space starting from p0
    The sampler generates steps of length stepNorm in parameter space. An adequate
    stepNorm can be determined in the burn-in phase
    p0 -- initial set of parameters
    pfunc -- probability function
    nsample -- number of samples to collect
    stepNorm -- length of steps in parameter space
    """
    log = {}
    log['ratio'] = []
    # Initialize sampler
    p = p0
    plist = []
    prob = pfunc(p)
    # Start sampling
    ncol = 0
    ntot = 0
    while ncol < nsample:
        ntot += 1
        accepted,p,prob = mth.metropolisStep(p,prob,pfunc,stepnorm=stepNorm)
        if accepted:
            ncol += 1
            ratio = float(ncol)/float(ntot)
            log['ratio'].append(ratio)
            log['lastParam'] = p
            log['lastProb'] = prob
            plist.append(p)
    log['ncol'] = ncol
    log['ntot'] = ntot
    return plist,log
def get_omega(x0,m,hessian,
              quantum_numbers=None,
              is_linear=False,modes=None):
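    """
    Return harmonic frequencies, reduced masses, and Cartesian normal-mode
    vectors of the selected modes, obtained from the Cartesian Hessian.
    x0      -- np array,    Cartesian coordinates of the reference geometry
    m       -- np array,    mass of each Cartesian coordinate
    hessian -- np array,    Hessian matrix
    quantum_numbers -- list, quantum number of each mode (not used in the returned values)
    is_linear -- bool,      True if linear (e.g. diatomic) system
    modes   -- list,        mode indices to select, or None for the default selection
    """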
    if modes is not None:
        modlist = modes
    elif len(x0) < 6:
        modlist = range(len(m))
    elif is_linear:
        modlist = range(5,len(m))
    else:
        modlist = range(6,len(m))
    N = len(modlist)
    if quantum_numbers:
        qnum = np.asarray(quantum_numbers, dtype=int)
    else:
        qnum = np.zeros(N)
    w,L = nmo.normal_modes_from_HessianCart(hessian,m)
    Lx,mu = nmo.q_mw_to_x(m,L)
    w_int = w[modlist]
    mu_int = mu[modlist]
    Lx_int = Lx[modlist]
    return w_int, mu_int, Lx_int 
def sampler_zero_point_energy(nsample,x0,m,hessian,
                              quantum_numbers=None,
                              is_linear=False,modes=None,rpmd=False):
    """
    Sample independent normal modes according to quantum state energies
    nsample -- int,         number of positions and velocities to generate
    x0      -- np array,    Cartesian coordinates of the reference geometry
    m       -- np array,    Mass of each Cartesian coordinate
    hessian -- np array,    Hessian matrix
    quantum_numbers -- list, quantum number of each mode, increasing in energy
    is_linear -- bool,      True if linear (e.g. diatomic) system
    modes   -- list,        mode indices to sample, or None for the default selection
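
    Example (sketch; x0, m, and hessian are placeholders for a real reference
    geometry, per-coordinate masses, and Cartesian Hessian in atomic units):

        X, V = sampler_zero_point_energy(nsample=100, x0=x0, m=m, hessian=hessian,
                                         is_linear=False)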
    """
    X = []
    V = []
    if modes is not None:
        modlist = modes
    elif len(x0) < 6:
        modlist = range(len(m))
    elif is_linear:
        modlist = range(5,len(m))
    else:
        modlist = range(6,len(m))
    N = len(modlist)
    # if x0.size < 6:
    #     Ne = 0
    #     N = x0.size - Ne
    # elif is_linear:
    #     Ne = 5
    #     N = x0.size - Ne
    # else:
    #     Ne = 6
    #     N = x0.size - Ne
    if quantum_numbers:
        qnum = np.asarray(quantum_numbers, dtype=int)
    else:
        qnum = np.zeros(N)
    w,L = nmo.normal_modes_from_HessianCart(hessian,m)
    Lx,mu = nmo.q_mw_to_x(m,L)
    w_int = w[modlist]
    print(w_int)
    mu_int = mu[modlist]
    Lx_int = Lx[modlist]
    factr = np.sqrt( 1.0 + 2.0*qnum )
    facQtoX = 1.0/np.sqrt(mu_int * w_int)
    facPtoV = np.sqrt(w_int / mu_int)
    i = 0
    while i < nsample:
        ran = np.random.random(N)
        phi = ran* 2.0*np.pi
        Q = factr*np.sin(phi)
        P = factr*np.cos(phi)
        d = Q*facQtoX
        dv = P*facPtoV
        x = x0 + np.dot( Lx_int.transpose(), d )
        v = np.dot( Lx_int.transpose(), dv )
        X.append(x)
        V.append(v)
        i+=1
    return X,V 
def bwd_fft(modes, nb):
    """
    Transform from normal mode representation to bead representation using FFT.
    
    Input: 
    modes   --- current position/velocity values in normal mode representation
    nb      --- number of beads
    
    Output:
    beads   --- positions/velocities in bead representation
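
    Example (sketch): transform a made-up 4-bead normal-mode vector back to the
    bead representation.

        import numpy as np
        modes = np.array([0.5, 0.1, -0.2, 0.05])
        beads = bwd_fft(modes, 4)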
    """
    
    nf = float(nb)
    # for the classical case they are the same
    if nb == 1:
        return modes
    # rearrange to fit FFT structure
    modes_re = np.zeros(nb)
    
    modes_re[0] = modes[0]
    modes_re[1] = modes[1]
    modes_re[-1] = modes[nb//2]
    for i in range(1,nb):
        if i%2==0:
            modes_re[i] = modes[-i//2]
    for i in range (1,nb//2):
        modes_re[2*i+1] = modes[i+1]
    # apply RPMD scaling constants
    modes_re *= np.sqrt(nf)
    modes_re[1:-1] /= np.sqrt(2.0)
    # do backward  FFT
    var_bwd = fft.irfft(modes_re)
    return var_bwd 
def sampler_zero_point_energy_rpmd(nsample,x0,m,hessian,nb,beta,
                                   quantum_numbers=None,
                                   is_linear=False,modes=None):
    """
    Sample independent normal modes according to quantum state energies
    nsample -- int,         number of positions and velocities to generate
    x0      -- np array,    Cartesian coordinates of the reference geometry
    m       -- np array,    Mass of each Cartesian coordinate
    hessian -- np array,    Hessian matrix
    nb      -- array,       number of beads for each degree of freedom (all must be equal)
    beta    -- float,       inverse temperature 1/(kB*T) in atomic units
    quantum_numbers -- list, quantum number of each mode, increasing in energy
    is_linear -- bool,      True if linear (e.g. diatomic) system
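
    Example (sketch; x0, m, and hessian are placeholders for real molecular data;
    here every degree of freedom gets 4 beads at roughly room temperature):

        nb = 4 * np.ones(len(x0), dtype=int)
        beta = 1052.6   # 1/(kB*T) in atomic units for T ~ 300 K
        X_rp, V_rp = sampler_zero_point_energy_rpmd(nsample=10, x0=x0, m=m,
                                                    hessian=hessian, nb=nb, beta=beta)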
    """
    X_rp = []
    V_rp = []
    for i in range(len(nb)-1):
        if nb[i] != nb[i+1]:
            print("Error: Zero point energy for RPMD only implemented if all DOF have the same bead number")
            exit(-1)
    if modes is not None:
        modlist = modes
    elif len(x0) < 6:
        modlist = range(len(m))
    elif is_linear:
        modlist = range(5,len(m))
    else:
        modlist = range(6,len(m))
    N = len(modlist)
    # if x0.size < 6:
    #     Ne = 0
    #     N = x0.size - Ne
    # elif is_linear:
    #     Ne = 5
    #     N = x0.size - Ne
    # else:
    #     Ne = 6
    #     N = x0.size - Ne
    if quantum_numbers:
        qnum = np.asarray(quantum_numbers, dtype=int)
    else:
        qnum = np.zeros(N)
    w,L = nmo.normal_modes_from_HessianCart(hessian,m)
    Lx,mu = nmo.q_mw_to_x(m,L)
    w_int = w[modlist]
    print(w_int)
    mu_int = mu[modlist]
    Lx_int = Lx[modlist]
    factr = np.sqrt( 1.0 + 2.0*qnum )
    facQtoX = 1.0/np.sqrt(mu_int * w_int)
    facPtoV = np.sqrt(w_int / mu_int)
    j = 0
    while j < nsample:
        ran = np.random.random(N)
        phi = ran* 2.0*np.pi
        Q = factr*np.sin(phi)
        P = factr*np.cos(phi)
        d = Q*facQtoX
        dv = P*facPtoV
        # RPMD
        # iterate over every degree of freedom
        R_rp = []
        VV_rp = []
        for i in range(len(Q)):
            c = d[i]
            cv = dv[i]
            m = mu_int[i]
            w_k = np.zeros(nb[i])
            nf = float(nb[i])
#            print m, nf, beta, type(m), type(beta)
            sigma = np.zeros(nb[i])
            sigmav = np.sqrt(nf * m / beta)
            nm_sample = np.zeros(nb[i])
            nm_samplev = np.zeros(nb[i])
            # first coordinate is given centroid coordinate
            nm_sample[0] = np.sqrt(float(nb[i])) * c
            nm_samplev[0] = np.sqrt(float(nb[i])) * cv * m
            # set up width of distribution for each bead - free ring polymer distribution
            for k in range(nb[i]):
                w_k[k] = (2.0 * (nf/beta) * np.sin(float(k) * np.pi / nf))**2
                try:
                    w_k[k] += w_int[i]**2
                except:
                    pass
                if k > 0:
                    sigma[k] = np.sqrt(nf / (m * beta * w_k[k]))
                else:
                    sigma[k] = 0.0
            # sample normal mode distribution - all except the centroid
            nm_sample[1:] = np.random.normal(np.zeros(nb[i]-1), sigma[1:])
            nm_samplev[1:] = np.random.normal(np.zeros(nb[i]-1), sigmav, nb[i]-1)
            # transform back from normal modes
            R_rp.append(bwd_fft(nm_sample, nb[i]))
            VV_rp.append(bwd_fft(nm_samplev, nb[i]) / m)
        # todo transformation of RP here!
        # print R_rp
        # print
        # print "lx", Lx_int.transpose()
        # print 
        # print d
        # print np.dot( Lx_int.transpose(), np.array(R_rp) )
        # print x0
        x = np.repeat(x0, nb).reshape((len(x0), nb[0])) + np.dot( Lx_int.transpose(), np.array(R_rp) )
        v = np.dot( Lx_int.transpose(), np.array(VV_rp) )
        X_rp.append(x)
        V_rp.append(v)
        j+=1
        # print "---"
        # print x
        # print v
    return X_rp,V_rp 
def rotation(x0, v0, phi=None):
    """
    Isotropically sample one rotational degree of freedom. Currently limited to
    rotations about the y-axis.
    Input:
    x0 - list of input cartesian coordinates.
    v0 - list of input cartesian velocities.
    phi - optional list of given rotation angles; for testing purposes.
    Output:
    x - list of cartesian coordinates with added sampling.
    v - list of cartesian velocities rotated correctly.
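
    Example (sketch; x0 and v0 would normally come from one of the samplers above):

        import numpy as np
        x0 = [np.zeros(6), np.zeros(6)]                  # two samples of a 2-atom system
        v0 = [1.0e-4 * np.ones(6), 1.0e-4 * np.ones(6)]
        X, V = rotation(x0, v0)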
    """
    # N number of samples
    N = len(x0)
    # m number of atoms
    m = len(x0[0])//3
    # output lists
    X = []
    V = []
    if phi is None:
        # generate isotropic random angles
        ran = np.random.random(N)
        phi = ran * 2.0*np.pi
    # sample
    for i in range(N):
        # generate rotation matrix
        Ry = np.array([[np.cos(phi[i]), 0, -np.sin(phi[i])], [0, 1, 0], [np.sin(phi[i]), 0, np.cos(phi[i])]])
        # rotate and save 
        rot_x = np.dot(Ry, x0[i].reshape((m,3)).T)
        X.append(rot_x.T.reshape(3*m))
        rot_v = np.dot(Ry, v0[i].reshape((m,3)).T)
        V.append(rot_v.T.reshape(3*m))
    return X, V 
if __name__ == '__main__':
    import sys
    from CDTK.Dynamics.Models.testModels import tetrahedron as func
    sys.stdout.write('Testing IniConSampler\n')
    xl = [1,1,1,-1,-1,1,-1,1,-1,1,-1,-1]
    x0 = np.array(xl,float) / np.sqrt(8.0)
    sampler = IniConSampler(x0,func,T=500.0,DIST='boltzmann',stepnorm=0.1)
    sampler.burnin(steps=500)
    sampler.sample()
    sys.stdout.write(sampler.DIST+'\n')
    ncol  = sampler._log['ncol']
    ntot  = sampler._log['ntot']
    nabs  = sampler._log['NABS']
    ratio = sampler._log['ratio']
    sys.stdout.write('N Collected: %i\n' % (ncol,))
    sys.stdout.write('N Calls (collection): %i\n' % (ntot,))
    sys.stdout.write('N Calls (absolute): %i\n' % (nabs,))
    sys.stdout.write('Acceptance ratio: %6.3f\n' % (ratio,))
    sys.stdout.write('Step size: %6.3f\n' % (sampler._optimalStepNorm,))