From 6b1a256569dbdb562d13f3187fb6e1534ff49dfe 2024-04-08 19:09:35 From: sebastianVP Date: 2024-04-08 19:09:35 Subject: [PATCH] jrproc_parametes.py update y script EWDriftsMP_01feb2023.py --- diff --git a/schainpy/model/proc/jroproc_parameters.py b/schainpy/model/proc/jroproc_parameters.py index 92306b7..a83f0f4 100755 --- a/schainpy/model/proc/jroproc_parameters.py +++ b/schainpy/model/proc/jroproc_parameters.py @@ -1,3 +1,4 @@ +# MASTER import numpy import math from scipy import optimize, interpolate, signal, stats, ndimage @@ -17,7 +18,7 @@ from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state papamete from .jroproc_base import ProcessingUnit, Operation, MPDecorator from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon from schainpy.model.data.jrodata import Spectra -from scipy import asarray as ar,exp +#from scipy import asarray as ar,exp from scipy.optimize import fmin, curve_fit from schainpy.utils import log import warnings @@ -180,7 +181,6 @@ class ParametersProc(ProcessingUnit): self.__updateObjFromInput() self.dataOut.utctimeInit = self.dataIn.utctime self.dataOut.paramInterval = self.dataIn.timeInterval - return @@ -242,7 +242,6 @@ class RemoveWideGC(Operation): continue junk3 = numpy.squeeze(numpy.diff(j1index)) junk4 = numpy.squeeze(numpy.diff(j2index)) - valleyindex = j2index[numpy.where(junk4>1)] peakindex = j1index[numpy.where(junk3>1)] @@ -252,7 +251,6 @@ class RemoveWideGC(Operation): if numpy.size(isvalid) >1 : vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir]) isvalid = isvalid[vindex] - # clutter peak gcpeak = peakindex[isvalid] vl = numpy.where(valleyindex < gcpeak) @@ -302,8 +300,6 @@ class SpectralFilters(Operation): VelRange = dataOut.spc_range[2] # novalid corresponds to data within the Negative and PositiveLimit - - # Removing novalid data from the spectra for i in range(self.Num_Chn): self.spc[i,novalid,:] = dataOut.noise[i] @@ -435,7 +431,6 @@ class GaussianFit(Operation): # print 
('stop 2.1') fatspectra=1.0 # noise per channel.... we might want to use the noise at each range - # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used #if wnoise>1.1*pnoise: # to be tested later @@ -617,7 +612,7 @@ class GaussianFit(Operation): if Amplitude1<0.05: shift1,width1,Amplitude1,p1 = 4*[numpy.NaN] - # print ('stop 16 ') + # print ('stop 16 ') # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0) # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1) # SPCparam = (SPC_ch1,SPC_ch2) @@ -714,7 +709,6 @@ class Oblique_Gauss_Fit(Operation): return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3] - def Gauss_fit_2(self,spc,x,nGauss): @@ -1099,7 +1093,6 @@ class Oblique_Gauss_Fit(Operation): # fit bounds=([0,-numpy.inf,0,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf]) - params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max] x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,1.0e7]) popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0) @@ -1881,8 +1874,7 @@ class PrecipitationProc(Operation): self.i=0 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118, - tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30, - channel=None): + tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30,channel=None): # print ('Entering PrecepitationProc ... 
') @@ -1915,7 +1907,7 @@ class PrecipitationProc(Operation): self.Lambda = Lambda self.aL = aL self.tauW = tauW - self.ThetaT = ThetaT + self.ThetaT = ThetaT self.ThetaR = ThetaR self.GSys = 10**(36.63/10) # Ganancia de los LNA 36.63 dB self.lt = 10**(1.67/10) # Perdida en cables Tx 1.67 dB @@ -1958,7 +1950,7 @@ class PrecipitationProc(Operation): ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA) ETAd = ETAn * 6.18 * exp( -0.6 * D_Vz ) * delv_z # Radar Cross Section - sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4 + sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4 # Drop Size Distribution DSD = ETAn / sigmaD # Equivalente Reflectivy @@ -1979,7 +1971,7 @@ class PrecipitationProc(Operation): dataOut.data_output = RR[8] dataOut.data_param = numpy.ones([3,self.Num_Hei]) dataOut.channelList = [0,1,2] - + dataOut.data_param[0]=10*numpy.log10(Ze_org) dataOut.data_param[1]=-W dataOut.data_param[2]=RR @@ -2054,8 +2046,7 @@ class FullSpectralAnalysis(Operation): Parameters affected: Winds, height range, SNR """ - def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30, - minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None): + def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None): spc = dataOut.data_pre[0].copy() cspc = dataOut.data_pre[1] @@ -2098,14 +2089,14 @@ class FullSpectralAnalysis(Operation): if Height >= range_min and Height < range_max: # error_code will be useful in future analysis - [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList, + [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList, ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency) if abs(Vzon) < 100. 
and abs(Vmer) < 100.: velocityX[Height] = Vzon velocityY[Height] = -Vmer velocityZ[Height] = Vver - + # Censoring data with SNR threshold dbSNR [dbSNR < SNRdBlimit] = numpy.NaN @@ -2205,7 +2196,7 @@ class FullSpectralAnalysis(Operation): xSamples = xFrec # the frequency range is taken delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x - # only consider velocities with in NegativeLimit and PositiveLimit + # only consider velocities with in NegativeLimit and PositiveLimit if (NegativeLimit is None): NegativeLimit = numpy.min(xVel) if (PositiveLimit is None): @@ -2622,7 +2613,7 @@ class SpectralMoments(Operation): if (type1 is None): type1 = 0 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1 - if (snrth is None): snrth = -20.0 + if (snrth is None): snrth = -3 #-20.0 if (dc is None): dc = 0 if (aliasing is None): aliasing = 0 if (oldfd is None): oldfd = 0 @@ -3146,76 +3137,20 @@ class SpectralFitting(Operation): __dataReady = False bloques = None bloque0 = None - index = 0 - fint = 0 - buffer = 0 - buffer2 = 0 - buffer3 = 0 def __init__(self): Operation.__init__(self) self.i=0 self.isConfig = False - - def setup(self,dataOut,groupList,path,file,filec): + def setup(self,nChan,nProf,nHei,nBlocks): self.__dataReady = False - # new - self.nChannels = dataOut.nChannels - self.channels = dataOut.channelList - self.nHeights = dataOut.heightList.size - self.heights = dataOut.heightList - self.nProf = dataOut.nProfiles - self.nIncohInt = dataOut.nIncohInt - self.absc = dataOut.abscissaList[:-1] - - - #To be inserted as a parameter - try: - self.groupArray = numpy.array(groupList)#groupArray = numpy.array([[0,1],[2,3]]) - except: - print("Please insert groupList. 
Example (0,1),(2,3) format multilist") - dataOut.groupList = self.groupArray - self.crosspairs = dataOut.groupList - self.nPairs = len(self.crosspairs) - self.nGroups = self.groupArray.shape[0] - - #List of possible combinations - - self.listComb = itertools.combinations(numpy.arange(self.groupArray.shape[1]),2) - self.indCross = numpy.zeros(len(list(self.listComb)), dtype = 'int') - - #Parameters Array - dataOut.data_param = None - dataOut.data_paramC = None - dataOut.clean_num_aver = None - dataOut.coh_num_aver = None - dataOut.tmp_spectra_i = None - dataOut.tmp_cspectra_i = None - dataOut.tmp_spectra_c = None - dataOut.tmp_cspectra_c = None - dataOut.index = None - - if path != None: - sys.path.append(path) - self.library = importlib.import_module(file) - if filec != None: - self.weightf = importlib.import_module(filec) - - #Set constants - self.constants = self.library.setConstants(dataOut) - dataOut.constants = self.constants - self.M = dataOut.normFactor - self.N = dataOut.nFFTPoints - self.ippSeconds = dataOut.ippSeconds - self.K = dataOut.nIncohInt - self.pairsArray = numpy.array(dataOut.pairsList) - self.snrth = 20 + self.bloques = numpy.zeros([2, nProf, nHei,nBlocks], dtype= complex) + self.bloque0 = numpy.zeros([nChan, nProf, nHei, nBlocks]) def __calculateMoments(self,oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None): - if (nicoh is None): nicoh = 1 - if (graph is None): graph = 0 + if (graph is None): graph = 0 if (smooth is None): smooth = 0 elif (self.smooth < 3): smooth = 0 @@ -3226,65 +3161,61 @@ class SpectralFitting(Operation): if (aliasing is None): aliasing = 0 if (oldfd is None): oldfd = 0 if (wwauto is None): wwauto = 0 - + if (n0 < 1.e-20): n0 = 1.e-20 - freq = oldfreq vec_power = numpy.zeros(oldspec.shape[1]) vec_fd = numpy.zeros(oldspec.shape[1]) vec_w = numpy.zeros(oldspec.shape[1]) vec_snr = numpy.zeros(oldspec.shape[1]) - 
oldspec = numpy.ma.masked_invalid(oldspec) for ind in range(oldspec.shape[1]): - spec = oldspec[:,ind] aux = spec*fwindow max_spec = aux.max() m = list(aux).index(max_spec) - - #Smooth + #Smooth if (smooth == 0): spec2 = spec else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth) - + # Calculo de Momentos bb = spec2[list(range(m,spec2.size))] bb = (bb m): ss1 = m - - valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1 + + valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1 power = ((spec2[valid] - n0)*fwindow[valid]).sum() fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power) - snr = (spec2.mean()-n0)/n0 - - if (snr < 1.e-20) : + snr = (spec2.mean()-n0)/n0 + + if (snr < 1.e-20) : snr = 1.e-20 - + vec_power[ind] = power vec_fd[ind] = fd vec_w[ind] = w vec_snr[ind] = snr - + moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w)) - return moments + return moments def __DiffCoherent(self, spectra, cspectra, dataOut, noise, snrth, coh_th, hei_th): @@ -3305,12 +3236,12 @@ class SpectralFitting(Operation): coh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float') coh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex') coh_aver = numpy.zeros([nChan, nHei]) - + incoh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float') incoh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex') incoh_aver = numpy.zeros([nChan, nHei]) power = numpy.sum(spectra, axis=1) - + if coh_th == None : coh_th = numpy.array([0.75,0.65,0.15]) # 0.65 if hei_th == None : hei_th = numpy.array([60,300,650]) for ic in range(nPairs): @@ -3328,7 +3259,7 @@ class SpectralFitting(Operation): if len(indv[0]) == 0 : valid = numpy.concatenate((valid,valid2[iv]), axis=None) if len(valid)>0: - my_coh_aver[pair[0],valid]=1 + my_coh_aver[pair[0],valid]=1 my_coh_aver[pair[1],valid]=1 # si la coherencia es mayor a la coherencia threshold los datos se 
toman coh = numpy.squeeze(numpy.nansum(cspectra[ic,:,:], axis=0)/numpy.sqrt(numpy.nansum(spectra[pair[0],:,:], axis=0)*numpy.nansum(spectra[pair[1],:,:], axis=0))) @@ -3341,7 +3272,7 @@ class SpectralFitting(Operation): if len(valid)>0: my_coh_aver[pair[0],hvalid[valid]] =1 my_coh_aver[pair[1],hvalid[valid]] =1 - + coh_echoes = (my_coh_aver[pair[0],:] == 1).nonzero() incoh_echoes = (my_coh_aver[pair[0],:] != 1).nonzero() incoh_echoes = incoh_echoes[0] @@ -3352,7 +3283,7 @@ class SpectralFitting(Operation): my_incoh_aver[pair[0],incoh_echoes] = 1 my_incoh_aver[pair[1],incoh_echoes] = 1 - + for ic in range(nPairs): pair = crosspairs[ic] @@ -3390,8 +3321,8 @@ class SpectralFitting(Operation): incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes] incoh_aver[pair[0],incoh_echoes]=1 incoh_aver[pair[1],incoh_echoes]=1 - return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver + return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver def __CleanCoherent(self,snrth, spectra, cspectra, coh_aver,dataOut, noise,clean_coh_echoes,index): @@ -3402,9 +3333,10 @@ class SpectralFitting(Operation): nChan = len(channels) crosspairs = dataOut.groupList nPairs = len(crosspairs) - + absc = dataOut.abscissaList[:-1] data_param = numpy.zeros((nChan, 4, spectra.shape[2])) + clean_coh_spectra = spectra.copy() clean_coh_cspectra = cspectra.copy() clean_coh_aver = coh_aver.copy() @@ -3433,14 +3365,14 @@ class SpectralFitting(Operation): # Checking spectral widths if (spwd[pair[0],ih] > spwd_th[0]) or (spwd[pair[1],ih] > spwd_th[0]) : # satelite - clean_coh_spectra[pair,ih,:] = 0.0 - clean_coh_cspectra[ic,ih,:] = 0.0 + clean_coh_spectra[pair,:,ih] = 0.0 + clean_coh_cspectra[ic,:,ih] = 0.0 clean_coh_aver[pair,ih] = 0 else : if ((spwd[pair[0],ih] < spwd_th[1]) or (spwd[pair[1],ih] < spwd_th[1])) : # 
Especial event like sun. - clean_coh_spectra[pair,ih,:] = 0.0 - clean_coh_cspectra[ic,ih,:] = 0.0 + clean_coh_spectra[pair,:,ih] = 0.0 + clean_coh_cspectra[ic,:,ih] = 0.0 clean_coh_aver[pair,ih] = 0 return clean_coh_spectra, clean_coh_cspectra, clean_coh_aver @@ -3449,9 +3381,9 @@ class SpectralFitting(Operation): rfunc = cspectra.copy() n_funct = len(rfunc[0,:,0,0]) - val_spc = spectra*0.0 + val_spc = spectra*0.0 val_cspc = cspectra*0.0 - in_sat_spectra = spectra.copy() + in_sat_spectra = spectra.copy() in_sat_cspectra = cspectra.copy() min_hei = 200 @@ -3464,13 +3396,14 @@ class SpectralFitting(Operation): nPairs = len(crosspairs) hval=(heights >= min_hei).nonzero() ih=hval[0] + for ih in range(hval[0][0],nHei): for ifreq in range(nProf): for ii in range(n_funct): - + func2clean = 10*numpy.log10(numpy.absolute(rfunc[:,ii,ifreq,ih])) val = (numpy.isfinite(func2clean)==True).nonzero() - if len(val)>0: + if len(val)>0: min_val = numpy.around(numpy.amin(func2clean)-2) #> (-40) if min_val <= -40 : min_val = -40 max_val = numpy.around(numpy.amax(func2clean)+2) #< 200 @@ -3478,22 +3411,22 @@ class SpectralFitting(Operation): step = 1 #Getting bins and the histogram x_dist = min_val + numpy.arange(1 + ((max_val-(min_val))/step))*step - y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step)) + y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step)) mean = numpy.sum(x_dist * y_dist) / numpy.sum(y_dist) sigma = numpy.sqrt(numpy.sum(y_dist * (x_dist - mean)**2) / numpy.sum(y_dist)) parg = [numpy.amax(y_dist),mean,sigma] try : gauss_fit, covariance = curve_fit(fit_func, x_dist, y_dist,p0=parg) mode = gauss_fit[1] - stdv = gauss_fit[2] + stdv = gauss_fit[2] except: mode = mean - stdv = sigma + stdv = sigma #Removing echoes greater than mode + 3*stdv factor_stdv = 2.5 noval = (abs(func2clean - mode)>=(factor_stdv*stdv)).nonzero() - + if len(noval[0]) > 0: novall = ((func2clean - mode) >= 
(factor_stdv*stdv)).nonzero() cross_pairs = crosspairs[ii] @@ -3512,11 +3445,12 @@ class SpectralFitting(Operation): out_cspectra = numpy.zeros([nPairs,nProf,nHei], dtype=complex) #+numpy.nan for ih in range(nHei): for ifreq in range(nProf): - for ich in range(nChan): - tmp = spectra[:,ich,ifreq,ih] - valid = (numpy.isfinite(tmp[:])==True).nonzero() + for ich in range(nChan): + tmp = spectra[:,ich,ifreq,ih] + valid = (numpy.isfinite(tmp[:])==True).nonzero() if len(valid[0]) >0 : out_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0]) + for icr in range(nPairs): tmp = numpy.squeeze(cspectra[:,icr,ifreq,ih]) valid = (numpy.isfinite(tmp)==True).nonzero() @@ -3525,10 +3459,10 @@ class SpectralFitting(Operation): #Removing fake coherent echoes (at least 4 points around the point) val_spectra = numpy.sum(val_spc,0) val_cspectra = numpy.sum(val_cspc,0) - + val_spectra = self.REM_ISOLATED_POINTS(val_spectra,4) val_cspectra = self.REM_ISOLATED_POINTS(val_cspectra,4) - + for i in range(nChan): for j in range(nProf): for k in range(nHei): @@ -3544,10 +3478,11 @@ class SpectralFitting(Operation): tmp_sat_spectra = tmp_sat_spectra*numpy.nan tmp_sat_cspectra = cspectra.copy() tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan + val = (val_spc > 0).nonzero() - if len(val[0]) > 0: + if len(val[0]) > 0: tmp_sat_spectra[val] = in_sat_spectra[val] - + val = (val_cspc > 0).nonzero() if len(val[0]) > 0: tmp_sat_cspectra[val] = in_sat_cspectra[val] @@ -3559,7 +3494,7 @@ class SpectralFitting(Operation): for ifreq in range(nProf): for ich in range(nChan): tmp = numpy.squeeze(tmp_sat_spectra[:,ich,ifreq,ih]) - valid = (numpy.isfinite(tmp)).nonzero() + valid = (numpy.isfinite(tmp)).nonzero() if len(valid[0]) > 0: sat_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0]) @@ -3568,40 +3503,45 @@ class SpectralFitting(Operation): valid = (numpy.isfinite(tmp)).nonzero() if len(valid[0]) > 0: sat_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0]) + return out_spectra, 
out_cspectra,sat_spectra,sat_cspectra - + def REM_ISOLATED_POINTS(self,array,rth): if rth == None : rth = 4 num_prof = len(array[0,:,0]) num_hei = len(array[0,0,:]) n2d = len(array[:,0,0]) - + for ii in range(n2d) : tmp = array[ii,:,:] tmp = numpy.reshape(tmp,num_prof*num_hei) indxs1 = (numpy.isfinite(tmp)==True).nonzero() - indxs2 = (tmp > 0).nonzero() + indxs2 = (tmp > 0).nonzero() indxs1 = (indxs1[0]) indxs2 = indxs2[0] indxs = None + for iv in range(len(indxs2)): indv = numpy.array((indxs1 == indxs2[iv]).nonzero()) if len(indv[0]) > 0 : indxs = numpy.concatenate((indxs,indxs2[iv]), axis=None) + indxs = indxs[1:] if len(indxs) < 4 : array[ii,:,:] = 0. return - - xpos = numpy.mod(indxs ,num_hei) - ypos = (indxs / num_hei) + + xpos = numpy.mod(indxs ,num_prof) + ypos = (indxs / num_prof) sx = numpy.argsort(xpos) # Ordering respect to "x" (time) xpos = xpos[sx] ypos = ypos[sx] - # *********************************** Cleaning isolated points ********************************** + + # *********************************** Cleaning isolated points ********************************** ic = 0 - while True : + while True : r = numpy.sqrt(list(numpy.power((xpos[ic]-xpos),2)+ numpy.power((ypos[ic]-ypos),2))) + no_coh1 = (numpy.isfinite(r)==True).nonzero() no_coh2 = (r <= rth).nonzero() no_coh1 = numpy.array(no_coh1[0]) @@ -3615,32 +3555,33 @@ class SpectralFitting(Operation): if len(no_coh) < 4 : xpos[ic] = numpy.nan ypos[ic] = numpy.nan - - ic = ic + 1 - if (ic == len(indxs)) : + + ic = ic + 1 + if (ic == len(indxs)) : break indxs = (numpy.isfinite(list(xpos))==True).nonzero() if len(indxs[0]) < 4 : array[ii,:,:] = 0. 
return - + xpos = xpos[indxs[0]] ypos = ypos[indxs[0]] for i in range(0,len(ypos)): - ypos[i]=int(ypos[i]) + ypos[i]=int(ypos[i]) junk = tmp tmp = junk*0.0 - - tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))] + + tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))] array[ii,:,:] = numpy.reshape(tmp,(num_prof,num_hei)) + return array def moments(self,doppler,yarray,npoints): ytemp = yarray val = (ytemp > 0).nonzero() val = val[0] - if len(val) == 0 : val = range(npoints-1) - + if len(val) == 0 : val = range(npoints-1) + ynew = 0.5*(ytemp[val[0]]+ytemp[val[len(val)-1]]) ytemp[len(ytemp):] = [ynew] @@ -3653,317 +3594,1492 @@ class SpectralFitting(Operation): smom = numpy.sum(doppler*doppler*ytemp)/numpy.sum(ytemp) return [fmom,numpy.sqrt(smom)] - def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None, filec=None,coh_th=None, hei_th=None,taver=None,proc=None,nhei=None,nprofs=None,ipp=None,channelList=None): - if not self.isConfig: - self.setup(dataOut = dataOut,groupList=groupList,path=path,file=file,filec=filec) - self.isConfig = True - - if not numpy.any(proc): - if numpy.any(taver): - taver = int(taver) - else : - taver = 5 - tini = time.localtime(dataOut.utctime) - if (tini.tm_min % taver) == 0 and (tini.tm_sec < 5 and self.fint==0): - self.index = 0 - jspc = self.buffer - jcspc = self.buffer2 - jnoise = self.buffer3 - self.buffer = dataOut.data_spc - self.buffer2 = dataOut.data_cspc - self.buffer3 = dataOut.noise - self.fint = 1 - if numpy.any(jspc) : - jspc = numpy.reshape(jspc ,(int(len(jspc) / self.nChannels) , self.nChannels ,self.nProf,self.nHeights )) - jcspc = numpy.reshape(jcspc ,(int(len(jcspc) /int(self.nChannels/2)),int(self.nChannels/2),self.nProf,self.nHeights )) - jnoise = numpy.reshape(jnoise,(int(len(jnoise)/ self.nChannels) , self.nChannels)) - else: - dataOut.flagNoData = True - return dataOut - else : - if (tini.tm_min % taver) == 0 : - self.fint = 1 - else : - self.fint = 0 - - self.index 
+= 1 - if numpy.any(self.buffer): - self.buffer = numpy.concatenate((self.buffer,dataOut.data_spc), axis=0) - self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0) - self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0) - else: - self.buffer = dataOut.data_spc - self.buffer2 = dataOut.data_cspc - self.buffer3 = dataOut.noise - dataOut.flagNoData = True - return dataOut + def windowing_single_old(self,spc,x,A,B,C,D,nFFTPoints): + ''' + Written by R. Flores + ''' + from scipy.optimize import curve_fit,fmin - jnoise = jnoise/self.N# creo que falta dividirlo entre N - noise = numpy.nansum(jnoise,axis=0)#/len(jnoise) - index = tini.tm_hour*12+tini.tm_min/taver - dataOut.index = index - jspc = jspc/self.N/self.N - jcspc = jcspc/self.N/self.N + def gaussian(x, a, b, c, d): + val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d + return val - tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2) - jspectra = tmp_spectra * len(jspc[:,0,0,0]) - jcspectra = tmp_cspectra * len(jspc[:,0,0,0]) + def R_gaussian(x, a, b, c): + N = int(numpy.shape(x)[0]) + val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi) + return val - my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, self.snrth,coh_th, hei_th) - clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(self.snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index) + def T(x,N): + T = 1-abs(x)/N + return T - dataOut.data_spc = incoh_spectra - dataOut.data_cspc = incoh_cspectra - clean_num_aver = incoh_aver * len(jspc[:,0,0,0]) - coh_num_aver = clean_coh_aver* len(jspc[:,0,0,0]) - dataOut.clean_num_aver = clean_num_aver - dataOut.coh_num_aver = coh_num_aver + def R_T_spc_fun(x, a, b, c, d, nFFTPoints): - else: - clean_num_aver = dataOut.clean_num_aver 
- coh_num_aver = dataOut.coh_num_aver - dataOut.data_spc = dataOut.tmp_spectra_i - dataOut.data_cspc = dataOut.tmp_cspectra_i - clean_coh_spectra = dataOut.tmp_spectra_c - clean_coh_cspectra = dataOut.tmp_cspectra_c - jspectra = dataOut.data_spc+clean_coh_spectra - nHeights = len(dataOut.heightList) # nhei - nProf = int(dataOut.nProfiles) - dataOut.nProfiles = nProf - dataOut.data_param = None - dataOut.data_paramC = None - dataOut.code = numpy.array([[-1.,-1.,1.],[1.,1.,-1.]]) - #M=600 - #N=200 - dataOut.flagDecodeData=True - M = int(dataOut.normFactor) - N = int(dataOut.nFFTPoints) - dataOut.nFFTPoints = N - dataOut.nIncohInt= int(dataOut.nIncohInt) - dataOut.nProfiles = int(dataOut.nProfiles) - dataOut.nCohInt = int(dataOut.nCohInt) - #dataOut.nFFTPoints=nprofs - #dataOut.normFactor = nprofs - dataOut.channelList = channelList - #dataOut.ippFactor=1 - #ipp = ipp/150*1.e-3 - vmax = (300000000/49920000.0/2) / (dataOut.ippSeconds) - #dataOut.ippSeconds=ipp - absc = vmax*( numpy.arange(nProf,dtype='float')-nProf/2.)/nProf - if path != None: - sys.path.append(path) - self.library = importlib.import_module(file) - constants = self.library.setConstants(dataOut) - constants['M'] = M - dataOut.constants = constants - - #List of possible combinations - listComb = itertools.combinations(numpy.arange(self.groupArray.shape[1]),2) - indCross = numpy.zeros(len(list(listComb)), dtype = 'int') - if dataOut.data_paramC is None: - dataOut.data_paramC = numpy.zeros((self.nGroups*4, self.nHeights ,2))*numpy.nan - for i in range(self.nGroups): - coord = self.groupArray[i,:] - #Input data array - data = dataOut.data_spc[coord,:,:]/(self.M*self.N) - data = data.reshape((data.shape[0]*data.shape[1],data.shape[2])) + N = int(numpy.shape(x)[0]) - #Cross Spectra data array for Covariance Matrixes - ind = 0 - for pairs in listComb: - pairsSel = numpy.array([coord[x],coord[y]]) - indCross[ind] = int(numpy.where(numpy.all(self.pairsArray == pairsSel, axis = 1))[0][0]) - ind += 1 - dataCross = 
dataOut.data_cspc[indCross,:,:]/(self.M*self.N) - dataCross = dataCross**2 - nhei = self.nHeights - poweri = numpy.sum(dataOut.data_spc[:,1:self.nProf-0,:],axis=1)/clean_num_aver[:,:] + x_max = x[-1] - if i == 0 : my_noises = numpy.zeros(4,dtype=float) - n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(self.nProf-1) - n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(self.nProf-1) - n0 = n0i - n1= n1i - my_noises[2*i+0] = n0 - my_noises[2*i+1] = n1 - snrth = -15.0 # -4 -16 -25 - snrth = 10**(snrth/10.0) - jvelr = numpy.zeros(self.nHeights, dtype = 'float') - hvalid = [0] - coh2 = abs(dataOut.data_cspc[i,1:self.nProf,:])**2/(dataOut.data_spc[0+i*2,1:self.nProf-0,:]*dataOut.data_spc[1+i*2,1:self.nProf-0,:]) + x_pos = x[nFFTPoints:] + x_neg = x[:nFFTPoints] - for h in range(self.nHeights): - smooth = clean_num_aver[i+1,h] - signalpn0 = (dataOut.data_spc[i*2,1:(self.nProf-0),h])/smooth - signalpn1 = (dataOut.data_spc[i*2+1,1:(self.nProf-0),h])/smooth - signal0 = signalpn0-n0 - signal1 = signalpn1-n1 - snr0 = numpy.sum(signal0/n0)/(self.nProf-1) - snr1 = numpy.sum(signal1/n1)/(self.nProf-1) - gamma = coh2[:,h] - indxs = (numpy.isfinite(list(gamma))==True).nonzero() - if len(indxs) >0: - if numpy.nanmean(gamma) > 0.07: - maxp0 = numpy.argmax(signal0*gamma) - maxp1 = numpy.argmax(signal1*gamma) - #print('usa gamma',numpy.nanmean(gamma)) - else: - maxp0 = numpy.argmax(signal0) - maxp1 = numpy.argmax(signal1) - jvelr[h] = (self.absc[maxp0]+self.absc[maxp1])/2. 
- else: jvelr[h] = self.absc[0] - if snr0 > 0.1 and snr1 > 0.1: hvalid = numpy.concatenate((hvalid,h), axis=None) - #print(maxp0,absc[maxp0],snr0,jvelr[h]) - - if len(hvalid)> 1: fd0 = numpy.median(jvelr[hvalid[1:]])*-1 - else: fd0 = numpy.nan - for h in range(self.nHeights): - d = data[:,h] - smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:] - signalpn0 = (dataOut.data_spc[i*2,1:(self.nProf-0),h])/smooth - signalpn1 = (dataOut.data_spc[i*2+1,1:(self.nProf-0),h])/smooth - signal0 = signalpn0-n0 - signal1 = signalpn1-n1 - snr0 = numpy.sum(signal0/n0)/(self.nProf-1) - snr1 = numpy.sum(signal1/n1)/(self.nProf-1) - if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 : - #Covariance Matrix - D = numpy.diag(d**2) - ind = 0 - for pairs in listComb: - #Coordinates in Covariance Matrix - x = pairs[0] - y = pairs[1] - #Channel Index - S12 = dataCross[ind,:,h] - D12 = numpy.diag(S12) - #Completing Covariance Matrix with Cross Spectras - D[x*N:(x+1)*N,y*N:(y+1)*N] = D12 - D[y*N:(y+1)*N,x*N:(x+1)*N] = D12 - ind += 1 - diagD = numpy.zeros(256) + R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0]) + R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1]) + #print(T(x_pos,x[-1]),x_pos,x[-1]) + #print(R_T_neg_1.shape,R_T_pos_1.shape) + R_T_sum_1 = R_T_pos_1 + R_T_neg_1 + R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real + R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1) + max_val_1 = numpy.max(R_T_spc_1) + R_T_spc_1 = R_T_spc_1*a/max_val_1 + print("R_T_spc_1: ", R_T_spc_1) - try: - Dinv=numpy.linalg.inv(D) - L=numpy.linalg.cholesky(Dinv) - except: - Dinv = D*numpy.nan - L= D*numpy.nan - LT=L.T + R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N)) + R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0]) + R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1]) + R_T_d_sum = R_T_d_pos + R_T_d_neg + R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real + R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3) - dp = numpy.dot(LT,d) - #Initial values - data_spc = dataOut.data_spc[coord,:,h] - w = 
data_spc/data_spc - if filec != None: - w = self.weightf.weightfit(w,tini.tm_year,tini.tm_yday,index,h,i) - if (h>6)and(error1[3]<25): - p0 = dataOut.data_param[i,:,h-1].copy() - else: - p0 = numpy.array(self.library.initialValuesFunction(data_spc*w, self.constants))# sin el i(data_spc, constants, i) - p0[3] = fd0 + R_T_final = R_T_spc_1# + R_T_spc_3 - if filec != None: - p0 = self.weightf.Vrfit(p0,tini.tm_year,tini.tm_yday,index,h,i) - - try: - #Least Squares - minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,self.constants),full_output=True) - #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants)) - #Chi square error - error0 = numpy.sum(infodict['fvec']**2)/(2*self.N) - #Error with Jacobian - error1 = self.library.errorFunction(minp,self.constants,LT) + return R_T_final - except: - minp = p0*numpy.nan - error0 = numpy.nan - error1 = p0*numpy.nan - else : - data_spc = dataOut.data_spc[coord,:,h] - p0 = numpy.array(self.library.initialValuesFunction(data_spc, self.constants)) - minp = p0*numpy.nan - error0 = numpy.nan - error1 = p0*numpy.nan - if dataOut.data_param is None: - dataOut.data_param = numpy.zeros((self.nGroups, p0.size, self.nHeights ))*numpy.nan - dataOut.data_error = numpy.zeros((self.nGroups, p0.size + 1, self.nHeights ))*numpy.nan - dataOut.data_error[i,:,h] = numpy.hstack((error0,error1)) - dataOut.data_param[i,:,h] = minp + y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x)) - for ht in range(self.nHeights-1) : - smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam] - dataOut.data_paramC[4*i,ht,1] = smooth - signalpn0 = (clean_coh_spectra[i*2 ,1:(self.nProf-0),ht])/smooth #coh_spectra - signalpn1 = (clean_coh_spectra[i*2+1,1:(self.nProf-0),ht])/smooth - val0 = (signalpn0 > 0).nonzero() - val0 = val0[0] - if len(val0) == 0 : val0_npoints = self.nProf - else : val0_npoints = len(val0) - - val1 = (signalpn1 > 0).nonzero() - val1 = val1[0] - if len(val1) == 0 : 
val1_npoints = self.nProf - else : val1_npoints = len(val1) + from scipy.stats import norm + mean,std=norm.fit(spc) - dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0 - dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1 - - signal0 = (signalpn0-n0) - vali = (signal0 < 0).nonzero() - vali = vali[0] - if len(vali) > 0 : signal0[vali] = 0 - signal1 = (signalpn1-n1) - vali = (signal1 < 0).nonzero() - vali = vali[0] - if len(vali) > 0 : signal1[vali] = 0 - snr0 = numpy.sum(signal0/n0)/(self.nProf-1) - snr1 = numpy.sum(signal1/n1)/(self.nProf-1) - doppler = self.absc[1:] - if snr0 >= snrth and snr1 >= snrth and smooth : - signalpn0_n0 = signalpn0 - signalpn0_n0[val0] = signalpn0[val0] - n0 - mom0 = self.moments(doppler,signalpn0-n0,self.nProf) - signalpn1_n1 = signalpn1 - signalpn1_n1[val1] = signalpn1[val1] - n1 - mom1 = self.moments(doppler,signalpn1_n1,self.nProf) - dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2. - dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2. 
- dataOut.data_spc = jspectra - dataOut.spc_noise = my_noises*self.nProf*self.M - if numpy.any(proc): dataOut.spc_noise = my_noises*self.nProf*self.M - if getSNR: - listChannels = self.groupArray.reshape((self.groupArray.size)) - listChannels.sort() - # TEST - noise_C = numpy.zeros(self.nChannels) - noise_C = dataOut.getNoise() - #print("noise_C",noise_C) - dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:],noise_C/(600.0*1.15))# PRUEBA *nProf*M - #dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise_C[listChannels])# PRUEBA *nProf*M - dataOut.flagNoData = False - return dataOut - - def __residFunction(self, p, dp, LT, constants): + # estimate starting values from the data + print("A: ", A) + a = A-D + b = B + c = C#numpy.std(spc) #C + d = D + #''' + #ippSeconds = 250*20*1.e-6/3 - fm = self.library.modelFunction(p, constants) - fmp=numpy.dot(LT,fm) - return dp-fmp + #x_t = ippSeconds * (numpy.arange(nFFTPoints) - nFFTPoints / 2.) + + #x_t = numpy.linspace(x_t[0],x_t[-1],3200) + #print("x_t: ", x_t) + #print("nFFTPoints: ", nFFTPoints) + x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints)) + #print("x_vel: ", x_vel) + #x_freq = numpy.fft.fftfreq(1600,d=ippSeconds) + #x_freq = numpy.fft.fftshift(x_freq) + #''' + # define a least squares function to optimize + import matplotlib.pyplot as plt + aui = R_T_spc_fun(x_vel,a,b,c,d,nFFTPoints) + print("aux_max: ", numpy.nanmax(aui)) + #print(dataOut.heightList[hei]) + plt.figure() + plt.plot(x,spc,marker='*',linestyle='--') + plt.plot(x,gaussian(x, a, b, c, d),color='b',marker='^',linestyle='') + plt.plot(x,aui,color='k') + #plt.title(dataOut.heightList[hei]) + plt.show() + + def minfunc(params): + #print("y.shape: ", numpy.shape(y)) + return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2) + + # fit + + popt_full = fmin(minfunc,[a,b,c,d], disp=False) + #print("nIter", popt_full[2]) + popt = popt_full#[0] + + fun = gaussian(x, popt[0], popt[1], 
popt[2], popt[3]) + print("pop1[0]: ", popt[0]) + #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6] + return fun, popt[0], popt[1], popt[2], popt[3] + + def windowing_single(self,spc,x,A,B,C,D,nFFTPoints): + ''' + Written by R. Flores + ''' + from scipy.optimize import curve_fit,fmin + + def gaussian(x, a, b, c, d): + val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d + return val + + def R_gaussian(x, a, b, c): + N = int(numpy.shape(x)[0]) + + val = (a*numpy.exp((-(1/2)*x*(x*c**2 + 2*1.j*b)))/numpy.sqrt(1/c**2)) + + return val + + def T(x,N): + T = 1-abs(x)/N + return T + + def R_T_spc_fun(x, a, id_dop, c, d, nFFTPoints): + + N = int(numpy.shape(x)[0]) + b = 0 + x_max = x[-1] + + x_pos = x[nFFTPoints:] + x_neg = x[:nFFTPoints] + R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0]) + R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1]) + + R_T_sum_1 = R_T_pos_1 + R_T_neg_1 + R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real + R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1) + max_val_1 = numpy.max(R_T_spc_1) + R_T_spc_1 = R_T_spc_1*a/max_val_1 + #raise NotImplementedError + R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N)) + R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0]) + R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1]) + R_T_d_sum = R_T_d_pos + R_T_d_neg + R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real + R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3) + + R_T_final = R_T_spc_1 + R_T_spc_3 + + id_dop = int(id_dop) + + R_T_final = numpy.roll(R_T_final,-id_dop) + + return R_T_final + + y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x)) + + from scipy.stats import norm + mean,std=norm.fit(spc) + + # estimate starting values from the data + a = A-D + b = B + c = C#numpy.std(spc) #C + d = D + + id_dop = numpy.argmax(spc) + id_dop = int(spc.shape[0]/2 - id_dop) + + x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints)) + + # define a least squares 
function to optimize + + def minfunc(params): + #print("y.shape: ", numpy.shape(y)) + return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2) + + # fit + popt_full = fmin(minfunc,[a,id_dop,c,d], disp=False) + popt = popt_full#[0] + + fun = gaussian(x, a, 0, popt[2], popt[3]) + fun = numpy.roll(fun,-int(popt[1])) + + return fun, popt[0], popt[1], popt[2], popt[3] + + def windowing_single_direct(self,spc_mod,x,A,B,C,D,nFFTPoints,timeInterval): + ''' + Written by R. Flores + ''' + from scipy.optimize import curve_fit,fmin + + def gaussian(x, a, b, c, d): + val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d + return val + + def R_gaussian(x, a, b, c, d): + N = int(numpy.shape(x)[0]) + val = (a*numpy.exp(-2*c**2*x**2 + 2*x*1.j*b))*(numpy.sqrt(2*numpy.pi)*c)/((numpy.pi)) + d*signal.unit_impulse(N)*numpy.shape(x)[0]/2 + + return 2*val/numpy.shape(val)[0] + + def T(x,N): + T = 1-abs(x)/N + return T + + def R_T_spc_fun(x, a, b, c, d, nFFTPoints, timeInterval): #"x" should be time + + #timeInterval = 2 + x_double = numpy.linspace(0,timeInterval,nFFTPoints) + x_double_m = numpy.flip(x_double) + x_double_aux = numpy.linspace(0,x_double[-2],nFFTPoints) + x_double_t = numpy.concatenate((x_double_m,x_double_aux)) + x_double_t /= max(x_double_t) + + + R_T_sum_1 = R_gaussian(x, a, b, c, d) + + R_T_sum_1_flip = numpy.copy(numpy.flip(R_T_sum_1)) + R_T_sum_1_flip[-1] = R_T_sum_1_flip[0] + R_T_sum_1_flip = numpy.roll(R_T_sum_1_flip,1) + + R_T_sum_1_flip.imag *= -1 + + R_T_sum_1_total = numpy.concatenate((R_T_sum_1,R_T_sum_1_flip)) + R_T_sum_1_total *= x_double_t #times trian_fun + + R_T_sum_1_total = R_T_sum_1_total[:nFFTPoints] + R_T_sum_1_total[nFFTPoints:] + + R_T_spc_1 = numpy.fft.fft(R_T_sum_1_total).real + R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1) + + freq = numpy.fft.fftfreq(nFFTPoints, d=timeInterval/nFFTPoints) + + freq = numpy.fft.fftshift(freq) + + freq *= 6/2 #lambda/2 + + return R_T_spc_1 + + y = spc_mod + + #from scipy.stats import 
norm + + # estimate starting values from the data + + a = A-D + b = B + c = C + d = D + + # define a least squares function to optimize + import matplotlib.pyplot as plt + #ippSeconds = 2 + t_range = numpy.linspace(0,timeInterval,nFFTPoints) + #aui = R_T_spc_fun(t_range,a,b,c,d,nFFTPoints,timeInterval) + + def minfunc(params): + return sum((y-R_T_spc_fun(t_range,params[0],params[1],params[2],params[3],nFFTPoints,timeInterval))**2/1)#y**2) + + # fit + popt_full = fmin(minfunc,[a,b,c,d], disp=False) + popt = popt_full + + fun = R_T_spc_fun(t_range,popt[0],popt[1],popt[2],popt[3],nFFTPoints,timeInterval) + + return fun, popt[0], popt[1], popt[2], popt[3] + # ********************************************************************************************** + index = 0 + fint = 0 + buffer = 0 + buffer2 = 0 + buffer3 = 0 + + def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None, filec=None,coh_th=None, hei_th=None,taver=None,proc=None,nhei=None,nprofs=None,ipp=None,channelList=None,Gaussian_Windowed=0): + + if not numpy.any(proc): + nChannels = dataOut.nChannels + nHeights= dataOut.heightList.size + nProf = dataOut.nProfiles + if numpy.any(taver): taver=int(taver) + else : taver = 5 + tini=time.localtime(dataOut.utctime) + if (tini.tm_min % taver) == 0 and (tini.tm_sec < 5 and self.fint==0): + self.index = 0 + jspc = self.buffer + jcspc = self.buffer2 + jnoise = self.buffer3 + self.buffer = dataOut.data_spc + self.buffer2 = dataOut.data_cspc + self.buffer3 = dataOut.noise + self.fint = 1 + if numpy.any(jspc) : + jspc= numpy.reshape(jspc,(int(len(jspc)/nChannels),nChannels,nProf,nHeights)) + jcspc= numpy.reshape(jcspc,(int(len(jcspc)/int(nChannels/2)),int(nChannels/2),nProf,nHeights)) + jnoise= numpy.reshape(jnoise,(int(len(jnoise)/nChannels),nChannels)) + else: + dataOut.flagNoData = True + return dataOut + else: + if (tini.tm_min % taver) == 0 : self.fint = 1 + else : self.fint = 0 + self.index += 1 + if numpy.any(self.buffer): + self.buffer = 
numpy.concatenate((self.buffer,dataOut.data_spc), axis=0) + self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0) + self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0) + else: + self.buffer = dataOut.data_spc + self.buffer2 = dataOut.data_cspc + self.buffer3 = dataOut.noise + dataOut.flagNoData = True + return dataOut + if path != None: + sys.path.append(path) + self.library = importlib.import_module(file) + if filec != None: + self.weightf = importlib.import_module(filec) + #self.weightf = importlib.import_module('weightfit') + + #To be inserted as a parameter + groupArray = numpy.array(groupList) + #groupArray = numpy.array([[0,1],[2,3]]) + dataOut.groupList = groupArray + + nGroups = groupArray.shape[0] + nChannels = dataOut.nChannels + nHeights = dataOut.heightList.size + + #Parameters Array + dataOut.data_param = None + dataOut.data_paramC = None + dataOut.clean_num_aver = None + dataOut.coh_num_aver = None + dataOut.tmp_spectra_i = None + dataOut.tmp_cspectra_i = None + dataOut.tmp_spectra_c = None + dataOut.tmp_cspectra_c = None + dataOut.sat_spectra = None + dataOut.sat_cspectra = None + dataOut.index = None + + #Set constants + constants = self.library.setConstants(dataOut) + dataOut.constants = constants + M = dataOut.normFactor + N = dataOut.nFFTPoints + + ippSeconds = dataOut.ippSeconds + K = dataOut.nIncohInt + pairsArray = numpy.array(dataOut.pairsList) + + snrth= 15 + spectra = dataOut.data_spc + cspectra = dataOut.data_cspc + nProf = dataOut.nProfiles + heights = dataOut.heightList + nHei = len(heights) + channels = dataOut.channelList + nChan = len(channels) + nIncohInt = dataOut.nIncohInt + crosspairs = dataOut.groupList + noise = dataOut.noise + jnoise = jnoise/N + noise = numpy.nansum(jnoise,axis=0)#/len(jnoise) + power = numpy.sum(spectra, axis=1) + nPairs = len(crosspairs) + absc = dataOut.abscissaList[:-1] + print('para escribir h5 ',dataOut.paramInterval) + if not self.isConfig: + self.isConfig = True 
+ + index = tini.tm_hour*12+tini.tm_min/taver + dataOut.index= index + jspc = jspc/N/N + jcspc = jcspc/N/N + tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2) + jspectra = tmp_spectra*len(jspc[:,0,0,0]) + jcspectra = tmp_cspectra*len(jspc[:,0,0,0]) + my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, snrth,coh_th, hei_th) + + clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index) + dataOut.data_spc = incoh_spectra + dataOut.data_cspc = incoh_cspectra + dataOut.sat_spectra = sat_spectra + dataOut.sat_cspectra = sat_cspectra + # dataOut.data_spc = tmp_spectra + # dataOut.data_cspc = tmp_cspectra + + clean_num_aver = incoh_aver*len(jspc[:,0,0,0]) + coh_num_aver = clean_coh_aver*len(jspc[:,0,0,0]) + # clean_num_aver = (numpy.zeros([nChan, nHei])+1)*len(jspc[:,0,0,0]) + # coh_num_aver = numpy.zeros([nChan, nHei])*0*len(jspc[:,0,0,0]) + dataOut.clean_num_aver = clean_num_aver + dataOut.coh_num_aver = coh_num_aver + dataOut.tmp_spectra_i = incoh_spectra + dataOut.tmp_cspectra_i = incoh_cspectra + dataOut.tmp_spectra_c = clean_coh_spectra + dataOut.tmp_cspectra_c = clean_coh_cspectra + #List of possible combinations + listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2) + indCross = numpy.zeros(len(list(listComb)), dtype = 'int') + if Gaussian_Windowed == 1: + #dataOut.data_spc = jspectra + ''' + Written by R. 
Flores + ''' + print("normFactor: ", dataOut.normFactor) + data_spc_aux = numpy.copy(dataOut.data_spc)#[:,0,:] + data_spc_aux[:,0,:] = (data_spc_aux[:,1,:]+data_spc_aux[:,-1,:])/2 + #''' + from scipy.signal import medfilt + import matplotlib.pyplot as plt + dataOut.moments = numpy.ones((dataOut.nChannels,4,dataOut.nHeights))*numpy.NAN + dataOut.VelRange = dataOut.getVelRange(0) + for nChannel in range(dataOut.nChannels): + for hei in range(dataOut.heightList.shape[0]): + #print("ipp: ", dataOut.ippSeconds) + #spc = numpy.copy(dataOut.data_spc[nChannel,:,hei]) + spc = data_spc_aux[nChannel,:,hei] + if spc.all() == 0.: + print("CONTINUE") + continue + #print(VelRange) + #print(dataOut.getFreqRange(64)) + #print("Hei: ", dataOut.heightList[hei]) + + spc_mod = numpy.copy(spc) + spcm = medfilt(spc_mod,11) + spc_max = numpy.max(spcm) + dop1_x0 = dataOut.VelRange[numpy.argmax(spcm)] + #D = numpy.min(spcm) + D_in = (numpy.mean(spcm[:15])+numpy.mean(spcm[-15:]))/2. + #print("spc_max: ", spc_max) + #print("dataOut.ippSeconds: ", dataOut.ippSeconds, dataOut.timeInterval) + ##fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints) + #fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints) + fun, A, B, C, D = self.windowing_single_direct(spc_mod,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0/5),D_in,dataOut.nFFTPoints,dataOut.timeInterval) + + dataOut.moments[nChannel,0,hei] = A + dataOut.moments[nChannel,1,hei] = B + dataOut.moments[nChannel,2,hei] = C + dataOut.moments[nChannel,3,hei] = D + ''' + if nChannel == 0: + print(dataOut.heightList[hei]) + plt.figure() + plt.plot(dataOut.VelRange,spc,marker='*',linestyle='--') + plt.plot(dataOut.VelRange,fun) + plt.title(dataOut.heightList[hei]) + plt.show() + ''' + #plt.show() + #''' + dataOut.data_spc = jspectra + print("SUCCESS") + return dataOut + + elif Gaussian_Windowed == 2: #Only to clean spc + dataOut.VelRange = 
dataOut.getVelRange(0) + return dataOut + + if getSNR: + listChannels = groupArray.reshape((groupArray.size)) + listChannels.sort() + dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise[listChannels]) + else: + if numpy.any(taver): taver=int(taver) + else : taver = 5 + tini=time.localtime(dataOut.utctime) + index = tini.tm_hour*12+tini.tm_min/taver + clean_num_aver = dataOut.clean_num_aver + coh_num_aver = dataOut.coh_num_aver + dataOut.data_spc = dataOut.tmp_spectra_i + dataOut.data_cspc = dataOut.tmp_cspectra_i + clean_coh_spectra = dataOut.tmp_spectra_c + clean_coh_cspectra = dataOut.tmp_cspectra_c + jspectra = dataOut.data_spc+clean_coh_spectra + nHeights = len(dataOut.heightList) # nhei + nProf = int(dataOut.nProfiles) + dataOut.nProfiles = nProf + dataOut.data_param = None + dataOut.data_paramC = None + dataOut.code = numpy.array([[-1.,-1.,1.],[1.,1.,-1.]]) + #dataOut.paramInterval = 2.0 + #M=600 + #N=200 + dataOut.flagDecodeData=True + M = int(dataOut.normFactor) + N = int(dataOut.nFFTPoints) + dataOut.nFFTPoints = N + dataOut.nIncohInt= int(dataOut.nIncohInt) + dataOut.nProfiles = int(dataOut.nProfiles) + dataOut.nCohInt = int(dataOut.nCohInt) + print('sale',dataOut.nProfiles,dataOut.nHeights) + #dataOut.nFFTPoints=nprofs + #dataOut.normFactor = nprofs + dataOut.channelList = channelList + nChan = len(channelList) + #dataOut.ippFactor=1 + #ipp = ipp/150*1.e-3 + vmax = (300000000/49920000.0/2) / (dataOut.ippSeconds) + #dataOut.ippSeconds=ipp + absc = vmax*( numpy.arange(nProf,dtype='float')-nProf/2.)/nProf + print('sale 2',dataOut.ippSeconds,M,N) + print('Empieza procesamiento offline') + if path != None: + sys.path.append(path) + self.library = importlib.import_module(file) + constants = self.library.setConstants(dataOut) + constants['M'] = M + dataOut.constants = constants + if filec != None: + self.weightf = importlib.import_module(filec) + + groupArray = numpy.array(groupList) + dataOut.groupList = groupArray + nGroups = 
groupArray.shape[0] +#List of possible combinations + listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2) + indCross = numpy.zeros(len(list(listComb)), dtype = 'int') + if dataOut.data_paramC is None: + dataOut.data_paramC = numpy.zeros((nGroups*4, nHeights,2))*numpy.nan + dataOut.data_snr1_i = numpy.zeros((nGroups*2, nHeights))*numpy.nan + # dataOut.smooth_i = numpy.zeros((nGroups*2, nHeights))*numpy.nan + + for i in range(nGroups): + coord = groupArray[i,:] + #Input data array + data = dataOut.data_spc[coord,:,:]/(M*N) + data = data.reshape((data.shape[0]*data.shape[1],data.shape[2])) + + #Cross Spectra data array for Covariance Matrixes + ind = 0 + for pairs in listComb: + pairsSel = numpy.array([coord[x],coord[y]]) + indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0]) + ind += 1 + dataCross = dataOut.data_cspc[indCross,:,:]/(M*N) + dataCross = dataCross**2 + nhei = nHeights + poweri = numpy.sum(dataOut.data_spc[:,1:nProf-0,:],axis=1)/clean_num_aver[:,:] + if i == 0 : my_noises = numpy.zeros(4,dtype=float) + n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(nProf-1) + n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(nProf-1) + n0 = n0i + n1= n1i + my_noises[2*i+0] = n0 + my_noises[2*i+1] = n1 + snrth = -13 #-13.0 # -4 -16 -25 + snrth = 10**(snrth/10.0) + jvelr = numpy.zeros(nHeights, dtype = 'float') + #snr0 = numpy.zeros(nHeights, dtype = 'float') + #snr1 = numpy.zeros(nHeights, dtype = 'float') + hvalid = [0] + + coh2 = abs(dataOut.data_cspc[i,1:nProf,:])**2/(dataOut.data_spc[0+i*2,1:nProf-0,:]*dataOut.data_spc[1+i*2,1:nProf-0,:]) + + for h in range(nHeights): + smooth = clean_num_aver[i+1,h] + signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth + signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth + signal0 = signalpn0-n0 + signal1 = signalpn1-n1 + snr0 = numpy.sum(signal0/n0)/(nProf-1) + snr1 = numpy.sum(signal1/n1)/(nProf-1) + #jmax0 = MAX(signal0,maxp0) + #jmax1 = MAX(signal1,maxp1) + gamma = coh2[:,h] + 
+ indxs = (numpy.isfinite(list(gamma))==True).nonzero() + + if len(indxs) >0: + if numpy.nanmean(gamma) > 0.07: + maxp0 = numpy.argmax(signal0*gamma) + maxp1 = numpy.argmax(signal1*gamma) + #print('usa gamma',numpy.nanmean(gamma)) + else: + maxp0 = numpy.argmax(signal0) + maxp1 = numpy.argmax(signal1) + jvelr[h] = (absc[maxp0]+absc[maxp1])/2. + else: jvelr[h] = absc[0] + if snr0 > 0.1 and snr1 > 0.1: hvalid = numpy.concatenate((hvalid,h), axis=None) + #print(maxp0,absc[maxp0],snr0,jvelr[h]) + + if len(hvalid)> 1: fd0 = numpy.median(jvelr[hvalid[1:]])*-1 + else: fd0 = numpy.nan + print(fd0) + for h in range(nHeights): + d = data[:,h] + smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:] + signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth + signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth + signal0 = signalpn0-n0 + signal1 = signalpn1-n1 + snr0 = numpy.sum(signal0/n0)/(nProf-1) + snr1 = numpy.sum(signal1/n1)/(nProf-1) + + if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 : + #Covariance Matrix + D = numpy.diag(d**2) + ind = 0 + for pairs in listComb: + #Coordinates in Covariance Matrix + x = pairs[0] + y = pairs[1] + #Channel Index + S12 = dataCross[ind,:,h] + D12 = numpy.diag(S12) + #Completing Covariance Matrix with Cross Spectras + D[x*N:(x+1)*N,y*N:(y+1)*N] = D12 + D[y*N:(y+1)*N,x*N:(x+1)*N] = D12 + ind += 1 + diagD = numpy.zeros(256) + + try: + Dinv=numpy.linalg.inv(D) + L=numpy.linalg.cholesky(Dinv) + except: + Dinv = D*numpy.nan + L= D*numpy.nan + LT=L.T + + dp = numpy.dot(LT,d) + #Initial values + data_spc = dataOut.data_spc[coord,:,h] + w = data_spc/data_spc + if filec != None: + w = self.weightf.weightfit(w,tini.tm_year,tini.tm_yday,index,h,i) + if (h>6) and (error1[3]<25): + p0 = dataOut.data_param[i,:,h-1].copy() + else: + p0 = numpy.array(self.library.initialValuesFunction(data_spc*w, constants))# sin el i(data_spc, constants, i) + p0[3] = fd0 + if filec != None: + p0 = 
self.weightf.Vrfit(p0,tini.tm_year,tini.tm_yday,index,h,i) + + try: + #Least Squares + minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True) + #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants)) + #Chi square error + error0 = numpy.sum(infodict['fvec']**2)/(2*N) + #Error with Jacobian + error1 = self.library.errorFunction(minp,constants,LT) + + except: + minp = p0*numpy.nan + error0 = numpy.nan + error1 = p0*numpy.nan + else : + data_spc = dataOut.data_spc[coord,:,h] + p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants)) + minp = p0*numpy.nan + error0 = numpy.nan + error1 = p0*numpy.nan + if dataOut.data_param is None: + dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan + dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan + + dataOut.data_error[i,:,h] = numpy.hstack((error0,error1)) + dataOut.data_param[i,:,h] = minp + dataOut.data_snr1_i[i*2,h] = numpy.sum(signalpn0/(nProf-1))/n0 + dataOut.data_snr1_i[i*2+1,h] = numpy.sum(signalpn1/(nProf-1))/n1 + + for ht in range(nHeights-1) : + smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam] + dataOut.data_paramC[4*i,ht,1] = smooth + signalpn0 = (clean_coh_spectra[i*2 ,1:(nProf-0),ht])/smooth #coh_spectra + signalpn1 = (clean_coh_spectra[i*2+1,1:(nProf-0),ht])/smooth + + val0 = (signalpn0 > 0).nonzero() + val0 = val0[0] + + if len(val0) == 0 : val0_npoints = nProf + else : val0_npoints = len(val0) + + val1 = (signalpn1 > 0).nonzero() + val1 = val1[0] + if len(val1) == 0 : val1_npoints = nProf + else : val1_npoints = len(val1) + + dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0 + dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1 + + signal0 = (signalpn0-n0) + vali = (signal0 < 0).nonzero() + vali = vali[0] + if len(vali) > 0 : signal0[vali] = 0 + signal1 = (signalpn1-n1) + vali = (signal1 < 0).nonzero() + vali = vali[0] + if 
len(vali) > 0 : signal1[vali] = 0 + snr0 = numpy.sum(signal0/n0)/(nProf-1) + snr1 = numpy.sum(signal1/n1)/(nProf-1) + doppler = absc[1:] + if snr0 >= snrth and snr1 >= snrth and smooth : + signalpn0_n0 = signalpn0 + signalpn0_n0[val0] = signalpn0[val0] - n0 + mom0 = self.moments(doppler,signalpn0-n0,nProf) + + signalpn1_n1 = signalpn1 + signalpn1_n1[val1] = signalpn1[val1] - n1 + mom1 = self.moments(doppler,signalpn1_n1,nProf) + dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2. + dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2. + + dataOut.data_spc = jspectra + dataOut.spc_noise = my_noises*nProf*M + + if numpy.any(proc): dataOut.spc_noise = my_noises*nProf*M + if 0: + listChannels = groupArray.reshape((groupArray.size)) + listChannels.sort() + dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], my_noises[listChannels]) + #print(dataOut.data_snr1_i) + # Adding coherent echoes from possible satellites. + #sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float) + #sat_spectra = sat_spectra[*,*,anal_header.channels] + isat_spectra = numpy.zeros([2,int(nChan/2),nProf,nhei], dtype=float) + + sat_fits = numpy.zeros([4,nhei], dtype=float) + noises = my_noises/nProf + #nchan2 = int(nChan/2) + for beam in range(int(nChan/2)-0) : + n0 = noises[2*beam] + n1 = noises[2*beam+1] + isat_spectra[0:2,beam,:,:] = dataOut.sat_spectra[2*beam +0:2*beam+2 ,:,:] + + for ht in range(nhei-1) : + signalpn0 = isat_spectra[0,beam,:,ht] + signalpn0 = numpy.reshape(signalpn0,nProf) + signalpn1 = isat_spectra[1,beam,:,ht] + signalpn1 = numpy.reshape(signalpn1,nProf) + + cval0 = len((signalpn0 > 0).nonzero()[0]) + if cval0 == 0 : val0_npoints = nProf + else: val0_npoints = cval0 + + cval1 = len((signalpn1 > 0).nonzero()[0]) + if cval1 == 0 : val1_npoints = nProf + else: val1_npoints = cval1 + + sat_fits[0+2*beam,ht] = numpy.sum(signalpn0/(val0_npoints*nProf))/n0 + sat_fits[1+2*beam,ht] = numpy.sum(signalpn1/(val1_npoints*nProf))/n1 + + dataOut.sat_fits = sat_fits + 
return dataOut + + def __residFunction(self, p, dp, LT, constants): + + fm = self.library.modelFunction(p, constants) + fmp=numpy.dot(LT,fm) + return dp-fmp + + def __getSNR(self, z, noise): + + avg = numpy.average(z, axis=1) + SNR = (avg.T-noise)/noise + SNR = SNR.T + return SNR + + def __chisq(self, p, chindex, hindex): + #similar to Resid but calculates CHI**2 + [LT,d,fm]=setupLTdfm(p,chindex,hindex) + dp=numpy.dot(LT,d) + fmp=numpy.dot(LT,fm) + chisq=numpy.dot((dp-fmp).T,(dp-fmp)) + return chisq + +class WindProfiler_V0(Operation): + + __isConfig = False + + __initime = None + __lastdatatime = None + __integrationtime = None + + __buffer = None + + __dataReady = False + + __firstdata = None + + n = None + + def __init__(self): + Operation.__init__(self) + + def __calculateCosDir(self, elev, azim): + zen = (90 - elev)*numpy.pi/180 + azim = azim*numpy.pi/180 + cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2))) + cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2) + + signX = numpy.sign(numpy.cos(azim)) + signY = numpy.sign(numpy.sin(azim)) + + cosDirX = numpy.copysign(cosDirX, signX) + cosDirY = numpy.copysign(cosDirY, signY) + return cosDirX, cosDirY + + def __calculateAngles(self, theta_x, theta_y, azimuth): + + dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2) + zenith_arr = numpy.arccos(dir_cosw) + azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180 + + dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr) + dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr) + + return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw + + def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly): + + if horOnly: + A = numpy.c_[dir_cosu,dir_cosv] + else: + A = numpy.c_[dir_cosu,dir_cosv,dir_cosw] + A = numpy.asmatrix(A) + A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose() + + return A1 + + def __correctValues(self, heiRang, phi, velRadial, SNR): + listPhi = phi.tolist() + maxid = listPhi.index(max(listPhi)) + minid = 
listPhi.index(min(listPhi)) + + rango = list(range(len(phi))) + # rango = numpy.delete(rango,maxid) + + heiRang1 = heiRang*math.cos(phi[maxid]) + heiRangAux = heiRang*math.cos(phi[minid]) + indOut = (heiRang1 < heiRangAux[0]).nonzero() + heiRang1 = numpy.delete(heiRang1,indOut) + + velRadial1 = numpy.zeros([len(phi),len(heiRang1)]) + SNR1 = numpy.zeros([len(phi),len(heiRang1)]) + + for i in rango: + x = heiRang*math.cos(phi[i]) + y1 = velRadial[i,:] + f1 = interpolate.interp1d(x,y1,kind = 'cubic') + + x1 = heiRang1 + y11 = f1(x1) + + y2 = SNR[i,:] + f2 = interpolate.interp1d(x,y2,kind = 'cubic') + y21 = f2(x1) + + velRadial1[i,:] = y11 + SNR1[i,:] = y21 + + return heiRang1, velRadial1, SNR1 + + def __calculateVelUVW(self, A, velRadial): + + #Operacion Matricial +# velUVW = numpy.zeros((velRadial.shape[1],3)) +# for ind in range(velRadial.shape[1]): +# velUVW[ind,:] = numpy.dot(A,velRadial[:,ind]) +# velUVW = velUVW.transpose() + velUVW = numpy.zeros((A.shape[0],velRadial.shape[1])) + velUVW[:,:] = numpy.dot(A,velRadial) + + + return velUVW + +# def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0): + + def techniqueDBS(self, kwargs): + """ + Function that implements Doppler Beam Swinging (DBS) technique. 
+ + Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth, + Direction correction (if necessary), Ranges and SNR + + Output: Winds estimation (Zonal, Meridional and Vertical) + + Parameters affected: Winds, height range, SNR + """ + velRadial0 = kwargs['velRadial'] + heiRang = kwargs['heightList'] + SNR0 = kwargs['SNR'] + + if 'dirCosx' in kwargs and 'dirCosy' in kwargs: + theta_x = numpy.array(kwargs['dirCosx']) + theta_y = numpy.array(kwargs['dirCosy']) + else: + elev = numpy.array(kwargs['elevation']) + azim = numpy.array(kwargs['azimuth']) + theta_x, theta_y = self.__calculateCosDir(elev, azim) + azimuth = kwargs['correctAzimuth'] + if 'horizontalOnly' in kwargs: + horizontalOnly = kwargs['horizontalOnly'] + else: horizontalOnly = False + if 'correctFactor' in kwargs: + correctFactor = kwargs['correctFactor'] + else: correctFactor = 1 + if 'channelList' in kwargs: + channelList = kwargs['channelList'] + if len(channelList) == 2: + horizontalOnly = True + arrayChannel = numpy.array(channelList) + param = param[arrayChannel,:,:] + theta_x = theta_x[arrayChannel] + theta_y = theta_y[arrayChannel] + + azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth) + heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0) + A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly) + + #Calculo de Componentes de la velocidad con DBS + winds = self.__calculateVelUVW(A,velRadial1) + + return winds, heiRang1, SNR1 + + def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None): + + nPairs = len(pairs_ccf) + posx = numpy.asarray(posx) + posy = numpy.asarray(posy) + + #Rotacion Inversa para alinear con el azimuth + if azimuth!= None: + azimuth = azimuth*math.pi/180 + posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth) + posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth) + else: + posx1 = posx + posy1 = posy + + #Calculo de 
Distancias + distx = numpy.zeros(nPairs) + disty = numpy.zeros(nPairs) + dist = numpy.zeros(nPairs) + ang = numpy.zeros(nPairs) + + for i in range(nPairs): + distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]] + disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]] + dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2) + ang[i] = numpy.arctan2(disty[i],distx[i]) + + return distx, disty, dist, ang + #Calculo de Matrices +# nPairs = len(pairs) +# ang1 = numpy.zeros((nPairs, 2, 1)) +# dist1 = numpy.zeros((nPairs, 2, 1)) +# +# for j in range(nPairs): +# dist1[j,0,0] = dist[pairs[j][0]] +# dist1[j,1,0] = dist[pairs[j][1]] +# ang1[j,0,0] = ang[pairs[j][0]] +# ang1[j,1,0] = ang[pairs[j][1]] +# +# return distx,disty, dist1,ang1 + + + def __calculateVelVer(self, phase, lagTRange, _lambda): + + Ts = lagTRange[1] - lagTRange[0] + velW = -_lambda*phase/(4*math.pi*Ts) + + return velW + + def __calculateVelHorDir(self, dist, tau1, tau2, ang): + nPairs = tau1.shape[0] + nHeights = tau1.shape[1] + vel = numpy.zeros((nPairs,3,nHeights)) + dist1 = numpy.reshape(dist, (dist.size,1)) + + angCos = numpy.cos(ang) + angSin = numpy.sin(ang) + + vel0 = dist1*tau1/(2*tau2**2) + vel[:,0,:] = (vel0*angCos).sum(axis = 1) + vel[:,1,:] = (vel0*angSin).sum(axis = 1) + + ind = numpy.where(numpy.isinf(vel)) + vel[ind] = numpy.nan + + return vel + +# def __getPairsAutoCorr(self, pairsList, nChannels): +# +# pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan +# +# for l in range(len(pairsList)): +# firstChannel = pairsList[l][0] +# secondChannel = pairsList[l][1] +# +# #Obteniendo pares de Autocorrelacion +# if firstChannel == secondChannel: +# pairsAutoCorr[firstChannel] = int(l) +# +# pairsAutoCorr = pairsAutoCorr.astype(int) +# +# pairsCrossCorr = range(len(pairsList)) +# pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr) +# +# return pairsAutoCorr, pairsCrossCorr + +# def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, 
position_y, lagTRange, correctFactor): + def techniqueSA(self, kwargs): + + """ + Function that implements Spaced Antenna (SA) technique. + + Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth, + Direction correction (if necessary), Ranges and SNR + + Output: Winds estimation (Zonal, Meridional and Vertical) + + Parameters affected: Winds + """ + position_x = kwargs['positionX'] + position_y = kwargs['positionY'] + azimuth = kwargs['azimuth'] + + if 'correctFactor' in kwargs: + correctFactor = kwargs['correctFactor'] + else: + correctFactor = 1 + + groupList = kwargs['groupList'] + pairs_ccf = groupList[1] + tau = kwargs['tau'] + _lambda = kwargs['_lambda'] + + #Cross Correlation pairs obtained +# pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels) +# pairsArray = numpy.array(pairsList)[pairsCrossCorr] +# pairsSelArray = numpy.array(pairsSelected) +# pairs = [] +# +# #Wind estimation pairs obtained +# for i in range(pairsSelArray.shape[0]/2): +# ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0] +# ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0] +# pairs.append((ind1,ind2)) + + indtau = tau.shape[0]/2 + tau1 = tau[:indtau,:] + tau2 = tau[indtau:-1,:] +# tau1 = tau1[pairs,:] +# tau2 = tau2[pairs,:] + phase1 = tau[-1,:] + + #--------------------------------------------------------------------- + #Metodo Directo + distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth) + winds = self.__calculateVelHorDir(dist, tau1, tau2, ang) + winds = stats.nanmean(winds, axis=0) + #--------------------------------------------------------------------- + #Metodo General +# distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth) +# #Calculo Coeficientes de Funcion de Correlacion +# F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n) +# #Calculo de Velocidades +# winds = 
self.calculateVelUV(F,G,A,B,H) + + #--------------------------------------------------------------------- + winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda) + winds = correctFactor*winds + return winds + + def __checkTime(self, currentTime, paramInterval, outputInterval): + + dataTime = currentTime + paramInterval + deltaTime = dataTime - self.__initime + + if deltaTime >= outputInterval or deltaTime < 0: + self.__dataReady = True + return + + def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax): + ''' + Function that implements winds estimation technique with detected meteors. + + Input: Detected meteors, Minimum meteor quantity to wind estimation + + Output: Winds estimation (Zonal and Meridional) + + Parameters affected: Winds + ''' + #Settings + nInt = (heightMax - heightMin)/2 + nInt = int(nInt) + winds = numpy.zeros((2,nInt))*numpy.nan + + #Filter errors + error = numpy.where(arrayMeteor[:,-1] == 0)[0] + finalMeteor = arrayMeteor[error,:] + + #Meteor Histogram + finalHeights = finalMeteor[:,2] + hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax)) + nMeteorsPerI = hist[0] + heightPerI = hist[1] + + #Sort of meteors + indSort = finalHeights.argsort() + finalMeteor2 = finalMeteor[indSort,:] + + # Calculating winds + ind1 = 0 + ind2 = 0 + + for i in range(nInt): + nMet = nMeteorsPerI[i] + ind1 = ind2 + ind2 = ind1 + nMet + + meteorAux = finalMeteor2[ind1:ind2,:] + + if meteorAux.shape[0] >= meteorThresh: + vel = meteorAux[:, 6] + zen = meteorAux[:, 4]*numpy.pi/180 + azim = meteorAux[:, 3]*numpy.pi/180 + + n = numpy.cos(zen) + # m = (1 - n**2)/(1 - numpy.tan(azim)**2) + # l = m*numpy.tan(azim) + l = numpy.sin(zen)*numpy.sin(azim) + m = numpy.sin(zen)*numpy.cos(azim) + + A = numpy.vstack((l, m)).transpose() + A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose()) + windsAux = numpy.dot(A1, vel) + + winds[0,i] = windsAux[0] + winds[1,i] = windsAux[1] + + return winds, 
heightPerI[:-1] + + def techniqueNSM_SA(self, **kwargs): + metArray = kwargs['metArray'] + heightList = kwargs['heightList'] + timeList = kwargs['timeList'] + + rx_location = kwargs['rx_location'] + groupList = kwargs['groupList'] + azimuth = kwargs['azimuth'] + dfactor = kwargs['dfactor'] + k = kwargs['k'] + + azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth) + d = dist*dfactor + #Phase calculation + metArray1 = self.__getPhaseSlope(metArray, heightList, timeList) + + metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities + + velEst = numpy.zeros((heightList.size,2))*numpy.nan + azimuth1 = azimuth1*numpy.pi/180 + + for i in range(heightList.size): + h = heightList[i] + indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0] + metHeight = metArray1[indH,:] + if metHeight.shape[0] >= 2: + velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities + iazim = metHeight[:,1].astype(int) + azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths + A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux))) + A = numpy.asmatrix(A) + A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose() + velHor = numpy.dot(A1,velAux) + + velEst[i,:] = numpy.squeeze(velHor) + return velEst + + def __getPhaseSlope(self, metArray, heightList, timeList): + meteorList = [] + #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2 + #Putting back together the meteor matrix + utctime = metArray[:,0] + uniqueTime = numpy.unique(utctime) + + phaseDerThresh = 0.5 + ippSeconds = timeList[1] - timeList[0] + sec = numpy.where(timeList>1)[0][0] + nPairs = metArray.shape[1] - 6 + nHeights = len(heightList) + + for t in uniqueTime: + metArray1 = metArray[utctime==t,:] +# phaseDerThresh = numpy.pi/4 #reducir Phase thresh + tmet = metArray1[:,1].astype(int) + hmet = metArray1[:,2].astype(int) + + metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1)) + metPhase[:,:] = numpy.nan + 
metPhase[:,hmet,tmet] = metArray1[:,6:].T + + #Delete short trails + metBool = ~numpy.isnan(metPhase[0,:,:]) + heightVect = numpy.sum(metBool, axis = 1) + metBool[heightVect phaseDerThresh)) + metPhase[phDerAux] = numpy.nan + + #--------------------------METEOR DETECTION ----------------------------------------- + indMet = numpy.where(numpy.any(metBool,axis=1))[0] + + for p in numpy.arange(nPairs): + phase = metPhase[p,:,:] + phDer = metDer[p,:,:] + + for h in indMet: + height = heightList[h] + phase1 = phase[h,:] #82 + phDer1 = phDer[h,:] + + phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap + + indValid = numpy.where(~numpy.isnan(phase1))[0] + initMet = indValid[0] + endMet = 0 + + for i in range(len(indValid)-1): + + #Time difference + inow = indValid[i] + inext = indValid[i+1] + idiff = inext - inow + #Phase difference + phDiff = numpy.abs(phase1[inext] - phase1[inow]) + + if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor + sizeTrail = inow - initMet + 1 + if sizeTrail>3*sec: #Too short meteors + x = numpy.arange(initMet,inow+1)*ippSeconds + y = phase1[initMet:inow+1] + ynnan = ~numpy.isnan(y) + x = x[ynnan] + y = y[ynnan] + slope, intercept, r_value, p_value, std_err = stats.linregress(x,y) + ylin = x*slope + intercept + rsq = r_value**2 + if rsq > 0.5: + vel = slope#*height*1000/(k*d) + estAux = numpy.array([utctime,p,height, vel, rsq]) + meteorList.append(estAux) + initMet = inext + metArray2 = numpy.array(meteorList) + + return metArray2 + + def __calculateAzimuth1(self, rx_location, pairslist, azimuth0): + + azimuth1 = numpy.zeros(len(pairslist)) + dist = numpy.zeros(len(pairslist)) + + for i in range(len(rx_location)): + ch0 = pairslist[i][0] + ch1 = pairslist[i][1] + + diffX = rx_location[ch0][0] - rx_location[ch1][0] + diffY = rx_location[ch0][1] - rx_location[ch1][1] + azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi + dist[i] = numpy.sqrt(diffX**2 + diffY**2) + + azimuth1 -= azimuth0 + return 
azimuth1, dist
+
+    def techniqueNSM_DBS(self, **kwargs):
+        metArray = kwargs['metArray']
+        heightList = kwargs['heightList']
+        timeList = kwargs['timeList']
+        azimuth = kwargs['azimuth']
+        theta_x = numpy.array(kwargs['theta_x'])
+        theta_y = numpy.array(kwargs['theta_y'])
+
+        utctime = metArray[:,0]
+        cmet = metArray[:,1].astype(int)
+        hmet = metArray[:,3].astype(int)
+        SNRmet = metArray[:,4]
+        vmet = metArray[:,5]
+        spcmet = metArray[:,6]
+
+        nChan = numpy.max(cmet) + 1
+        nHeights = len(heightList)
+
+        azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
+        hmet = heightList[hmet]
+        h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
+
+        velEst = numpy.zeros((heightList.size,2))*numpy.nan
+
+        for i in range(nHeights - 1):
+            hmin = heightList[i]
+            hmax = heightList[i + 1]
+
+            thisH = (h1met>=hmin) & (h1met<hmax) & (SNRmet>8) & (vmet<50) & (spcmet<10)
+            indthisH = numpy.where(thisH)
+
+            if numpy.size(indthisH) > 3:
+
+                vel_aux = vmet[thisH]
+                chan_aux = cmet[thisH]
+                cosu_aux = dir_cosu[chan_aux]
+                cosv_aux = dir_cosv[chan_aux]
+                cosw_aux = dir_cosw[chan_aux]
+
+                nch = numpy.size(numpy.unique(chan_aux))
+                if nch > 1:
+                    A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
+                    velEst[i,:] = numpy.dot(A,vel_aux)
+
+        return velEst
+
+    def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
+
+        param = dataOut.data_param
+        #if dataOut.abscissaList != None:
+        if numpy.any(dataOut.abscissaList):
+            absc = dataOut.abscissaList[:-1]
+        # noise = dataOut.noise
+        heightList = dataOut.heightList
+        SNR = dataOut.data_snr
+
+        if technique == 'DBS':
+
+            kwargs['velRadial'] = param[:,1,:] #Radial velocity
+            kwargs['heightList'] = heightList
+            kwargs['SNR'] = SNR
+
+            dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
+            dataOut.utctimeInit = dataOut.utctime
+            dataOut.outputInterval = dataOut.paramInterval
+
+        elif technique == 'SA':
+
+            #Parameters
+#            position_x = 
kwargs['positionX'] +# position_y = kwargs['positionY'] +# azimuth = kwargs['azimuth'] +# +# if kwargs.has_key('crosspairsList'): +# pairs = kwargs['crosspairsList'] +# else: +# pairs = None +# +# if kwargs.has_key('correctFactor'): +# correctFactor = kwargs['correctFactor'] +# else: +# correctFactor = 1 + +# tau = dataOut.data_param +# _lambda = dataOut.C/dataOut.frequency +# pairsList = dataOut.groupList +# nChannels = dataOut.nChannels + + kwargs['groupList'] = dataOut.groupList + kwargs['tau'] = dataOut.data_param + kwargs['_lambda'] = dataOut.C/dataOut.frequency +# dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor) + dataOut.data_output = self.techniqueSA(kwargs) + dataOut.utctimeInit = dataOut.utctime + dataOut.outputInterval = dataOut.timeInterval + + elif technique == 'Meteors': + dataOut.flagNoData = True + self.__dataReady = False + + if 'nHours' in kwargs: + nHours = kwargs['nHours'] + else: + nHours = 1 - def __getSNR(self, z, noise): + if 'meteorsPerBin' in kwargs: + meteorThresh = kwargs['meteorsPerBin'] + else: + meteorThresh = 6 - avg = numpy.average(z, axis=1) - SNR = (avg.T-noise)/noise - SNR = SNR.T - return SNR + if 'hmin' in kwargs: + hmin = kwargs['hmin'] + else: hmin = 70 + if 'hmax' in kwargs: + hmax = kwargs['hmax'] + else: hmax = 110 - def __chisq(self, p, chindex, hindex): - #similar to Resid but calculates CHI**2 - [LT,d,fm]=setupLTdfm(p,chindex,hindex) - dp=numpy.dot(LT,d) - fmp=numpy.dot(LT,fm) - chisq=numpy.dot((dp-fmp).T,(dp-fmp)) - return chisq + dataOut.outputInterval = nHours*3600 + + if self.__isConfig == False: +# self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03) + #Get Initial LTC time + self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime) + self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds() + + self.__isConfig = 
True + + if self.__buffer is None: + self.__buffer = dataOut.data_param + self.__firstdata = copy.copy(dataOut) + + else: + self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param)) + + self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready + + if self.__dataReady: + dataOut.utctimeInit = self.__initime + + self.__initime += dataOut.outputInterval #to erase time offset + + dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax) + dataOut.flagNoData = False + self.__buffer = None + + elif technique == 'Meteors1': + dataOut.flagNoData = True + self.__dataReady = False + + if 'nMins' in kwargs: + nMins = kwargs['nMins'] + else: nMins = 20 + if 'rx_location' in kwargs: + rx_location = kwargs['rx_location'] + else: rx_location = [(0,1),(1,1),(1,0)] + if 'azimuth' in kwargs: + azimuth = kwargs['azimuth'] + else: azimuth = 51.06 + if 'dfactor' in kwargs: + dfactor = kwargs['dfactor'] + if 'mode' in kwargs: + mode = kwargs['mode'] + if 'theta_x' in kwargs: + theta_x = kwargs['theta_x'] + if 'theta_y' in kwargs: + theta_y = kwargs['theta_y'] + else: mode = 'SA' + + #Borrar luego esto + if dataOut.groupList is None: + dataOut.groupList = [(0,1),(0,2),(1,2)] + groupList = dataOut.groupList + C = 3e8 + freq = 50e6 + lamb = C/freq + k = 2*numpy.pi/lamb + + timeList = dataOut.abscissaList + heightList = dataOut.heightList + + if self.__isConfig == False: + dataOut.outputInterval = nMins*60 +# self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03) + #Get Initial LTC time + initime = datetime.datetime.utcfromtimestamp(dataOut.utctime) + minuteAux = initime.minute + minuteNew = int(numpy.floor(minuteAux/nMins)*nMins) + self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds() + + self.__isConfig = True + + if self.__buffer is None: + self.__buffer = dataOut.data_param + 
self.__firstdata = copy.copy(dataOut) + + else: + self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param)) + + self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready + + if self.__dataReady: + dataOut.utctimeInit = self.__initime + self.__initime += dataOut.outputInterval #to erase time offset + + metArray = self.__buffer + if mode == 'SA': + dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList) + elif mode == 'DBS': + dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y) + dataOut.data_output = dataOut.data_output.T + dataOut.flagNoData = False + self.__buffer = None + + return class WindProfiler(Operation): @@ -4137,7 +5253,6 @@ class WindProfiler(Operation): return distx, disty, dist, ang #Calculo de Matrices - def __calculateVelVer(self, phase, lagTRange, _lambda): Ts = lagTRange[1] - lagTRange[0] @@ -4645,15 +5760,36 @@ class EWDriftsEstimation(Operation): def run(self, dataOut, zenith, zenithCorrection,fileDrifts): - dataOut.lat=-11.95 - dataOut.lon=-76.87 + dataOut.lat = -11.95 + dataOut.lon = -76.87 + dataOut.spcst = 0.00666 + dataOut.pl = 0.0003 + dataOut.cbadn = 3 + dataOut.inttms = 300 + dataOut.azw = -115.687 + dataOut.elw = 86.1095 + dataOut.aze = 130.052 + dataOut.ele = 87.6558 + dataOut.jro14 = numpy.log10(dataOut.spc_noise[0]/dataOut.normFactor) + dataOut.jro15 = numpy.log10(dataOut.spc_noise[1]/dataOut.normFactor) + dataOut.jro16 = numpy.log10(dataOut.spc_noise[2]/dataOut.normFactor) + dataOut.nwlos = numpy.log10(dataOut.spc_noise[3]/dataOut.normFactor) + heiRang = dataOut.heightList velRadial = dataOut.data_param[:,3,:] velRadialm = dataOut.data_param[:,2:4,:]*-1 + rbufc=dataOut.data_paramC[:,:,0] ebufc=dataOut.data_paramC[:,:,1] - SNR = dataOut.data_snr 
+ #SNR = dataOut.data_snr + SNR = dataOut.data_snr1_i + rbufi = dataOut.data_snr1_i velRerr = dataOut.data_error[:,4,:] + range1 = dataOut.heightList + nhei = len(range1) + + sat_fits = dataOut.sat_fits + channels = dataOut.channelList nChan = len(channels) my_nbeams = nChan/2 @@ -4662,18 +5798,49 @@ class EWDriftsEstimation(Operation): else : moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]])) dataOut.moments=moments + #Incoherent + smooth_w = dataOut.clean_num_aver[0,:] + chisq_w = dataOut.data_error[0,0,:] + p_w0 = rbufi[0,:] + p_w1 = rbufi[1,:] + # Coherent smooth_wC = ebufc[0,:] p_w0C = rbufc[0,:] p_w1C = rbufc[1,:] w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1) t_wC = rbufc[3,:] + val = (numpy.isfinite(p_w0)==False).nonzero() + p_w0[val]=0 + val = (numpy.isfinite(p_w1)==False).nonzero() + p_w1[val]=0 + val = (numpy.isfinite(p_w0C)==False).nonzero() + p_w0C[val]=0 + val = (numpy.isfinite(p_w1C)==False).nonzero() + p_w1C[val]=0 + val = (numpy.isfinite(smooth_w)==False).nonzero() + smooth_w[val]=0 + val = (numpy.isfinite(smooth_wC)==False).nonzero() + smooth_wC[val]=0 + + #p_w0 = (p_w0*smooth_w+p_w0C*smooth_wC)/(smooth_w+smooth_wC) + #p_w1 = (p_w1*smooth_w+p_w1C*smooth_wC)/(smooth_w+smooth_wC) + + if len(sat_fits) >0 : + p_w0C = p_w0C + sat_fits[0,:] + p_w1C = p_w1C + sat_fits[1,:] + if my_nbeams == 1: w = velRadial[0,:] winds = velRadial.copy() w_err = velRerr[0,:] - snr1 = 10*numpy.log10(SNR[0]) + u = w*numpy.nan + u_err = w_err*numpy.nan + p_e0 = p_w0*numpy.nan + p_e1 = p_w1*numpy.nan + #snr1 = 10*numpy.log10(SNR[0]) if my_nbeams == 2: + zenith = numpy.array(zenith) zenith -= zenithCorrection zenith *= numpy.pi/180 @@ -4691,59 +5858,101 @@ class EWDriftsEstimation(Operation): w_e = velRadial1[1,:] w_w_err = velRerr[0,:] w_e_err = velRerr[1,:] - - val = (numpy.isfinite(w_w)==False).nonzero() - val = val[0] - bad = val - if len(bad) > 0 : - w_w[bad] = w_wC[bad] - w_w_err[bad]= numpy.nan + smooth_e = dataOut.clean_num_aver[2,:] + chisq_e = 
dataOut.data_error[1,0,:] + p_e0 = rbufi[2,:] + p_e1 = rbufi[3,:] + + tini=time.localtime(dataOut.utctime) + + if tini[3] >= 6 and tini[3] < 18 : + w_wtmp = numpy.where(numpy.isfinite(w_wC)==True,w_wC,w_w) + w_w_errtmp = numpy.where(numpy.isfinite(w_wC)==True,numpy.nan,w_w_err) + else: + w_wtmp = numpy.where(numpy.isfinite(w_wC)==True,w_wC,w_w) + w_wtmp = numpy.where(range1 > 200,w_w,w_wtmp) + w_w_errtmp = numpy.where(numpy.isfinite(w_wC)==True,numpy.nan,w_w_err) + w_w_errtmp = numpy.where(range1 > 200,w_w_err,w_w_errtmp) + w_w = w_wtmp + w_w_err = w_w_errtmp + + #if my_nbeams == 2: smooth_eC=ebufc[4,:] p_e0C = rbufc[4,:] p_e1C = rbufc[5,:] w_eC = rbufc[6,:]*-1 t_eC = rbufc[7,:] - val = (numpy.isfinite(w_e)==False).nonzero() - val = val[0] - bad = val - if len(bad) > 0 : - w_e[bad] = w_eC[bad] - w_e_err[bad]= numpy.nan - - w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp)) - u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp)) + val = (numpy.isfinite(p_e0)==False).nonzero() + p_e0[val]=0 + val = (numpy.isfinite(p_e1)==False).nonzero() + p_e1[val]=0 + val = (numpy.isfinite(p_e0C)==False).nonzero() + p_e0C[val]=0 + val = (numpy.isfinite(p_e1C)==False).nonzero() + p_e1C[val]=0 + val = (numpy.isfinite(smooth_e)==False).nonzero() + smooth_e[val]=0 + val = (numpy.isfinite(smooth_eC)==False).nonzero() + smooth_eC[val]=0 + #p_e0 = (p_e0*smooth_e+p_e0C*smooth_eC)/(smooth_e+smooth_eC) + #p_e1 = (p_e1*smooth_e+p_e1C*smooth_eC)/(smooth_e+smooth_eC) + + if len(sat_fits) >0 : + p_e0C = p_e0C + sat_fits[2,:] + p_e1C = p_e1C + sat_fits[3,:] + + if tini[3] >= 6 and tini[3] < 18 : + w_etmp = numpy.where(numpy.isfinite(w_eC)==True,w_eC,w_e) + w_e_errtmp = numpy.where(numpy.isfinite(w_eC)==True,numpy.nan,w_e_err) + else: + w_etmp = numpy.where(numpy.isfinite(w_eC)==True,w_eC,w_e) + w_etmp = numpy.where(range1 > 200,w_e,w_etmp) + w_e_errtmp = 
numpy.where(numpy.isfinite(w_eC)==True,numpy.nan,w_e_err) + w_e_errtmp = numpy.where(range1 > 200,w_e_err,w_e_errtmp) + w_e = w_etmp + w_e_err = w_e_errtmp + + w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp)) + u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp)) w_err = numpy.sqrt((w_w_err*numpy.sin(bet))**2.+(w_e_err*numpy.sin(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp)) u_err = numpy.sqrt((w_w_err*numpy.cos(bet))**2.+(w_e_err*numpy.cos(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp)) - - winds = numpy.vstack((w,u)) + winds = numpy.vstack((w,u)) dataOut.heightList = heiRang1 - snr1 = 10*numpy.log10(SNR1[0]) + #snr1 = 10*numpy.log10(SNR1[0]) dataOut.data_output = winds - #snr1 = 10*numpy.log10(SNR1[0])# estaba comentado - dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0])) - #print("data_snr1",dataOut.data_snr1) + range1 = dataOut.heightList + nhei = len(range1) + #print('alt ',range1*numpy.sin(86.1*numpy.pi/180)) + #print(numpy.min([dataOut.eldir7,dataOut.eldir8])) + galt = range1*numpy.sin(numpy.min([dataOut.elw,dataOut.ele])*numpy.pi/180.) 
+ dataOut.params = numpy.vstack((range1,galt,w,w_err,u,u_err,w_w,w_w_err,w_e,w_e_err,numpy.log10(p_w0),numpy.log10(p_w0C),numpy.log10(p_w1),numpy.log10(p_w1C),numpy.log10(p_e0),numpy.log10(p_e0C),numpy.log10(p_e1),numpy.log10(p_e1C),chisq_w,chisq_e)) + #snr1 = 10*numpy.log10(SNR1[0]) + #print(min(snr1), max(snr1)) + snr1 = numpy.vstack((p_w0,p_w1,p_e0,p_e1)) + snr1db = 10*numpy.log10(snr1[0]) + + #dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0])) + dataOut.data_snr1 = numpy.reshape(snr1db,(1,snr1db.shape[0])) dataOut.utctimeInit = dataOut.utctime dataOut.outputInterval = dataOut.timeInterval - hei_aver0 = 218 + hei_aver0 = 218 jrange = 450 #900 para HA drifts deltah = 15.0 #dataOut.spacing(0) 25 HAD h0 = 0.0 #dataOut.first_height(0) - heights = dataOut.heightList - nhei = len(heights) - + range1 = numpy.arange(nhei) * deltah + h0 jhei = (range1 >= hei_aver0).nonzero() if len(jhei[0]) > 0 : h0_index = jhei[0][0] # Initial height for getting averages 218km - + mynhei = 7 nhei_avg = int(jrange/deltah) h_avgs = int(nhei_avg/mynhei) nhei_avg = h_avgs*(mynhei-1)+mynhei - + navgs = numpy.zeros(mynhei,dtype='float') delta_h = numpy.zeros(mynhei,dtype='float') range_aver = numpy.zeros(mynhei,dtype='float') @@ -4751,11 +5960,11 @@ class EWDriftsEstimation(Operation): range_aver[ih] = numpy.sum(range1[h0_index+h_avgs*ih:h0_index+h_avgs*(ih+1)-0])/h_avgs navgs[ih] = h_avgs delta_h[ih] = deltah*h_avgs - + range_aver[mynhei-1] = numpy.sum(range1[h0_index:h0_index+6*h_avgs-0])/(6*h_avgs) navgs[mynhei-1] = 6*h_avgs delta_h[mynhei-1] = deltah*6*h_avgs - + wA = w[h0_index:h0_index+nhei_avg-0] wA_err = w_err[h0_index:h0_index+nhei_avg-0] for i in range(5) : @@ -4765,15 +5974,14 @@ class EWDriftsEstimation(Operation): sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.)) wA[6*h_avgs+i] = avg wA_err[6*h_avgs+i] = sigma - - + vals = wA[0:6*h_avgs-0] errs=wA_err[0:6*h_avgs-0] avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2) sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.)) 
wA[nhei_avg-1] = avg wA_err[nhei_avg-1] = sigma - + wA = wA[6*h_avgs:nhei_avg-0] wA_err=wA_err[6*h_avgs:nhei_avg-0] if my_nbeams == 2 : @@ -4796,18 +6004,81 @@ class EWDriftsEstimation(Operation): uA_err[nhei_avg-1] = sigma uA = uA[6*h_avgs:nhei_avg-0] uA_err = uA_err[6*h_avgs:nhei_avg-0] - dataOut.drifts_avg = numpy.vstack((wA,uA)) + dataOut.drifts_avg = numpy.vstack((wA,uA)) + if my_nbeams == 1: dataOut.drifts_avg = wA + #deltahavg= wA*0.0+deltah + dataOut.range = range1 + galtavg = range_aver*numpy.sin(numpy.min([dataOut.elw,dataOut.ele])*numpy.pi/180.) + dataOut.params_avg = numpy.vstack((wA,wA_err,uA,uA_err,range_aver,galtavg,delta_h)) + + #print('comparando dim de avg ',wA.shape,deltahavg.shape,range_aver.shape) tini=time.localtime(dataOut.utctime) datefile= str(tini[0]).zfill(4)+str(tini[1]).zfill(2)+str(tini[2]).zfill(2) - nfile = fileDrifts+'/jro'+datefile+'drifts.txt' + nfile = fileDrifts+'/jro'+datefile+'drifts_sch3.txt' + f1 = open(nfile,'a') datedriftavg=str(tini[0])+' '+str(tini[1])+' '+str(tini[2])+' '+str(tini[3])+' '+str(tini[4]) driftavgstr=str(dataOut.drifts_avg) numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i') - numpy.savetxt(f1,dataOut.drifts_avg,fmt='%10.2f') + numpy.savetxt(f1,numpy.reshape(range_aver,(1,len(range_aver))) ,fmt='%10.2f') + numpy.savetxt(f1,dataOut.drifts_avg[:,:],fmt='%10.2f') + f1.close() + + swfile = fileDrifts+'/jro'+datefile+'drifts_sw.txt' + f1 = open(swfile,'a') + numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i') + numpy.savetxt(f1,numpy.reshape(heiRang,(1,len(heiRang))),fmt='%10.2f') + numpy.savetxt(f1,dataOut.data_param[:,0,:],fmt='%10.2f') f1.close() + dataOut.heightListtmp = dataOut.heightList + ''' + #Envio data de drifts a mysql + fechad = str(tini[0]).zfill(4)+'-'+str(tini[1]).zfill(2)+'-'+str(tini[2]).zfill(2)+' '+str(tini[3]).zfill(2)+':'+str(tini[4]).zfill(2)+':'+str(0).zfill(2) + mydb = mysql.connector.connect( + 
host="10.10.110.213", + user="user_clima", + password="5D.bh(B2)Y_wRNz9", + database="clima_espacial" + ) + + mycursor = mydb.cursor() + #mycursor.execute("CREATE TABLE drifts_vertical (id INT AUTO_INCREMENT PRIMARY KEY, fecha DATETIME(6), Vertical FLOAT(10,2))") + + sql = "INSERT INTO drifts_vertical (datetime, value) VALUES (%s, %s)" + if numpy.isfinite(dataOut.drifts_avg[0,6]): vdql = dataOut.drifts_avg[0,6] + else : vdql = 999 + val = (fechad, vdql) + mycursor.execute(sql, val) + mydb.commit() + sql = "INSERT INTO drifts_zonal (datetime, value) VALUES (%s, %s)" + if numpy.isfinite(dataOut.drifts_avg[1,6]): zdql = dataOut.drifts_avg[1,6] + else : zdql = 999 + val = (fechad, zdql) + mycursor.execute(sql, val) + mydb.commit() + + print(mycursor.rowcount, "record inserted.") + ''' + return dataOut +class setHeightDrifts(Operation): + + def __init__(self): + Operation.__init__(self) + def run(self, dataOut): + #print('h inicial ',dataOut.heightList,dataOut.heightListtmp) + dataOut.heightList = dataOut.heightListtmp + #print('regresa H ',dataOut.heightList) + return dataOut +class setHeightDriftsavg(Operation): + + def __init__(self): + Operation.__init__(self) + def run(self, dataOut): + #print('h inicial ',dataOut.heightList) + dataOut.heightList = dataOut.params_avg[4] + #print('cambia H ',dataOut.params_avg[4],dataOut.heightList) return dataOut #--------------- Non Specular Meteor ---------------- @@ -5918,6 +7189,7 @@ class SMOperations(): ph0_aux = ph0 + ph1 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux)) + #First Estimation cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1)) @@ -6052,9 +7324,12 @@ class SMOperations(): pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)] - return pairslist, distances + return pairslist, distances class IGRFModel(Operation): + ''' + Written by R. Flores + ''' """Operation to calculate Geomagnetic parameters. 
Parameters: @@ -6077,7 +7352,7 @@ class IGRFModel(Operation): def run(self,dataOut): try: - from schainpy.model.proc import mkfact_short_2020 + from schainpy.model.proc import mkfact_short_2020_2 except: log.warning('You should install "mkfact_short_2020" module to process IGRF Model') @@ -6091,8 +7366,9 @@ class IGRFModel(Operation): dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0 self.aux=0 - - dataOut.h=numpy.arange(0.0,15.0*dataOut.MAXNRANGENDT,15.0,dtype='float32') + dh = dataOut.heightList[1]-dataOut.heightList[0] + #dataOut.h=numpy.arange(0.0,15.0*dataOut.MAXNRANGENDT,15.0,dtype='float32') + dataOut.h=numpy.arange(0.0,dh*dataOut.MAXNRANGENDT,dh,dtype='float32') dataOut.bfm=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32') dataOut.bfm=numpy.array(dataOut.bfm,order='F') dataOut.thb=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32') @@ -6100,7 +7376,7 @@ class IGRFModel(Operation): dataOut.bki=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32') dataOut.bki=numpy.array(dataOut.bki,order='F') - mkfact_short_2020.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT) + mkfact_short_2020_2.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT) return dataOut @@ -6113,9 +7389,7 @@ class MergeProc(ProcessingUnit): self.dataOut = getattr(self, self.inputs[0]) data_inputs = [getattr(self, attr) for attr in self.inputs] - #print(self.inputs) - #print(numpy.shape([getattr(data, attr_data) for data in data_inputs][1])) - #exit(1) + if mode==0: data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs]) setattr(self.dataOut, attr_data, data) @@ -6181,39 +7455,6 @@ class MergeProc(ProcessingUnit): #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP #print("Merge data_acf: ",self.dataOut.data_acf.shape) - #exit(1) - 
#print(self.dataOut.data_spc_LP.shape) - #print("Exit") - #exit(1) - #setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0]) - #setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1]) - #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0]) - #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1]) - ''' - print(self.dataOut.dataLag_spc_LP.shape) - print(self.dataOut.dataLag_cspc_LP.shape) - exit(1) - ''' - ''' - print(self.dataOut.dataLag_spc_LP[0,:,100]) - print(self.dataOut.dataLag_spc_LP[1,:,100]) - exit(1) - ''' - #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1)) - #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0)) - ''' - print("Merge") - print(numpy.shape(self.dataOut.dataLag_spc)) - print(numpy.shape(self.dataOut.dataLag_spc_LP)) - print(numpy.shape(self.dataOut.dataLag_cspc)) - print(numpy.shape(self.dataOut.dataLag_cspc_LP)) - exit(1) - ''' - #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128) - #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128) - #exit(1) - #print(self.dataOut.NDP) - #print(self.dataOut.nNoiseProfiles) #self.dataOut.nIncohInt_LP = 128 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP @@ -6224,6 +7465,8 @@ class MergeProc(ProcessingUnit): #print("sahpi",self.dataOut.nIncohInt_LP) #exit(1) self.dataOut.NLAG = 16 + self.dataOut.NLAG = self.dataOut.data_acf.shape[1] + self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1] #print(numpy.shape(self.dataOut.data_spc)) @@ -6234,6 +7477,97 @@ class MergeProc(ProcessingUnit): setattr(self.dataOut, attr_data, data) data = numpy.concatenate([getattr(data, attr_data_2) for data in data_inputs]) setattr(self.dataOut, attr_data_2, data) - #data = numpy.concatenate([getattr(data, attr_data_3) for data in data_inputs]) - #setattr(self.dataOut, 
attr_data_3, data) - #print(self.dataOut.moments.shape,self.dataOut.data_snr.shape,self.dataOut.heightList.shape) \ No newline at end of file + + if mode==6: #Hybrid Spectra-Voltage + #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1) + #setattr(self.dataOut, attr_data, data) + setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[1], attr_data)) #DP + setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[1], attr_data_2)) #DP + setattr(self.dataOut, 'output_LP_integrated', getattr(data_inputs[0], attr_data_3)) #LP + #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP + #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP + #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP + #print("Merge data_acf: ",self.dataOut.data_acf.shape) + #print(self.dataOut.NSCAN) + self.dataOut.nIncohInt = int(self.dataOut.NAVG * self.dataOut.nint) + #print(self.dataOut.dataLag_spc.shape) + self.dataOut.nProfiles = self.dataOut.nProfiles_DP = self.dataOut.dataLag_spc.shape[1] + ''' + #self.dataOut.nIncohInt_LP = 128 + #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP + self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP + self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP + self.dataOut.NSCAN = 128 + self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN + #print("sahpi",self.dataOut.nIncohInt_LP) + #exit(1) + self.dataOut.NLAG = 16 + self.dataOut.NLAG = self.dataOut.data_acf.shape[1] + self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1] + ''' + #print(numpy.shape(self.dataOut.data_spc)) + #print("*************************GOOD*************************") + #exit(1) + + if mode==11: #MST ISR + #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1) + #setattr(self.dataOut, attr_data, data) + #setattr(self.dataOut, 'ph2', [getattr(data, attr_data) for data in 
data_inputs][1]) + #setattr(self.dataOut, 'dphi', [getattr(data, attr_data_2) for data in data_inputs][1]) + #setattr(self.dataOut, 'sdp2', [getattr(data, attr_data_3) for data in data_inputs][1]) + + setattr(self.dataOut, 'ph2', getattr(data_inputs[1], attr_data)) #DP + setattr(self.dataOut, 'dphi', getattr(data_inputs[1], attr_data_2)) #DP + setattr(self.dataOut, 'sdp2', getattr(data_inputs[1], attr_data_3)) #DP + + print("MST Density", numpy.shape(self.dataOut.ph2)) + print("cf MST: ", self.dataOut.cf) + #exit(1) + #print("MST Density", self.dataOut.ph2[116:283]) + print("MST Density", self.dataOut.ph2[80:120]) + print("MST dPhi", self.dataOut.dphi[80:120]) + self.dataOut.ph2 *= self.dataOut.cf#0.0008136899 + #print("MST Density", self.dataOut.ph2[116:283]) + self.dataOut.sdp2 *= 0#self.dataOut.cf#0.0008136899 + #print("MST Density", self.dataOut.ph2[116:283]) + print("MST Density", self.dataOut.ph2[80:120]) + self.dataOut.NSHTS = int(numpy.shape(self.dataOut.ph2)[0]) + dH = self.dataOut.heightList[1]-self.dataOut.heightList[0] + dH /= self.dataOut.windowOfFilter + self.dataOut.heightList = numpy.arange(0,self.dataOut.NSHTS)*dH + dH + #print("heightList: ", self.dataOut.heightList) + self.dataOut.NDP = self.dataOut.NSHTS + #exit(1) + #print(self.dataOut.heightList) + +class MST_Den_Conv(Operation): + ''' + Written by R. Flores + ''' + """Operation to calculate Geomagnetic parameters. 
+ + Parameters: + ----------- + None + + Example + -------- + + op = proc_unit.addOperation(name='MST_Den_Conv', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + def run(self,dataOut): + + dataOut.PowDen = numpy.zeros((1,dataOut.NDP)) + dataOut.PowDen[0] = numpy.copy(dataOut.ph2[:dataOut.NDP]) + + dataOut.FarDen = numpy.zeros((1,dataOut.NDP)) + dataOut.FarDen[0] = numpy.copy(dataOut.dphi[:dataOut.NDP]) + print("pow den shape", numpy.shape(dataOut.PowDen)) + print("far den shape", numpy.shape(dataOut.FarDen)) + return dataOut diff --git a/schainpy/scripts/EWDriftsMP_01feb2023.py b/schainpy/scripts/EWDriftsMP_01feb2023.py new file mode 100644 index 0000000..e0bf94d --- /dev/null +++ b/schainpy/scripts/EWDriftsMP_01feb2023.py @@ -0,0 +1,262 @@ + +import os, sys +import json + +#from controller import * +from schainpy.controller import Project + +desc = "EW DRIFTS MP Experiment" +filename = "EWDrifts.xml" + +controllerObj = Project() + +controllerObj.setup(id = '191', name='test01', description=desc) + +#Experimentos + +#path = "/data/dia" +#path = '/home/pcondor/data' +#path = '/media/pcondor/DATA1/Database/ewdriftsschain2023prue/data' +#path = '/data/2024_01/MP_ISR/main_radar/rawdata/d2024023' +path = '/data/ISR_JULIA/d2024092' +#pathFigure = '/media/pcondor/DATA1/Database/ewdriftsschain2023wh5' +pathFile = '/media/pcondor/DATA1/Database/ewdriftsabr2024sch/EW_Drifts_01abr' +pathFigure = pathFile +pathFileavg = pathFile+'/avg' +pathFiledata = pathFile+'/Drifts-data' +#pathFileavg = '/media/pcondor/DATA1/Database/ewdriftsschain2023wh5/avg' +#pathFiledata = '/media/pcondor/DATA1/Database/ewdriftsschain2023wh5/Drifts-data' + +xmin = 0 +xmax = 24 +#------------------------------------------------------------------------------------------------ +readUnitConfObj = controllerObj.addReadUnit(datatype='VoltageReader', + path=path, + startDate='2024/04/01', + endDate='2024/04/01', + startTime='00:00:00', + endTime='23:59:59', 
+ online=0, + getByBlock=1, + walk=0) + +#-------------------------------------------------------------------------------------------------- + +procUnitConfObj0 = controllerObj.addProcUnit(datatype='VoltageProc', inputId=readUnitConfObj.getId()) + +#opObj11 = procUnitConfObj0.addOperation(name='selectHeights') +# # opObj11.addParameter(name='minHei', value='320.0', format='float') +# # opObj11.addParameter(name='maxHei', value='350.0', format='float') +#opObj11.addParameter(name='minHei', value='0.01', format='float') +#opObj11.addParameter(name='maxHei', value='960.0', format='float') + +opObj11 = procUnitConfObj0.addOperation(name='selectChannels') +opObj11.addParameter(name='channelList', value='0,0,1,1', format='intlist') + +#opObj11 = procUnitConfObj0.addOperation(name='Reshaper') +#opObj11.addParameter(name='shape', value='(500,980)', format='intlist') + +opObj11 = procUnitConfObj0.addOperation(name='ProfileSelector', optype='other') +opObj11.addParameter(name='profileRangeList', value='0,127', format='intlist') + +opObj11 = procUnitConfObj0.addOperation(name='filterByHeights') +opObj11.addParameter(name='window', value='10', format='int') + +code=[[-1,-1,1],[1,1,-1]] +#code = 
[[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[-1,-1,1],[-1,-1,1],[1,1,-1],[1,1,-1]] +opObj11 = procUnitConfObj0.addOperation(name='Decoder', optype='other') 
+opObj11.addParameter(name='code', value=code, format='floatlist') +opObj11.addParameter(name='nCode', value='2', format='int') +opObj11.addParameter(name='nBaud', value='3', format='int') + +opObj11 = procUnitConfObj0.addOperation(name='selectHeights') +opObj11.addParameter(name='minHei', value='0.0', format='float') +opObj11.addParameter(name='maxHei', value='960', format='float') + +procUnitConfObj1 = controllerObj.addProcUnit(datatype='SpectraProc', inputId=procUnitConfObj0.getId()) +procUnitConfObj1.addParameter(name='nFFTPoints', value='128', format='int') +procUnitConfObj1.addParameter(name='nProfiles', value='128', format='int') +#procUnitConfObj1.addParameter(name='pairsList', value='(2,3),(4,5)', format='pairsList')#,(2,3) +procUnitConfObj1.addParameter(name='pairsList', value='(0,1),(2,3)', format='pairsList') + +#opObj11 = procUnitConfObj1.addOperation(name='selectHeights') +# # opObj11.addParameter(name='minHei', value='320.0', format='float') +# # opObj11.addParameter(name='maxHei', value='350.0', format='float') +#opObj11.addParameter(name='minHei', value='0.0', format='float') +#opObj11.addParameter(name='maxHei', value='960.0', format='float') + +#opObj11 = procUnitConfObj1.addOperation(name='selectChannels') +#opObj11.addParameter(name='channelList', value='2,3,4,5', format='intlist') + +opObj11 = procUnitConfObj1.addOperation(name='IncohInt', optype='other') +opObj11.addParameter(name='n', value='1', format='float') +#opObj11.addParameter(name='timeInterval', value='300.0', format='float') + +#opObj13 = procUnitConfObj1.addOperation(name='removeDC') + +#opObj14 = procUnitConfObj1.addOperation(name='SpectraPlot', optype='other') +#opObj14.addParameter(name='id', value='65', format='int') +## # opObj14.addParameter(name='wintitle', value='Con interf', format='str') +#opObj14.addParameter(name='save', value=pathFigure, format='str') +##opObj14.addParameter(name='save_period', value=1, format='int') +#opObj14.addParameter(name='zmin', value='10', 
format='int') +#opObj14.addParameter(name='zmax', value='26', format='int') +# + +#opObj12 = procUnitConfObj1.addOperation(name='RTIPlot', optype='other') +#opObj12.addParameter(name='id', value='63', format='int') +#opObj12.addParameter(name='wintitle', value='RTI Plot', format='str') +#opObj12.addParameter(name='save', value=pathFigure, format='str') +#opObj12.addParameter(name='save_period', value=10, format='int') +##opObj12.addParameter(name='figpath', value = pathFigure, format='str') +#opObj12.addParameter(name='xmin', value=xmin, format='float') +#opObj12.addParameter(name='xmax', value=xmax, format='float') +#opObj12.addParameter(name='zmin', value='20', format='int') +#opObj12.addParameter(name='zmax', value='36', format='int') + +#-------------------------------------------------------------------------------------------------- + +procUnitConfObj2 = controllerObj.addProcUnit(datatype='ParametersProc', inputId=procUnitConfObj1.getId()) +opObj20 = procUnitConfObj2.addOperation(name='SpectralFitting', optype='other') +opObj20.addParameter(name='path', value='/home/pcondor/DIR_MADRIGAL/schain/schainpy/model/proc', format='str') +opObj20.addParameter(name='file', value='modelSpectralFitting', format='str') +opObj20.addParameter(name='groupList', value='(0,1),(2,3)',format='multiList') +opObj20.addParameter(name='taver', value='5') +opObj20.addParameter(name='coh_th', value='[1]',format='multiList') +opObj20.addParameter(name='hei_th', value='[2000]',format='multiList') +#opObj20.addParameter(name='filec', value='weightfit', format='str') + +opObj22 = procUnitConfObj2.addOperation(name='HDFWriter', optype='other') +opObj22.addParameter(name='path', value=pathFiledata) +opObj22.addParameter(name='blocksPerFile', value='1') +opObj22.addParameter(name='metadataList',value='heightList,timeZone') 
+opObj22.addParameter(name='dataList',value='tmp_spectra_i,tmp_cspectra_i,tmp_spectra_c,tmp_cspectra_c,clean_num_aver,coh_num_aver,sat_spectra,sat_cspectra,index,utctime,nIncohInt,nCohInt,nProfiles,nFFTPoints,ippFactor,ippSeconds,paramInterval') +##opObj22.addParameter(name='dataList',value='tmp_spectra_i,tmp_cspectra_i,tmp_spectra_c,tmp_cspectra_c,clean_num_aver,coh_num_aver,index,utctime,nIncohInt,nCohInt,nProfiles,nFFTPoints,normFactor,channelList,ippFactor,ippSeconds') + +#angles :-2.41116 3.01082 +opObj21 = procUnitConfObj2.addOperation(name='EWDriftsEstimation', optype='other') +opObj21.addParameter(name='zenith', value='-2.41116, 3.01082', format='floatlist') +opObj21.addParameter(name='zenithCorrection', value='0.0', format='float') +opObj21.addParameter(name='fileDrifts', value=pathFile) + +# Drifts en h5 +one = {'gdlatr': 'lat', 'gdlonr': 'lon', 'spcst':'spcst','pl':'pl','cbadn':'cbadn','inttms': 'inttms','azdir7':'azw','eldir7':'elw','azdir8':'aze','eldir8':'ele','jro14':'jro14','jro15':'jro15','jro16':'jro16','nwlos':'nwlos'} +two = { + 'range': ('params', 0), + 'gdalt': ('params', 1), + 'VIPN': ('params', 2), + 'dvipn': ('params', 3), + 'vipe': ('params', 4), + 'dvipe': ('params', 5), + 'vi7': ('params', 6), + 'dvi7': ('params', 7), + 'vi8': ('params', 8), + 'dvi8': ('params', 9), + 'PAIWL': ('params', 10), + 'pacwl': ('params', 11), + 'pbiwl': ('params', 12), + 'pbcwl': ('params', 13), + 'pciel': ('params', 14), + 'pccel': ('params', 15), + 'pdiel': ('params', 16), + 'pdcel': ('params', 17), + 'jro10': ('params', 18), + 'jro11': ('params', 19) + } #writer +ind = ['gdalt'] + +meta = { + 'kinst': 10, #instrument code + 'kindat': 1910, #type of data + 'catalog': { + 'principleInvestigator': 'Danny Scipión', + 'expPurpose': 'Drifts'#, + #'sciRemarks': file_contents + }, + 'header': { + 'analyst': 'Danny Scipión' + } +} + +op_writer = procUnitConfObj2.addOperation(name='MADWriter') +op_writer.addParameter(name='path', value=pathFile) 
+op_writer.addParameter(name='format', value='hdf5') +op_writer.addParameter(name='oneDDict', value=json.dumps(one)) +op_writer.addParameter(name='twoDDict', value=json.dumps(two)) +op_writer.addParameter(name='ind2DList', value=json.dumps(ind)) +op_writer.addParameter(name='metadata', value=json.dumps(meta)) + +op_writer = procUnitConfObj2.addOperation(name='setHeightDriftsavg') + +# Avg Drifts +one_avg = {'gdlatr': 'lat', 'gdlonr': 'lon', 'spcst':'spcst','pl':'pl','cbadn':'cbadn','inttms': 'inttms'} +two_avg = { + 'range': ('params_avg', 4), + 'gdalt': ('params_avg', 5), + 'altav': ('params_avg', 6), + 'VIPN': ('params_avg', 0), + 'dvipn': ('params_avg', 1), + 'vipe': ('params_avg', 2), + 'dvipe': ('params_avg', 3) + } +ind_avg = ['gdalt'] +meta = { + 'kinst': 10, #instrument code + 'kindat': 1911, #type of data + 'catalog': { + 'principleInvestigator': 'Danny Scipión', + 'expPurpose': 'Drifts'#, + #'sciRemarks': file_contents + }, + 'header': { + 'analyst': 'Danny Scipión' + } +} +#dataOut.heightList = dataOut.params_avg[4] +op_writer = procUnitConfObj2.addOperation(name='MADWriter') +op_writer.addParameter(name='path', value=pathFileavg) +op_writer.addParameter(name='format', value='hdf5') +op_writer.addParameter(name='oneDDict', value=json.dumps(one_avg)) +op_writer.addParameter(name='twoDDict', value=json.dumps(two_avg)) +op_writer.addParameter(name='ind2DList', value=json.dumps(ind_avg)) +op_writer.addParameter(name='metadata', value=json.dumps(meta)) + +op_writer = procUnitConfObj2.addOperation(name='setHeightDrifts') + +opObj24 = procUnitConfObj2.addOperation(name='SpectralMomentsPlot', optype='other') +opObj24.addParameter(name='id', value='1', format='int') +### # opObj14.addParameter(name='wintitle', value='Spectral Averaged', format='str') +opObj24.addParameter(name='save', value=pathFigure, format='str') +###opObj24.addParameter(name='save_period', value=1, format='int') +opObj24.addParameter(name='zmin', value='-8', format='int') 
+opObj24.addParameter(name='zmax', value='16', format='int') +opObj24.addParameter(name='xaxis', value='Velocity', format='str') + +# +titles=('SNR,Vertical Drifts,Zonal Drifts') +#titles=('Zonal Drifts,Vertical Drifts') +opObj23 = procUnitConfObj2.addOperation(name='GenericRTIPlot') +opObj23.addParameter(name='colormaps', value='jet,RdBu_r,RdBu_r') +opObj23.addParameter(name='attr_data', value='data_snr1,data_output') +#opObj23.addParameter(name='colormaps', value='RdBu,RdBu') +#opObj23.addParameter(name='attr_data', value='data_output') +opObj23.addParameter(name='wintitle', value='EW Drifts') +opObj23.addParameter(name='save', value=pathFigure) +opObj23.addParameter(name='titles', value=titles) +opObj23.addParameter(name='zfactors', value='1,1,1') +opObj23.addParameter(name='zlimits', value='(-5,20),(-50,50),(-150,150)') +opObj23.addParameter(name='cb_labels', value='dB,m/s,m/s') +#opObj23.addParameter(name='titles', value=titles) +#opObj23.addParameter(name='zfactors', value='1,1') +#opObj23.addParameter(name='zlimits', value='(-150,150),(-40,40)') +#opObj23.addParameter(name='cb_labels', value='m/s,m/s') +opObj23.addParameter(name='throttle', value='1') +opObj23.addParameter(name='xmin', value=xmin) +opObj23.addParameter(name='xmax', value=xmax) +#opObj23.addParameter(name='exp_code', value='110', format='int') +#opObj23.addParameter(name='server', value='10.10.110.243:4444', format='int') +#opObj23.addParameter(name='tag', value= 'jicamarca', format='str') + +#-------------------------------------------------------------------------------------------------- + +controllerObj.start() diff --git a/schainpy/scripts/EWDriftsMP_01feb2023proc.py b/schainpy/scripts/EWDriftsMP_01feb2023proc.py new file mode 100644 index 0000000..793bac2 --- /dev/null +++ b/schainpy/scripts/EWDriftsMP_01feb2023proc.py @@ -0,0 +1,186 @@ + +import os, sys +import json +#from controller import * +from schainpy.controller import Project + +desc = "EW DRIFTS MP Experiment" +filename = 
"EWDrifts.xml" + +controllerObj = Project() + +controllerObj.setup(id = '191', name='test01', description=desc) + +#Experimentos + +#path = '/media/pcondor/DATA1/Database/ewdriftsene2024sch/EW_Drifts_01ene/Drifts-data' +path = '/media/soporte/DATA/PERCY_SCHAIN_UPDATE/driftsschain' +#pathFigure = '/media/pcondor/DATA1/Database/ewdriftsschain2023proc' +pathFile ='/media/soporte/DATA/PERCY_SCHAIN_UPDATE/driftsschain/tmp' +#pathFile = '/media/pcondor/DATA1/Database/ewdriftsene2024sch/EW_Drifts_01enetmp' +pathFigure = pathFile +pathFileavg = pathFile+'/avg' +pathFiledata = pathFile+'/Drifts-data' + +xmin = 0 +xmax = 24 +#------------------------------------------------------------------------------------------------ +readUnitConfObj = controllerObj.addReadUnit(datatype='HDFReader', + path=path, + startDate='2024/01/23', + endDate='2024/01/23', + startTime='00:00:00', + endTime='23:59:59', + #online=0, + #getByBlock=1, + walk=1, + utcoffset='-18000') + +#-------------------------------------------------------------------------------------------------- + +#-------------------------------------------------------------------------------------------------- + +procUnitConfObj2 = controllerObj.addProcUnit(datatype='ParametersProc', inputId=readUnitConfObj.getId()) + +opObj20 = procUnitConfObj2.addOperation(name='SpectralFitting', optype='other') +opObj20.addParameter(name='path', value='/home/pcondor/DIR_MADRIGAL/schain/schainpy/model/proc', format='str') +opObj20.addParameter(name='file', value='modelSpectralFitting', format='str') +opObj20.addParameter(name='groupList', value='(0,1),(2,3)',format='multiList') +opObj20.addParameter(name='taver', value='5') +opObj20.addParameter(name='coh_th', value='[1]',format='multiList') +opObj20.addParameter(name='hei_th', value='[2000]',format='multiList') +opObj20.addParameter(name='proc', value='1') +opObj20.addParameter(name='channelList', value='0,0,1,1') +opObj20.addParameter(name='filec', value='weightfit', format='str') + 
+#opObj22 = procUnitConfObj2.addOperation(name='HDFWriter', optype='other') +#opObj22.addParameter(name='path', value=pathFile) +#opObj22.addParameter(name='blocksPerFile', value='1') +#opObj22.addParameter(name='metadataList',value='heightList,timeZone') +#opObj22.addParameter(name='dataList',value='tmp_spectra_i,tmp_cspectra_i,tmp_spectra_c,tmp_cspectra_c,clean_num_aver,coh_num_aver,index,utctime') +#angles :-2.41116 3.01082 +opObj21 = procUnitConfObj2.addOperation(name='EWDriftsEstimation', optype='other') +opObj21.addParameter(name='zenith', value='-2.41116, 3.01082', format='floatlist') +opObj21.addParameter(name='zenithCorrection', value='0.0', format='float') +opObj21.addParameter(name='fileDrifts', value=pathFile) + +# Drifts en h5 +one = {'gdlatr': 'lat', 'gdlonr': 'lon', 'spcst':'spcst','pl':'pl','cbadn':'cbadn','inttms': 'inttms','azdir7':'azw','eldir7':'elw','azdir8':'aze','eldir8':'ele','jro14':'jro14','jro15':'jro15','jro16':'jro16','nwlos':'nwlos'} +two = { + 'range': ('params', 0), + 'gdalt': ('params', 1), + 'VIPN': ('params', 2), + 'dvipn': ('params', 3), + 'vipe': ('params', 4), + 'dvipe': ('params', 5), + 'vi7': ('params', 6), + 'dvi7': ('params', 7), + 'vi8': ('params', 8), + 'dvi8': ('params', 9), + 'PAIWL': ('params', 10), + 'pacwl': ('params', 11), + 'pbiwl': ('params', 12), + 'pbcwl': ('params', 13), + 'pciel': ('params', 14), + 'pccel': ('params', 15), + 'pdiel': ('params', 16), + 'pdcel': ('params', 17), + 'jro10': ('params', 18), + 'jro11': ('params', 19) + } #writer +ind = ['gdalt'] + +#f=open('/home/roberto/moder_test.txt','r') +#file_contents=f.read() + +meta = { + 'kinst': 10, #instrument code + 'kindat': 1910, #type of data + 'catalog': { + 'principleInvestigator': 'Danny Scipión', + 'expPurpose': 'Drifts'#, + #'sciRemarks': file_contents + }, + 'header': { + 'analyst': 'Danny Scipión' + } +} +#f.close() + +op_writer = procUnitConfObj2.addOperation(name='MADWriter') +op_writer.addParameter(name='path', value=pathFile) 
+op_writer.addParameter(name='format', value='hdf5') +op_writer.addParameter(name='oneDDict', value=json.dumps(one)) +op_writer.addParameter(name='twoDDict', value=json.dumps(two)) +op_writer.addParameter(name='ind2DList', value=json.dumps(ind)) +op_writer.addParameter(name='metadata', value=json.dumps(meta)) + +op_writer = procUnitConfObj2.addOperation(name='setHeightDriftsavg') + +# Avg Drifts +one_avg = {'gdlatr': 'lat', 'gdlonr': 'lon', 'spcst':'spcst','pl':'pl','cbadn':'cbadn','inttms': 'inttms'} +two_avg = { + 'range': ('params_avg', 4), + 'gdalt': ('params_avg', 5), + 'altav': ('params_avg', 6), + 'VIPN': ('params_avg', 0), + 'dvipn': ('params_avg', 1), + 'vipe': ('params_avg', 2), + 'dvipe': ('params_avg', 3) + } +ind_avg = ['gdalt'] +meta = { + 'kinst': 10, #instrument code + 'kindat': 1911, #type of data + 'catalog': { + 'principleInvestigator': 'Danny Scipión', + 'expPurpose': 'Drifts'#, + #'sciRemarks': file_contents + }, + 'header': { + 'analyst': 'Danny Scipión' + } +} + +op_writer = procUnitConfObj2.addOperation(name='MADWriter') +op_writer.addParameter(name='path', value=pathFileavg) +op_writer.addParameter(name='format', value='hdf5') +op_writer.addParameter(name='oneDDict', value=json.dumps(one_avg)) +op_writer.addParameter(name='twoDDict', value=json.dumps(two_avg)) +op_writer.addParameter(name='ind2DList', value=json.dumps(ind_avg)) +op_writer.addParameter(name='metadata', value=json.dumps(meta)) + +op_writer = procUnitConfObj2.addOperation(name='setHeightDrifts') + +opObj24 = procUnitConfObj2.addOperation(name='SpectralMomentsPlot', optype='other') +opObj24.addParameter(name='id', value='1', format='int') +### # opObj14.addParameter(name='wintitle', value='Spectral Averaged', format='str') +opObj24.addParameter(name='save', value=pathFigure, format='str') +###opObj24.addParameter(name='save_period', value=1, format='int') +opObj24.addParameter(name='zmin', value='-8', format='int') +opObj24.addParameter(name='zmax', value='16', format='int') 
+opObj24.addParameter(name='xaxis', value='Velocity', format='str') + +# +titles=('SNR,Vertical Drifts,Zonal Drifts') +opObj23 = procUnitConfObj2.addOperation(name='GenericRTIPlot') +#opObj23.addParameter(name='colormaps', value='jet,RdBu_r,RdBu_r') +opObj23.addParameter(name='colormaps', value='jro,seismic,seismic') +#opObj23.addParameter(name='colormaps', value='jro,bwr,bwr') +opObj23.addParameter(name='attr_data', value='data_snr1,data_output') +opObj23.addParameter(name='wintitle', value='EW Drifts') +opObj23.addParameter(name='save', value=pathFigure) +opObj23.addParameter(name='titles', value=titles) +opObj23.addParameter(name='zfactors', value='1,1,1') +opObj23.addParameter(name='zlimits', value='(0,13),(-50,50),(-150,150)') +opObj23.addParameter(name='cb_labels', value='dB,m/s,m/s') +opObj23.addParameter(name='throttle', value='1') +opObj23.addParameter(name='xmin', value=xmin) +opObj23.addParameter(name='xmax', value=xmax) +#opObj23.addParameter(name='exp_code', value='110', format='int') +#opObj23.addParameter(name='server', value='10.10.110.243:4444', format='int') +#opObj23.addParameter(name='tag', value= 'jicamarca', format='str') + +#-------------------------------------------------------------------------------------------------- + +controllerObj.start()