diff --git a/schainpy/controller.py b/schainpy/controller.py index c53269c..e02e4d1 100644 --- a/schainpy/controller.py +++ b/schainpy/controller.py @@ -653,6 +653,7 @@ class Project(Process): elif not ok: break #print("****************************************************end") + #exit(1) if n == 0: err = True diff --git a/schainpy/model/graphics/jroplot_spectra.py b/schainpy/model/graphics/jroplot_spectra.py index f92ac8b..5d53bc1 100644 --- a/schainpy/model/graphics/jroplot_spectra.py +++ b/schainpy/model/graphics/jroplot_spectra.py @@ -42,6 +42,7 @@ class SpectraPlot(Plot): spc = 10*numpy.log10(dataOut.data_spc/dataOut.normFactor) data['spc'] = spc data['rti'] = dataOut.getPower() + #print("NormFactor: ",dataOut.normFactor) #data['noise'] = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor) if hasattr(dataOut, 'LagPlot'): #Double Pulse max_hei_id = dataOut.nHeights - 2*dataOut.LagPlot @@ -101,10 +102,11 @@ class SpectraPlot(Plot): self.xmin = self.xmin if self.xmin else numpy.nanmin(x)#-self.xmax self.zmin = self.zmin if self.zmin else numpy.nanmin(z) self.zmax = self.zmax if self.zmax else numpy.nanmax(z) + ax.plt = ax.pcolormesh(x, y, z[n].T, vmin=self.zmin, vmax=self.zmax, - cmap=plt.get_cmap(self.colormap) + cmap=plt.get_cmap(self.colormap), ) if self.showprofile: diff --git a/schainpy/model/graphics/jroplot_voltage_lags.py b/schainpy/model/graphics/jroplot_voltage_lags.py index ed4dd2d..7be4846 100644 --- a/schainpy/model/graphics/jroplot_voltage_lags.py +++ b/schainpy/model/graphics/jroplot_voltage_lags.py @@ -263,8 +263,6 @@ class DenRTIPlot(RTIPlot): ) - - class ETempRTIPlot(RTIPlot): ''' @@ -351,7 +349,6 @@ class ETempRTIPlot(RTIPlot): ) - class ITempRTIPlot(ETempRTIPlot): ''' @@ -372,7 +369,6 @@ class ITempRTIPlot(ETempRTIPlot): return data, meta - class HFracRTIPlot(ETempRTIPlot): ''' @@ -545,6 +541,8 @@ class TempsHPPlot(Plot): ax.errorbar(Ti, self.y, fmt='k^', xerr=errTi,elinewidth=1.0,color='b',linewidth=2.0, label='Ti') plt.legend(loc='lower right') ax.yaxis.set_minor_locator(MultipleLocator(15)) + ax.grid(which='minor') + class FracsHPPlot(Plot): ''' diff --git a/schainpy/model/proc/jroproc_base.py b/schainpy/model/proc/jroproc_base.py index 4b46c1a..33df194 100644 --- a/schainpy/model/proc/jroproc_base.py +++ b/schainpy/model/proc/jroproc_base.py @@ -70,14 +70,16 @@ class ProcessingUnit(object): def call(self, **kwargs): ''' ''' - + #print("call") try: - #print("try") if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error: - #print("Try") - #print(self.dataIn) - #print(self.dataIn.flagNoData) - return self.dataIn.isReady() + #if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error and not self.dataIn.runNextUnit: + if self.dataIn.runNextUnit: + #print("SUCCESSSSSSS") + #exit(1) + return not self.dataIn.isReady() + else: + return self.dataIn.isReady() elif self.dataIn is None or not self.dataIn.error: #print([getattr(self, at) for at in self.inputs]) #print("Elif 1") @@ -112,6 +114,7 @@ class ProcessingUnit(object): #elif optype == 'external' and self.dataOut.isReady(): #op.queue.put(copy.deepcopy(self.dataOut)) #print(not self.dataOut.isReady()) + try: if self.dataOut.runNextUnit: runNextUnit = self.dataOut.runNextUnit @@ -125,6 +128,7 @@ class ProcessingUnit(object): #if not self.dataOut.isReady(): #return 'Error' if self.dataOut.error else input() #print("NexT",runNextUnit) + #print("error: ",self.dataOut.error) return 'Error' if self.dataOut.error else runNextUnit# self.dataOut.isReady() def setup(self): diff --git 
a/schainpy/model/proc/jroproc_parameters.py b/schainpy/model/proc/jroproc_parameters.py index 149dff7..1a0a9cb 100644 --- a/schainpy/model/proc/jroproc_parameters.py +++ b/schainpy/model/proc/jroproc_parameters.py @@ -1164,18 +1164,34 @@ class Oblique_Gauss_Fit(Operation): #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1) popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0) # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1) + #print(popt) + + J = popt.jac + + try: + cov = numpy.linalg.inv(J.T.dot(J)) + error = numpy.sqrt(numpy.diagonal(cov)) + except: + error = numpy.ones((9))*numpy.NAN + #print("error_inside",error) + #exit(1) A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3] A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7] Df = popt.x[8] - + ''' + A1f_err = error.x[0]; B1f_err= error.x[1]; C1f_err = error.x[2]; K1f_err = error.x[3] + A2f_err = error.x[4]; B2f_err = error.x[5]; C2f_err = error.x[6]; K2f_err = error.x[7] + Df_err = error.x[8] + ''' aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df) doppler1 = freq[numpy.argmax(aux1)] aux2 = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df) doppler2 = freq[numpy.argmax(aux2)] - - return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler1, doppler2 + #print("error",error) + #exit(1) + return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler1, doppler2, error def Double_Gauss_Double_Skew_fit_weight_bound_with_inputs(self, spc, freq, a1, b1, c1, a2, b2, c2, k2, d): @@ -1276,6 +1292,7 @@ class Oblique_Gauss_Fit(Operation): dataOut.Oblique_params = numpy.ones((1,10,dataOut.nHeights))*numpy.NAN elif mode == 9: dataOut.Oblique_params = numpy.ones((1,11,dataOut.nHeights))*numpy.NAN + dataOut.Oblique_param_errors = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN dataOut.VelRange = x @@ -1303,6 +1320,7 @@ class Oblique_Gauss_Fit(Operation): else: for hei in itertools.chain(l1, l2): + #for hei in range(79,81): try: @@ -1322,11 +1340,14 @@ class Oblique_Gauss_Fit(Operation): dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,9,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei])) elif mode == 9: #Double Skewed Weighted Bounded no inputs - dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(spc,x) + dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(spc,x) dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei])) #print(hei) #print(dataOut.Oblique_params[0,10,hei]) #print(dataOut.dplr_2_u[0,0,hei]) + #print("outside",dataOut.Oblique_param_errors[0,:,hei]) + #print("SUCCESSSSSSS") + #exit(1) else: spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first') @@ -5575,7 +5596,7 
@@ class MergeProc(ProcessingUnit): def __init__(self): ProcessingUnit.__init__(self) - def run(self, attr_data, attr_data_2 = None, mode=0): + def run(self, attr_data, attr_data_2 = None, attr_data_3 = None, attr_data_4 = None, attr_data_5 = None, mode=0): self.dataOut = getattr(self, self.inputs[0]) data_inputs = [getattr(self, attr) for attr in self.inputs] @@ -5636,3 +5657,62 @@ class MergeProc(ProcessingUnit): self.dataOut.freqRange = self.dataOut.getFreqRange(1)/1000. #exit(1) + + if mode==4: #Hybrid LP-SSheightProfiles + #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1) + #setattr(self.dataOut, attr_data, data) + setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[0], attr_data)) #DP + setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[0], attr_data_2)) #DP + setattr(self.dataOut, 'dataLag_spc_LP', getattr(data_inputs[1], attr_data_3)) #LP + #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP + #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP + setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP + #print("Merge data_acf: ",self.dataOut.data_acf.shape) + #exit(1) + #print(self.dataOut.data_spc_LP.shape) + #print("Exit") + #exit(1) + #setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0]) + #setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1]) + #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0]) + #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1]) + ''' + print(self.dataOut.dataLag_spc_LP.shape) + print(self.dataOut.dataLag_cspc_LP.shape) + exit(1) + ''' + ''' + print(self.dataOut.dataLag_spc_LP[0,:,100]) + print(self.dataOut.dataLag_spc_LP[1,:,100]) + exit(1) + ''' + #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1)) + #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0)) + ''' + print("Merge") + print(numpy.shape(self.dataOut.dataLag_spc)) + print(numpy.shape(self.dataOut.dataLag_spc_LP)) + print(numpy.shape(self.dataOut.dataLag_cspc)) + print(numpy.shape(self.dataOut.dataLag_cspc_LP)) + exit(1) + ''' + #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128) + #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128) + #exit(1) + #print(self.dataOut.NDP) + #print(self.dataOut.nNoiseProfiles) + + #self.dataOut.nIncohInt_LP = 128 + #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP + self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP + self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP + self.dataOut.NSCAN = 128 + self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN + #print("sahpi",self.dataOut.nIncohInt_LP) + #exit(1) + self.dataOut.NLAG = 16 + self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1] + + #print(numpy.shape(self.dataOut.data_spc)) + + #exit(1) diff --git a/schainpy/model/proc/jroproc_spectra_acf.py b/schainpy/model/proc/jroproc_spectra_acf.py index c5bc965..047890a 100644 --- a/schainpy/model/proc/jroproc_spectra_acf.py +++ b/schainpy/model/proc/jroproc_spectra_acf.py @@ -4,7 +4,7 @@ from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation from schainpy.model.data.jrodata import Spectra from schainpy.model.data.jrodata import hildebrand_sekhon -class SpectraAFCProc(ProcessingUnit): +class 
SpectraAFCProc_V0(ProcessingUnit): def __init__(self, **kwargs): @@ -140,12 +140,16 @@ class SpectraAFCProc(ProcessingUnit): if self.dataIn.type == "Spectra": self.dataOut.copy(self.dataIn) + #print(self.dataOut.data.shape) + #exit(1) spc = self.dataOut.data_spc data = numpy.fft.fftshift( spc, axes=(1,)) data = numpy.fft.ifft(data, axis=1) - #data = numpy.fft.fftshift( data, axes=(1,)) - #acf = numpy.abs(data) # Autocorrelacion LLAMAR A ESTE VALOR ACF - acf = data + data = numpy.fft.fftshift( data, axes=(1,)) + acf = numpy.abs(data) # Autocorrelacion LLAMAR A ESTE VALOR ACF + acf = data #Comentarlo? + print("acf",acf[0,:,150]) + exit(1) #''' if real: acf = data.real @@ -167,7 +171,7 @@ class SpectraAFCProc(ProcessingUnit): acf[i,:,j]= acf[i,:,j] / numpy.max(numpy.abs(acf[i,:,j])) ''' self.dataOut.data_acf = acf - self.dataOut.data_spc = acf + self.dataOut.data_spc = acf.real #print(self.dataOut.data_acf[0,:,0]) #exit(1) ''' @@ -183,6 +187,7 @@ class SpectraAFCProc(ProcessingUnit): #print(self.dataOut.data_spc.shape) #print(self.dataOut.data_acf.shape) ''' + ''' import matplotlib.pyplot as plt hei = 10 #print(self.dataOut.heightList) @@ -197,7 +202,851 @@ class SpectraAFCProc(ProcessingUnit): plt.ylim(0,1000) plt.show() exit(1) + ''' + return True + + if code is not None: + self.code = numpy.array(code).reshape(nCode,nBaud) + else: + self.code = None + + if self.dataIn.type == "Voltage": + + if nFFTPoints == None: + raise ValueError("This SpectraProc.run() need nFFTPoints input variable") + + if nProfiles == None: + nProfiles = nFFTPoints + + self.dataOut.ippFactor = 1 + + self.dataOut.nFFTPoints = nFFTPoints + self.dataOut.nProfiles = nProfiles + self.dataOut.pairsList = pairsList + +# if self.buffer is None: +# self.buffer = numpy.zeros( (self.dataIn.nChannels, nProfiles, self.dataIn.nHeights), +# dtype='complex') + + if not self.dataIn.flagDataAsBlock: + self.buffer = self.dataIn.data.copy() + +# for i in range(self.dataIn.nHeights): +# self.buffer[:, self.profIndex, self.profIndex:] = voltage_data[:,:self.dataIn.nHeights - self.profIndex] +# +# self.profIndex += 1 + + else: + raise ValueError("") + + self.firstdatatime = self.dataIn.utctime + + self.profIndex == nProfiles + + self.__updateSpecFromVoltage() + + self.__getFft() + + self.dataOut.flagNoData = False + + return True + + raise ValueError("The type of input object '%s' is not valid"%(self.dataIn.type)) + + def __selectPairs(self, pairsList): + + if channelList == None: + return + + pairsIndexListSelected = [] + + for thisPair in pairsList: + + if thisPair not in self.dataOut.pairsList: + continue + + pairIndex = self.dataOut.pairsList.index(thisPair) + + pairsIndexListSelected.append(pairIndex) + + if not pairsIndexListSelected: + self.dataOut.data_cspc = None + self.dataOut.pairsList = [] + return + + self.dataOut.data_cspc = self.dataOut.data_cspc[pairsIndexListSelected] + self.dataOut.pairsList = [self.dataOut.pairsList[i] for i in pairsIndexListSelected] + + return + + def __selectPairsByChannel(self, channelList=None): + + if channelList == None: + return + + pairsIndexListSelected = [] + for pairIndex in self.dataOut.pairsIndexList: + #First pair + if self.dataOut.pairsList[pairIndex][0] not in channelList: + continue + #Second pair + if self.dataOut.pairsList[pairIndex][1] not in channelList: + continue + + pairsIndexListSelected.append(pairIndex) + + if not pairsIndexListSelected: + self.dataOut.data_cspc = None + self.dataOut.pairsList = [] + return + + self.dataOut.data_cspc = 
self.dataOut.data_cspc[pairsIndexListSelected] + self.dataOut.pairsList = [self.dataOut.pairsList[i] for i in pairsIndexListSelected] + + return + + def selectChannels(self, channelList): + + channelIndexList = [] + + for channel in channelList: + if channel not in self.dataOut.channelList: + raise ValueError("Error selecting channels, Channel %d is not valid.\nAvailable channels = %s" %(channel, str(self.dataOut.channelList))) + + index = self.dataOut.channelList.index(channel) + channelIndexList.append(index) + + self.selectChannelsByIndex(channelIndexList) + + def selectChannelsByIndex(self, channelIndexList): + """ + Selecciona un bloque de datos en base a canales segun el channelIndexList + + Input: + channelIndexList : lista sencilla de canales a seleccionar por ej. [2,3,7] + + Affected: + self.dataOut.data_spc + self.dataOut.channelIndexList + self.dataOut.nChannels + + Return: + None + """ + + for channelIndex in channelIndexList: + if channelIndex not in self.dataOut.channelIndexList: + raise ValueError("Error selecting channels: The value %d in channelIndexList is not valid.\nAvailable channel indexes = " %(channelIndex, self.dataOut.channelIndexList)) + +# nChannels = len(channelIndexList) + + data_spc = self.dataOut.data_spc[channelIndexList,:] + data_dc = self.dataOut.data_dc[channelIndexList,:] + + self.dataOut.data_spc = data_spc + self.dataOut.data_dc = data_dc + + self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList] +# self.dataOut.nChannels = nChannels + + self.__selectPairsByChannel(self.dataOut.channelList) + + return 1 + + def selectHeights(self, minHei, maxHei): + """ + Selecciona un bloque de datos en base a un grupo de valores de alturas segun el rango + minHei <= height <= maxHei + + Input: + minHei : valor minimo de altura a considerar + maxHei : valor maximo de altura a considerar + + Affected: + Indirectamente son cambiados varios valores a travez del metodo selectHeightsByIndex + + Return: + 1 si el metodo se ejecuto con exito caso contrario devuelve 0 + """ + + if (minHei > maxHei): + raise ValueError("Error selecting heights: Height range (%d,%d) is not valid" % (minHei, maxHei)) + + if (minHei < self.dataOut.heightList[0]): + minHei = self.dataOut.heightList[0] + + if (maxHei > self.dataOut.heightList[-1]): + maxHei = self.dataOut.heightList[-1] + + minIndex = 0 + maxIndex = 0 + heights = self.dataOut.heightList + + inda = numpy.where(heights >= minHei) + indb = numpy.where(heights <= maxHei) + + try: + minIndex = inda[0][0] + except: + minIndex = 0 + + try: + maxIndex = indb[0][-1] + except: + maxIndex = len(heights) + + self.selectHeightsByIndex(minIndex, maxIndex) + + return 1 + + def getBeaconSignal(self, tauindex = 0, channelindex = 0, hei_ref=None): + newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex]) + + if hei_ref != None: + newheis = numpy.where(self.dataOut.heightList>hei_ref) + + minIndex = min(newheis[0]) + maxIndex = max(newheis[0]) + data_spc = self.dataOut.data_spc[:,:,minIndex:maxIndex+1] + heightList = self.dataOut.heightList[minIndex:maxIndex+1] + + # determina indices + nheis = int(self.dataOut.radarControllerHeaderObj.txB/(self.dataOut.heightList[1]-self.dataOut.heightList[0])) + avg_dB = 10*numpy.log10(numpy.sum(data_spc[channelindex,:,:],axis=0)) + beacon_dB = numpy.sort(avg_dB)[-nheis:] + beacon_heiIndexList = [] + for val in avg_dB.tolist(): + if val >= beacon_dB[0]: + beacon_heiIndexList.append(avg_dB.tolist().index(val)) + + #data_spc = 
data_spc[:,:,beacon_heiIndexList] + data_cspc = None + if self.dataOut.data_cspc is not None: + data_cspc = self.dataOut.data_cspc[:,:,minIndex:maxIndex+1] + #data_cspc = data_cspc[:,:,beacon_heiIndexList] + + data_dc = None + if self.dataOut.data_dc is not None: + data_dc = self.dataOut.data_dc[:,minIndex:maxIndex+1] + #data_dc = data_dc[:,beacon_heiIndexList] + + self.dataOut.data_spc = data_spc + self.dataOut.data_cspc = data_cspc + self.dataOut.data_dc = data_dc + self.dataOut.heightList = heightList + self.dataOut.beacon_heiIndexList = beacon_heiIndexList + + return 1 + + + def selectHeightsByIndex(self, minIndex, maxIndex): + """ + Selecciona un bloque de datos en base a un grupo indices de alturas segun el rango + minIndex <= index <= maxIndex + + Input: + minIndex : valor de indice minimo de altura a considerar + maxIndex : valor de indice maximo de altura a considerar + + Affected: + self.dataOut.data_spc + self.dataOut.data_cspc + self.dataOut.data_dc + self.dataOut.heightList + Return: + 1 si el metodo se ejecuto con exito caso contrario devuelve 0 + """ + + if (minIndex < 0) or (minIndex > maxIndex): + raise ValueError("Error selecting heights: Index range (%d,%d) is not valid" % (minIndex, maxIndex)) + + if (maxIndex >= self.dataOut.nHeights): + maxIndex = self.dataOut.nHeights-1 + + #Spectra + data_spc = self.dataOut.data_spc[:,:,minIndex:maxIndex+1] + + data_cspc = None + if self.dataOut.data_cspc is not None: + data_cspc = self.dataOut.data_cspc[:,:,minIndex:maxIndex+1] + + data_dc = None + if self.dataOut.data_dc is not None: + data_dc = self.dataOut.data_dc[:,minIndex:maxIndex+1] + + self.dataOut.data_spc = data_spc + self.dataOut.data_cspc = data_cspc + self.dataOut.data_dc = data_dc + + self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex+1] + + return 1 + + def removeDC(self, mode = 2): + jspectra = self.dataOut.data_spc + jcspectra = self.dataOut.data_cspc + + + num_chan = jspectra.shape[0] + num_hei = jspectra.shape[2] + + if jcspectra is not None: + jcspectraExist = True + num_pairs = jcspectra.shape[0] + else: jcspectraExist = False + + freq_dc = jspectra.shape[1]/2 + ind_vel = numpy.array([-2,-1,1,2]) + freq_dc + + if ind_vel[0]<0: + ind_vel[range(0,1)] = ind_vel[range(0,1)] + self.num_prof + + if mode == 1: + jspectra[:,freq_dc,:] = (jspectra[:,ind_vel[1],:] + jspectra[:,ind_vel[2],:])/2 #CORRECCION + + if jcspectraExist: + jcspectra[:,freq_dc,:] = (jcspectra[:,ind_vel[1],:] + jcspectra[:,ind_vel[2],:])/2 + + if mode == 2: + + vel = numpy.array([-2,-1,1,2]) + xx = numpy.zeros([4,4]) + + for fil in range(4): + xx[fil,:] = vel[fil]**numpy.asarray(range(4)) + + xx_inv = numpy.linalg.inv(xx) + xx_aux = xx_inv[0,:] + + for ich in range(num_chan): + yy = jspectra[ich,ind_vel,:] + jspectra[ich,freq_dc,:] = numpy.dot(xx_aux,yy) + + junkid = jspectra[ich,freq_dc,:]<=0 + cjunkid = sum(junkid) + + if cjunkid.any(): + jspectra[ich,freq_dc,junkid.nonzero()] = (jspectra[ich,ind_vel[1],junkid] + jspectra[ich,ind_vel[2],junkid])/2 + + if jcspectraExist: + for ip in range(num_pairs): + yy = jcspectra[ip,ind_vel,:] + jcspectra[ip,freq_dc,:] = numpy.dot(xx_aux,yy) + + + self.dataOut.data_spc = jspectra + self.dataOut.data_cspc = jcspectra + + return 1 + + def removeInterference(self, interf = 2,hei_interf = None, nhei_interf = None, offhei_interf = None): + + jspectra = self.dataOut.data_spc + jcspectra = self.dataOut.data_cspc + jnoise = self.dataOut.getNoise() + num_incoh = self.dataOut.nIncohInt + + num_channel = jspectra.shape[0] + num_prof = jspectra.shape[1] + 
num_hei = jspectra.shape[2] + + #hei_interf + if hei_interf is None: + count_hei = num_hei/2 #Como es entero no importa + hei_interf = numpy.asmatrix(range(count_hei)) + num_hei - count_hei + hei_interf = numpy.asarray(hei_interf)[0] + #nhei_interf + if (nhei_interf == None): + nhei_interf = 5 + if (nhei_interf < 1): + nhei_interf = 1 + if (nhei_interf > count_hei): + nhei_interf = count_hei + if (offhei_interf == None): + offhei_interf = 0 + + ind_hei = range(num_hei) +# mask_prof = numpy.asarray(range(num_prof - 2)) + 1 +# mask_prof[range(num_prof/2 - 1,len(mask_prof))] += 1 + mask_prof = numpy.asarray(range(num_prof)) + num_mask_prof = mask_prof.size + comp_mask_prof = [0, num_prof/2] + + + #noise_exist: Determina si la variable jnoise ha sido definida y contiene la informacion del ruido de cada canal + if (jnoise.size < num_channel or numpy.isnan(jnoise).any()): + jnoise = numpy.nan + noise_exist = jnoise[0] < numpy.Inf + + #Subrutina de Remocion de la Interferencia + for ich in range(num_channel): + #Se ordena los espectros segun su potencia (menor a mayor) + power = jspectra[ich,mask_prof,:] + power = power[:,hei_interf] + power = power.sum(axis = 0) + psort = power.ravel().argsort() + + #Se estima la interferencia promedio en los Espectros de Potencia empleando + junkspc_interf = jspectra[ich,:,hei_interf[psort[range(offhei_interf, nhei_interf + offhei_interf)]]] + + if noise_exist: + # tmp_noise = jnoise[ich] / num_prof + tmp_noise = jnoise[ich] + junkspc_interf = junkspc_interf - tmp_noise + #junkspc_interf[:,comp_mask_prof] = 0 + + jspc_interf = junkspc_interf.sum(axis = 0) / nhei_interf + jspc_interf = jspc_interf.transpose() + #Calculando el espectro de interferencia promedio + noiseid = numpy.where(jspc_interf <= tmp_noise/ numpy.sqrt(num_incoh)) + noiseid = noiseid[0] + cnoiseid = noiseid.size + interfid = numpy.where(jspc_interf > tmp_noise/ numpy.sqrt(num_incoh)) + interfid = interfid[0] + cinterfid = interfid.size + + if (cnoiseid > 0): jspc_interf[noiseid] = 0 + + #Expandiendo los perfiles a limpiar + if (cinterfid > 0): + new_interfid = (numpy.r_[interfid - 1, interfid, interfid + 1] + num_prof)%num_prof + new_interfid = numpy.asarray(new_interfid) + new_interfid = {x for x in new_interfid} + new_interfid = numpy.array(list(new_interfid)) + new_cinterfid = new_interfid.size + else: new_cinterfid = 0 + + for ip in range(new_cinterfid): + ind = junkspc_interf[:,new_interfid[ip]].ravel().argsort() + jspc_interf[new_interfid[ip]] = junkspc_interf[ind[nhei_interf/2],new_interfid[ip]] + + + jspectra[ich,:,ind_hei] = jspectra[ich,:,ind_hei] - jspc_interf #Corregir indices + + #Removiendo la interferencia del punto de mayor interferencia + ListAux = jspc_interf[mask_prof].tolist() + maxid = ListAux.index(max(ListAux)) + + + if cinterfid > 0: + for ip in range(cinterfid*(interf == 2) - 1): + ind = (jspectra[ich,interfid[ip],:] < tmp_noise*(1 + 1/numpy.sqrt(num_incoh))).nonzero() + cind = len(ind) + + if (cind > 0): + jspectra[ich,interfid[ip],ind] = tmp_noise*(1 + (numpy.random.uniform(cind) - 0.5)/numpy.sqrt(num_incoh)) + + ind = numpy.array([-2,-1,1,2]) + xx = numpy.zeros([4,4]) + + for id1 in range(4): + xx[:,id1] = ind[id1]**numpy.asarray(range(4)) + + xx_inv = numpy.linalg.inv(xx) + xx = xx_inv[:,0] + ind = (ind + maxid + num_mask_prof)%num_mask_prof + yy = jspectra[ich,mask_prof[ind],:] + jspectra[ich,mask_prof[maxid],:] = numpy.dot(yy.transpose(),xx) + + + indAux = (jspectra[ich,:,:] < tmp_noise*(1-1/numpy.sqrt(num_incoh))).nonzero() + jspectra[ich,indAux[0],indAux[1]] = 
tmp_noise * (1 - 1/numpy.sqrt(num_incoh)) + + #Remocion de Interferencia en el Cross Spectra + if jcspectra is None: return jspectra, jcspectra + num_pairs = jcspectra.size/(num_prof*num_hei) + jcspectra = jcspectra.reshape(num_pairs, num_prof, num_hei) + + for ip in range(num_pairs): + + #------------------------------------------- + + cspower = numpy.abs(jcspectra[ip,mask_prof,:]) + cspower = cspower[:,hei_interf] + cspower = cspower.sum(axis = 0) + + cspsort = cspower.ravel().argsort() + junkcspc_interf = jcspectra[ip,:,hei_interf[cspsort[range(offhei_interf, nhei_interf + offhei_interf)]]] + junkcspc_interf = junkcspc_interf.transpose() + jcspc_interf = junkcspc_interf.sum(axis = 1)/nhei_interf + + ind = numpy.abs(jcspc_interf[mask_prof]).ravel().argsort() + + median_real = numpy.median(numpy.real(junkcspc_interf[mask_prof[ind[range(3*num_prof/4)]],:])) + median_imag = numpy.median(numpy.imag(junkcspc_interf[mask_prof[ind[range(3*num_prof/4)]],:])) + junkcspc_interf[comp_mask_prof,:] = numpy.complex(median_real, median_imag) + + for iprof in range(num_prof): + ind = numpy.abs(junkcspc_interf[iprof,:]).ravel().argsort() + jcspc_interf[iprof] = junkcspc_interf[iprof, ind[nhei_interf/2]] + + #Removiendo la Interferencia + jcspectra[ip,:,ind_hei] = jcspectra[ip,:,ind_hei] - jcspc_interf + + ListAux = numpy.abs(jcspc_interf[mask_prof]).tolist() + maxid = ListAux.index(max(ListAux)) + + ind = numpy.array([-2,-1,1,2]) + xx = numpy.zeros([4,4]) + + for id1 in range(4): + xx[:,id1] = ind[id1]**numpy.asarray(range(4)) + + xx_inv = numpy.linalg.inv(xx) + xx = xx_inv[:,0] + + ind = (ind + maxid + num_mask_prof)%num_mask_prof + yy = jcspectra[ip,mask_prof[ind],:] + jcspectra[ip,mask_prof[maxid],:] = numpy.dot(yy.transpose(),xx) + + #Guardar Resultados + self.dataOut.data_spc = jspectra + self.dataOut.data_cspc = jcspectra + + return 1 + + def setRadarFrequency(self, frequency=None): + + if frequency != None: + self.dataOut.frequency = frequency + + return 1 + + def getNoise(self, minHei=None, maxHei=None, minVel=None, maxVel=None): + #validacion de rango + if minHei == None: + minHei = self.dataOut.heightList[0] + + if maxHei == None: + maxHei = self.dataOut.heightList[-1] + + if (minHei < self.dataOut.heightList[0]) or (minHei > maxHei): + print('minHei: %.2f is out of the heights range'%(minHei)) + print('minHei is setting to %.2f'%(self.dataOut.heightList[0])) + minHei = self.dataOut.heightList[0] + + if (maxHei > self.dataOut.heightList[-1]) or (maxHei < minHei): + print('maxHei: %.2f is out of the heights range'%(maxHei)) + print('maxHei is setting to %.2f'%(self.dataOut.heightList[-1])) + maxHei = self.dataOut.heightList[-1] + + # validacion de velocidades + velrange = self.dataOut.getVelRange(1) + + if minVel == None: + minVel = velrange[0] + + if maxVel == None: + maxVel = velrange[-1] + + if (minVel < velrange[0]) or (minVel > maxVel): + print('minVel: %.2f is out of the velocity range'%(minVel)) + print('minVel is setting to %.2f'%(velrange[0])) + minVel = velrange[0] + + if (maxVel > velrange[-1]) or (maxVel < minVel): + print('maxVel: %.2f is out of the velocity range'%(maxVel)) + print('maxVel is setting to %.2f'%(velrange[-1])) + maxVel = velrange[-1] + + # seleccion de indices para rango + minIndex = 0 + maxIndex = 0 + heights = self.dataOut.heightList + + inda = numpy.where(heights >= minHei) + indb = numpy.where(heights <= maxHei) + + try: + minIndex = inda[0][0] + except: + minIndex = 0 + + try: + maxIndex = indb[0][-1] + except: + maxIndex = len(heights) + + if (minIndex < 0) or 
(minIndex > maxIndex): + raise ValueError("some value in (%d,%d) is not valid" % (minIndex, maxIndex)) + + if (maxIndex >= self.dataOut.nHeights): + maxIndex = self.dataOut.nHeights-1 + + # seleccion de indices para velocidades + indminvel = numpy.where(velrange >= minVel) + indmaxvel = numpy.where(velrange <= maxVel) + try: + minIndexVel = indminvel[0][0] + except: + minIndexVel = 0 + + try: + maxIndexVel = indmaxvel[0][-1] + except: + maxIndexVel = len(velrange) + + #seleccion del espectro + data_spc = self.dataOut.data_spc[:,minIndexVel:maxIndexVel+1,minIndex:maxIndex+1] + #estimacion de ruido + noise = numpy.zeros(self.dataOut.nChannels) + + for channel in range(self.dataOut.nChannels): + daux = data_spc[channel,:,:] + noise[channel] = hildebrand_sekhon(daux, self.dataOut.nIncohInt) + + self.dataOut.noise_estimation = noise.copy() + + return 1 + +class SpectraAFCProc(ProcessingUnit): + + def __init__(self, **kwargs): + + ProcessingUnit.__init__(self, **kwargs) + + self.buffer = None + self.firstdatatime = None + self.profIndex = 0 + self.dataOut = Spectra() + self.id_min = None + self.id_max = None + + def __updateSpecFromVoltage(self): + + self.dataOut.plotting = "spectra_acf" + + self.dataOut.timeZone = self.dataIn.timeZone + self.dataOut.dstFlag = self.dataIn.dstFlag + self.dataOut.errorCount = self.dataIn.errorCount + self.dataOut.useLocalTime = self.dataIn.useLocalTime + + self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy() + self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy() + self.dataOut.ippSeconds = self.dataIn.getDeltaH()*(10**-6)/0.15 + + self.dataOut.channelList = self.dataIn.channelList + self.dataOut.heightList = self.dataIn.heightList + self.dataOut.dtype = numpy.dtype([('real',' nums_min: rtest = float(j)/(j-1) + 1.0/navg + #print(rtest) + #print(sump) if ((sumq*j) > (rtest*sump**2)): j = j - 1 sump = sump - sortdata[j] @@ -2903,7 +2931,7 @@ class IntegrationFaradaySpectraNoLags(Operation): #lnoise = sump / j return j,sortID - + #''' def pushData(self): """ Return the sum of the last profiles and the profiles used in the sum. 
@@ -2938,12 +2966,20 @@ class IntegrationFaradaySpectraNoLags(Operation): if i==1 and j==0: #NOT CONSIDERING DC PROFILE AT CHANNEL 1 continue buffer=buffer1[:,j] - index,sortID=self.hildebrand_sekhon_Integration(buffer,1) + #if k != 100: + index=int(_HS_algorithm.HS_algorithm(numpy.sort(buffer, axis=None),1)) + sortID = buffer.argsort() + #else: + #index,sortID=self.hildebrand_sekhon_Integration(buffer,1) + #if k == 100: + # print(k,index,sortID) + # exit(1) indexes.append(index) #sortIDs.append(sortID) outliers_IDs=numpy.append(outliers_IDs,sortID[index:]) - + #if k == 100: + # exit(1) outliers_IDs=numpy.array(outliers_IDs) outliers_IDs=outliers_IDs.ravel() outliers_IDs=numpy.unique(outliers_IDs) @@ -3332,7 +3368,9 @@ class IncohIntLag(Operation): return dataOut dataOut.flagNoData = True - + #print("incohint") + #print("IncohInt",dataOut.data_spc.shape) + #print("IncohInt",dataOut.data_cspc.shape) if not self.isConfig: self.setup(n, timeInterval, overlapping) self.isConfig = True @@ -3352,7 +3390,7 @@ class IncohIntLag(Operation): dataOut.dataLag_spc, dataOut.dataLag_cspc, dataOut.dataLag_dc) - + #print("Incoh Int: ",self.__profIndex,n) if self.__dataReady: if not dataOut.ByLags: @@ -3369,7 +3407,8 @@ class IncohIntLag(Operation): #print(numpy.sum(dataOut.dataLag_spc[1,:,100,2])) #exit(1) - #print("HERE") + #print("INCOH INT DONE") + #exit(1) ''' print(numpy.sum(dataOut.dataLag_spc[0,:,20,10])/32) print(numpy.sum(dataOut.dataLag_spc[1,:,20,10])/32) @@ -3580,6 +3619,8 @@ class IncohInt(Operation): dataOut.nIncohInt *= self.n dataOut.utctime = avgdatatime dataOut.flagNoData = False + #print("Power",numpy.sum(dataOut.data_spc[0,:,20:30],axis=0)) + #print("Power",numpy.sum(dataOut.data_spc[0,100:110,:],axis=1)) #exit(1) #print(numpy.sum(dataOut.data_spc[0,:,53]*numpy.conjugate(dataOut.data_spc[0,:,53]))) #print(numpy.sum(dataOut.data_spc[1,:,53]*numpy.conjugate(dataOut.data_spc[1,:,53]))) @@ -3773,7 +3814,6 @@ class SpectraDataToFaraday(Operation): dataOut.lat=-11.95 dataOut.lon=-76.87 - self.normFactor(dataOut) dataOut.NDP=dataOut.nHeights @@ -3848,7 +3888,7 @@ class SpectraDataToFaraday(Operation): #print("Noise dB: ",10*numpy.log10(dataOut.tnoise)) #exit(1) #dataOut.pan=dataOut.tnoise[0]/float(dataOut.nProfiles_LP*dataOut.nIncohInt) - + print("done") return dataOut @@ -3903,7 +3943,7 @@ class SpectraDataToHybrid(SpectraDataToFaraday): - def ConvertDataLP(self,dataOut): + def ConvertDataLP_V0(self,dataOut): #print(dataOut.dataLag_spc[:,:,:,1]/dataOut.data_spc) #exit(1) @@ -3922,6 +3962,20 @@ class SpectraDataToHybrid(SpectraDataToFaraday): #exit(1) + def ConvertDataLP(self,dataOut): + + #print(dataOut.dataLag_spc[:,:,:,1]/dataOut.data_spc) + #exit(1) + normfactor=1.0/(dataOut.nIncohInt_LP*dataOut.nProfiles_LP)#*dataOut.nProfiles + + buffer = self.dataLag_spc_LP=dataOut.dataLag_spc_LP + ##self.dataLag_cspc_LP=(dataOut.dataLag_cspc_LP.sum(axis=1))*(1./dataOut.nProfiles_LP) + #self.dataLag_dc=dataOut.dataLag_dc.sum(axis=1)/dataOut.rnint2[0] + #aux=numpy.expand_dims(self.dataLag_spc_LP, axis=2) + #print(aux.shape) + ##buffer = numpy.concatenate((numpy.expand_dims(self.dataLag_spc_LP, axis=2),self.dataLag_cspc_LP),axis=2) + dataOut.output_LP_integrated = numpy.transpose(buffer,(1,2,0)) + def normFactor(self,dataOut): dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32') for l in range(dataOut.DPL): @@ -4011,3 +4065,115 @@ class SpectraDataToHybrid(SpectraDataToFaraday): #exit(1) return dataOut + +class SpectraDataToHybrid_V2(SpectraDataToFaraday): + """Operation to use spectra data in Faraday 
processing. + + Parameters: + ----------- + nint : int + Number of integrations. + + Example + -------- + + op = proc_unit.addOperation(name='SpectraDataToFaraday', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.dataLag_spc=None + self.dataLag_cspc=None + self.dataLag_dc=None + self.dataLag_spc_LP=None + self.dataLag_cspc_LP=None + self.dataLag_dc_LP=None + + def noise(self,dataOut): + + dataOut.data_spc = dataOut.dataLag_spc_LP.real + #print(dataOut.dataLag_spc.shape) + #exit(1) + #dataOut.data_spc = dataOut.dataLag_spc[:,:,:,0].real + #print("spc noise shape: ",dataOut.data_spc.shape) + dataOut.tnoise = dataOut.getNoise(ymin_index=100,ymax_index=166) + #print("Noise LP: ",10*numpy.log10(dataOut.tnoise)) + #exit(1) + #dataOut.tnoise[0]*=0.995#0.976 + #dataOut.tnoise[1]*=0.995 + #print(dataOut.nProfiles) + #dataOut.pan=dataOut.tnoise[0]/float(dataOut.nProfiles_LP*dataOut.nIncohInt) + #dataOut.pbn=dataOut.tnoise[1]/float(dataOut.nProfiles_LP*dataOut.nIncohInt) + dataOut.pan=dataOut.tnoise[0]/float(dataOut.nProfiles_LP*dataOut.nIncohInt_LP) + dataOut.pbn=dataOut.tnoise[1]/float(dataOut.nProfiles_LP*dataOut.nIncohInt_LP) + ##dataOut.pan=dataOut.tnoise[0]*float(self.normfactor_LP) + ##dataOut.pbn=dataOut.tnoise[1]*float(self.normfactor_LP) + #print("pan: ",10*numpy.log10(dataOut.pan)) + #print("pbn: ",dataOut.pbn) + #print(numpy.shape(dataOut.pnoise)) + #exit(1) + #print("pan: ",numpy.sum(dataOut.pan)) + #exit(1) + + def ConvertDataLP(self,dataOut): + + #print(dataOut.dataLag_spc[:,:,:,1]/dataOut.data_spc) + #exit(1) + self.normfactor_LP=1.0/(dataOut.nIncohInt_LP*dataOut.nProfiles_LP)#*dataOut.nProfiles + #print("acf: ",dataOut.data_acf[0,0,100]) + #print("Power: ",numpy.mean(dataOut.dataLag_spc_LP[0,:,100])) + #buffer = dataOut.data_acf*(1./(normfactor*dataOut.nProfiles_LP)) + #buffer = dataOut.data_acf*(1./(normfactor)) + buffer = dataOut.data_acf#*(self.normfactor_LP) + #print("acf: ",numpy.sum(buffer)) + + dataOut.output_LP_integrated = numpy.transpose(buffer,(1,2,0)) + + def normFactor(self,dataOut): + dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32') + for l in range(dataOut.DPL): + if(l==0 or (l>=3 and l <=6)): + dataOut.rnint2[l]=1.0/(dataOut.nIncohInt*dataOut.nProfiles) + else: + dataOut.rnint2[l]=2*(1.0/(dataOut.nIncohInt*dataOut.nProfiles)) + + def run(self,dataOut): + + dataOut.paramInterval=0#int(dataOut.nint*dataOut.header[7][0]*2 ) + dataOut.lat=-11.95 + dataOut.lon=-76.87 + + dataOut.NDP=dataOut.nHeights + dataOut.NR=len(dataOut.channelList) + dataOut.DH=dataOut.heightList[1]-dataOut.heightList[0] + dataOut.H0=int(dataOut.heightList[0]) + + self.normFactor(dataOut) + + #Probar sin comentar lo siguiente y comentando + #dataOut.data_acf *= 16 #Corrects the zero padding + #dataOut.dataLag_spc_LP *= 16 #Corrects the zero padding + self.ConvertDataLP(dataOut) + #dataOut.dataLag_spc_LP *= 2 + #dataOut.output_LP_integrated[:,:,3] *= float(dataOut.NSCAN/22)#(dataOut.nNoiseProfiles) #Corrects the zero padding + + dataOut.nis=dataOut.NSCAN*dataOut.nIncohInt_LP*10 + + self.ConvertData(dataOut) + + dataOut.kabxys_integrated[4][:,(1,2,7,8,9,10),0] *= 2 #Corrects the zero padding + dataOut.kabxys_integrated[6][:,(1,2,7,8,9,10),0] *= 2 #Corrects the zero padding + dataOut.kabxys_integrated[8][:,(1,2,7,8,9,10),0] *= 2 #Corrects the zero padding + dataOut.kabxys_integrated[10][:,(1,2,7,8,9,10),0] *= 2 #Corrects the zero padding + hei = 2 + + self.noise(dataOut) + + dataOut.NAVG=1#dataOut.rnint2[0] #CHECK THIS! 
+ dataOut.nint=dataOut.nIncohInt + dataOut.MAXNRANGENDT=dataOut.output_LP_integrated.shape[1] + + return dataOut diff --git a/schainpy/model/proc/jroproc_voltage.py b/schainpy/model/proc/jroproc_voltage.py index a0fbfcd..c866cbb 100644 --- a/schainpy/model/proc/jroproc_voltage.py +++ b/schainpy/model/proc/jroproc_voltage.py @@ -442,13 +442,22 @@ class deFlipHP(Operation): if not channelList: data[:,:] = data[:,:]*self.flip else: - channelList=[1] + #channelList=[1] for thisChannel in channelList: if thisChannel not in dataOut.channelList: continue - data[thisChannel,:] = data[thisChannel,:]*self.flip + if not byHeights: + data[thisChannel,:] = data[thisChannel,:]*flip + + else: + firstHeight = HeiRangeList[0] + lastHeight = HeiRangeList[1]+1 + flip = -1.0 + data[thisChannel,firstHeight:lastHeight] = data[thisChannel,firstHeight:lastHeight]*flip + + #data[thisChannel,:] = data[thisChannel,:]*self.flip self.flip *= -1. @@ -1093,28 +1102,56 @@ class LagsReshapeDP_V2(Operation): #print(dataOut.data[1,:12,:15]) #exit(1) #print(numpy.shape(dataOut.data)) - print(dataOut.profileIndex) - #dataOut.flagNoData = True - if dataOut.profileIndex == 140: - print("here") - print(dataOut.data.shape) - - dataOut.data = numpy.transpose(numpy.array(self.data_buffer),(1,0,2)) - print(dataOut.data.shape) - dataOut.datalags = numpy.copy(self.LagDistribution(dataOut)) - #print(numpy.shape(dataOut.datalags)) - #exit(1) - #print("AFTER RESHAPE DP") + #print(dataOut.profileIndex) - dataOut.data = dataOut.data[:,:,200:] - self.data_buffer = [] - #dataOut.flagNoData = False + if not dataOut.flagDataAsBlock: - else: - self.data_buffer.append(dataOut.data) - #print(numpy.shape(dataOut.data)) - #exit(1) + dataOut.flagNoData = True + #print("nProfiles: ",dataOut.nProfiles) + #if dataOut.profileIndex == 140: + #print("id: ",dataOut.profileIndex) + if dataOut.profileIndex == dataOut.nProfiles-1: + #print("here") + #print(dataOut.data.shape) + self.data_buffer.append(dataOut.data) + dataOut.data = numpy.transpose(numpy.array(self.data_buffer),(1,0,2)) + #print(dataOut.data.shape) + #print(numpy.sum(dataOut.data)) + #print(dataOut.data[1,100,:]) + #exit(1) + dataOut.datalags = numpy.copy(self.LagDistribution(dataOut)) + #print(numpy.shape(dataOut.datalags)) + #exit(1) + #print("AFTER RESHAPE DP") + + dataOut.data = dataOut.data[:,:,200:] + self.data_buffer = [] + dataOut.flagDataAsBlock = True + dataOut.flagNoData = False + + deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + dataOut.heightList = numpy.arange(dataOut.NDP) *deltaHeight# + dataOut.heightList[0] + #exit(1) + #print(numpy.sum(dataOut.datalags)) + #exit(1) + else: + self.data_buffer.append(dataOut.data) + #print(numpy.shape(dataOut.data)) + #exit(1) + else: + #print(dataOut.data.shape) + #print(numpy.sum(dataOut.data)) + #print(dataOut.data[1,100,:]) + #exit(1) + dataOut.datalags = numpy.copy(self.LagDistribution(dataOut)) + #print(dataOut.datalags.shape) + dataOut.data = dataOut.data[:,:,200:] + deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + dataOut.heightList = numpy.arange(dataOut.NDP) * deltaHeight# + dataOut.heightList[0] + #print(dataOut.nHeights) + #print(numpy.sum(dataOut.datalags)) + #exit(1) return dataOut @@ -2523,8 +2560,8 @@ class CleanCohEchoes(Operation): return dataOut -class NoisePower(Operation): - """Operation to get noise power from the integrated data of the Double Pulse. +class CleanCohEchoesHP(Operation): + """Operation to clean coherent echoes. 
Parameters: ----------- @@ -2533,7 +2570,7 @@ class NoisePower(Operation): Example -------- - op = proc_unit.addOperation(name='NoisePower', optype='other') + op = proc_unit.addOperation(name='CleanCohEchoes') """ @@ -2541,125 +2578,498 @@ class NoisePower(Operation): Operation.__init__(self, **kwargs) - def hildebrand(self,dataOut,data): - - divider=10 # divider was originally 10 - noise=0.0 - n1=0 - n2=int(dataOut.NDP/2) - sorts= sorted(data) - nums_min= dataOut.NDP/divider - if((dataOut.NDP/divider)> 2): - nums_min= int(dataOut.NDP/divider) - - else: - nums_min=2 - sump=0.0 - sumq=0.0 - j=0 - cont=1 - while( (cont==1) and (j nums_min): - rtest= float(j/(j-1)) +1.0/dataOut.NAVG - t1= (sumq*j) - t2=(rtest*sump*sump) - if( (t1/t2) > 0.990): - j=j-1 - sump-= sorts[j+n1] - sumq-=sorts[j+n1]*sorts[j+n1] - cont= 0 + def remove_coh(self,pow): + #print("pow inside: ",pow) + #print(pow.shape) + q75,q25 = numpy.percentile(pow,[75,25],axis=0) + #print(q75,q25) + intr_qr = q75-q25 - noise= sump/j - stdv=numpy.sqrt((sumq- noise*noise)/(j-1)) - return noise + max = q75+(1.5*intr_qr) + min = q25-(1.5*intr_qr) - def run(self,dataOut): + pow[pow > max] = numpy.nan - p=numpy.zeros((dataOut.NR,dataOut.NDP,dataOut.DPL),'float32') - av=numpy.zeros(dataOut.NDP,'float32') - dataOut.pnoise=numpy.zeros(dataOut.NR,'float32') + #print("Max: ",max) + #print("Min: ",min) - p[0,:,:]=dataOut.kabxys_integrated[4][:,:,0]+dataOut.kabxys_integrated[5][:,:,0] #total power for channel 0, just pulse with non-flip - p[1,:,:]=dataOut.kabxys_integrated[6][:,:,0]+dataOut.kabxys_integrated[7][:,:,0] #total power for channel 1 + return pow - for i in range(dataOut.NR): - dataOut.pnoise[i]=0.0 - for k in range(dataOut.DPL): - dataOut.pnoise[i]+= self.hildebrand(dataOut,p[i,:,k]) + def mad_based_outlier_V0(self, points, thresh=3.5): + #print("points: ",points) + if len(points.shape) == 1: + points = points[:,None] + median = numpy.nanmedian(points, axis=0) + diff = numpy.nansum((points - median)**2, axis=-1) + diff = numpy.sqrt(diff) + med_abs_deviation = numpy.nanmedian(diff) - dataOut.pnoise[i]=dataOut.pnoise[i]/dataOut.DPL + modified_z_score = 0.6745 * diff / med_abs_deviation + #print(modified_z_score) + return modified_z_score > thresh + def mad_based_outlier(self, points, thresh=3.5): - dataOut.pan=1.0*dataOut.pnoise[0] # weights could change - dataOut.pbn=1.0*dataOut.pnoise[1] # weights could change - print("pan: ",dataOut.pan) - print("pbn: ",dataOut.pbn) - print("pan dB: ",10*numpy.log10(dataOut.pan)) - print("pbn dB: ",10*numpy.log10(dataOut.pbn)) - exit(1) - dataOut.power = dataOut.getPower() - return dataOut + median = numpy.nanmedian(points) + diff = (points - median)**2 + diff = numpy.sqrt(diff) + med_abs_deviation = numpy.nanmedian(diff) + modified_z_score = 0.6745 * diff / med_abs_deviation -class DoublePulseACFs(Operation): - """Operation to get the ACFs of the Double Pulse. + return modified_z_score > thresh - Parameters: - ----------- - None + def removeSpreadF_V0(self,dataOut): + for i in range(11): + print("BEFORE Chb: ",i,dataOut.kabxys_integrated[6][:,i,0]) + #exit(1) - Example - -------- + #Removing echoes greater than 35 dB + maxdB = 35 #DEBERÍA SER NOISE+ALGO!!!!!!!!!!!!!!!!!!!!!! 
+ #print(dataOut.kabxys_integrated[6][:,0,0]) + data = numpy.copy(10*numpy.log10(dataOut.kabxys_integrated[6][:,0,0])) #Lag0 ChB + #print(data) + for i in range(12,data.shape[0]): + #for j in range(data.shape[1]): + if data[i]>maxdB: + dataOut.kabxys_integrated[4][i-2:i+3,:,0] = numpy.nan #Debido a que estos ecos son intensos, se + dataOut.kabxys_integrated[6][i-2:i+3,:,0] = numpy.nan #remueve además dos muestras antes y después + #dataOut.kabxys_integrated[4][i-1,:,0] = numpy.nan + #dataOut.kabxys_integrated[6][i-1,:,0] = numpy.nan + #dataOut.kabxys_integrated[4][i+1,:,0] = numpy.nan + #dataOut.kabxys_integrated[6][i+1,:,0] = numpy.nan + dataOut.flagSpreadF = True + print("Removing Threshold",i) + #print("i: ",i) - op = proc_unit.addOperation(name='DoublePulseACFs', optype='other') + #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,0,0]) + #exit(1) - """ + #Removing outliers from the profile + nlag = 9 + minHei = 180 + #maxHei = 600 + maxHei = 525 + inda = numpy.where(dataOut.heightList >= minHei) + indb = numpy.where(dataOut.heightList <= maxHei) + minIndex = inda[0][0] + maxIndex = indb[0][-1] + #l0 = 0 + #print("BEFORE Cha: ",dataOut.kabxys_integrated[4][:,l0,0]) + #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,l0,0]) + #exit(1) + #''' + l0 = 0 + #print("BEFORE Cha: ",dataOut.kabxys_integrated[4][:,l0,0]) + #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,l0,0]) - def __init__(self, **kwargs): + import matplotlib.pyplot as plt + for i in range(l0,l0+11): + plt.plot(dataOut.kabxys_integrated[6][:,i,0],dataOut.heightList,label='{}'.format(i)) + #plt.xlim(1.e5,1.e8) + plt.legend() + plt.xlim(0,2000) + plt.show() + #''' + #dataOut.kabxys_integrated[4][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[4][minIndex:,:,0 + outliers_IDs = [] + ''' + for lag in range(11): + outliers = self.mad_based_outlier(dataOut.kabxys_integrated[4][minIndex:,lag,0], thresh=3.) + #print("Outliers: ",outliers) + #indexes.append(outliers.nonzero()) + #numpy.concatenate((outliers)) + #dataOut.kabxys_integrated[4][minIndex:,lag,0][outliers == True] = numpy.nan + outliers_IDs=numpy.append(outliers_IDs,outliers.nonzero()) + ''' + for lag in range(11): + #outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0], thresh=2.) 
+ outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0]) + outliers_IDs=numpy.append(outliers_IDs,outliers.nonzero()) + #print(outliers_IDs) + #exit(1) + if outliers_IDs != []: + outliers_IDs=numpy.array(outliers_IDs) + outliers_IDs=outliers_IDs.ravel() + outliers_IDs=outliers_IDs.astype(numpy.dtype('int64')) - Operation.__init__(self, **kwargs) - self.aux=1 + (uniq, freq) = (numpy.unique(outliers_IDs, return_counts=True)) + aux_arr = numpy.column_stack((uniq,freq)) + #print("repetitions: ",aux_arr) - def run(self,dataOut): + #if aux_arr != []: + final_index = [] + for i in range(aux_arr.shape[0]): + if aux_arr[i,1] >= 10: + final_index.append(aux_arr[i,0]) - dataOut.igcej=numpy.zeros((dataOut.NDP,dataOut.DPL),'int32') + if final_index != [] and len(final_index) > 1: + final_index += minIndex + #print("final_index: ",final_index) + following_index = final_index[-1]+1 #Remove following index to ensure we remove remaining SpreadF + previous_index = final_index[0]-1 #Remove previous index to ensure we remove remaning SpreadF + final_index = numpy.concatenate(([previous_index],final_index,[following_index])) + final_index = numpy.unique(final_index) #If there was only one outlier + #print("final_index: ",final_index) + #exit(1) + dataOut.kabxys_integrated[4][final_index,:,0] = numpy.nan + dataOut.kabxys_integrated[6][final_index,:,0] = numpy.nan - if self.aux==1: - dataOut.rhor=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) - dataOut.rhoi=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) - dataOut.sdp=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) - dataOut.sd=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) - dataOut.p=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) - dataOut.alag=numpy.zeros(dataOut.NDP,'float32') - for l in range(dataOut.DPL): - dataOut.alag[l]=l*dataOut.DH*2.0/150.0 - self.aux=0 - sn4=dataOut.pan*dataOut.pbn - rhorn=0 - rhoin=0 - panrm=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) + dataOut.flagSpreadF = True - id = numpy.where(dataOut.heightList>700)[0] + #print(final_index+minIndex) + #print(outliers_IDs) + #exit(1) + #print("flagSpreadF",dataOut.flagSpreadF) - for i in range(dataOut.NDP): - for j in range(dataOut.DPL): - ################# Total power - pa=numpy.abs(dataOut.kabxys_integrated[4][i,j,0]+dataOut.kabxys_integrated[5][i,j,0]) - pb=numpy.abs(dataOut.kabxys_integrated[6][i,j,0]+dataOut.kabxys_integrated[7][i,j,0]) - st4=pa*pb - ''' - if i > id[0]: - dataOut.p[i,j] pa-dataOut.pan - else: - dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn) - ''' - dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn) - dataOut.sdp[i,j]=2*dataOut.rnint2[j]*((pa+pb)*(pa+pb)) + ''' + for lag in range(11): + #print("Lag: ",lag) + outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0], thresh=2.) 
+ dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0][outliers == True] = numpy.nan + ''' + #dataOut.kabxys_integrated[4][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[4][minIndex:,:,0]) + ''' + import matplotlib.pyplot as plt + for i in range(11): + plt.plot(dataOut.kabxys_integrated[6][:,i,0],dataOut.heightList,label='{}'.format(i)) + plt.xlim(0,2000) + plt.legend() + plt.grid() + plt.show() + ''' + ''' + for nlag in range(11): + print("BEFORE",dataOut.kabxys_integrated[6][:,nlag,0]) + #exit(1) + ''' + #dataOut.kabxys_integrated[6][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[6][minIndex:,:,0]) + + + ''' + for nlag in range(11): + print("AFTER",dataOut.kabxys_integrated[6][:,nlag,0]) + exit(1) + ''' + #print("AFTER",dataOut.kabxys_integrated[4][33,:,0]) + #print("AFTER",dataOut.kabxys_integrated[6][33,:,0]) + #exit(1) + + def removeSpreadF(self,dataOut): + #for i in range(11): + #print("BEFORE Chb: ",i,dataOut.kabxys_integrated[6][:,i,0]) + #exit(1) + + #for i in range(12,data.shape[0]): + #for j in range(data.shape[1]): + #if data[i]>maxdB: + #dataOut.kabxys_integrated[4][i-2:i+3,:,0] = numpy.nan #Debido a que estos ecos son intensos, se + #dataOut.kabxys_integrated[6][i-2:i+3,:,0] = numpy.nan #remueven además dos muestras antes y después + #dataOut.flagSpreadF = True + #print("Removing Threshold",i) + #print("i: ",i) + + #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,0,0]) + #exit(1) + + #Removing outliers from the profile + nlag = 9 + minHei = 180 + #maxHei = 600 + maxHei = 525 + inda = numpy.where(dataOut.heightList >= minHei) + indb = numpy.where(dataOut.heightList <= maxHei) + minIndex = inda[0][0] + maxIndex = indb[0][-1] + #l0 = 0 + #print("BEFORE Cha: ",dataOut.kabxys_integrated[4][:,l0,0]) + #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,l0,0]) + #exit(1) + ''' + l0 = 0 + #print("BEFORE Cha: ",dataOut.kabxys_integrated[4][:,l0,0]) + #print("BEFORE Chb: ",dataOut.kabxys_integrated[6][:,l0,0]) + + import matplotlib.pyplot as plt + for i in range(l0,l0+11): + plt.plot(dataOut.kabxys_integrated[6][:,i,0],dataOut.heightList,label='{}'.format(i)) + #plt.xlim(1.e5,1.e8) + plt.legend() + plt.xlim(0,2000) + plt.show() + ''' + #dataOut.kabxys_integrated[4][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[4][minIndex:,:,0 + outliers_IDs = [] + ''' + for lag in range(11): + outliers = self.mad_based_outlier(dataOut.kabxys_integrated[4][minIndex:,lag,0], thresh=3.) + #print("Outliers: ",outliers) + #indexes.append(outliers.nonzero()) + #numpy.concatenate((outliers)) + #dataOut.kabxys_integrated[4][minIndex:,lag,0][outliers == True] = numpy.nan + outliers_IDs=numpy.append(outliers_IDs,outliers.nonzero()) + ''' + ''' + for lag in range(11): + #outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0], thresh=2.) + outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0]) + outliers_IDs=numpy.append(outliers_IDs,outliers.nonzero()) + ''' + + for i in range(15): + minIndex = 12+i#12 + #maxIndex = 22+i#35 + if gmtime(dataOut.utctime).tm_hour >= 23. or gmtime(dataOut.utctime).tm_hour < 3.: + maxIndex = 31+i#35 + else: + maxIndex = 22+i#35 + for lag in range(11): + #outliers = mad_based_outlier(pow_clean3[12:27], thresh=2.) 
+ #print("Cuts: ",first_cut*15, last_cut*15) + outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0]) + aux = minIndex+numpy.array(outliers.nonzero()).ravel() + outliers_IDs=numpy.append(outliers_IDs,aux) + #print(minIndex+numpy.array(outliers.nonzero()).ravel()) + #print(outliers_IDs) + #exit(1) + if outliers_IDs != []: + outliers_IDs=numpy.array(outliers_IDs) + #outliers_IDs=outliers_IDs.ravel() + outliers_IDs=outliers_IDs.astype(numpy.dtype('int64')) + #print(outliers_IDs) + #exit(1) + + (uniq, freq) = (numpy.unique(outliers_IDs, return_counts=True)) + aux_arr = numpy.column_stack((uniq,freq)) + #print("repetitions: ",aux_arr) + #exit(1) + + #if aux_arr != []: + final_index = [] + for i in range(aux_arr.shape[0]): + if aux_arr[i,1] >= 3*11: + final_index.append(aux_arr[i,0]) + + if final_index != []:# and len(final_index) > 1: + #final_index += minIndex + #print("final_index: ",final_index) + following_index = final_index[-1]+1 #Remove following index to ensure we remove remaining SpreadF + previous_index = final_index[0]-1 #Remove previous index to ensure we remove remaning SpreadF + final_index = numpy.concatenate(([previous_index],final_index,[following_index])) + final_index = numpy.unique(final_index) #If there was only one outlier + #print("final_index: ",final_index) + #exit(1) + dataOut.kabxys_integrated[4][final_index,:,0] = numpy.nan + dataOut.kabxys_integrated[6][final_index,:,0] = numpy.nan + + dataOut.flagSpreadF = True + + #Removing echoes greater than 35 dB + maxdB = 10*numpy.log10(dataOut.pbn[0]) + 10 #Lag 0 NOise + #maxdB = 35 #DEBERÍA SER NOISE+ALGO!!!!!!!!!!!!!!!!!!!!!! + #print("noise: ",maxdB - 10) + #print(dataOut.kabxys_integrated[6][:,0,0]) + data = numpy.copy(10*numpy.log10(dataOut.kabxys_integrated[6][:,0,0])) #Lag0 ChB + #print("data: ",data) + + for i in range(12,data.shape[0]): + #for j in range(data.shape[1]): + if data[i]>maxdB: + dataOut.kabxys_integrated[4][i-2:i+3,:,0] = numpy.nan #Debido a que estos ecos son intensos, se + dataOut.kabxys_integrated[6][i-2:i+3,:,0] = numpy.nan #remueven además dos muestras antes y después + dataOut.flagSpreadF = True + #print("Removing Threshold",i) + + #print(final_index+minIndex) + #print(outliers_IDs) + #exit(1) + #print("flagSpreadF",dataOut.flagSpreadF) + + ''' + for lag in range(11): + #print("Lag: ",lag) + outliers = self.mad_based_outlier(dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0], thresh=2.) + dataOut.kabxys_integrated[6][minIndex:maxIndex,lag,0][outliers == True] = numpy.nan + ''' + #dataOut.kabxys_integrated[4][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[4][minIndex:,:,0]) + ''' + import matplotlib.pyplot as plt + for i in range(11): + plt.plot(dataOut.kabxys_integrated[6][:,i,0],dataOut.heightList,label='{}'.format(i)) + plt.xlim(0,2000) + plt.legend() + plt.grid() + plt.show() + ''' + ''' + for nlag in range(11): + print("BEFORE",dataOut.kabxys_integrated[6][:,nlag,0]) + #exit(1) + ''' + #dataOut.kabxys_integrated[6][minIndex:,:,0] = self.remove_coh(dataOut.kabxys_integrated[6][minIndex:,:,0]) + + + ''' + for nlag in range(11): + print("AFTER",dataOut.kabxys_integrated[6][:,nlag,0]) + exit(1) + ''' + + def run(self,dataOut): + dataOut.flagSpreadF = False + #print(gmtime(dataOut.utctime).tm_hour) + #print(dataOut.ut_Faraday) + #exit(1) + if gmtime(dataOut.utctime).tm_hour >= 23. 
or gmtime(dataOut.utctime).tm_hour < 11.: #18-06 LT + #print("Inside if we are in SpreadF Time: ",gmtime(dataOut.utctime).tm_hour) + self.removeSpreadF(dataOut) + #exit(1) + + return dataOut + +class NoisePower(Operation): + """Operation to get noise power from the integrated data of the Double Pulse. + + Parameters: + ----------- + None + + Example + -------- + + op = proc_unit.addOperation(name='NoisePower', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + def hildebrand(self,dataOut,data): + + divider=10 # divider was originally 10 + noise=0.0 + n1=0 + n2=int(dataOut.NDP/2) + sorts= sorted(data) + nums_min= dataOut.NDP/divider + if((dataOut.NDP/divider)> 2): + nums_min= int(dataOut.NDP/divider) + + else: + nums_min=2 + sump=0.0 + sumq=0.0 + j=0 + cont=1 + while( (cont==1) and (j nums_min): + rtest= float(j/(j-1)) +1.0/dataOut.NAVG + t1= (sumq*j) + t2=(rtest*sump*sump) + if( (t1/t2) > 0.990): + j=j-1 + sump-= sorts[j+n1] + sumq-=sorts[j+n1]*sorts[j+n1] + cont= 0 + + noise= sump/j + stdv=numpy.sqrt((sumq- noise*noise)/(j-1)) + return noise + + def run(self,dataOut): + + p=numpy.zeros((dataOut.NR,dataOut.NDP,dataOut.DPL),'float32') + av=numpy.zeros(dataOut.NDP,'float32') + dataOut.pnoise=numpy.zeros(dataOut.NR,'float32') + + p[0,:,:]=dataOut.kabxys_integrated[4][:,:,0]+dataOut.kabxys_integrated[5][:,:,0] #total power for channel 0, just pulse with non-flip + p[1,:,:]=dataOut.kabxys_integrated[6][:,:,0]+dataOut.kabxys_integrated[7][:,:,0] #total power for channel 1 + + for i in range(dataOut.NR): + dataOut.pnoise[i]=0.0 + for k in range(dataOut.DPL): + dataOut.pnoise[i]+= self.hildebrand(dataOut,p[i,:,k]) + + dataOut.pnoise[i]=dataOut.pnoise[i]/dataOut.DPL + + + dataOut.pan=1.0*dataOut.pnoise[0] # weights could change + dataOut.pbn=1.0*dataOut.pnoise[1] # weights could change + ''' + print("pan: ",dataOut.pan) + print("pbn: ",dataOut.pbn) + print("pan dB: ",10*numpy.log10(dataOut.pan)) + print("pbn dB: ",10*numpy.log10(dataOut.pbn)) + exit(1) + ''' + dataOut.power = dataOut.getPower() + return dataOut + + +class DoublePulseACFs(Operation): + """Operation to get the ACFs of the Double Pulse. 
+
+    Parameters:
+    -----------
+    None
+
+    Example
+    --------
+
+    op = proc_unit.addOperation(name='DoublePulseACFs', optype='other')
+
+    """
+
+    def __init__(self, **kwargs):
+
+        Operation.__init__(self, **kwargs)
+        self.aux=1
+
+    def run(self,dataOut):
+
+        dataOut.igcej=numpy.zeros((dataOut.NDP,dataOut.DPL),'int32')
+
+        if self.aux==1:
+            dataOut.rhor=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
+            dataOut.rhoi=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
+            dataOut.sdp=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
+            dataOut.sd=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
+            dataOut.p=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
+            dataOut.alag=numpy.zeros(dataOut.NDP,'float32')
+            for l in range(dataOut.DPL):
+                dataOut.alag[l]=l*dataOut.DH*2.0/150.0
+            self.aux=0
+        sn4=dataOut.pan*dataOut.pbn
+        rhorn=0
+        rhoin=0
+        panrm=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float)
+
+        id = numpy.where(dataOut.heightList>700)[0]
+
+        for i in range(dataOut.NDP):
+            for j in range(dataOut.DPL):
+                ################# Total power
+                pa=numpy.abs(dataOut.kabxys_integrated[4][i,j,0]+dataOut.kabxys_integrated[5][i,j,0])
+                pb=numpy.abs(dataOut.kabxys_integrated[6][i,j,0]+dataOut.kabxys_integrated[7][i,j,0])
+                st4=pa*pb
+                '''
+                if i > id[0]:
+                    dataOut.p[i,j]=pa-dataOut.pan
+                else:
+                    dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn)
+                '''
+                dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn)
+
+                dataOut.sdp[i,j]=2*dataOut.rnint2[j]*((pa+pb)*(pa+pb)) ## ACF
+                rhorp=dataOut.kabxys_integrated[8][i,j,0]+dataOut.kabxys_integrated[11][i,j,0]
                 rhoip=dataOut.kabxys_integrated[10][i,j,0]-dataOut.kabxys_integrated[9][i,j,0]
@@ -2744,6 +3154,7 @@ class DoublePulseACFs(Operation):
             #print("p: ",dataOut.p[33,:])
             #exit(1)
             '''
+        return dataOut

 class DoublePulseACFs_PerLag(Operation):
@@ -3040,6 +3451,7 @@ class ElectronDensityFaraday(Operation):
             plt.plot(dataOut.bki)
             plt.show()
             '''
+
         for i in range(2,dataOut.NSHTS-2):
             fact=(-0.5/(dataOut.RATE*dataOut.DH))*dataOut.bki[i] #four-point derivative, no phase unwrapping necessary
@@ -3755,6 +4167,7 @@ class DPTemperaturesEstimation(Operation):
             plt.show()
             '''
+
         return dataOut
@@ -4981,8 +5394,13 @@ class SSheightProfiles(Operation):
         dataOut.flagDataAsBlock = True
         dataOut.ippSeconds = ippSeconds
         dataOut.step = self.step
+
+
+        #print("SSH")
         #print(numpy.shape(dataOut.data))
         #exit(1)
+        #print(dataOut.data[0,:,150])
+        #exit(1)
         return dataOut
@@ -6476,125 +6894,451 @@ class IntegrationHP(IntegrationDP):
     nint : int
         Number of integrations.
- Example - -------- + Example + -------- + + op = proc_unit.addOperation(name='IntegrationHP', optype='other') + op.addParameter(name='nint', value='30', format='int') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.counter = 0 + self.aux = 0 + + def integration_noise(self,dataOut): + + if self.counter == 0: + dataOut.tnoise=numpy.zeros((dataOut.NR),dtype='float32') + + dataOut.tnoise+=dataOut.noise_final + + def integration_for_long_pulse(self,dataOut): + + if self.counter == 0: + dataOut.output_LP_integrated=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NR),order='F',dtype='complex128') + + dataOut.output_LP_integrated+=dataOut.output_LP + + def run(self,dataOut,nint=None): + + dataOut.flagNoData=True + + dataOut.nint=nint + dataOut.paramInterval=0#int(dataOut.nint*dataOut.header[7][0]*2 ) + dataOut.lat=-11.95 + dataOut.lon=-76.87 + + self.integration_for_long_pulse(dataOut) + + self.integration_noise(dataOut) + + if self.counter==dataOut.nint-1: + dataOut.nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint*10 + #print(dataOut.tnoise) + #exit(1) + dataOut.tnoise[0]*=0.995 + dataOut.tnoise[1]*=0.995 + dataOut.pan=dataOut.tnoise[0]/float(dataOut.NSCAN*dataOut.nint*dataOut.NAVG) + dataOut.pbn=dataOut.tnoise[1]/float(dataOut.NSCAN*dataOut.nint*dataOut.NAVG) + #print(self.counter) ToDo: Fix when nint = 1 + ''' + print("pan: ",dataOut.pan) + print("pbn: ",dataOut.pbn) + #print("tnoise: ",dataOut.tnoise) + exit(1) + ''' + #print(dataOut.output_LP_integrated[0,30,2]) + #exit(1) + self.integration_for_double_pulse(dataOut) + #if self.counter==dataOut.nint: + #print(dataOut.kabxys_integrated[8][53,6,0]+dataOut.kabxys_integrated[11][53,6,0]) + #print(dataOut.kabxys_integrated[8][53,9,0]+dataOut.kabxys_integrated[11][53,9,0]) + #exit(1) + return dataOut + +class SumFlipsHP(SumFlips): + """Operation to sum the flip and unflip part of certain cross products of the Double Pulse. + + Parameters: + ----------- + None + + Example + -------- + + op = proc_unit.addOperation(name='SumFlipsHP', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + def rint2HP(self,dataOut): + + dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32') + + for l in range(dataOut.DPL): + if(l==0 or (l>=3 and l <=6)): + dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.NAVG*16.0) + else: + dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.NAVG*8.0) + + def run(self,dataOut): + + self.rint2HP(dataOut) + self.SumLags(dataOut) + + hei = 2 + lag = 0 + ''' + for hei in range(67): + print("hei",hei) + print(dataOut.kabxys_integrated[8][hei,:,0]+dataOut.kabxys_integrated[11][hei,:,0]) + print(dataOut.kabxys_integrated[10][hei,:,0]-dataOut.kabxys_integrated[9][hei,:,0]) + exit(1) + ''' + ''' + print("b",(dataOut.kabxys_integrated[4][hei,lag,0]+dataOut.kabxys_integrated[5][hei,lag,0])) + print((dataOut.kabxys_integrated[6][hei,lag,0]+dataOut.kabxys_integrated[7][hei,lag,0])) + print("c",(dataOut.kabxys_integrated[8][hei,lag,0]+dataOut.kabxys_integrated[11][hei,lag,0])) + print((dataOut.kabxys_integrated[10][hei,lag,0]-dataOut.kabxys_integrated[9][hei,lag,0])) + exit(1) + ''' + return dataOut + + +class LongPulseAnalysis(Operation): + """Operation to estimate ACFs, temperatures, total electron density and Hydrogen/Helium fractions from the Long Pulse data. 
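+    It deconvolves the rectangular transmit pulse from the long-pulse power
+    profile, merges the result with the double-pulse density estimate,
+    propagates the lag-product errors and calls the full-profile fitting
+    routine to obtain the electron/ion temperatures and composition.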
+
+    Parameters:
+    -----------
+    NACF : int
+        .*
+
+    Example
+    --------
+
+    op = proc_unit.addOperation(name='LongPulseAnalysis', optype='other')
+    op.addParameter(name='NACF', value='16', format='int')
+
+    """
+
+    def __init__(self, **kwargs):
+
+        Operation.__init__(self, **kwargs)
+        self.aux=1
+
+    def run(self,dataOut,NACF):
+
+        dataOut.NACF=NACF
+        dataOut.heightList=dataOut.DH*(numpy.arange(dataOut.NACF))
+        anoise0=dataOut.tnoise[0]
+        anoise1=anoise0*0.0 #seems to be noise in 1st lag 0.015 before '14
+
+        if self.aux:
+            #dataOut.cut=31 #26 #height=31*15=465
+            self.cal=numpy.zeros((dataOut.NLAG),'float32')
+            self.drift=numpy.zeros((200),'float32')
+            self.rdrift=numpy.zeros((200),'float32')
+            self.ddrift=numpy.zeros((200),'float32')
+            self.sigma=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
+            self.powera=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
+            self.powerb=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
+            self.perror=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
+            dataOut.ene=numpy.zeros((dataOut.NRANGE),'float32')
+            self.dpulse=numpy.zeros((dataOut.NACF),'float32')
+            self.lpulse=numpy.zeros((dataOut.NACF),'float32')
+            dataOut.lags_LP=numpy.zeros((dataOut.IBITS),order='F',dtype='float32')
+            self.lagp=numpy.zeros((dataOut.NACF),'float32')
+            self.u=numpy.zeros((2*dataOut.NACF,2*dataOut.NACF),'float32')
+            dataOut.ne=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32')
+            dataOut.te=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
+            dataOut.ete=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
+            dataOut.ti=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
+            dataOut.eti=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
+            dataOut.ph=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
+            dataOut.eph=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
+            dataOut.phe=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
+            dataOut.ephe=numpy.zeros((dataOut.NACF),order='F',dtype='float32')
+            dataOut.errors=numpy.zeros((dataOut.IBITS,max(dataOut.NRANGE,dataOut.NSHTS)),order='F',dtype='float32')
+            dataOut.fit_array_real=numpy.zeros((max(dataOut.NRANGE,dataOut.NSHTS),dataOut.NLAG),order='F',dtype='float32')
+            dataOut.status=numpy.zeros(1,'float32')
+            dataOut.tx=240.0 #should come from the header #hybrid
+
+            for i in range(dataOut.IBITS):
+                dataOut.lags_LP[i]=float(i)*(dataOut.tx/150.0)/float(dataOut.IBITS) # (float)i*(header.tx/150.0)/(float)IBITS;
+
+            self.aux=0
+
+        dataOut.cut=30
+        for i in range(30,15,-1):
+            if numpy.nanmax(dataOut.acfs_error_to_plot[i,:])>=10 or dataOut.info2[i]==0:
+                dataOut.cut=i-1
+
+        for i in range(dataOut.NLAG):
+            self.cal[i]=sum(dataOut.output_LP_integrated[i,:,3].real)
+
+        self.cal/=float(dataOut.NRANGE)
+
+        #################### TRY MORE INTEGRATION; OTHERWISE ADJUST THE VALUE OF "NIS" ####################
+        # SEE dataOut.nProfiles_LP #
+
+        '''
+        #PLOT POWER VS NOISE; TOO MUCH SIGNAL MAY BE BEING REMOVED
+        #print(dataOut.heightList)
+        import matplotlib.pyplot as plt
+        plt.plot(10*numpy.log10(dataOut.output_LP_integrated.real[0,:,0]),dataOut.range1)
+        #plt.plot(10*numpy.log10(dataOut.output_LP_integrated.real[0,:,0]/dataOut.nProfiles_LP),dataOut.range1)
+        plt.axvline(10*numpy.log10(anoise0),color='k',linestyle='dashed')
+        plt.grid()
+        plt.xlim(20,100)
+        plt.show()
+        '''
+
+        for j in range(dataOut.NACF+2*dataOut.IBITS+2):
+
+            dataOut.output_LP_integrated.real[0,j,0]-=anoise0 #lag0 ch0
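+            #The lines below also subtract the lag-1 noise estimate (anoise1,
+            #forced to zero above) and the per-lag calibration offsets in
+            #self.cal from channel 0 of the integrated long-pulse lag profiles.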
dataOut.output_LP_integrated.real[1,j,0]-=anoise1 #lag1 ch0 + + for i in range(1,dataOut.NLAG): #remove cal data from certain lags + dataOut.output_LP_integrated.real[i,j,0]-=self.cal[i] + k=max(j,26) #constant power below range 26 + self.powera[j]=dataOut.output_LP_integrated.real[0,k,0] + + ## examine drifts here - based on 60 'indep.' estimates + #print(numpy.sum(self.powera)) + #exit(1) + #nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint*10 + nis = dataOut.nis + #print("nis",nis) + alpha=beta=delta=0.0 + nest=0 + gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[1]*1.0e-3) + beta=gamma*(math.atan2(dataOut.output_LP_integrated.imag[14,0,2],dataOut.output_LP_integrated.real[14,0,2])-math.atan2(dataOut.output_LP_integrated.imag[1,0,2],dataOut.output_LP_integrated.real[1,0,2]))/13.0 + #print(gamma,beta) + #exit(1) + for i in range(1,3): + gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[i]*1.0e-3) + #print("gamma",gamma) + for j in range(34,44): + rho2=numpy.abs(dataOut.output_LP_integrated[i,j,0])/numpy.abs(dataOut.output_LP_integrated[0,j,0]) + dataOut.dphi2=(1.0/rho2-1.0)/(float(2*nis)) + dataOut.dphi2*=gamma**2 + pest=gamma*math.atan(dataOut.output_LP_integrated.imag[i,j,0]/dataOut.output_LP_integrated.real[i,j,0]) + #print("1",dataOut.output_LP_integrated.imag[i,j,0]) + #print("2",dataOut.output_LP_integrated.real[i,j,0]) + self.drift[nest]=pest + self.ddrift[nest]=dataOut.dphi2 + self.rdrift[nest]=float(nest) + nest+=1 + + sorted(self.drift[:nest]) + + #print(dataOut.dphi2) + #exit(1) + + for j in range(int(nest/4),int(3*nest/4)): + #i=int(self.rdrift[j]) + alpha+=self.drift[j]/self.ddrift[j] + delta+=1.0/self.ddrift[j] + + alpha/=delta + delta=1./numpy.sqrt(delta) + vdrift=alpha-beta + dvdrift=delta + + #need to develop estimate of complete density profile using all + #available data - op = proc_unit.addOperation(name='IntegrationHP', optype='other') - op.addParameter(name='nint', value='30', format='int') + #estimate sample variances for long-pulse power profile - """ + #nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint + nis = dataOut.nis/10 + #print("nis",nis) - def __init__(self, **kwargs): + self.sigma[:dataOut.NACF+2*dataOut.IBITS+2]=((anoise0+self.powera[:dataOut.NACF+2*dataOut.IBITS+2])**2)/float(nis) + #print(self.sigma) + #exit(1) + ioff=1 - Operation.__init__(self, **kwargs) + #deconvolve rectangular pulse shape from profile ==> powerb, perror - self.counter = 0 - self.aux = 0 - def integration_noise(self,dataOut): + ############# START nnlswrap############# - if self.counter == 0: - dataOut.tnoise=numpy.zeros((dataOut.NR),dtype='float32') + if dataOut.ut_Faraday>14.0: + alpha_nnlswrap=20.0 + else: + alpha_nnlswrap=30.0 - dataOut.tnoise+=dataOut.noise_final + range1_nnls=dataOut.NACF + range2_nnls=dataOut.NACF+dataOut.IBITS-1 - def integration_for_long_pulse(self,dataOut): + g_nnlswrap=numpy.zeros((range1_nnls,range2_nnls),'float32') + a_nnlswrap=numpy.zeros((range2_nnls,range2_nnls),'float64') - if self.counter == 0: - dataOut.output_LP_integrated=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NR),order='F',dtype='complex128') + for i in range(range1_nnls): + for j in range(range2_nnls): + if j>=i and j16: + self.dpulse[i]+=dataOut.ph2[k]/dataOut.h2[k] + elif k>=36-aux: + self.lpulse[i]+=self.powerb[k] + self.lagp[i]=self.powera[i] - Example - -------- + #find scale factor that best merges profiles - op = proc_unit.addOperation(name='SumFlipsHP', optype='other') + qi=sum(self.dpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2) + 
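+        # qi (above) and ri, si, ui, vi (below) accumulate the terms of the
+        # 2x2 normal equations of a weighted least-squares fit: find alpha and
+        # beta such that alpha*dpulse + beta*lpulse best matches the lag-0
+        # power profile lagp, with weights 1/(lagp+anoise0)**2.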
ri=sum((self.dpulse[32:dataOut.NACF]*self.lpulse[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2) + si=sum((self.dpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2) + ui=sum(self.lpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2) + vi=sum((self.lpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2) - """ + alpha=(si*ui-vi*ri)/(qi*ui-ri*ri) + beta=(qi*vi-ri*si)/(qi*ui-ri*ri) - def __init__(self, **kwargs): + #form density profile estimate, merging rescaled power profiles + #print(dataOut.h2) + #print(numpy.sum(alpha)) + #print(numpy.sum(dataOut.ph2)) + self.powerb[16:36-aux]=alpha*dataOut.ph2[16:36-aux]/dataOut.h2[16:36-aux] + self.powerb[36-aux:dataOut.NACF]*=beta - Operation.__init__(self, **kwargs) + #form Ne estimate, fill in error estimate at low altitudes - def rint2HP(self,dataOut): + dataOut.ene[0:36-aux]=dataOut.sdp2[0:36-aux]/dataOut.ph2[0:36-aux] + dataOut.ne[:dataOut.NACF]=self.powerb[:dataOut.NACF]*dataOut.h2[:dataOut.NACF]/alpha + #print(numpy.sum(self.powerb)) + #print(numpy.sum(dataOut.ene)) + #print(numpy.sum(dataOut.ne)) + #exit(1) + #now do error propagation: store zero lag error covariance in u - dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32') + nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint/1 # DLH serious debris removal - for l in range(dataOut.DPL): - if(l==0 or (l>=3 and l <=6)): - dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.NAVG*16.0) - else: - dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.NAVG*8.0) + for i in range(dataOut.NACF): + for j in range(i,dataOut.NACF): + if j-i>=dataOut.IBITS: + self.u[i,j]=0.0 + else: + self.u[i,j]=dataOut.output_LP_integrated.real[j-i,i,0]**2/float(nis) + self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,i,0])/dataOut.output_LP_integrated.real[0,i,0] + self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,j,0])/dataOut.output_LP_integrated.real[0,j,0] - def run(self,dataOut): + self.u[j,i]=self.u[i,j] - self.rint2HP(dataOut) - self.SumLags(dataOut) + #now error analyis for lag product matrix (diag), place in acf_err - hei = 2 - lag = 0 + for i in range(dataOut.NACF): + for j in range(dataOut.IBITS): + if j==0: + dataOut.errors[0,i]=numpy.sqrt(self.u[i,i]) + else: + dataOut.errors[j,i]=numpy.sqrt(((dataOut.output_LP_integrated.real[0,i,0]+anoise0)*(dataOut.output_LP_integrated.real[0,i+j,0]+anoise0)+dataOut.output_LP_integrated.real[j,i,0]**2)/float(2*nis)) ''' - for hei in range(67): - print("hei",hei) - print(dataOut.kabxys_integrated[8][hei,:,0]+dataOut.kabxys_integrated[11][hei,:,0]) - print(dataOut.kabxys_integrated[10][hei,:,0]-dataOut.kabxys_integrated[9][hei,:,0]) + print(numpy.sum(dataOut.output_LP_integrated)) + print(numpy.sum(dataOut.errors)) + print(numpy.sum(self.powerb)) + print(numpy.sum(dataOut.ne)) + print(numpy.sum(dataOut.lags_LP)) + print(numpy.sum(dataOut.thb)) + print(numpy.sum(dataOut.bfm)) + print(numpy.sum(dataOut.te)) + print(numpy.sum(dataOut.ete)) + print(numpy.sum(dataOut.ti)) + print(numpy.sum(dataOut.eti)) + print(numpy.sum(dataOut.ph)) + print(numpy.sum(dataOut.eph)) + print(numpy.sum(dataOut.phe)) + print(numpy.sum(dataOut.ephe)) + print(numpy.sum(dataOut.range1)) + print(numpy.sum(dataOut.ut)) + print(numpy.sum(dataOut.NACF)) + print(numpy.sum(dataOut.fit_array_real)) + print(numpy.sum(dataOut.status)) + print(numpy.sum(dataOut.NRANGE)) + print(numpy.sum(dataOut.IBITS)) exit(1) ''' ''' - print("b",(dataOut.kabxys_integrated[4][hei,lag,0]+dataOut.kabxys_integrated[5][hei,lag,0])) 
- print((dataOut.kabxys_integrated[6][hei,lag,0]+dataOut.kabxys_integrated[7][hei,lag,0])) - print("c",(dataOut.kabxys_integrated[8][hei,lag,0]+dataOut.kabxys_integrated[11][hei,lag,0])) - print((dataOut.kabxys_integrated[10][hei,lag,0]-dataOut.kabxys_integrated[9][hei,lag,0])) + print(dataOut.te2[13:16]) + print(numpy.sum(dataOut.te2)) exit(1) ''' - return dataOut + print("Success") + #print(dataOut.NRANGE) + with suppress_stdout_stderr(): + #pass + full_profile_profile.profile(numpy.transpose(dataOut.output_LP_integrated,(2,1,0)),numpy.transpose(dataOut.errors),self.powerb,dataOut.ne,dataOut.lags_LP,dataOut.thb,dataOut.bfm,dataOut.te,dataOut.ete,dataOut.ti,dataOut.eti,dataOut.ph,dataOut.eph,dataOut.phe,dataOut.ephe,dataOut.range1,dataOut.ut,dataOut.NACF,dataOut.fit_array_real,dataOut.status,dataOut.NRANGE,dataOut.IBITS) + print("status: ",dataOut.status) -class LongPulseAnalysis(Operation): + if dataOut.status>=3.5: + dataOut.te[:]=numpy.nan + dataOut.ete[:]=numpy.nan + dataOut.ti[:]=numpy.nan + dataOut.eti[:]=numpy.nan + dataOut.ph[:]=numpy.nan + dataOut.eph[:]=numpy.nan + dataOut.phe[:]=numpy.nan + dataOut.ephe[:]=numpy.nan + + return dataOut + +class LongPulseAnalysis_V2(Operation): """Operation to estimate ACFs, temperatures, total electron density and Hydrogen/Helium fractions from the Long Pulse data. Parameters: @@ -6853,36 +7597,7 @@ class LongPulseAnalysis(Operation): dataOut.errors[0,i]=numpy.sqrt(self.u[i,i]) else: dataOut.errors[j,i]=numpy.sqrt(((dataOut.output_LP_integrated.real[0,i,0]+anoise0)*(dataOut.output_LP_integrated.real[0,i+j,0]+anoise0)+dataOut.output_LP_integrated.real[j,i,0]**2)/float(2*nis)) - ''' - print(numpy.sum(dataOut.output_LP_integrated)) - print(numpy.sum(dataOut.errors)) - print(numpy.sum(self.powerb)) - print(numpy.sum(dataOut.ne)) - print(numpy.sum(dataOut.lags_LP)) - print(numpy.sum(dataOut.thb)) - print(numpy.sum(dataOut.bfm)) - print(numpy.sum(dataOut.te)) - print(numpy.sum(dataOut.ete)) - print(numpy.sum(dataOut.ti)) - print(numpy.sum(dataOut.eti)) - print(numpy.sum(dataOut.ph)) - print(numpy.sum(dataOut.eph)) - print(numpy.sum(dataOut.phe)) - print(numpy.sum(dataOut.ephe)) - print(numpy.sum(dataOut.range1)) - print(numpy.sum(dataOut.ut)) - print(numpy.sum(dataOut.NACF)) - print(numpy.sum(dataOut.fit_array_real)) - print(numpy.sum(dataOut.status)) - print(numpy.sum(dataOut.NRANGE)) - print(numpy.sum(dataOut.IBITS)) - exit(1) - ''' - ''' - print(dataOut.te2[13:16]) - print(numpy.sum(dataOut.te2)) - exit(1) - ''' + print("Success") with suppress_stdout_stderr(): #pass @@ -6900,7 +7615,6 @@ class LongPulseAnalysis(Operation): return dataOut - class PulsePairVoltage(Operation): ''' Function PulsePair(Signal Power, Velocity) diff --git a/setup.py b/setup.py index 5fa2a77..892173c 100644 --- a/setup.py +++ b/setup.py @@ -74,6 +74,7 @@ setup( cmdclass = {'build_ext': build_ext}, ext_modules=[ Extension("schainpy.model.data._noise", ["schainc/_noise.c"]), + Extension("schainpy.model.data._HS_algorithm", ["schainc/_HS_algorithm.c"]), ], setup_requires = ["numpy"], install_requires = [