diff --git a/schainpy/model/data/jrodata.py b/schainpy/model/data/jrodata.py index 57d68d6..213dfcb 100644 --- a/schainpy/model/data/jrodata.py +++ b/schainpy/model/data/jrodata.py @@ -260,7 +260,7 @@ class JROData(GenericData): def getFmax(self): PRF = 1. / (self.ippSeconds * self.nCohInt) - + #print("ippsec",self.ippSeconds) fmax = PRF return fmax diff --git a/schainpy/model/graphics/jroplot_base.py b/schainpy/model/graphics/jroplot_base.py index 3a861a7..98a6b5c 100644 --- a/schainpy/model/graphics/jroplot_base.py +++ b/schainpy/model/graphics/jroplot_base.py @@ -256,6 +256,7 @@ class Plot(Operation): self.__throttle_plot = apply_throttle(self.throttle) code = self.attr_data if self.attr_data else self.CODE self.data = PlotterData(self.CODE, self.exp_code, self.localtime) + #self.EEJtype = kwargs.get('EEJtype', 2) if self.server: if not self.server.startswith('tcp://'): diff --git a/schainpy/model/graphics/jroplot_parameters.py b/schainpy/model/graphics/jroplot_parameters.py index 83b1c45..df92584 100644 --- a/schainpy/model/graphics/jroplot_parameters.py +++ b/schainpy/model/graphics/jroplot_parameters.py @@ -75,7 +75,7 @@ class SnrPlot(RTIPlot): def update(self, dataOut): data = { - 'snr': 10*numpy.log10(dataOut.data_snr) + 'snr': 10*numpy.log10(dataOut.data_snr) } return data, {} @@ -91,7 +91,120 @@ class DopplerPlot(RTIPlot): def update(self, dataOut): data = { - 'dop': 10*numpy.log10(dataOut.data_dop) + 'dop': 10*numpy.log10(dataOut.data_dop) + } + + return data, {} + +class DopplerEEJPlot_V0(RTIPlot): + ''' + Written by R. Flores + ''' + ''' + Plot for EEJ + ''' + + CODE = 'dop' + colormap = 'RdBu_r' + colormap = 'jet' + + def setup(self): + + self.xaxis = 'time' + self.ncols = 1 + self.nrows = len(self.data.channels) + self.nplots = len(self.data.channels) + self.ylabel = 'Range [km]' + self.xlabel = 'Time' + self.cb_label = '(m/s)' + self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.1, 'right':0.95}) + self.titles = ['{} Channel {}'.format( + self.CODE.upper(), x) for x in range(self.nrows)] + + def update(self, dataOut): + #print(self.EEJtype) + + if self.EEJtype == 1: + data = { + 'dop': dataOut.Oblique_params[:,-2,:] + } + elif self.EEJtype == 2: + data = { + 'dop': dataOut.Oblique_params[:,-1,:] + } + + return data, {} + +class DopplerEEJPlot(RTIPlot): + ''' + Written by R. Flores + ''' + ''' + Plot for Doppler Shift EEJ + ''' + + CODE = 'dop' + colormap = 'RdBu_r' + #colormap = 'jet' + + def setup(self): + + self.xaxis = 'time' + self.ncols = 1 + self.nrows = 2 + self.nplots = 2 + self.ylabel = 'Range [km]' + self.xlabel = 'Time' + self.cb_label = '(m/s)' + self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.1, 'right':0.95}) + self.titles = ['{} EJJ Type {} /'.format( + self.CODE.upper(), x) for x in range(1,1+self.nrows)] + + def update(self, dataOut): + + if dataOut.mode == 11: #Double Gaussian + doppler = numpy.append(dataOut.Oblique_params[:,1,:],dataOut.Oblique_params[:,4,:],axis=0) + elif dataOut.mode == 9: #Double Skew Gaussian + doppler = numpy.append(dataOut.Oblique_params[:,-2,:],dataOut.Oblique_params[:,-1,:],axis=0) + data = { + 'dop': doppler + } + + return data, {} + +class SpcWidthEEJPlot(RTIPlot): + ''' + Written by R. 
Flores + ''' + ''' + Plot for EEJ Spectral Width + ''' + + CODE = 'width' + colormap = 'RdBu_r' + colormap = 'jet' + + def setup(self): + + self.xaxis = 'time' + self.ncols = 1 + self.nrows = 2 + self.nplots = 2 + self.ylabel = 'Range [km]' + self.xlabel = 'Time' + self.cb_label = '(m/s)' + self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.1, 'right':0.95}) + self.titles = ['{} EJJ Type {} /'.format( + self.CODE.upper(), x) for x in range(1,1+self.nrows)] + + def update(self, dataOut): + + if dataOut.mode == 11: #Double Gaussian + width = numpy.append(dataOut.Oblique_params[:,2,:],dataOut.Oblique_params[:,5,:],axis=0) + elif dataOut.mode == 9: #Double Skew Gaussian + width = numpy.append(dataOut.Oblique_params[:,2,:],dataOut.Oblique_params[:,6,:],axis=0) + data = { + 'width': width } return data, {} @@ -107,7 +220,7 @@ class PowerPlot(RTIPlot): def update(self, dataOut): data = { - 'pow': 10*numpy.log10(dataOut.data_pow/dataOut.normFactor) + 'pow': 10*numpy.log10(dataOut.data_pow/dataOut.normFactor) } return data, {} @@ -208,7 +321,7 @@ class GenericRTIPlot(Plot): meta = {} return data, meta - + def plot(self): # self.data.normalize_heights() self.x = self.data.times diff --git a/schainpy/model/graphics/jroplot_spectra.py b/schainpy/model/graphics/jroplot_spectra.py index 5d53bc1..182edaa 100644 --- a/schainpy/model/graphics/jroplot_spectra.py +++ b/schainpy/model/graphics/jroplot_spectra.py @@ -39,9 +39,14 @@ class SpectraPlot(Plot): data = {} meta = {} + spc = 10*numpy.log10(dataOut.data_spc/dataOut.normFactor) + #print("Spc: ",spc[0]) + #exit(1) data['spc'] = spc data['rti'] = dataOut.getPower() + #print(data['rti'][0]) + #exit(1) #print("NormFactor: ",dataOut.normFactor) #data['noise'] = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor) if hasattr(dataOut, 'LagPlot'): #Double Pulse @@ -163,11 +168,22 @@ class SpectraObliquePlot(Plot): data['rti'] = dataOut.getPower() data['noise'] = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor) meta['xrange'] = (dataOut.getFreqRange(1)/1000., dataOut.getAcfRange(1), dataOut.getVelRange(1)) - - data['shift1'] = dataOut.Oblique_params[0][1] - data['shift2'] = dataOut.Oblique_params[0][4] - data['shift1_error'] = dataOut.Oblique_param_errors[0][1] - data['shift2_error'] = dataOut.Oblique_param_errors[0][4] + ''' + data['shift1'] = dataOut.Oblique_params[0,-2,:] + data['shift2'] = dataOut.Oblique_params[0,-1,:] + data['shift1_error'] = dataOut.Oblique_param_errors[0,-2,:] + data['shift2_error'] = dataOut.Oblique_param_errors[0,-1,:] + ''' + ''' + data['shift1'] = dataOut.Oblique_params[0,1,:] + data['shift2'] = dataOut.Oblique_params[0,4,:] + data['shift1_error'] = dataOut.Oblique_param_errors[0,1,:] + data['shift2_error'] = dataOut.Oblique_param_errors[0,4,:] + ''' + data['shift1'] = dataOut.Dop_EEJ_T1[0] + data['shift2'] = dataOut.Dop_EEJ_T2[0] + data['shift1_error'] = dataOut.Err_Dop_EEJ_T1[0] + data['shift2_error'] = dataOut.Err_Dop_EEJ_T2[0] return data, meta @@ -187,15 +203,19 @@ class SpectraObliquePlot(Plot): y = self.data.yrange self.y = y - z = self.data['spc'] + + data = self.data[-1] + z = data['spc'] for n, ax in enumerate(self.axes): noise = self.data['noise'][n][-1] - shift1 = self.data['shift1'] - shift2 = self.data['shift2'] - err1 = self.data['shift1_error'] - err2 = self.data['shift2_error'] + shift1 = data['shift1'] + #print(shift1) + shift2 = data['shift2'] + err1 = data['shift1_error'] + err2 = data['shift2_error'] if ax.firsttime: + self.xmax = self.xmax if self.xmax else numpy.nanmax(x) self.xmin = 
self.xmin if self.xmin else -self.xmax self.zmin = self.zmin if self.zmin else numpy.nanmin(z) @@ -212,17 +232,20 @@ class SpectraObliquePlot(Plot): ax.plt_noise = self.pf_axes[n].plot(numpy.repeat(noise, len(y)), y, color="k", linestyle="dashed", lw=1)[0] - self.ploterr1 = ax.errorbar(shift1, y, xerr=err1, fmt='k^', elinewidth=0.2, marker='x', linestyle='None',markersize=0.5,capsize=0.3,markeredgewidth=0.2) - self.ploterr2 = ax.errorbar(shift2, y, xerr=err2, fmt='m^',elinewidth=0.2,marker='x',linestyle='None',markersize=0.5,capsize=0.3,markeredgewidth=0.2) + self.ploterr1 = ax.errorbar(shift1, y, xerr=err1, fmt='k^', elinewidth=2.2, marker='o', linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2) + self.ploterr2 = ax.errorbar(shift2, y, xerr=err2, fmt='m^',elinewidth=2.2,marker='o',linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2) + #print("plotter1: ", self.ploterr1,shift1) + else: + #print("else plotter1: ", self.ploterr1,shift1) self.ploterr1.remove() self.ploterr2.remove() ax.plt.set_array(z[n].T.ravel()) if self.showprofile: ax.plt_profile.set_data(self.data['rti'][n][-1], y) ax.plt_noise.set_data(numpy.repeat(noise, len(y)), y) - self.ploterr1 = ax.errorbar(shift1, y, xerr=err1, fmt='k^',elinewidth=0.2,marker='x',linestyle='None',markersize=0.5,capsize=0.3,markeredgewidth=0.2) - self.ploterr2 = ax.errorbar(shift2, y, xerr=err2, fmt='m^',elinewidth=0.2,marker='x',linestyle='None',markersize=0.5,capsize=0.3,markeredgewidth=0.2) + self.ploterr1 = ax.errorbar(shift1, y, xerr=err1, fmt='k^', elinewidth=2.2, marker='o', linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2) + self.ploterr2 = ax.errorbar(shift2, y, xerr=err2, fmt='m^',elinewidth=2.2,marker='o',linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2) self.titles.append('CH {}: {:3.2f}dB'.format(n, noise)) @@ -671,6 +694,7 @@ class RTIPlot(Plot): data = {} meta = {} data['rti'] = dataOut.getPower() + #print(numpy.shape(data['rti'])) data['noise'] = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor) @@ -692,6 +716,7 @@ class RTIPlot(Plot): for n, ax in enumerate(self.axes): self.zmin = self.zmin if self.zmin else numpy.min(self.z) self.zmax = self.zmax if self.zmax else numpy.max(self.z) + if ax.firsttime: ax.plt = ax.pcolormesh(x, y, z[n].T, vmin=self.zmin, diff --git a/schainpy/model/graphics/jroplot_voltage_lags.py b/schainpy/model/graphics/jroplot_voltage_lags.py index 7be4846..20bd2ed 100644 --- a/schainpy/model/graphics/jroplot_voltage_lags.py +++ b/schainpy/model/graphics/jroplot_voltage_lags.py @@ -213,7 +213,7 @@ class DenRTIPlot(RTIPlot): data = {} meta = {} - data['denrti'] = dataOut.DensityFinal + data['denrti'] = dataOut.DensityFinal*1.e-6 #To Plot in cm^-3 return data, meta diff --git a/schainpy/model/proc/jroproc_base.py b/schainpy/model/proc/jroproc_base.py index 33df194..d374f1f 100644 --- a/schainpy/model/proc/jroproc_base.py +++ b/schainpy/model/proc/jroproc_base.py @@ -70,7 +70,7 @@ class ProcessingUnit(object): def call(self, **kwargs): ''' ''' - #print("call") + try: if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error: #if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error and not self.dataIn.runNextUnit: @@ -125,6 +125,7 @@ class ProcessingUnit(object): runNextUnit = self.dataOut.isReady() except: runNextUnit = self.dataOut.isReady() + #exit(1) #if not self.dataOut.isReady(): #return 'Error' if self.dataOut.error else input() #print("NexT",runNextUnit) diff --git 
a/schainpy/model/proc/jroproc_parameters.py b/schainpy/model/proc/jroproc_parameters.py index 1a0a9cb..0970ff8 100644 --- a/schainpy/model/proc/jroproc_parameters.py +++ b/schainpy/model/proc/jroproc_parameters.py @@ -2,6 +2,7 @@ import numpy import math from scipy import optimize, interpolate, signal, stats, ndimage import scipy +from scipy.optimize import least_squares import re import datetime import copy @@ -91,6 +92,7 @@ class ParametersProc(ProcessingUnit): self.dataOut.timeInterval1 = self.dataIn.timeInterval self.dataOut.heightList = self.dataIn.heightList self.dataOut.frequency = self.dataIn.frequency + #self.dataOut.runNextUnit = self.dataIn.runNextUnit #self.dataOut.noise = self.dataIn.noise def run(self): @@ -156,6 +158,7 @@ class ParametersProc(ProcessingUnit): if hasattr(self.dataIn, 'COFA'): #COFA self.dataOut.COFA = self.dataIn.COFA + #self.dataOut.runNextUnit = self.dataIn.runNextUnit #---------------------- Correlation Data --------------------------- @@ -668,7 +671,9 @@ class GaussianFit(Operation): return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.) class Oblique_Gauss_Fit(Operation): - + ''' + Written by R. Flores + ''' def __init__(self): Operation.__init__(self) @@ -1060,6 +1065,17 @@ class Oblique_Gauss_Fit(Operation): val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d return val + def gaussian(self, x, a, b, c, d): + z = (x-b)/c + val = a * numpy.exp(-z**2/2) + d + return val + + def double_gaussian(self, x, a1, b1, c1, a2, b2, c2, d): + z1 = (x-b1)/c1 + z2 = (x-b2)/c2 + val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d + return val + def double_gaussian_double_skew(self,x, a1, b1, c1, k1, a2, b2, c2, k2, d): z1 = (x-b1)/c1 @@ -1136,7 +1152,7 @@ class Oblique_Gauss_Fit(Operation): #return A1f, B1f, C1f, A2f, B2f, C2f, K2f, Df, doppler return A1f, B1f, C1f, A2f, B2f, C2f, K2f, Df, doppler - def Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(self,spc,freq): + def Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh): from scipy.optimize import least_squares @@ -1146,6 +1162,7 @@ class Oblique_Gauss_Fit(Operation): from scipy.signal import medfilt Nincoh = 20 Nincoh = 80 + Nincoh = Nincoh spcm = medfilt(spc,11)/numpy.sqrt(Nincoh) # define a least squares function to optimize @@ -1156,11 +1173,23 @@ class Oblique_Gauss_Fit(Operation): # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf]) # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf]) #print(a1,b1,c1,a2,b2,c2,k2,d) - bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf]) + #bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf]) + bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-140,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf]) #print(bounds) #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf]) params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max] - x0_value = numpy.array([spc_max,-400,30,-.1,spc_max/4,-200,150,1,1.0e7]) + ####################x0_value = numpy.array([spc_max,-400,30,-.1,spc_max/4,-200,150,1,1.0e7]) + + dop1_x0 = freq[numpy.argmax(spc)] + ####dop1_x0 = freq[numpy.argmax(spcm)] + if dop1_x0 < 0: + dop2_x0 = dop1_x0 + 100 + if dop1_x0 > 0: + dop2_x0 = dop1_x0 - 100 + + 
###########x0_value = numpy.array([spc_max,-200.5,30,-.1,spc_max/4,-100.5,150,1,1.0e7]) + x0_value = numpy.array([spc_max,dop1_x0,30,-.1,spc_max/4, dop2_x0,150,1,1.0e7]) + #x0_value = numpy.array([spc_max,-400.5,30,-.1,spc_max/4,-200.5,150,1,1.0e7]) #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1) popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0) # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1) @@ -1193,6 +1222,65 @@ class Oblique_Gauss_Fit(Operation): #exit(1) return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler1, doppler2, error + def Double_Gauss_fit_weight_bound_no_inputs(self,spc,freq,Nincoh): + + from scipy.optimize import least_squares + + freq_max = numpy.max(numpy.abs(freq)) + spc_max = numpy.max(spc) + + from scipy.signal import medfilt + Nincoh = 20 + Nincoh = 80 + Nincoh = Nincoh + spcm = medfilt(spc,11)/numpy.sqrt(Nincoh) + + # define a least squares function to optimize + def lsq_func(params): + return (spc-self.double_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm + + # fit + # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf]) + # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf]) + #print(a1,b1,c1,a2,b2,c2,k2,d) + + dop1_x0 = freq[numpy.argmax(spcm)] + + #####bounds=([0,-numpy.inf,0,0,-400,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf]) + #####bounds=([0,-numpy.inf,0,0,dop1_x0-50,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf]) + bounds=([0,-numpy.inf,0,0,dop1_x0-50,0,0],[numpy.inf,-300,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf]) + #####bounds=([0,-numpy.inf,0,0,-500,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf]) + #bounds=([0,-numpy.inf,0,-numpy.inf,0,-500,0,0,0],[numpy.inf,-240,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf]) + #print(bounds) + #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf]) + params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max] + #x0_value = numpy.array([spc_max,-400.5,30,spc_max/4,-200.5,150,1.0e7]) + x0_value = numpy.array([spc_max,-400.5,30,spc_max/4,dop1_x0,150,1.0e7]) + #x0_value = numpy.array([spc_max,-420.5,30,-.1,spc_max/4,-50,150,.1,numpy.mean(spc[-50:])]) + #print("before popt") + #print(x0_value) + #print("freq: ",freq) + #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1) + popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0) + # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1) + #print("after popt") + J = popt.jac + + try: + cov = numpy.linalg.inv(J.T.dot(J)) + error = numpy.sqrt(numpy.diagonal(cov)) + except: + error = numpy.ones((7))*numpy.NAN + + A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2] + A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5] + Df = popt.x[6] + #print("before return") + return A1f, B1f, C1f, A2f, B2f, C2f, Df, error + + + + def Double_Gauss_Double_Skew_fit_weight_bound_with_inputs(self, spc, freq, a1, b1, c1, a2, b2, c2, k2, d): from scipy.optimize import least_squares @@ -1264,7 +1352,124 @@ class Oblique_Gauss_Fit(Operation): return A1f, B1f, C1f, A2f, B2f, C2f, K2f, A3f, B3f, C3f, K3f, Df, doppler - def run(self, dataOut, mode = 0): + def 
CEEJ_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh): + + from scipy.optimize import least_squares + + freq_max = numpy.max(numpy.abs(freq)) + spc_max = numpy.max(spc) + + from scipy.signal import medfilt + Nincoh = 20 + Nincoh = 80 + Nincoh = Nincoh + spcm = medfilt(spc,11)/numpy.sqrt(Nincoh) + + # define a least squares function to optimize + def lsq_func(params): + return (spc-self.gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4]))#/spcm + + + bounds=([0,0,0,-numpy.inf,0],[numpy.inf,numpy.inf,numpy.inf,0,numpy.inf]) + + params_scale = [spc_max,freq_max,freq_max,1,spc_max] + + x0_value = numpy.array([spc_max,freq[numpy.argmax(spc)],30,-.1,numpy.mean(spc[:50])]) + + popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0) + + J = popt.jac + + try: + error = numpy.ones((9))*numpy.NAN + cov = numpy.linalg.inv(J.T.dot(J)) + error[:4] = numpy.sqrt(numpy.diagonal(cov))[:4] + error[-1] = numpy.sqrt(numpy.diagonal(cov))[-1] + except: + error = numpy.ones((9))*numpy.NAN + + A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3] + Df = popt.x[4] + + aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df) + doppler1 = freq[numpy.argmax(aux1)] + #print("CEEJ ERROR:",error) + + return A1f, B1f, C1f, K1f, numpy.NAN, numpy.NAN, numpy.NAN, numpy.NAN, Df, doppler1, numpy.NAN, error + + def CEEJ_fit_weight_bound_no_inputs(self,spc,freq,Nincoh): + + from scipy.optimize import least_squares + + freq_max = numpy.max(numpy.abs(freq)) + spc_max = numpy.max(spc) + + from scipy.signal import medfilt + Nincoh = 20 + Nincoh = 80 + Nincoh = Nincoh + spcm = medfilt(spc,11)/numpy.sqrt(Nincoh) + + # define a least squares function to optimize + def lsq_func(params): + return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))#/spcm + + + bounds=([0,0,0,0],[numpy.inf,numpy.inf,numpy.inf,numpy.inf]) + + params_scale = [spc_max,freq_max,freq_max,spc_max] + + x0_value = numpy.array([spc_max,freq[numpy.argmax(spcm)],30,numpy.mean(spc[:50])]) + + popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0) + + J = popt.jac + + try: + error = numpy.ones((4))*numpy.NAN + cov = numpy.linalg.inv(J.T.dot(J)) + error = numpy.sqrt(numpy.diagonal(cov)) + except: + error = numpy.ones((4))*numpy.NAN + + A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2] + Df = popt.x[3] + + return A1f, B1f, C1f, Df, error + + def Simple_fit_bound(self,spc,freq,Nincoh): + + freq_max = numpy.max(numpy.abs(freq)) + spc_max = numpy.max(spc) + + Nincoh = Nincoh + + def lsq_func(params): + return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3])) + + bounds=([0,-50,0,0],[numpy.inf,+50,numpy.inf,numpy.inf]) + + params_scale = [spc_max,freq_max,freq_max,spc_max] + + x0_value = numpy.array([spc_max,-20.5,5,1.0e7]) + + popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0) + + J = popt.jac + + try: + cov = numpy.linalg.inv(J.T.dot(J)) + error = numpy.sqrt(numpy.diagonal(cov)) + except: + error = numpy.ones((4))*numpy.NAN + + A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2] + Df = popt.x[3] + + return A1f, B1f, C1f, Df, error + + + def run(self, dataOut, mode = 0, Hmin1 = None, Hmax1 = None, Hmin2 = None, Hmax2 = None): pwcode = 1 @@ -1293,12 +1498,54 @@ class Oblique_Gauss_Fit(Operation): elif mode == 9: dataOut.Oblique_params = numpy.ones((1,11,dataOut.nHeights))*numpy.NAN dataOut.Oblique_param_errors = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN + elif mode == 11: + dataOut.Oblique_params = 
numpy.ones((1,7,dataOut.nHeights))*numpy.NAN + dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN + elif mode == 10: #150 km + dataOut.Oblique_params = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN + dataOut.Oblique_param_errors = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN + dataOut.snr_log10 = numpy.ones((1,dataOut.nHeights))*numpy.NAN dataOut.VelRange = x - l1=range(22,36) + + + #l1=range(22,36) #+62 #l1=range(32,36) - l2=range(58,99) + #l2=range(58,99) #+62 + + #if Hmin1 == None or Hmax1 == None or Hmin2 == None or Hmax2 == None: + + minHei1 = 105. + maxHei1 = 122.5 + maxHei1 = 130.5 + + if mode == 10: #150 km + minHei1 = 100 + maxHei1 = 100 + + inda1 = numpy.where(dataOut.heightList >= minHei1) + indb1 = numpy.where(dataOut.heightList <= maxHei1) + + minIndex1 = inda1[0][0] + maxIndex1 = indb1[0][-1] + + minHei2 = 150. + maxHei2 = 201.25 + maxHei2 = 225.3 + + if mode == 10: #150 km + minHei2 = 110 + maxHei2 = 165 + + inda2 = numpy.where(dataOut.heightList >= minHei2) + indb2 = numpy.where(dataOut.heightList <= maxHei2) + + minIndex2 = inda2[0][0] + maxIndex2 = indb2[0][-1] + + l1=range(minIndex1,maxIndex1) + l2=range(minIndex2,maxIndex2) if mode == 4: ''' @@ -1321,9 +1568,10 @@ class Oblique_Gauss_Fit(Operation): for hei in itertools.chain(l1, l2): #for hei in range(79,81): + if numpy.isnan(dataOut.data_snr[0,hei]): + continue #Avoids the analysis when there is only noise try: - spc = dataOut.data_spc[0,:,hei] if mode == 6: #Skew Weighted Bounded @@ -1340,14 +1588,39 @@ class Oblique_Gauss_Fit(Operation): dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,9,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei])) elif mode == 9: #Double Skewed Weighted Bounded no inputs - dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(spc,x) - dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei])) - #print(hei) - #print(dataOut.Oblique_params[0,10,hei]) - #print(dataOut.dplr_2_u[0,0,hei]) - #print("outside",dataOut.Oblique_param_errors[0,:,hei]) - #print("SUCCESSSSSSS") - #exit(1) + #if numpy.max(spc) <= 0: + if x[numpy.argmax(spc)] <= 0: + #print("EEJ") + dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt) + #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500: + # dataOut.Oblique_params[0,:,hei] *= numpy.NAN + dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei])) + + else: + #print("CEEJ") + 
dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_Skew_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt) + #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500: + # dataOut.Oblique_params[0,:,hei] *= numpy.NAN + dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei])) + elif mode == 11: #Double Weighted Bounded no inputs + #if numpy.max(spc) <= 0: + from scipy.signal import medfilt + spcm = medfilt(spc,11) + + if x[numpy.argmax(spcm)] <= 0: + #print("EEJ") + #print("EEJ",dataOut.heightList[hei]) + dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt) + #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500: + # dataOut.Oblique_params[0,:,hei] *= numpy.NAN + else: + #print("CEEJ",dataOut.heightList[hei]) + dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt) + + elif mode == 10: #150km + dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Simple_fit_bound(spc,x,dataOut.nIncohInt) + snr = (dataOut.power[0,hei]*factor - dataOut.Oblique_params[0,3,hei])/dataOut.Oblique_params[0,3,hei] + dataOut.snr_log10[0,hei] = numpy.log10(snr) else: spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first') @@ -1393,7 +1666,34 @@ class Oblique_Gauss_Fit(Operation): ###dataOut.Oblique_params[0,:,hei] = dataOut.Oblique_params[0,:,hei]*numpy.NAN pass - #exit(1) + #exit(1) + dataOut.paramInterval = dataOut.nProfiles*dataOut.nCohInt*dataOut.ippSeconds + dataOut.lat=-11.95 + dataOut.lon=-76.87 + + if mode == 9: #Double Skew Gaussian + dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] + dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:] + dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] + dataOut.Spec_W_T2 = dataOut.Oblique_params[:,6,:] + + dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:] #En realidad este es el error? + dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:] + dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,5,:] #En realidad este es el error? 
+ dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,6,:] + + elif mode == 11: #Double Gaussian + dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] + dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:] + dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,4,:] + dataOut.Spec_W_T2 = dataOut.Oblique_params[:,5,:] + + dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:] + dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:] + dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,4,:] + dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,5,:] + + dataOut.mode = mode return dataOut @@ -3104,7 +3404,7 @@ class SpectralFitting(Operation): chisq=numpy.dot((dp-fmp).T,(dp-fmp)) return chisq -class WindProfiler(Operation): +class WindProfiler_V0(Operation): __isConfig = False @@ -3656,7 +3956,8 @@ class WindProfiler(Operation): def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs): param = dataOut.data_param - if dataOut.abscissaList != None: + #if dataOut.abscissaList != None: + if numpy.any(dataOut.abscissaList): absc = dataOut.abscissaList[:-1] # noise = dataOut.noise heightList = dataOut.heightList @@ -3821,11 +4122,60 @@ class WindProfiler(Operation): return -class EWDriftsEstimation(Operation): +class WindProfiler(Operation): + + __isConfig = False + + __initime = None + __lastdatatime = None + __integrationtime = None + + __buffer = None + + __dataReady = False + + __firstdata = None + + n = None def __init__(self): Operation.__init__(self) + def __calculateCosDir(self, elev, azim): + zen = (90 - elev)*numpy.pi/180 + azim = azim*numpy.pi/180 + cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2))) + cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2) + + signX = numpy.sign(numpy.cos(azim)) + signY = numpy.sign(numpy.sin(azim)) + + cosDirX = numpy.copysign(cosDirX, signX) + cosDirY = numpy.copysign(cosDirY, signY) + return cosDirX, cosDirY + + def __calculateAngles(self, theta_x, theta_y, azimuth): + + dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2) + zenith_arr = numpy.arccos(dir_cosw) + azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180 + + dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr) + dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr) + + return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw + + def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly): + + if horOnly: + A = numpy.c_[dir_cosu,dir_cosv] + else: + A = numpy.c_[dir_cosu,dir_cosv,dir_cosw] + A = numpy.asmatrix(A) + A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose() + + return A1 + def __correctValues(self, heiRang, phi, velRadial, SNR): listPhi = phi.tolist() maxid = listPhi.index(max(listPhi)) @@ -3845,23 +4195,13 @@ class EWDriftsEstimation(Operation): for i in rango: x = heiRang*math.cos(phi[i]) y1 = velRadial[i,:] - vali= (numpy.isfinite(y1)==True).nonzero() - y1=y1[vali] - x = x[vali] - f1 = interpolate.interp1d(x,y1,kind = 'cubic',bounds_error=False) + f1 = interpolate.interp1d(x,y1,kind = 'cubic') - #heiRang1 = x*math.cos(phi[maxid]) x1 = heiRang1 y11 = f1(x1) y2 = SNR[i,:] - #print 'snr ', y2 - x = heiRang*math.cos(phi[i]) - vali= (y2 != -1).nonzero() - y2 = y2[vali] - x = x[vali] - #print 'snr ',y2 - f2 = interpolate.interp1d(x,y2,kind = 'cubic',bounds_error=False) + f2 = interpolate.interp1d(x,y2,kind = 'cubic') y21 = f2(x1) velRadial1[i,:] = y11 @@ -3869,36 +4209,716 @@ class EWDriftsEstimation(Operation): return heiRang1, velRadial1, SNR1 + def __calculateVelUVW(self, A, velRadial): + #Operacion Matricial +# velUVW = 
numpy.zeros((velRadial.shape[1],3)) +# for ind in range(velRadial.shape[1]): +# velUVW[ind,:] = numpy.dot(A,velRadial[:,ind]) +# velUVW = velUVW.transpose() + velUVW = numpy.zeros((A.shape[0],velRadial.shape[1])) + velUVW[:,:] = numpy.dot(A,velRadial) - def run(self, dataOut, zenith, zenithCorrection): - heiRang = dataOut.heightList - velRadial = dataOut.data_param[:,3,:] - velRadialm = dataOut.data_param[:,2:4,:]*-1 + return velUVW - rbufc=dataOut.data_paramC[:,:,0] - ebufc=dataOut.data_paramC[:,:,1] - SNR = dataOut.data_snr - velRerr = dataOut.data_error[:,4,:] - moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]],[velRadialm[1,:]],[velRadialm[1,:]])) - dataOut.moments=moments - # Coherent - smooth_wC = ebufc[0,:] - p_w0C = rbufc[0,:] - p_w1C = rbufc[1,:] - w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1) - t_wC = rbufc[3,:] - my_nbeams = 2 +# def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0): - zenith = numpy.array(zenith) - zenith -= zenithCorrection - zenith *= numpy.pi/180 - if zenithCorrection != 0 : - heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR) - else : - heiRang1 = heiRang - velRadial1 = velRadial + def techniqueDBS(self, kwargs): + """ + Function that implements Doppler Beam Swinging (DBS) technique. + + Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth, + Direction correction (if necessary), Ranges and SNR + + Output: Winds estimation (Zonal, Meridional and Vertical) + + Parameters affected: Winds, height range, SNR + """ + velRadial0 = kwargs['velRadial'] + heiRang = kwargs['heightList'] + SNR0 = kwargs['SNR'] + + if 'dirCosx' in kwargs and 'dirCosy' in kwargs: + theta_x = numpy.array(kwargs['dirCosx']) + theta_y = numpy.array(kwargs['dirCosy']) + else: + elev = numpy.array(kwargs['elevation']) + azim = numpy.array(kwargs['azimuth']) + theta_x, theta_y = self.__calculateCosDir(elev, azim) + azimuth = kwargs['correctAzimuth'] + if 'horizontalOnly' in kwargs: + horizontalOnly = kwargs['horizontalOnly'] + else: horizontalOnly = False + if 'correctFactor' in kwargs: + correctFactor = kwargs['correctFactor'] + else: correctFactor = 1 + if 'channelList' in kwargs: + channelList = kwargs['channelList'] + if len(channelList) == 2: + horizontalOnly = True + arrayChannel = numpy.array(channelList) + param = param[arrayChannel,:,:] + theta_x = theta_x[arrayChannel] + theta_y = theta_y[arrayChannel] + + azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth) + heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0) + A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly) + + #Calculo de Componentes de la velocidad con DBS + winds = self.__calculateVelUVW(A,velRadial1) + + return winds, heiRang1, SNR1 + + def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None): + + nPairs = len(pairs_ccf) + posx = numpy.asarray(posx) + posy = numpy.asarray(posy) + + #Rotacion Inversa para alinear con el azimuth + if azimuth!= None: + azimuth = azimuth*math.pi/180 + posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth) + posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth) + else: + posx1 = posx + posy1 = posy + + #Calculo de Distancias + distx = numpy.zeros(nPairs) + disty = numpy.zeros(nPairs) + dist = numpy.zeros(nPairs) + ang = numpy.zeros(nPairs) + + for i in range(nPairs): + distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]] + 
disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]] + dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2) + ang[i] = numpy.arctan2(disty[i],distx[i]) + + return distx, disty, dist, ang + #Calculo de Matrices +# nPairs = len(pairs) +# ang1 = numpy.zeros((nPairs, 2, 1)) +# dist1 = numpy.zeros((nPairs, 2, 1)) +# +# for j in range(nPairs): +# dist1[j,0,0] = dist[pairs[j][0]] +# dist1[j,1,0] = dist[pairs[j][1]] +# ang1[j,0,0] = ang[pairs[j][0]] +# ang1[j,1,0] = ang[pairs[j][1]] +# +# return distx,disty, dist1,ang1 + + + def __calculateVelVer(self, phase, lagTRange, _lambda): + + Ts = lagTRange[1] - lagTRange[0] + velW = -_lambda*phase/(4*math.pi*Ts) + + return velW + + def __calculateVelHorDir(self, dist, tau1, tau2, ang): + nPairs = tau1.shape[0] + nHeights = tau1.shape[1] + vel = numpy.zeros((nPairs,3,nHeights)) + dist1 = numpy.reshape(dist, (dist.size,1)) + + angCos = numpy.cos(ang) + angSin = numpy.sin(ang) + + vel0 = dist1*tau1/(2*tau2**2) + vel[:,0,:] = (vel0*angCos).sum(axis = 1) + vel[:,1,:] = (vel0*angSin).sum(axis = 1) + + ind = numpy.where(numpy.isinf(vel)) + vel[ind] = numpy.nan + + return vel + +# def __getPairsAutoCorr(self, pairsList, nChannels): +# +# pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan +# +# for l in range(len(pairsList)): +# firstChannel = pairsList[l][0] +# secondChannel = pairsList[l][1] +# +# #Obteniendo pares de Autocorrelacion +# if firstChannel == secondChannel: +# pairsAutoCorr[firstChannel] = int(l) +# +# pairsAutoCorr = pairsAutoCorr.astype(int) +# +# pairsCrossCorr = range(len(pairsList)) +# pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr) +# +# return pairsAutoCorr, pairsCrossCorr + +# def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor): + def techniqueSA(self, kwargs): + + """ + Function that implements Spaced Antenna (SA) technique. 
+ + Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth, + Direction correction (if necessary), Ranges and SNR + + Output: Winds estimation (Zonal, Meridional and Vertical) + + Parameters affected: Winds + """ + position_x = kwargs['positionX'] + position_y = kwargs['positionY'] + azimuth = kwargs['azimuth'] + + if 'correctFactor' in kwargs: + correctFactor = kwargs['correctFactor'] + else: + correctFactor = 1 + + groupList = kwargs['groupList'] + pairs_ccf = groupList[1] + tau = kwargs['tau'] + _lambda = kwargs['_lambda'] + + #Cross Correlation pairs obtained +# pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels) +# pairsArray = numpy.array(pairsList)[pairsCrossCorr] +# pairsSelArray = numpy.array(pairsSelected) +# pairs = [] +# +# #Wind estimation pairs obtained +# for i in range(pairsSelArray.shape[0]/2): +# ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0] +# ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0] +# pairs.append((ind1,ind2)) + + indtau = tau.shape[0]/2 + tau1 = tau[:indtau,:] + tau2 = tau[indtau:-1,:] +# tau1 = tau1[pairs,:] +# tau2 = tau2[pairs,:] + phase1 = tau[-1,:] + + #--------------------------------------------------------------------- + #Metodo Directo + distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth) + winds = self.__calculateVelHorDir(dist, tau1, tau2, ang) + winds = stats.nanmean(winds, axis=0) + #--------------------------------------------------------------------- + #Metodo General +# distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth) +# #Calculo Coeficientes de Funcion de Correlacion +# F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n) +# #Calculo de Velocidades +# winds = self.calculateVelUV(F,G,A,B,H) + + #--------------------------------------------------------------------- + winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda) + winds = correctFactor*winds + return winds + + def __checkTime(self, currentTime, paramInterval, outputInterval): + + dataTime = currentTime + paramInterval + deltaTime = dataTime - self.__initime + + if deltaTime >= outputInterval or deltaTime < 0: + self.__dataReady = True + return + + def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax): + ''' + Function that implements winds estimation technique with detected meteors. 
+ + Input: Detected meteors, Minimum meteor quantity to wind estimation + + Output: Winds estimation (Zonal and Meridional) + + Parameters affected: Winds + ''' + #Settings + nInt = (heightMax - heightMin)/2 + nInt = int(nInt) + winds = numpy.zeros((2,nInt))*numpy.nan + + #Filter errors + error = numpy.where(arrayMeteor[:,-1] == 0)[0] + finalMeteor = arrayMeteor[error,:] + + #Meteor Histogram + finalHeights = finalMeteor[:,2] + hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax)) + nMeteorsPerI = hist[0] + heightPerI = hist[1] + + #Sort of meteors + indSort = finalHeights.argsort() + finalMeteor2 = finalMeteor[indSort,:] + + # Calculating winds + ind1 = 0 + ind2 = 0 + + for i in range(nInt): + nMet = nMeteorsPerI[i] + ind1 = ind2 + ind2 = ind1 + nMet + + meteorAux = finalMeteor2[ind1:ind2,:] + + if meteorAux.shape[0] >= meteorThresh: + vel = meteorAux[:, 6] + zen = meteorAux[:, 4]*numpy.pi/180 + azim = meteorAux[:, 3]*numpy.pi/180 + + n = numpy.cos(zen) + # m = (1 - n**2)/(1 - numpy.tan(azim)**2) + # l = m*numpy.tan(azim) + l = numpy.sin(zen)*numpy.sin(azim) + m = numpy.sin(zen)*numpy.cos(azim) + + A = numpy.vstack((l, m)).transpose() + A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose()) + windsAux = numpy.dot(A1, vel) + + winds[0,i] = windsAux[0] + winds[1,i] = windsAux[1] + + return winds, heightPerI[:-1] + + def techniqueNSM_SA(self, **kwargs): + metArray = kwargs['metArray'] + heightList = kwargs['heightList'] + timeList = kwargs['timeList'] + + rx_location = kwargs['rx_location'] + groupList = kwargs['groupList'] + azimuth = kwargs['azimuth'] + dfactor = kwargs['dfactor'] + k = kwargs['k'] + + azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth) + d = dist*dfactor + #Phase calculation + metArray1 = self.__getPhaseSlope(metArray, heightList, timeList) + + metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities + + velEst = numpy.zeros((heightList.size,2))*numpy.nan + azimuth1 = azimuth1*numpy.pi/180 + + for i in range(heightList.size): + h = heightList[i] + indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0] + metHeight = metArray1[indH,:] + if metHeight.shape[0] >= 2: + velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities + iazim = metHeight[:,1].astype(int) + azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths + A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux))) + A = numpy.asmatrix(A) + A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose() + velHor = numpy.dot(A1,velAux) + + velEst[i,:] = numpy.squeeze(velHor) + return velEst + + def __getPhaseSlope(self, metArray, heightList, timeList): + meteorList = [] + #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2 + #Putting back together the meteor matrix + utctime = metArray[:,0] + uniqueTime = numpy.unique(utctime) + + phaseDerThresh = 0.5 + ippSeconds = timeList[1] - timeList[0] + sec = numpy.where(timeList>1)[0][0] + nPairs = metArray.shape[1] - 6 + nHeights = len(heightList) + + for t in uniqueTime: + metArray1 = metArray[utctime==t,:] +# phaseDerThresh = numpy.pi/4 #reducir Phase thresh + tmet = metArray1[:,1].astype(int) + hmet = metArray1[:,2].astype(int) + + metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1)) + metPhase[:,:] = numpy.nan + metPhase[:,hmet,tmet] = metArray1[:,6:].T + + #Delete short trails + metBool = ~numpy.isnan(metPhase[0,:,:]) + heightVect = numpy.sum(metBool, axis = 1) + metBool[heightVect phaseDerThresh)) + 
metPhase[phDerAux] = numpy.nan + + #--------------------------METEOR DETECTION ----------------------------------------- + indMet = numpy.where(numpy.any(metBool,axis=1))[0] + + for p in numpy.arange(nPairs): + phase = metPhase[p,:,:] + phDer = metDer[p,:,:] + + for h in indMet: + height = heightList[h] + phase1 = phase[h,:] #82 + phDer1 = phDer[h,:] + + phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap + + indValid = numpy.where(~numpy.isnan(phase1))[0] + initMet = indValid[0] + endMet = 0 + + for i in range(len(indValid)-1): + + #Time difference + inow = indValid[i] + inext = indValid[i+1] + idiff = inext - inow + #Phase difference + phDiff = numpy.abs(phase1[inext] - phase1[inow]) + + if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor + sizeTrail = inow - initMet + 1 + if sizeTrail>3*sec: #Too short meteors + x = numpy.arange(initMet,inow+1)*ippSeconds + y = phase1[initMet:inow+1] + ynnan = ~numpy.isnan(y) + x = x[ynnan] + y = y[ynnan] + slope, intercept, r_value, p_value, std_err = stats.linregress(x,y) + ylin = x*slope + intercept + rsq = r_value**2 + if rsq > 0.5: + vel = slope#*height*1000/(k*d) + estAux = numpy.array([utctime,p,height, vel, rsq]) + meteorList.append(estAux) + initMet = inext + metArray2 = numpy.array(meteorList) + + return metArray2 + + def __calculateAzimuth1(self, rx_location, pairslist, azimuth0): + + azimuth1 = numpy.zeros(len(pairslist)) + dist = numpy.zeros(len(pairslist)) + + for i in range(len(rx_location)): + ch0 = pairslist[i][0] + ch1 = pairslist[i][1] + + diffX = rx_location[ch0][0] - rx_location[ch1][0] + diffY = rx_location[ch0][1] - rx_location[ch1][1] + azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi + dist[i] = numpy.sqrt(diffX**2 + diffY**2) + + azimuth1 -= azimuth0 + return azimuth1, dist + + def techniqueNSM_DBS(self, **kwargs): + metArray = kwargs['metArray'] + heightList = kwargs['heightList'] + timeList = kwargs['timeList'] + azimuth = kwargs['azimuth'] + theta_x = numpy.array(kwargs['theta_x']) + theta_y = numpy.array(kwargs['theta_y']) + + utctime = metArray[:,0] + cmet = metArray[:,1].astype(int) + hmet = metArray[:,3].astype(int) + SNRmet = metArray[:,4] + vmet = metArray[:,5] + spcmet = metArray[:,6] + + nChan = numpy.max(cmet) + 1 + nHeights = len(heightList) + + azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth) + hmet = heightList[hmet] + h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights + + velEst = numpy.zeros((heightList.size,2))*numpy.nan + + for i in range(nHeights - 1): + hmin = heightList[i] + hmax = heightList[i + 1] + + thisH = (h1met>=hmin) & (h1met8) & (vmet<50) & (spcmet<10) + indthisH = numpy.where(thisH) + + if numpy.size(indthisH) > 3: + + vel_aux = vmet[thisH] + chan_aux = cmet[thisH] + cosu_aux = dir_cosu[chan_aux] + cosv_aux = dir_cosv[chan_aux] + cosw_aux = dir_cosw[chan_aux] + + nch = numpy.size(numpy.unique(chan_aux)) + if nch > 1: + A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True) + velEst[i,:] = numpy.dot(A,vel_aux) + + return velEst + + def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs): + + param = dataOut.moments + #param = dataOut.data_param + #if dataOut.abscissaList != None: + if numpy.any(dataOut.abscissaList) : + absc = dataOut.abscissaList[:-1] + # noise = dataOut.noise + heightList = dataOut.heightList + SNR = dataOut.data_snr + + if technique == 'DBS': + + kwargs['velRadial'] = param[:,1,:] #Radial velocity + kwargs['heightList'] = heightList + 
kwargs['SNR'] = SNR + + dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function + dataOut.utctimeInit = dataOut.utctime + dataOut.outputInterval = dataOut.paramInterval + + elif technique == 'SA': + + #Parameters +# position_x = kwargs['positionX'] +# position_y = kwargs['positionY'] +# azimuth = kwargs['azimuth'] +# +# if kwargs.has_key('crosspairsList'): +# pairs = kwargs['crosspairsList'] +# else: +# pairs = None +# +# if kwargs.has_key('correctFactor'): +# correctFactor = kwargs['correctFactor'] +# else: +# correctFactor = 1 + +# tau = dataOut.data_param +# _lambda = dataOut.C/dataOut.frequency +# pairsList = dataOut.groupList +# nChannels = dataOut.nChannels + + kwargs['groupList'] = dataOut.groupList + kwargs['tau'] = dataOut.data_param + kwargs['_lambda'] = dataOut.C/dataOut.frequency +# dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor) + dataOut.data_output = self.techniqueSA(kwargs) + dataOut.utctimeInit = dataOut.utctime + dataOut.outputInterval = dataOut.timeInterval + + elif technique == 'Meteors': + dataOut.flagNoData = True + self.__dataReady = False + + if 'nHours' in kwargs: + nHours = kwargs['nHours'] + else: + nHours = 1 + + if 'meteorsPerBin' in kwargs: + meteorThresh = kwargs['meteorsPerBin'] + else: + meteorThresh = 6 + + if 'hmin' in kwargs: + hmin = kwargs['hmin'] + else: hmin = 70 + if 'hmax' in kwargs: + hmax = kwargs['hmax'] + else: hmax = 110 + + dataOut.outputInterval = nHours*3600 + + if self.__isConfig == False: +# self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03) + #Get Initial LTC time + self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime) + self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds() + + self.__isConfig = True + + if self.__buffer is None: + self.__buffer = dataOut.data_param + self.__firstdata = copy.copy(dataOut) + + else: + self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param)) + + self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready + + if self.__dataReady: + dataOut.utctimeInit = self.__initime + + self.__initime += dataOut.outputInterval #to erase time offset + + dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax) + dataOut.flagNoData = False + self.__buffer = None + + elif technique == 'Meteors1': + dataOut.flagNoData = True + self.__dataReady = False + + if 'nMins' in kwargs: + nMins = kwargs['nMins'] + else: nMins = 20 + if 'rx_location' in kwargs: + rx_location = kwargs['rx_location'] + else: rx_location = [(0,1),(1,1),(1,0)] + if 'azimuth' in kwargs: + azimuth = kwargs['azimuth'] + else: azimuth = 51.06 + if 'dfactor' in kwargs: + dfactor = kwargs['dfactor'] + if 'mode' in kwargs: + mode = kwargs['mode'] + if 'theta_x' in kwargs: + theta_x = kwargs['theta_x'] + if 'theta_y' in kwargs: + theta_y = kwargs['theta_y'] + else: mode = 'SA' + + #Borrar luego esto + if dataOut.groupList is None: + dataOut.groupList = [(0,1),(0,2),(1,2)] + groupList = dataOut.groupList + C = 3e8 + freq = 50e6 + lamb = C/freq + k = 2*numpy.pi/lamb + + timeList = dataOut.abscissaList + heightList = dataOut.heightList + + if self.__isConfig == False: + dataOut.outputInterval = nMins*60 +# self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03) + #Get Initial LTC 
time + initime = datetime.datetime.utcfromtimestamp(dataOut.utctime) + minuteAux = initime.minute + minuteNew = int(numpy.floor(minuteAux/nMins)*nMins) + self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds() + + self.__isConfig = True + + if self.__buffer is None: + self.__buffer = dataOut.data_param + self.__firstdata = copy.copy(dataOut) + + else: + self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param)) + + self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready + + if self.__dataReady: + dataOut.utctimeInit = self.__initime + self.__initime += dataOut.outputInterval #to erase time offset + + metArray = self.__buffer + if mode == 'SA': + dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList) + elif mode == 'DBS': + dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y) + dataOut.data_output = dataOut.data_output.T + dataOut.flagNoData = False + self.__buffer = None + #print("ENDDD") + return dataOut + +class EWDriftsEstimation(Operation): + + def __init__(self): + Operation.__init__(self) + + def __correctValues(self, heiRang, phi, velRadial, SNR): + listPhi = phi.tolist() + maxid = listPhi.index(max(listPhi)) + minid = listPhi.index(min(listPhi)) + + rango = list(range(len(phi))) + # rango = numpy.delete(rango,maxid) + + heiRang1 = heiRang*math.cos(phi[maxid]) + heiRangAux = heiRang*math.cos(phi[minid]) + indOut = (heiRang1 < heiRangAux[0]).nonzero() + heiRang1 = numpy.delete(heiRang1,indOut) + + velRadial1 = numpy.zeros([len(phi),len(heiRang1)]) + SNR1 = numpy.zeros([len(phi),len(heiRang1)]) + + for i in rango: + x = heiRang*math.cos(phi[i]) + y1 = velRadial[i,:] + vali= (numpy.isfinite(y1)==True).nonzero() + y1=y1[vali] + x = x[vali] + f1 = interpolate.interp1d(x,y1,kind = 'cubic',bounds_error=False) + + #heiRang1 = x*math.cos(phi[maxid]) + x1 = heiRang1 + y11 = f1(x1) + + y2 = SNR[i,:] + #print 'snr ', y2 + x = heiRang*math.cos(phi[i]) + vali= (y2 != -1).nonzero() + y2 = y2[vali] + x = x[vali] + #print 'snr ',y2 + f2 = interpolate.interp1d(x,y2,kind = 'cubic',bounds_error=False) + y21 = f2(x1) + + velRadial1[i,:] = y11 + SNR1[i,:] = y21 + + return heiRang1, velRadial1, SNR1 + + + + def run(self, dataOut, zenith, zenithCorrection): + + heiRang = dataOut.heightList + velRadial = dataOut.data_param[:,3,:] + velRadialm = dataOut.data_param[:,2:4,:]*-1 + + rbufc=dataOut.data_paramC[:,:,0] + ebufc=dataOut.data_paramC[:,:,1] + SNR = dataOut.data_snr + velRerr = dataOut.data_error[:,4,:] + moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]],[velRadialm[1,:]],[velRadialm[1,:]])) + dataOut.moments=moments + # Coherent + smooth_wC = ebufc[0,:] + p_w0C = rbufc[0,:] + p_w1C = rbufc[1,:] + w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1) + t_wC = rbufc[3,:] + my_nbeams = 2 + + zenith = numpy.array(zenith) + zenith -= zenithCorrection + zenith *= numpy.pi/180 + if zenithCorrection != 0 : + heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR) + else : + heiRang1 = heiRang + velRadial1 = velRadial SNR1 = SNR alp = zenith[0] @@ -5542,6 +6562,9 @@ class SMOperations(): class IGRFModel(Operation): + ''' + Written by R. Flores + ''' """Operation to calculate Geomagnetic parameters. 
Parameters: @@ -5597,10 +6620,13 @@ class MergeProc(ProcessingUnit): ProcessingUnit.__init__(self) def run(self, attr_data, attr_data_2 = None, attr_data_3 = None, attr_data_4 = None, attr_data_5 = None, mode=0): + #print("*****************************Merge***************") self.dataOut = getattr(self, self.inputs[0]) data_inputs = [getattr(self, attr) for attr in self.inputs] #print(data_inputs) + #print("Run: ",self.dataOut.runNextUnit) + #exit(1) #print(numpy.shape([getattr(data, attr_data) for data in data_inputs][1])) #exit(1) if mode==0: @@ -5668,39 +6694,7 @@ class MergeProc(ProcessingUnit): #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP #print("Merge data_acf: ",self.dataOut.data_acf.shape) - #exit(1) - #print(self.dataOut.data_spc_LP.shape) - #print("Exit") - #exit(1) - #setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0]) - #setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1]) - #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0]) - #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1]) - ''' - print(self.dataOut.dataLag_spc_LP.shape) - print(self.dataOut.dataLag_cspc_LP.shape) - exit(1) - ''' - ''' - print(self.dataOut.dataLag_spc_LP[0,:,100]) - print(self.dataOut.dataLag_spc_LP[1,:,100]) - exit(1) - ''' - #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1)) - #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0)) - ''' - print("Merge") - print(numpy.shape(self.dataOut.dataLag_spc)) - print(numpy.shape(self.dataOut.dataLag_spc_LP)) - print(numpy.shape(self.dataOut.dataLag_cspc)) - print(numpy.shape(self.dataOut.dataLag_cspc_LP)) - exit(1) - ''' - #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128) - #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128) - #exit(1) - #print(self.dataOut.NDP) - #print(self.dataOut.nNoiseProfiles) + #self.dataOut.nIncohInt_LP = 128 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP @@ -5711,8 +6705,45 @@ class MergeProc(ProcessingUnit): #print("sahpi",self.dataOut.nIncohInt_LP) #exit(1) self.dataOut.NLAG = 16 + self.dataOut.NLAG = self.dataOut.data_acf.shape[1] self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1] #print(numpy.shape(self.dataOut.data_spc)) #exit(1) + if mode==5: + data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs]) + setattr(self.dataOut, attr_data, data) + data = numpy.concatenate([getattr(data, attr_data_2) for data in data_inputs]) + setattr(self.dataOut, attr_data_2, data) + + if mode==6: #Hybrid Spectra-Voltage + #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1) + #setattr(self.dataOut, attr_data, data) + setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[1], attr_data)) #DP + setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[1], attr_data_2)) #DP + setattr(self.dataOut, 'output_LP_integrated', getattr(data_inputs[0], attr_data_3)) #LP + #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP + #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP + #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP + #print("Merge data_acf: ",self.dataOut.data_acf.shape) + #print(self.dataOut.NSCAN) + self.dataOut.nIncohInt = 
int(self.dataOut.NAVG * self.dataOut.nint) + #print(self.dataOut.dataLag_spc.shape) + self.dataOut.nProfiles = self.dataOut.nProfiles_DP = self.dataOut.dataLag_spc.shape[1] + ''' + #self.dataOut.nIncohInt_LP = 128 + #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP + self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP + self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP + self.dataOut.NSCAN = 128 + self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN + #print("sahpi",self.dataOut.nIncohInt_LP) + #exit(1) + self.dataOut.NLAG = 16 + self.dataOut.NLAG = self.dataOut.data_acf.shape[1] + self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1] + ''' + #print(numpy.shape(self.dataOut.data_spc)) + print("*************************GOOD*************************") + #exit(1) diff --git a/schainpy/model/proc/jroproc_spectra.py b/schainpy/model/proc/jroproc_spectra.py index 966d2ef..1b79952 100644 --- a/schainpy/model/proc/jroproc_spectra.py +++ b/schainpy/model/proc/jroproc_spectra.py @@ -123,9 +123,10 @@ class SpectraProc(ProcessingUnit): self.dataOut.flagShiftFFT = False def run(self, nProfiles=None, nFFTPoints=None, pairsList=None, ippFactor=None, shift_fft=False, runNextUnit = 0): - + self.dataIn.runNextUnit = runNextUnit if self.dataIn.type == "Spectra": + self.dataOut.copy(self.dataIn) if shift_fft: #desplaza a la derecha en el eje 2 determinadas posiciones @@ -147,9 +148,15 @@ class SpectraProc(ProcessingUnit): if nProfiles == None: nProfiles = nFFTPoints - + #print(self.dataOut.ipp) + #exit(1) if ippFactor == None: self.dataOut.ippFactor = 1 + #if ippFactor is not None: + #self.dataOut.ippFactor = ippFactor + #print(ippFactor) + #print(self.dataOut.ippFactor) + #exit(1) self.dataOut.nFFTPoints = nFFTPoints @@ -425,6 +432,46 @@ class SpectraProc(ProcessingUnit): return 1 +class GetSNR(Operation): + ''' + Written by R. Flores + ''' + """Operation to get SNR. + + Parameters: + ----------- + + Example + -------- + + op = proc_unit.addOperation(name='GetSNR', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + def run(self,dataOut): + + noise = dataOut.getNoise() + maxdB = 16 + + normFactor = 24 + + #dataOut.data_snr = (dataOut.data_spc.sum(axis=1))/(noise[:,None]*dataOut.normFactor) + dataOut.data_snr = (dataOut.data_spc.sum(axis=1))/(noise[:,None]*dataOut.nFFTPoints) + + dataOut.data_snr = numpy.where(10*numpy.log10(dataOut.data_snr)<.5, numpy.nan, dataOut.data_snr) + #dataOut.data_snr = 10*numpy.log10(dataOut.data_snr) + #dataOut.data_snr = numpy.expand_dims(dataOut.data_snr,axis=0) + #print(dataOut.data_snr.shape) + #exit(1) + + + return dataOut + class removeDC(Operation): def run(self, dataOut, mode=2): diff --git a/schainpy/model/proc/jroproc_spectra_lags_faraday.py b/schainpy/model/proc/jroproc_spectra_lags_faraday.py index f4f9311..7a3d4ad 100644 --- a/schainpy/model/proc/jroproc_spectra_lags_faraday.py +++ b/schainpy/model/proc/jroproc_spectra_lags_faraday.py @@ -19,10 +19,15 @@ from schainpy.model.data.jrodata import hildebrand_sekhon from schainpy.utils import log from schainpy.model.data import _HS_algorithm +from schainpy.model.proc.jroproc_voltage import CleanCohEchoes + from time import time, mktime, strptime, gmtime, ctime class SpectraLagProc(ProcessingUnit): + ''' + Written by R. 
Flores + ''' def __init__(self): ProcessingUnit.__init__(self) @@ -308,7 +313,9 @@ class SpectraLagProc(ProcessingUnit): #print("after",self.dataOut.data_spc[0,:,20]) class removeDCLag(Operation): - + ''' + Written by R. Flores + ''' def remover(self,mode): jspectra = self.dataOut.data_spc jcspectra = self.dataOut.data_cspc @@ -414,7 +421,9 @@ class removeDCLag(Operation): class removeDCLagFlip(Operation): - + ''' + Written by R. Flores + ''' #CHANGES MADE ONLY FOR MODE 2 AND NOT CONSIDERING CSPC def remover(self,mode): @@ -501,7 +510,7 @@ class removeDCLagFlip(Operation): def run(self, dataOut, mode=2): - #print("***********************************Remove DC***********************************") + print("***********************************Remove DC***********************************") ##print(dataOut.FlipChannels) #exit(1) self.dataOut = dataOut @@ -1098,7 +1107,9 @@ class removeInterference(Operation): class IntegrationFaradaySpectra(Operation): - + ''' + Written by R. Flores + ''' __profIndex = 0 __withOverapping = False @@ -1441,7 +1452,9 @@ class IntegrationFaradaySpectra(Operation): class IntegrationFaradaySpectra2(Operation): - + ''' + Written by R. Flores + ''' __profIndex = 0 __withOverapping = False @@ -2061,7 +2074,9 @@ class IntegrationFaradaySpectra2(Operation): return dataOut class IntegrationFaradaySpectra3(Operation): #This class should manage data with no lags as well - + ''' + Written by R. Flores + ''' __profIndex = 0 __withOverapping = False @@ -2815,7 +2830,9 @@ class IntegrationFaradaySpectra3(Operation): #This class should manage data with return dataOut class IntegrationFaradaySpectraNoLags(Operation): - + ''' + Written by R. Flores + ''' __profIndex = 0 __withOverapping = False @@ -3149,6 +3166,9 @@ class IntegrationFaradaySpectraNoLags(Operation): return dataOut class HybridSelectSpectra(Operation): + ''' + Written by R. Flores + ''' """Operation to rearange and use selected channels of spectra data and pairs of cross-spectra data for Hybrid Experiment. Parameters: @@ -3213,7 +3233,9 @@ class HybridSelectSpectra(Operation): class IncohIntLag(Operation): - + ''' + Written by R. Flores + ''' __profIndex = 0 __withOverapping = False @@ -3598,6 +3620,7 @@ class IncohInt(Operation): #exit(1) if n == 1: + dataOut.VelRange = dataOut.getVelRange(0) return dataOut dataOut.flagNoData = True @@ -3619,6 +3642,9 @@ class IncohInt(Operation): dataOut.nIncohInt *= self.n dataOut.utctime = avgdatatime dataOut.flagNoData = False + + dataOut.VelRange = dataOut.getVelRange(0) + #print("Power",numpy.sum(dataOut.data_spc[0,:,20:30],axis=0)) #print("Power",numpy.sum(dataOut.data_spc[0,100:110,:],axis=1)) #exit(1) @@ -3678,6 +3704,9 @@ class IncohInt(Operation): return dataOut class SnrFaraday(Operation): + ''' + Written by R. Flores + ''' """Operation to use get SNR in Faraday processing. Parameters: @@ -3723,6 +3752,9 @@ class SnrFaraday(Operation): return dataOut class SpectraDataToFaraday(Operation): + ''' + Written by R. Flores + ''' """Operation to use spectra data in Faraday processing. 
Parameters: @@ -3754,12 +3786,19 @@ class SpectraDataToFaraday(Operation): dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0 dataOut.ut_Faraday=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0 - + ''' tmpx=numpy.zeros((dataOut.nHeights,dataOut.DPL,2),'float32') tmpx_a2=numpy.zeros((dataOut.nHeights,dataOut.DPL,2),'float32') tmpx_b2=numpy.zeros((dataOut.nHeights,dataOut.DPL,2),'float32') tmpx_abr=numpy.zeros((dataOut.nHeights,dataOut.DPL,2),'float32') tmpx_abi=numpy.zeros((dataOut.nHeights,dataOut.DPL,2),'float32') + ''' + #print("NDP",dataOut.NDP) + tmpx=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + tmpx_a2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + tmpx_b2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + tmpx_abr=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + tmpx_abi=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') dataOut.kabxys_integrated=[tmpx,tmpx,tmpx,tmpx,tmpx_a2,tmpx,tmpx_b2,tmpx,tmpx_abr,tmpx,tmpx_abi,tmpx,tmpx,tmpx] ''' dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32') @@ -3806,25 +3845,7 @@ class SpectraDataToFaraday(Operation): for l in range(dataOut.DPL): dataOut.rnint2[l]=1.0/(dataOut.nIncohInt*dataOut.nProfiles) - - def run(self,dataOut): - #print(dataOut.nIncohInt) - #exit(1) - dataOut.paramInterval=dataOut.nIncohInt*2*2#nIncohInt*numero de fft/nprofiles*segundos de cada muestra - dataOut.lat=-11.95 - dataOut.lon=-76.87 - - self.normFactor(dataOut) - - dataOut.NDP=dataOut.nHeights - dataOut.NR=len(dataOut.channelList) - dataOut.DH=dataOut.heightList[1]-dataOut.heightList[0] - dataOut.H0=int(dataOut.heightList[0]) - - self.ConvertData(dataOut) - - dataOut.NAVG=16#dataOut.rnint2[0] #CHECK THIS! - dataOut.MAXNRANGENDT=dataOut.NDP + def noise(self,dataOut): dataOut.noise_lag = numpy.zeros((dataOut.nChannels,dataOut.DPL),'float32') #print("Lags") @@ -3878,6 +3899,114 @@ class SpectraDataToFaraday(Operation): dataOut.pan = dataOut.tnoise[0] dataOut.pbn = dataOut.tnoise[1] + def get_eej_index_V0(self,data_to_remov_eej,dataOut): + + dataOut.data_spc = data_to_remov_eej + #print(dataOut.data_spc) + data_eej = dataOut.getPower()[1] + print("data_eej: ", data_eej) + #exit(1) + index_eej = CleanCohEchoes.mad_based_outlier(self,data_eej[:20]) + aux_eej = numpy.array(index_eej.nonzero()).ravel() + + index2 = CleanCohEchoes.mad_based_outlier(self,data_eej[aux_eej[-1]+1:aux_eej[-1]+1+20]) + aux2 = numpy.array(index2.nonzero()).ravel() + if aux2.size > 0: + #print(aux2) + #print(aux2[-1]) + #print(arr[aux[-1]+aux2[-1]+1]) + dataOut.min_id_eej = aux_eej[-1]+aux2[-1]+1 + else: + dataOut.min_id_eej = aux_eej[-1] + + + print(dataOut.min_id_eej) + exit(1) + + def get_eej_index_V1(self,data_to_remov_eej,dataOut): + + dataOut.data_spc = data_to_remov_eej + outliers_IDs = [] + #print(dataOut.data_spc) + for ich in range(dataOut.nChannels): + + data_eej = dataOut.getPower()[ich] + #print("data_eej: ", data_eej) + #exit(1) + index_eej = CleanCohEchoes.mad_based_outlier(self,data_eej[:20]) + aux_eej = numpy.array(index_eej.nonzero()).ravel() + + #index2 = CleanCohEchoes.mad_based_outlier(self,data_eej[aux_eej[-1]+1:aux_eej[-1]+1+20]) + index2 = CleanCohEchoes.mad_based_outlier(self,data_eej[aux_eej[-1]+1:aux_eej[-1]+1+10],thresh=1.) 
+ aux2 = numpy.array(index2.nonzero()).ravel() + if aux2.size > 0: + #min_id_eej = aux_eej[-1]+aux2[-1]+1 + ids = numpy.concatenate((aux_eej,aux2+aux_eej[-1]+1)) + else: + ids = aux_eej + + outliers_IDs=numpy.append(outliers_IDs,ids) + + outliers_IDs=numpy.array(outliers_IDs) + outliers_IDs=outliers_IDs.astype(numpy.dtype('int64')) + + (uniq, freq) = (numpy.unique(outliers_IDs, return_counts=True)) + aux_arr = numpy.column_stack((uniq,freq)) + + final_index = [] + for i in range(aux_arr.shape[0]): + if aux_arr[i,1] == 2: + final_index.append(aux_arr[i,0]) + + if final_index != []: + dataOut.min_id_eej = final_index[-1] + else: + print("CHECKKKKK!!!!!!!!!!!!!!!") + + print(dataOut.min_id_eej) + exit(1) + + def get_eej_index(self,data_to_remov_eej,dataOut): + + dataOut.data_spc = data_to_remov_eej + + data_eej = dataOut.getPower()[0] + #print(data_eej) + index_eej = CleanCohEchoes.mad_based_outlier(self,data_eej[:17]) + aux_eej = numpy.array(index_eej.nonzero()).ravel() + + dataOut.min_id_eej = aux_eej[-1] + + print(dataOut.min_id_eej) + #exit(1) + + def run(self,dataOut): + #print(dataOut.nIncohInt) + #exit(1) + dataOut.paramInterval=dataOut.nIncohInt*2*2#nIncohInt*numero de fft/nprofiles*segundos de cada muestra + dataOut.lat=-11.95 + dataOut.lon=-76.87 + + data_to_remov_eej = dataOut.dataLag_spc[:,:,:,0] + + self.normFactor(dataOut) + + dataOut.NDP=dataOut.nHeights + dataOut.NR=len(dataOut.channelList) + dataOut.DH=dataOut.heightList[1]-dataOut.heightList[0] + dataOut.H0=int(dataOut.heightList[0]) + + self.ConvertData(dataOut) + + dataOut.NAVG=16#dataOut.rnint2[0] #CHECK THIS! + if hasattr(dataOut, 'NRANGE'): + dataOut.MAXNRANGENDT = max(dataOut.NRANGE,dataOut.NDT) + else: + dataOut.MAXNRANGENDT = dataOut.NDP + + if not hasattr(dataOut, 'tnoise'): + self.noise(dataOut) + #dataOut.pan = numpy.mean(dataOut.pan) #dataOut.pbn = numpy.mean(dataOut.pbn) #print(dataOut.pan) @@ -3888,12 +4017,18 @@ class SpectraDataToFaraday(Operation): #print("Noise dB: ",10*numpy.log10(dataOut.tnoise)) #exit(1) #dataOut.pan=dataOut.tnoise[0]/float(dataOut.nProfiles_LP*dataOut.nIncohInt) + if gmtime(dataOut.utctime).tm_hour >= 22. or gmtime(dataOut.utctime).tm_hour < 12.: + self.get_eej_index(data_to_remov_eej,dataOut) print("done") + #exit(1) return dataOut class SpectraDataToHybrid(SpectraDataToFaraday): + ''' + Written by R. Flores + ''' """Operation to use spectra data in Faraday processing. Parameters: @@ -4067,6 +4202,9 @@ class SpectraDataToHybrid(SpectraDataToFaraday): return dataOut class SpectraDataToHybrid_V2(SpectraDataToFaraday): + ''' + Written by R. Flores + ''' """Operation to use spectra data in Faraday processing. 
Parameters: @@ -4092,7 +4230,7 @@ class SpectraDataToHybrid_V2(SpectraDataToFaraday): self.dataLag_cspc_LP=None self.dataLag_dc_LP=None - def noise(self,dataOut): + def noise_v0(self,dataOut): dataOut.data_spc = dataOut.dataLag_spc_LP.real #print(dataOut.dataLag_spc.shape) @@ -4115,9 +4253,104 @@ class SpectraDataToHybrid_V2(SpectraDataToFaraday): #print("pbn: ",dataOut.pbn) #print(numpy.shape(dataOut.pnoise)) #exit(1) - #print("pan: ",numpy.sum(dataOut.pan)) + #print("pan: ",dataOut.pan) + #print("pbn: ",dataOut.pbn) #exit(1) + def noise_v0_aux(self,dataOut): + + dataOut.data_spc = dataOut.dataLag_spc + #print(dataOut.dataLag_spc.shape) + #exit(1) + #dataOut.data_spc = dataOut.dataLag_spc[:,:,:,0].real + #print("spc noise shape: ",dataOut.data_spc.shape) + tnoise = dataOut.getNoise(ymin_index=100,ymax_index=166) + #print("Noise LP: ",10*numpy.log10(dataOut.tnoise)) + #exit(1) + #dataOut.tnoise[0]*=0.995#0.976 + #dataOut.tnoise[1]*=0.995 + #print(dataOut.nProfiles) + #dataOut.pan=dataOut.tnoise[0]/float(dataOut.nProfiles_LP*dataOut.nIncohInt) + #dataOut.pbn=dataOut.tnoise[1]/float(dataOut.nProfiles_LP*dataOut.nIncohInt) + dataOut.pan=tnoise[0]/float(dataOut.nProfiles*dataOut.nIncohInt) + dataOut.pbn=tnoise[1]/float(dataOut.nProfiles*dataOut.nIncohInt) + + def noise(self,dataOut): + + dataOut.noise_lag = numpy.zeros((dataOut.nChannels,dataOut.DPL),'float32') + #print("Lags") + ''' + for lag in range(dataOut.DPL): + #print(lag) + dataOut.data_spc = dataOut.dataLag_spc[:,:,:,lag] + dataOut.noise_lag[:,lag] = dataOut.getNoise(ymin_index=46) + #dataOut.noise_lag[:,lag] = dataOut.getNoise(ymin_index=33,ymax_index=46) + ''' + #print(dataOut.NDP) + #exit(1) + #Channel B + for lag in range(dataOut.DPL): + #print(lag) + dataOut.data_spc = dataOut.dataLag_spc[:,:,:,lag] + max_hei_id = dataOut.NDP - 2*lag + #if lag < 6: + dataOut.noise_lag[1,lag] = dataOut.getNoise(ymin_index=53,ymax_index=max_hei_id)[1] + #else: + #dataOut.noise_lag[1,lag] = numpy.mean(dataOut.noise_lag[1,:6]) + #dataOut.noise_lag[:,lag] = dataOut.getNoise(ymin_index=33,ymax_index=46) + #Channel A + for lag in range(dataOut.DPL): + #print(lag) + dataOut.data_spc = dataOut.dataLag_spc[:,:,:,lag] + dataOut.noise_lag[0,lag] = dataOut.getNoise(ymin_index=53)[0] + + nanindex = numpy.argwhere(numpy.isnan(numpy.log10(dataOut.noise_lag[1,:]))) + i1 = nanindex[0][0] + dataOut.noise_lag[1,(1,2,7,8,9,10)] *= 2 #Correction LP + dataOut.noise_lag[1,i1:] = numpy.mean(dataOut.noise_lag[1,:i1]) #El ruido de lags contaminados se + #determina a partir del promedio del + #ruido de los lags limpios + ''' + dataOut.noise_lag[1,:] = dataOut.noise_lag[1,0] #El ruido de los lags diferentes de cero para + #el canal B es contaminado por el Tx y EEJ + #del siguiente perfil, por ello se asigna el ruido + #del lag 0 a todos los lags + ''' + #print("Noise lag: ", 10*numpy.log10(dataOut.noise_lag/dataOut.normFactor)) + #exit(1) + ''' + dataOut.tnoise = dataOut.getNoise(ymin_index=46) + dataOut.tnoise /= float(dataOut.nProfiles*dataOut.nIncohInt) + dataOut.pan = dataOut.tnoise[0] + dataOut.pbn = dataOut.tnoise[1] + ''' + #print("i1: ", i1) + #exit(1) + tnoise = dataOut.noise_lag/float(dataOut.nProfiles*dataOut.nIncohInt) + #dataOut.tnoise /= float(dataOut.nProfiles*dataOut.nIncohInt) + dataOut.pan = tnoise[0] + dataOut.pbn = tnoise[1] + + def noise_LP(self,dataOut): + + dataOut.data_spc = dataOut.dataLag_spc_LP.real + #print(dataOut.dataLag_spc.shape) + #exit(1) + #dataOut.data_spc = dataOut.dataLag_spc[:,:,:,0].real + #print("spc noise shape: 
",dataOut.data_spc.shape) + dataOut.tnoise = dataOut.getNoise(ymin_index=100,ymax_index=166) + #print("Noise LP: ",10*numpy.log10(dataOut.tnoise)) + #exit(1) + #dataOut.tnoise[0]*=0.995#0.976 + #dataOut.tnoise[1]*=0.995 + #print(dataOut.nProfiles) + #dataOut.pan=dataOut.tnoise[0]/float(dataOut.nProfiles_LP*dataOut.nIncohInt) + #dataOut.pbn=dataOut.tnoise[1]/float(dataOut.nProfiles_LP*dataOut.nIncohInt) + ######dataOut.pan=dataOut.tnoise[0]/float(dataOut.nProfiles_LP*dataOut.nIncohInt_LP) + ######dataOut.pbn=dataOut.tnoise[1]/float(dataOut.nProfiles_LP*dataOut.nIncohInt_LP) + dataOut.pan_LP=dataOut.tnoise[0]/float(dataOut.nProfiles_LP*dataOut.nIncohInt_LP) + dataOut.pbn_LP=dataOut.tnoise[1]/float(dataOut.nProfiles_LP*dataOut.nIncohInt_LP) + def ConvertDataLP(self,dataOut): #print(dataOut.dataLag_spc[:,:,:,1]/dataOut.data_spc) @@ -4161,6 +4394,7 @@ class SpectraDataToHybrid_V2(SpectraDataToFaraday): #dataOut.output_LP_integrated[:,:,3] *= float(dataOut.NSCAN/22)#(dataOut.nNoiseProfiles) #Corrects the zero padding dataOut.nis=dataOut.NSCAN*dataOut.nIncohInt_LP*10 + dataOut.nis=dataOut.NSCAN*dataOut.nIncohInt_LP*dataOut.nProfiles_LP*10 self.ConvertData(dataOut) @@ -4170,10 +4404,98 @@ class SpectraDataToHybrid_V2(SpectraDataToFaraday): dataOut.kabxys_integrated[10][:,(1,2,7,8,9,10),0] *= 2 #Corrects the zero padding hei = 2 - self.noise(dataOut) + self.noise(dataOut) #Noise for DP Profiles + dataOut.pan[[1,2,7,8,9,10]] *= 2 #Corrects the zero padding + #dataOut.pbn[[1,2,7,8,9,10]] *= 2 #Corrects the zero padding #Chequear debido a que se están mezclando lags en self.noise() + self.noise_LP(dataOut) #Noise for LP Profiles + + print("pan: , pan_LP: ",dataOut.pan,dataOut.pan_LP) + print("pbn: , pbn_LP: ",dataOut.pbn,dataOut.pbn_LP) + + dataOut.NAVG=1#dataOut.rnint2[0] #CHECK THIS! dataOut.nint=dataOut.nIncohInt dataOut.MAXNRANGENDT=dataOut.output_LP_integrated.shape[1] + ''' + range_aux=numpy.zeros(dataOut.MAXNRANGENDT,order='F',dtype='float32') + range_aux_dp=numpy.zeros(dataOut.NDT,order='F',dtype='float32') + for i in range(dataOut.MAXNRANGENDT): + range_aux[i]=dataOut.H0 + i*dataOut.DH + for i in range(dataOut.NDT): + range_aux_dp[i]=dataOut.H0 + i*dataOut.DH + import matplotlib.pyplot as plt + #plt.plot(10*numpy.log10(dataOut.output_LP_integrated.real[0,:,0]),range_aux) + plt.plot(10*numpy.log10(dataOut.output_LP_integrated.real[0,:,0]),range_aux_dp) + #plt.plot(10*numpy.log10(dataOut.output_LP_integrated.real[0,:,0]/dataOut.nProfiles_LP),dataOut.range1) + plt.axvline(10*numpy.log10(dataOut.tnoise[0]),color='k',linestyle='dashed') + plt.grid() + plt.xlim(20,100) + plt.show() + exit(1) + ''' + return dataOut + +class SpcVoltageDataToHybrid(SpectraDataToFaraday): + ''' + Written by R. Flores + ''' + """Operation to use spectra data in Faraday processing. + + Parameters: + ----------- + nint : int + Number of integrations. 
+ + Example + -------- + + op = proc_unit.addOperation(name='SpcVoltageDataToHybrid', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.dataLag_spc=None + self.dataLag_cspc=None + self.dataLag_dc=None + + def normFactor(self,dataOut): + dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32') + #print(dataOut.nIncohInt,dataOut.nProfiles) + for l in range(dataOut.DPL): + if(l==0 or (l>=3 and l <=6)): + dataOut.rnint2[l]=1.0/(dataOut.nIncohInt*dataOut.nProfiles_DP) + else: + dataOut.rnint2[l]=2*(1.0/(dataOut.nIncohInt*dataOut.nProfiles_DP)) + + def run(self,dataOut): + + dataOut.paramInterval=0#int(dataOut.nint*dataOut.header[7][0]*2 ) + dataOut.lat=-11.95 + dataOut.lon=-76.87 + + #dataOut.NDP=dataOut.nHeights + #dataOut.NR=len(dataOut.channelList) + #dataOut.DH=dataOut.heightList[1]-dataOut.heightList[0] + #dataOut.H0=int(dataOut.heightList[0]) + + self.normFactor(dataOut) + + dataOut.nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint*10 + + self.ConvertData(dataOut) + + dataOut.kabxys_integrated[4][:,(1,2,7,8,9,10),0] *= 2 #Corrects the zero padding + dataOut.kabxys_integrated[6][:,(1,2,7,8,9,10),0] *= 2 #Corrects the zero padding + dataOut.kabxys_integrated[8][:,(1,2,7,8,9,10),0] *= 2 #Corrects the zero padding + dataOut.kabxys_integrated[10][:,(1,2,7,8,9,10),0] *= 2 #Corrects the zero padding + #print(numpy.sum(dataOut.kabxys_integrated[4][:,1,0])) + dataOut.MAXNRANGENDT = max(dataOut.NRANGE,dataOut.NDP) + #print(dataOut.rnint2) + #print(dataOut.nis) + #exit(1) return dataOut diff --git a/schainpy/model/proc/jroproc_voltage.py b/schainpy/model/proc/jroproc_voltage.py index c866cbb..5e25884 100644 --- a/schainpy/model/proc/jroproc_voltage.py +++ b/schainpy/model/proc/jroproc_voltage.py @@ -29,13 +29,15 @@ class VoltageProc(ProcessingUnit): self.flip = 1 self.setupReq = False - def run(self): + def run(self, runNextUnit = 0): if self.dataIn.type == 'AMISR': self.__updateObjFromAmisrInput() if self.dataIn.type == 'Voltage': self.dataOut.copy(self.dataIn) + self.dataOut.runNextUnit = runNextUnit + #print(self.dataOut.data.shape) def __updateObjFromAmisrInput(self): @@ -397,6 +399,9 @@ class deFlip(Operation): return dataOut class deFlipHP(Operation): + ''' + Written by R. Flores + ''' def __init__(self): self.flip = 1 @@ -530,6 +535,9 @@ class interpolateHeights(Operation): class LagsReshape(Operation): + ''' + Written by R. Flores + ''' """Operation to reshape input data into (Channels,Profiles(with same lag),Heights,Lags) and heights reconstruction. Parameters: @@ -621,6 +629,9 @@ class LagsReshape(Operation): return dataOut class LagsReshape150(Operation): + ''' + Written by R. Flores + ''' """Operation to reshape input data into (Channels,Profiles(with same lag),Heights,Lags) and heights reconstruction. Parameters: @@ -714,6 +725,9 @@ class LagsReshape150(Operation): return dataOut class LagsReshapeHP(Operation): + ''' + Written by R. Flores + ''' """Operation to reshape input data into (Channels,Profiles(with same lag),Heights,Lags) and heights reconstruction. Parameters: @@ -805,6 +819,9 @@ class LagsReshapeHP(Operation): return dataOut class LagsReshapeDP(Operation): + ''' + Written by R. Flores + ''' """Operation to reshape input data into (Channels,Profiles(with same lag),Heights,Lags) and heights reconstruction. Parameters: @@ -957,6 +974,9 @@ class LagsReshapeDP(Operation): return dataOut class LagsReshapeDP_V2(Operation): + ''' + Written by R. 
Flores + ''' """Operation to reshape input data into (Channels,Profiles(with same lag),Heights,Lags) and heights reconstruction. Parameters: @@ -1156,6 +1176,9 @@ class LagsReshapeDP_V2(Operation): return dataOut class LagsReshapeLP(Operation): + ''' + Written by R. Flores + ''' """Operation to reshape input data into (Channels,Profiles(with same lag),Heights,Lags) and heights reconstruction. Parameters: @@ -1322,6 +1345,9 @@ class LagsReshapeLP(Operation): return dataOut class LagsReshapeHP2(Operation): + ''' + Written by R. Flores + ''' """Operation to reshape input data into (Channels,Profiles(with same lag),Heights,Lags) and heights reconstruction. Parameters: @@ -1499,6 +1525,9 @@ class LagsReshapeHP2(Operation): return dataOut class CrossProdDP(Operation): + ''' + Written by R. Flores + ''' """Operation to calculate cross products of the Double Pulse Experiment. Parameters: @@ -1983,6 +2012,9 @@ class CrossProdDP(Operation): class IntegrationDP(Operation): + ''' + Written by R. Flores + ''' """Operation to integrate the Double Pulse data. Parameters: @@ -2068,6 +2100,9 @@ class IntegrationDP(Operation): class SumFlips(Operation): + ''' + Written by R. Flores + ''' """Operation to sum the flip and unflip part of certain cross products of the Double Pulse. Parameters: @@ -2128,6 +2163,9 @@ class SumFlips(Operation): class FlagBadHeights(Operation): + ''' + Written by R. Flores + ''' """Operation to flag bad heights (bad data) of the Double Pulse. Parameters: @@ -2161,6 +2199,9 @@ class FlagBadHeights(Operation): return dataOut class FlagBadHeightsSpectra(Operation): + ''' + Written by R. Flores + ''' """Operation to flag bad heights (bad data) of the Double Pulse. Parameters: @@ -2194,6 +2235,9 @@ class FlagBadHeightsSpectra(Operation): return dataOut class CleanCohEchoes(Operation): + ''' + Written by R. Flores + ''' """Operation to clean coherent echoes. Parameters: @@ -2561,6 +2605,9 @@ class CleanCohEchoes(Operation): return dataOut class CleanCohEchoesHP(Operation): + ''' + Written by R. Flores + ''' """Operation to clean coherent echoes. Parameters: @@ -2928,6 +2975,9 @@ class CleanCohEchoesHP(Operation): return dataOut class NoisePower(Operation): + ''' + Written by R. Flores + ''' """Operation to get noise power from the integrated data of the Double Pulse. Parameters: @@ -3012,6 +3062,9 @@ class NoisePower(Operation): class DoublePulseACFs(Operation): + ''' + Written by R. Flores + ''' """Operation to get the ACFs of the Double Pulse. Parameters: @@ -3066,7 +3119,7 @@ class DoublePulseACFs(Operation): ''' #print("init 2.6",pa,dataOut.pan) dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn) - + #print(i,j,dataOut.p[i,j]) dataOut.sdp[i,j]=2*dataOut.rnint2[j]*((pa+pb)*(pa+pb)) ## ACF @@ -3116,11 +3169,11 @@ class DoublePulseACFs(Operation): ''' #''' if ((pb/dataOut.pbn-1.0)>2.25*(pa/dataOut.pan-1.0)): #To flag bad points from the pulse and EEJ for lags != 0 for Channel B - #print("EJJ") + #print(dataOut.heightList[i],"EJJ") dataOut.igcej[i,j]=1 elif ((pa/dataOut.pan-1.0)>2.25*(pb/dataOut.pbn-1.0)): - #print("EJJ") + #print(dataOut.heightList[i],"EJJ") dataOut.igcej[i,j]=1 #''' ''' @@ -3154,10 +3207,14 @@ class DoublePulseACFs(Operation): #print("p: ",dataOut.p[33,:]) #exit(1) ''' - + #print(numpy.sum(dataOut.rhor)) + #exit(1) return dataOut class DoublePulseACFs_PerLag(Operation): + ''' + Written by R. Flores + ''' """Operation to get the ACFs of the Double Pulse. 
Parameters: @@ -3258,18 +3315,18 @@ class DoublePulseACFs_PerLag(Operation): ''' #''' if ((pb/dataOut.pbn[j]-1.0)>2.25*(pa/dataOut.pan[j]-1.0)): #To flag bad points from the pulse and EEJ for lags != 0 for Channel B - #print("EJJ") + #print(dataOut.heightList[i],j,"EJJ") dataOut.igcej[i,j]=1 elif ((pa/dataOut.pan[j]-1.0)>2.25*(pb/dataOut.pbn[j]-1.0)): - #print("EJJ") + #print(dataOut.heightList[i],j,"EJJ") dataOut.igcej[i,j]=1 #''' ''' if ((pa/dataOut.pan-1.0)>2.25*(pb/dataOut.pbn-1.0)): #print("EJJ") dataOut.igcej[i,j]=1 - ''' + #''' ''' if i == 4: exit(1) @@ -3299,6 +3356,9 @@ class DoublePulseACFs_PerLag(Operation): return dataOut class FaradayAngleAndDPPower(Operation): + ''' + Written by R. Flores + ''' """Operation to calculate Faraday angle and Double Pulse power. Parameters: @@ -3345,6 +3405,7 @@ class FaradayAngleAndDPPower(Operation): st=0.# // total signal ibt=0# // bad lags ns=0# // no. good lags + #print(dataOut.heightList[j]) for l in range(dataOut.DPL): #add in other lags if outside of e-jet contamination if( (dataOut.igcej[j][l] == 0) and (dataOut.ibad[j][l] == 0) ): @@ -3352,8 +3413,8 @@ class FaradayAngleAndDPPower(Operation): dataOut.ph2[j]+=dataOut.p[j][l]/dataOut.sdp[j][l] dataOut.sdp2[j]=dataOut.sdp2[j]+1./dataOut.sdp[j][l] ns+=1 - - + #if dataOut.igcej[j][l] != 0: + #print(l) pt+=dataOut.p[j][l]/dataOut.sdp[j][l] st+=1./dataOut.sdp[j][l] ibt|=dataOut.ibad[j][l]; @@ -3387,6 +3448,9 @@ class FaradayAngleAndDPPower(Operation): class ElectronDensityFaraday(Operation): + ''' + Written by R. Flores + ''' """Operation to calculate electron density from Faraday angle. Parameters: @@ -3433,6 +3497,7 @@ class ElectronDensityFaraday(Operation): ndphi=dataOut.NSHTS-4 #print(dataOut.phi) #exit(1) + #''' if dataOut.flagSpreadF: nanindex = numpy.argwhere(numpy.isnan(dataOut.phi)) i1 = nanindex[-1][0] @@ -3443,6 +3508,7 @@ class ElectronDensityFaraday(Operation): else: #dataOut.phi_uwrp = dataOut.phi.copy() dataOut.phi[:]=numpy.unwrap(dataOut.phi[:]) #Better results + #''' #print(dataOut.phi) #print(dataOut.ph2) #exit(1) @@ -3455,9 +3521,11 @@ class ElectronDensityFaraday(Operation): for i in range(2,dataOut.NSHTS-2): fact=(-0.5/(dataOut.RATE*dataOut.DH))*dataOut.bki[i] #four-point derivative, no phase unwrapping necessary - ####dataOut.dphi[i]=((((theta[i+1]-theta[i-1])+(2.0*(theta[i+2]-theta[i-2])))/thetai[i])).real/10.0 + #####dataOut.dphi[i]=((((theta[i+1]-theta[i-1])+(2.0*(theta[i+2]-theta[i-2])))/thetai[i])).real/10.0 #Original from C program + ##dataOut.dphi[i]=((((theta[i-2]-theta[i+2])+(8.0*(theta[i+1]-theta[i-1])))/thetai[i])).real/12.0 dataOut.dphi[i]=((dataOut.phi[i+1]-dataOut.phi[i-1])+(2.0*(dataOut.phi[i+2]-dataOut.phi[i-2])))/10.0 #Better results + #dataOut.dphi_uc[i] = abs(dataOut.phi[i]*dataOut.bki[i]*(-0.5)/dataOut.DH) dataOut.dphi[i]=abs(dataOut.dphi[i]*fact) dataOut.sdn1[i]=(4.*(dataOut.sdn2[i-2]+dataOut.sdn2[i+2])+dataOut.sdn2[i-1]+dataOut.sdn2[i+1]) @@ -3468,7 +3536,10 @@ class ElectronDensityFaraday(Operation): class NormalizeDPPower(Operation): - """Operation to normalize relative electron density from power with total electron density from Farday angle. + ''' + Written by R. Flores + ''' + """Operation to normalize relative electron density from power with total electron density from Faraday angle. Parameters: ----------- @@ -3602,6 +3673,9 @@ class NormalizeDPPower(Operation): return dataOut class NormalizeDPPowerRoberto(Operation): + ''' + Written by R. 
Flores + ''' """Operation to normalize relative electron density from power with total electron density from Farday angle. Parameters: @@ -3738,6 +3812,9 @@ class NormalizeDPPowerRoberto(Operation): return dataOut class NormalizeDPPowerRoberto_V2(Operation): + ''' + Written by R. Flores + ''' """Operation to normalize relative electron density from power with total electron density from Farday angle. Parameters: @@ -3799,7 +3876,7 @@ class NormalizeDPPowerRoberto_V2(Operation): self.aux=0 night_first=250.0 - night_first1= 400.0 + night_first1= 350.0 night_end= 450.0 day_first=220.0 day_end=400.0 @@ -3812,7 +3889,7 @@ class NormalizeDPPowerRoberto_V2(Operation): #print("EARLY") i2=(night_end-dataOut.range1[0])/dataOut.DH i1=(night_first -dataOut.range1[0])/dataOut.DH - elif (dataOut.ut_Faraday>=23.0 or dataOut.ut_Faraday<2.0): + elif (dataOut.ut_Faraday>=23.0 or dataOut.ut_Faraday<=2.0): #print("NIGHT") i2=(night_end-dataOut.range1[0])/dataOut.DH i1=(night_first1 -dataOut.range1[0])/dataOut.DH @@ -3847,15 +3924,17 @@ class NormalizeDPPowerRoberto_V2(Operation): #print("Flag: ",dataOut.flagTeTiCorrection) #print(dataOut.dphi[i1::]) #print(dataOut.ph2[:]) + if dataOut.flagTeTiCorrection: for i in range(dataOut.NSHTS): dataOut.ph2[i]/=dataOut.cf dataOut.sdp2[i]/=dataOut.cf + #''' if dataOut.flagSpreadF: i2=int((620-dataOut.range1[0])/dataOut.DH) nanindex = numpy.argwhere(numpy.isnan(dataOut.ph2)) - print(nanindex) + print("nanindex",nanindex) i1 = nanindex[-1][0] #VER CUANDO i1>i2 i1 += 1+2 #Se suma uno para no tomar el nan, se suma 2 para no tomar datos nan de "phi" debido al calculo de la derivada #print("i1, i2",i1,i2) @@ -3887,11 +3966,24 @@ class NormalizeDPPowerRoberto_V2(Operation): except: pass - #time_text = datetime.datetime.utcfromtimestamp(dataOut.utctime) + #print(dataOut.cf,dataOut.cflast[0]) + time_text = datetime.datetime.utcfromtimestamp(dataOut.utctime) + #''' #if (time_text.hour == 5 and time_text.minute == 32): #Year: 2022, DOY:104 #if (time_text.hour == 0 and time_text.minute == 12): #Year: 2022, DOY:93 - #dataOut.cf = dataOut.cflast[0] - + #if (time_text.hour == 0 and time_text.minute == 22) or (time_text.hour == 0 and time_text.minute == 54) or (time_text.hour == 1 and time_text.minute == 48): #Year: 2022, DOY:242 + #if (time_text.hour == 1 and time_text.minute == 23) or (time_text.hour == 1 and time_text.minute == 44): #Year: 2022, DOY:243 + if (time_text.hour == 0 and time_text.minute == 4): #Year: 2022, DOY:244 + dataOut.cf = dataOut.cflast[0] + #dataOut.cf = 0.08 + #print("here") + if (time_text.hour == 2 and time_text.minute == 23): #Year: 2022, DOY:244 + dataOut.cf = 0.08 + if (time_text.hour == 2 and time_text.minute == 33): #Year: 2022, DOY:244 + dataOut.cf = 0.09 + if (time_text.hour == 3 and time_text.minute == 59) or (time_text.hour == 4 and time_text.minute == 20): #Year: 2022, DOY:244 + dataOut.cf = 0.09 + #''' dataOut.cflast[0]=dataOut.cf #print(dataOut.cf) @@ -3953,6 +4045,9 @@ class suppress_stdout_stderr(object): class DPTemperaturesEstimation(Operation): + ''' + Written by R. Flores + ''' """Operation to estimate temperatures for Double Pulse data. Parameters: @@ -4172,7 +4267,9 @@ class DPTemperaturesEstimation(Operation): class DenCorrection(NormalizeDPPowerRoberto_V2): - + ''' + Written by R. 
Flores + ''' def __init__(self, **kwargs): Operation.__init__(self, **kwargs) @@ -4199,42 +4296,109 @@ class DenCorrection(NormalizeDPPowerRoberto_V2): bline=0.0 #bline=numpy.zeros(1,order='F',dtype='float32') my_aux = numpy.ones(dataOut.NSHTS,order='F',dtype='float32') + acf_Temps = numpy.ones(dataOut.NSHTS,order='F',dtype='float32')*numpy.nan + acf_no_Temps = numpy.ones(dataOut.NSHTS,order='F',dtype='float32')*numpy.nan + + from scipy import signal + + + teti = numpy.ones(dataOut.NSHTS,order='F',dtype='float32') + + for i in range(10,26): + teti[i] = dataOut.te2[i]/dataOut.ti2[i] + + ratio2 = teti-1 + + ratio2 = signal.medfilt(ratio2) + + def func(params): + return (ratio2-self.gaussian(dataOut.heightList[:dataOut.NSHTS],params[0],params[1],params[2])) + #print(ratio2) + + #plt.show() + x0_value = numpy.array([.5,250,20]) + + popt = least_squares(func,x0=x0_value,verbose=0) + + A = popt.x[0]; B = popt.x[1]; C = popt.x[2] + + ratio2 = self.gaussian(dataOut.heightList[:dataOut.NSHTS], A, B, C) + 1 #Te/Ti + 1 + ''' + import matplotlib.pyplot as plt + plt.clf() + plt.plot(teti,dataOut.heightList[:dataOut.NSHTS]) + plt.plot(ratio2,dataOut.heightList[:dataOut.NSHTS]) + plt.title("{}".format(datetime.datetime.fromtimestamp(dataOut.utctime))) + plt.xlim(.99,3) + plt.grid() + plt.savefig("/home/roberto/Pictures/Density_Comparison/TeTi_from_temps/{}.png".format(dataOut.utctime)) + ''' + + my_te2 = dataOut.ti2*ratio2 + #''' + def func(params): + return (dataOut.te2-self.gaussian(dataOut.heightList[:dataOut.NSHTS],params[0],params[1],params[2])) + x0_value = numpy.array([2000,250,20]) + popt = least_squares(func,x0=x0_value,verbose=0) + A = popt.x[0]; B = popt.x[1]; C = popt.x[2] + te2_smooth = self.gaussian(dataOut.heightList[:dataOut.NSHTS], A, B, C) + #''' + ''' + import matplotlib.pyplot as plt + plt.clf() + plt.plot(te2_smooth,dataOut.heightList[:dataOut.NSHTS],'*-',label = 'My Te') + plt.plot(dataOut.te2,dataOut.heightList[:dataOut.NSHTS],'*-',label = 'Te') + #plt.plot(signal.medfilt(dataOut.te2),dataOut.heightList[:dataOut.NSHTS],'*-',label = 'Te') + plt.title("{}".format(datetime.datetime.fromtimestamp(dataOut.utctime))) + plt.xlim(-50,3000) + plt.grid() + plt.legend() + plt.savefig("/home/roberto/Pictures/Density_Comparison/Te/{}.png".format(dataOut.utctime)) + ''' #print("**** ACF2 WRAPPER ***** ",fitacf_acf2.acf2.__doc__ ) for i in range(dataOut.NSHTS): if dataOut.info2[i]==1: angle=dataOut.thb[i]*0.01745 nue=nui[0]=nui[1]=nui[2]=0.0#nui[3]=0.0 - wion[0]=16 - wion[1]=1 + wion[0]=16 #O + wion[1]=1 #H wion[2]=4 tion[0]=tion[1]=tion[2]=dataOut.ti2[i] - fion[0]=1.0-dataOut.phy2[i] - fion[1]=dataOut.phy2[i] - fion[2]=0.0 + fion[0]=1.0-dataOut.phy2[i] #1 + fion[1]=dataOut.phy2[i] #0 + fion[2]=0.0 #0 for j in range(dataOut.DPL): tau=dataOut.alag[j]*1.0e-3 with suppress_stdout_stderr(): y[j]=fitacf_acf2.acf2(wl,tau,dataOut.te2[i],tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],y[j],three) + #y[j]=fitacf_acf2.acf2(wl,tau,my_te2[i],tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],y[j],three) #if dataOut.ut_Faraday>11.0 and dataOut.range1[i]>150.0 and dataOut.range1[i]<400.0: if dataOut.ut_Faraday>11.0 and dataOut.range1[i]>150.0 and dataOut.range1[i]<300.0: + #if dataOut.ut_Faraday>11.0 and dataOut.range1[i]>150.0 and dataOut.range1[i]<400.0: tau=0.0 with suppress_stdout_stderr(): bline=fitacf_acf2.acf2(wl,tau,tion,tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],bline,three) - cf=min(1.2,max(1.0,bline/y[0])) + cf=min(1.2,max(1.0,bline/y[0])) #FACTOR DE EFICIENCIA + 
#cf = bline/y[0] + #cf=min(2.,max(1.0,bline/y[0])) my_aux[i] = cf - #dataOut.ph2[i]=cf*dataOut.ph2[i] #Now we adjust the curve "cf" into a Gaussian, + acf_Temps[i] = y[0] + acf_no_Temps[i] = bline + #dataOut.ph2[i]=cf*dataOut.ph2[i] #Instead we adjust the curve "cf" into a Gaussian, #dataOut.sdp2[i]=cf*dataOut.sdp2[i] #in order to get smoother values of density for j in range(1,dataOut.DPL): - y[j]=(y[j]/y[0])*dataOut.DH+dataOut.range1[i] + #y[j]=(y[j]/y[0])*dataOut.DH+dataOut.range1[i] + y[j]=min(max((y[j]/y[0]),-1.0),1.0)*dataOut.DH+dataOut.range1[i] y[0]=dataOut.range1[i]+dataOut.DH ratio = my_aux-1 + #ratio = dataOut.te2[:dataOut.NSHTS]/dataOut.ti2[:dataOut.NSHTS] def lsq_func(params): return (ratio-self.gaussian(dataOut.heightList[:dataOut.NSHTS],params[0],params[1],params[2])) @@ -4244,30 +4408,40 @@ class DenCorrection(NormalizeDPPowerRoberto_V2): A = popt.x[0]; B = popt.x[1]; C = popt.x[2] - aux = self.gaussian(dataOut.heightList[:dataOut.NSHTS], A, B, C) + 1 + aux = self.gaussian(dataOut.heightList[:dataOut.NSHTS], A, B, C) + 1 #ratio + 1 ''' import matplotlib.pyplot as plt plt.clf() - plt.plot(aux,dataOut.heightList[:dataOut.NSHTS],label='Fitting') - plt.plot(my_aux,dataOut.heightList[:dataOut.NSHTS],label='Ratio') + plt.plot(aux,dataOut.heightList[:dataOut.NSHTS],'*:',label='Fitting') + plt.plot(my_aux,dataOut.heightList[:dataOut.NSHTS],'*:',label='Ratio') + #plt.plot(acf_Temps,dataOut.heightList[:dataOut.NSHTS],'b*:',label='Temps') + #plt.plot(acf_no_Temps,dataOut.heightList[:dataOut.NSHTS],'k*:',label='No Temps') + #plt.plot(dataOut.te2[:dataOut.NSHTS]/dataOut.ti2[:dataOut.NSHTS],dataOut.heightList[:dataOut.NSHTS],label='Ratio') #plt.ylim(180) - plt.title("{}".format(datetime.fromtimestamp(dataOut.utctime))) + plt.title("{}".format(datetime.datetime.fromtimestamp(dataOut.utctime))) plt.legend() plt.grid() + #plt.xlim(.99,1.25) #plt.show() - plt.savefig("/home/roberto/Pictures/Faraday_TeTi_Test/Ratio_at_400km/{}.png".format(dataOut.utctime)) + #plt.savefig("/home/roberto/Pictures/Density_Comparison/FactorEf_NoLimits/{}.png".format(dataOut.utctime)) + plt.savefig("/home/roberto/Pictures/Faraday/2022/08/Density_Comparison/FactorEf/{}.png".format(dataOut.utctime)) #plt.savefig("/home/roberto/Pictures/Faraday_TeTi_Test/Ratio/{}.png".format(dataOut.utctime)) ''' #print("inside correction",dataOut.ph2) + + #print("heeere",aux) + #exit(1) dataOut.ph2[:dataOut.NSHTS]*=aux dataOut.sdp2[:dataOut.NSHTS]*=aux + #dataOut.ph2[:26]*=aux[:26] + #dataOut.sdp2[:26]*=aux[:26] #print(aux) #print("inside correction",dataOut.ph2) def run(self,dataOut): #print("hour",gmtime(dataOut.utctime).tm_hour) - if gmtime(dataOut.utctime).tm_hour < 23. and gmtime(dataOut.utctime).tm_hour >= 11.: + if gmtime(dataOut.utctime).tm_hour < 24. and gmtime(dataOut.utctime).tm_hour >= 11.: #print("inside") self.TeTiEstimation(dataOut) dataOut.flagTeTiCorrection = True @@ -4277,6 +4451,9 @@ class DenCorrection(NormalizeDPPowerRoberto_V2): return dataOut class DataPlotCleaner(Operation): + ''' + Written by R. Flores + ''' def __init__(self, **kwargs): Operation.__init__(self, **kwargs) @@ -4341,6 +4518,9 @@ class DataPlotCleaner(Operation): class DataSaveCleaner(Operation): + ''' + Written by R. 
Flores + ''' def __init__(self, **kwargs): Operation.__init__(self, **kwargs) @@ -4350,6 +4530,7 @@ class DataSaveCleaner(Operation): #print(dataOut.heightList) #exit(1) dataOut.DensityFinal=numpy.zeros((1,dataOut.NDP)) + dataOut.dphiFinal=numpy.zeros((1,dataOut.NDP)) dataOut.EDensityFinal=numpy.zeros((1,dataOut.NDP)) dataOut.ElecTempFinal=numpy.zeros((1,dataOut.NDP)) dataOut.EElecTempFinal=numpy.zeros((1,dataOut.NDP)) @@ -4359,6 +4540,7 @@ class DataSaveCleaner(Operation): dataOut.EPhyFinal=numpy.zeros((1,dataOut.NDP)) dataOut.DensityFinal[0]=numpy.copy(dataOut.ph2) + dataOut.dphiFinal[0]=numpy.copy(dataOut.dphi) dataOut.EDensityFinal[0]=numpy.copy(dataOut.sdp2) dataOut.ElecTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.te2) dataOut.EElecTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.ete2) @@ -4368,14 +4550,14 @@ class DataSaveCleaner(Operation): dataOut.EPhyFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.ephy2) missing=numpy.nan - + #print("den1: ",dataOut.DensityFinal) temp_min=100.0 temp_max=3000.0#6000.0e #print("Density: ",dataOut.DensityFinal[0]) #print("Error: ",dataOut.EDensityFinal[0]) #print(100*dataOut.EDensityFinal[0]/dataOut.DensityFinal[0]) den_err_percent = 100*dataOut.EDensityFinal[0]/dataOut.DensityFinal[0] - max_den_err_per = 30#30 #Densidades con error mayor al 30% se setean en NaN + max_den_err_per = 35#30 #Densidades con error mayor al 30% se setean en NaN for i in range(dataOut.NSHTS): if den_err_percent[i] >= max_den_err_per: @@ -4478,25 +4660,28 @@ class DataSaveCleaner(Operation): dataOut.EDensityFinal[0,i1:] = dataOut.DensityFinal[0,i1:] = missing ''' - #''' + ''' + #print("den2: ",dataOut.DensityFinal) if gmtime(dataOut.utctime).tm_hour >= 23. or gmtime(dataOut.utctime).tm_hour < 5.: #18-00 LT nanindex = numpy.argwhere(numpy.isnan(dataOut.DensityFinal[0,:33])) #print(nanindex) i1 = nanindex[-1][0] #print("i1",i1) dataOut.EDensityFinal[0,:i1] = dataOut.DensityFinal[0,:i1] = missing + #print("den3: ",dataOut.DensityFinal) elif gmtime(dataOut.utctime).tm_hour >= 6. or gmtime(dataOut.utctime).tm_hour < 11.: #18-00 LT nanindex = numpy.argwhere(numpy.isnan(dataOut.DensityFinal[0,:20])) #print(nanindex) i1 = nanindex[-1][0] #print("i1",i1) dataOut.EDensityFinal[0,:i1] = dataOut.DensityFinal[0,:i1] = missing - #''' + ''' #print("den_nans: ",dataOut.DensityFinal[0,12:50]) if numpy.count_nonzero(~numpy.isnan(dataOut.DensityFinal[0,12:50]))<=5: dataOut.DensityFinal[0,:]=dataOut.EDensityFinal[0,:]=missing #for i in range(dataOut.NSHTS,dataOut.NDP): #for i in range(40,dataOut.NDP): + #print("den2: ",dataOut.DensityFinal) dataOut.DensityFinal[0,dataOut.NSHTS:]=missing dataOut.EDensityFinal[0,dataOut.NSHTS:]=missing dataOut.ElecTempFinal[0,dataOut.NSHTS:]=missing @@ -4510,6 +4695,7 @@ class DataSaveCleaner(Operation): ''' nanindex = numpy.argwhere(numpy.isnan(dataOut.DensityFinal)) i1 = nanindex[-1][0] + print("i1",i1) dataOut.DensityFinal[0,i1+1:]=missing dataOut.EDensityFinal[0,i1+1:]=missing dataOut.ElecTempFinal[0,i1+1:]=missing @@ -4519,6 +4705,27 @@ class DataSaveCleaner(Operation): dataOut.PhyFinal[0,i1+1:]=missing dataOut.EPhyFinal[0,i1+1:]=missing ''' + #''' + if gmtime(dataOut.utctime).tm_hour >= 12. 
and gmtime(dataOut.utctime).tm_hour < 22.: #07-17 LT + dataOut.DensityFinal[0,:13]=missing + dataOut.EDensityFinal[0,:13]=missing + dataOut.ElecTempFinal[0,:13]=missing + dataOut.EElecTempFinal[0,:13]=missing + dataOut.IonTempFinal[0,:13]=missing + dataOut.EIonTempFinal[0,:13]=missing + dataOut.PhyFinal[0,:13]=missing + dataOut.EPhyFinal[0,:13]=missing + #''' + else: + dataOut.DensityFinal[0,:dataOut.min_id_eej+1]=missing + dataOut.EDensityFinal[0,:dataOut.min_id_eej+1]=missing + dataOut.ElecTempFinal[0,:dataOut.min_id_eej+1]=missing + dataOut.EElecTempFinal[0,:dataOut.min_id_eej+1]=missing + dataOut.IonTempFinal[0,:dataOut.min_id_eej+1]=missing + dataOut.EIonTempFinal[0,:dataOut.min_id_eej+1]=missing + dataOut.PhyFinal[0,:dataOut.min_id_eej+1]=missing + dataOut.EPhyFinal[0,:dataOut.min_id_eej+1]=missing + ''' if gmtime(dataOut.utctime).tm_hour >= 11. or gmtime(dataOut.utctime).tm_hour < 23.: #06-18 LT dataOut.DensityFinal[0,:13]=missing dataOut.EDensityFinal[0,:13]=missing @@ -4538,6 +4745,7 @@ class DataSaveCleaner(Operation): dataOut.EIonTempFinal[0,:12]=missing dataOut.PhyFinal[0,:12]=missing dataOut.EPhyFinal[0,:12]=missing + ''' #print(dataOut.EDensityFinal) #exit(1) ''' @@ -4545,9 +4753,8 @@ class DataSaveCleaner(Operation): print(dataOut.heightList) exit(1) ''' - - ''' time_text = datetime.datetime.utcfromtimestamp(dataOut.utctime) + #''' #Agregar año y mes para no estar comentando esta parte!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #if (time_text.hour == 13 and time_text.minute == 20) or (time_text.hour == 0 and time_text.minute > 40) or (time_text.hour == 2 and time_text.minute == 8) or (time_text.hour == 2 and time_text.minute == 19): #Year: 2022, DOY:101 #if (time_text.hour == 17 and time_text.minute == 5) or (time_text.hour == 8 and time_text.minute >= 22) or (time_text.hour == 9) or (time_text.hour == 11 and time_text.minute == 2): #Year: 2022, DOY:102 @@ -4558,7 +4765,14 @@ class DataSaveCleaner(Operation): #if (time_text.hour == 6 and time_text.minute>=34) or (time_text.hour >= 7 and time_text.hour<11) or (time_text.hour == 11 and time_text.minute<47): #Year: 2022, DOY:94 #if (time_text.hour == 7 and time_text.minute>=18) or (time_text.hour >= 8 and time_text.hour<11) or (time_text.hour == 11 and time_text.minute==2) or (time_text.hour == 1) or (time_text.hour == 2 and time_text.minute<=20) or (time_text.hour >= 3 and time_text.hour<=4): #Year: 2022, DOY:92 #if (time_text.hour < 11 and time_text.hour >=5 ) or (time_text.hour == 11 and time_text.minute<=34): #Year: 2022, DOY:93 - if (time_text.hour == 7 and time_text.minute>=18) or (time_text.hour >= 8 and time_text.hour <=11 ): #Year: 2022, DOY:91 + #if (time_text.hour == 7 and time_text.minute>=18) or (time_text.hour >= 8 and time_text.hour <=11 ): #Year: 2022, DOY:91 + + #print(time_text.hour,time_text.minute) + #if (time_text.hour == 16 and time_text.minute==48) or (time_text.hour == 19 and time_text.minute ==49 ) or (time_text.hour >= 0 and time_text.hour < 5): #Year: 2022, DOY:241 + #if (time_text.hour == 5 and time_text.minute==21) or (time_text.hour == 19 and time_text.minute ==49 ) or (time_text.hour == 7 and time_text.minute==40) or (time_text.hour == 7 and time_text.minute==50) or (time_text.hour >= 8 and time_text.hour < 11) or (time_text.hour == 11 and time_text.minute==2) or (time_text.hour == 11 and time_text.minute==13): #Year: 2022, DOY:242 + #if (time_text.hour >= 8 and time_text.hour < 11) or (time_text.hour == 11 and time_text.minute==2) or (time_text.hour == 11 and time_text.minute==13) or (time_text.hour == 11 and 
time_text.minute==24): #Year: 2022, DOY:243 + #if (time_text.hour >= 9 and time_text.hour < 11) or (time_text.hour == 8 and time_text.minute==12) or (time_text.hour == 8 and time_text.minute==22) or (time_text.hour == 8 and time_text.minute==33) or (time_text.hour == 8 and time_text.minute==44) or (time_text.hour == 8 and time_text.minute==54) or (time_text.hour == 11 and time_text.minute==2) or (time_text.hour == 11 and time_text.minute==13): #Year: 2022, DOY:245 + if (time_text.hour >= 8 and time_text.hour < 11) or (time_text.hour == 1) or (time_text.hour == 0 and time_text.minute==25) or (time_text.hour == 0 and time_text.minute==36) or (time_text.hour == 0 and time_text.minute==47) or (time_text.hour == 0 and time_text.minute==57) or (time_text.hour == 2 and time_text.minute==1) or (time_text.hour == 11 and time_text.minute==2) or (time_text.hour == 11 and time_text.minute==13) or (time_text.hour == 11 and time_text.minute==24) or (time_text.hour == 7 and time_text.minute==40) or (time_text.hour == 7 and time_text.minute==50) or (time_text.hour == 3 and time_text.minute==5): #Year: 2022, DOY:244 dataOut.DensityFinal[0,:]=missing dataOut.EDensityFinal[0,:]=missing @@ -4569,30 +4783,139 @@ class DataSaveCleaner(Operation): dataOut.PhyFinal[0,:]=missing dataOut.EPhyFinal[0,:]=missing - dataOut.flagNoData = True - ''' + dataOut.flagNoData = True #Remueve todo el perfil + #''' ''' - if (time_text.hour >= 7 and time_text.hour < 9): #Year: 2022, DOY:102 - dataOut.DensityFinal[0,33:]=missing - dataOut.EDensityFinal[0,33:]=missing - dataOut.ElecTempFinal[0,33:]=missing - dataOut.EElecTempFinal[0,33:]=missing - dataOut.IonTempFinal[0,33:]=missing - dataOut.EIonTempFinal[0,33:]=missing - dataOut.PhyFinal[0,33:]=missing - dataOut.EPhyFinal[0,33:]=missing - - #dataOut.flagNoData = True + #if (time_text.hour >= 7 and time_text.hour < 9): #Year: 2022, DOY:102 + if (time_text.hour == 20 and time_text.minute == 8): #Year: 2022, DOY:243 + id_aux = 35 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing + if (time_text.hour == 20 and time_text.minute == 19): #Year: 2022, DOY:243 + id_aux = 33 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing + if (time_text.hour == 20 and time_text.minute == 29) or (time_text.hour == 20 and time_text.minute == 44): #Year: 2022, DOY:243 + id_aux = 31 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing ''' - + #''' + if (time_text.hour == 2 and time_text.minute == 23): #Year: 2022, DOY:244 + id_aux = 12 + dataOut.DensityFinal[0,:id_aux]=missing + dataOut.EDensityFinal[0,:id_aux]=missing + dataOut.ElecTempFinal[0,:id_aux]=missing + dataOut.EElecTempFinal[0,:id_aux]=missing + dataOut.IonTempFinal[0,:id_aux]=missing + 
dataOut.EIonTempFinal[0,:id_aux]=missing + dataOut.PhyFinal[0,:id_aux]=missing + dataOut.EPhyFinal[0,:id_aux]=missing + if (time_text.hour == 20 and time_text.minute == 52): #Year: 2022, DOY:244 + id_aux = 25 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing + if (time_text.hour == 21 and time_text.minute == 3): #Year: 2022, DOY:244 + id_aux = 38 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing + if (time_text.hour == 21 and time_text.minute == 13): #Year: 2022, DOY:244 + id_aux = 26 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing + if (time_text.hour == 21 and time_text.minute == 24): #Year: 2022, DOY:244 + id_aux = 37 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing + if (time_text.hour == 21 and time_text.minute == 35): #Year: 2022, DOY:244 + id_aux = 37 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing + if (time_text.hour == 21 and time_text.minute == 45): #Year: 2022, DOY:244 + id_aux = 36 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing + if (time_text.hour == 3 and time_text.minute == 59): #Year: 2022, DOY:244 + id_aux = 36 + dataOut.DensityFinal[0,id_aux:]=missing + dataOut.EDensityFinal[0,id_aux:]=missing + dataOut.ElecTempFinal[0,id_aux:]=missing + dataOut.EElecTempFinal[0,id_aux:]=missing + dataOut.IonTempFinal[0,id_aux:]=missing + dataOut.EIonTempFinal[0,id_aux:]=missing + dataOut.PhyFinal[0,id_aux:]=missing + dataOut.EPhyFinal[0,id_aux:]=missing + #''' #print("den_final",dataOut.DensityFinal) dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.DensityFinal)) #Si todos los valores son NaN no se prosigue + + ####dataOut.flagNoData = False #Solo para ploteo + + dataOut.DensityFinal *= 1.e6 #Convert units to m^⁻3 + dataOut.EDensityFinal *= 1.e6 #Convert units to m^⁻3 #print(dataOut.flagNoData) return dataOut class DataSaveCleanerHP(Operation): + ''' + Written by R. 
Flores + ''' def __init__(self, **kwargs): Operation.__init__(self, **kwargs) @@ -4770,6 +5093,9 @@ class DataSaveCleanerHP(Operation): class ACFs(Operation): + ''' + Written by R. Flores + ''' def __init__(self, **kwargs): Operation.__init__(self, **kwargs) @@ -5110,7 +5436,7 @@ class CohInt(Operation): if not self.isConfig: self.setup(n=n, stride=stride, timeInterval=timeInterval, overlapping=overlapping, byblock=byblock, **kwargs) self.isConfig = True - print("inside") + #print("inside") if dataOut.flagDataAsBlock: """ Si la data es leida por bloques, dimension = [nChannels, nProfiles, nHeis] @@ -5141,6 +5467,9 @@ class CohInt(Operation): return dataOut class TimesCode(Operation): + ''' + Written by R. Flores + ''' """ """ @@ -5149,8 +5478,6 @@ class TimesCode(Operation): Operation.__init__(self, **kwargs) - - def run(self,dataOut,code): #code = numpy.repeat(code, repeats=osamp, axis=1) @@ -5214,44 +5541,11 @@ class TimesCode(Operation): return dataOut -''' -class Spectrogram(Operation): - """ - - """ - - def __init__(self, **kwargs): - - Operation.__init__(self, **kwargs) - - - - def run(self,dataOut): - - import scipy - - - - fs = 3200*1e-6 - fs = fs/64 - fs = 1/fs - - nperseg=64 - noverlap=48 - - f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False, nperseg=nperseg, noverlap=noverlap, mode='complex') - - - for ich in range(dataOut.nChannels): - for ihe in range(nheicode): - - - return dataOut -''' - class RemoveDcHae(Operation): - + ''' + Written by R. Flores + ''' def __init__(self, **kwargs): Operation.__init__(self, **kwargs) @@ -5523,6 +5817,7 @@ class Decoder(Operation): profilesList = range(self.__nProfiles) #print(numpy.shape(self.datadecTime)) #print(numpy.shape(data)) + #print(profilesList) for i in range(self.__nChannels): for j in profilesList: self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:] @@ -6693,9 +6988,9 @@ class CrossProdHybrid(CrossProdDP): self.lag_products_LP_median_estimates_aux=0 - #for i in range(dataOut.NLAG): - my_list = ([0,1,2,3,4,5,6,7]) #hasta 7 funciona, en 6 ya no - for i in my_list: + for i in range(dataOut.NLAG): + #my_list = ([0,1,2,3,4,5,6,7]) #hasta 7 funciona, en 6 ya no + #for i in my_list: for j in range(dataOut.NRANGE): for l in range(4): #four outputs for k in range(dataOut.NAVG): @@ -6709,6 +7004,7 @@ class CrossProdHybrid(CrossProdDP): self.lagp2[i,j,:]=sorted(self.lagp2[i,j,:], key=lambda x: x.real) #sorted(self.lagp2[i,j,:].real) if l==3: self.lagp3[i,j,:]=sorted(self.lagp3[i,j,:], key=lambda x: x.real) #sorted(self.lagp3[i,j,:].real) + ''' x = 2 if k>=x and k=dataOut.nkill/2 and k=3 and l <=6)): dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.NAVG*16.0) @@ -7009,6 +7307,10 @@ class SumFlipsHP(SumFlips): print((dataOut.kabxys_integrated[10][hei,lag,0]-dataOut.kabxys_integrated[9][hei,lag,0])) exit(1) ''' + #print(dataOut.rnint2) + #print(numpy.sum(dataOut.kabxys_integrated[4][:,1,0]+dataOut.kabxys_integrated[5][:,1,0])) + #print(dataOut.nis) + #exit(1) return dataOut
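
Note on the new GetSNR operation added in jroproc_spectra.py: it integrates the power spectrum over the FFT axis, normalizes by the channel noise times nFFTPoints, and blanks estimates below roughly 0.5 dB. Below is a minimal NumPy sketch of that computation detached from the ProcessingUnit/Operation machinery; the array shapes and the helper name get_snr are illustrative assumptions, while the normalization and threshold follow the diff.

import numpy

def get_snr(data_spc, noise, nFFTPoints, min_db=0.5):
    # data_spc: (nChannels, nFFTPoints, nHeights) power spectra
    # noise:    (nChannels,) noise estimate per channel (e.g. from getNoise())
    # Sum over the frequency axis and normalize, as GetSNR.run does
    snr = data_spc.sum(axis=1) / (noise[:, None] * nFFTPoints)
    # Estimates below ~0.5 dB are treated as noise-only and set to NaN
    return numpy.where(10 * numpy.log10(snr) < min_db, numpy.nan, snr)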
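
The EEJ masking added to SpectraDataToFaraday and used later by DataSaveCleaner hinges on get_eej_index: the power profile of the lowest range gates is screened with a MAD-based outlier test and the last flagged gate becomes dataOut.min_id_eej, below which densities and temperatures are set to NaN during night hours. A standalone sketch of that selection follows; the MAD rule is an assumption standing in for CleanCohEchoes.mad_based_outlier, whose implementation is not part of this diff, and the 17-gate window follows get_eej_index.

import numpy

def mad_based_outlier(points, thresh=3.5):
    # Assumed modified z-score rule, standing in for CleanCohEchoes.mad_based_outlier
    median = numpy.median(points)
    diff = numpy.abs(points - median)
    mad = numpy.median(diff)
    modified_z = 0.6745 * diff / (mad if mad > 0 else 1e-12)
    return modified_z > thresh

def get_min_eej_index(power_profile, n_low_gates=17):
    # Screen the lowest gates for EEJ contamination and return the last
    # flagged index, analogous to dataOut.min_id_eej in get_eej_index()
    flagged = numpy.array(mad_based_outlier(power_profile[:n_low_gates]).nonzero()).ravel()
    return int(flagged[-1]) if flagged.size else 0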
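
In DenCorrection.TeTiEstimation the per-height efficiency factor cf (the bline/y[0] ratio clipped to [1.0, 1.2]) is no longer applied point by point; instead cf - 1 is fitted to a Gaussian in height with scipy.optimize.least_squares and the smoothed curve plus one multiplies ph2 and sdp2. A sketch of that smoothing step is given below, assuming the same initial guess [0.5, 250, 20] used in the diff; the Gaussian form and function names are illustrative assumptions, since self.gaussian is not shown in this patch.

import numpy
from scipy.optimize import least_squares

def gaussian(x, a, b, c):
    # Assumed form of self.gaussian (amplitude, center, width)
    return a * numpy.exp(-(x - b) ** 2 / (2.0 * c ** 2))

def smooth_efficiency_factor(heights, cf):
    # Fit (cf - 1) to a Gaussian in height and return the smoothed factor + 1,
    # which DenCorrection multiplies into ph2[:NSHTS] and sdp2[:NSHTS]
    residual = lambda p: (cf - 1.0) - gaussian(heights, p[0], p[1], p[2])
    fit = least_squares(residual, x0=numpy.array([0.5, 250.0, 20.0]))
    return gaussian(heights, *fit.x) + 1.0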