diff --git a/schainpy/model/data/jrodata.py b/schainpy/model/data/jrodata.py index c927057..590e326 100644 --- a/schainpy/model/data/jrodata.py +++ b/schainpy/model/data/jrodata.py @@ -76,6 +76,8 @@ def hildebrand_sekhon(data, navg): """ sortdata = numpy.sort(data, axis=None) + #print(numpy.shape(data)) + #exit() ''' lenOfData = len(sortdata) nums_min = lenOfData*0.2 @@ -273,13 +275,13 @@ class JROData(GenericData): ''' ''' return self.radarControllerHeaderObj.ippSeconds - + @ippSeconds.setter def ippSeconds(self, ippSeconds): ''' ''' self.radarControllerHeaderObj.ippSeconds = ippSeconds - + @property def code(self): ''' @@ -370,7 +372,7 @@ class Voltage(JROData): self.flagShiftFFT = False self.flagDataAsBlock = False # Asumo que la data es leida perfil a perfil self.profileIndex = 0 - self.metadata_list = ['type', 'heightList', 'timeZone', 'nProfiles', 'channelList', 'nCohInt', + self.metadata_list = ['type', 'heightList', 'timeZone', 'nProfiles', 'channelList', 'nCohInt', 'code', 'nCode', 'nBaud', 'ippSeconds', 'ipp'] def getNoisebyHildebrand(self, channel=None): @@ -428,6 +430,103 @@ class Voltage(JROData): noise = property(getNoise, "I'm the 'nHeights' property.") +class CrossProds(JROData): + + # data es un numpy array de 2 dmensiones (canales, alturas) + data = None + + def __init__(self): + ''' + Constructor + ''' + + self.useLocalTime = True + ''' + self.radarControllerHeaderObj = RadarControllerHeader() + self.systemHeaderObj = SystemHeader() + self.type = "Voltage" + self.data = None +# self.dtype = None +# self.nChannels = 0 +# self.nHeights = 0 + self.nProfiles = None + self.heightList = None + self.channelList = None +# self.channelIndexList = None + self.flagNoData = True + self.flagDiscontinuousBlock = False + self.utctime = None + self.timeZone = None + self.dstFlag = None + self.errorCount = None + self.nCohInt = None + self.blocksize = None + self.flagDecodeData = False # asumo q la data no esta decodificada + self.flagDeflipData = False # asumo q la data no esta sin flip + self.flagShiftFFT = False + self.flagDataAsBlock = False # Asumo que la data es leida perfil a perfil + self.profileIndex = 0 + + + def getNoisebyHildebrand(self, channel=None): + + + if channel != None: + data = self.data[channel] + nChannels = 1 + else: + data = self.data + nChannels = self.nChannels + + noise = numpy.zeros(nChannels) + power = data * numpy.conjugate(data) + + for thisChannel in range(nChannels): + if nChannels == 1: + daux = power[:].real + else: + daux = power[thisChannel, :].real + noise[thisChannel] = hildebrand_sekhon(daux, self.nCohInt) + + return noise + + def getNoise(self, type=1, channel=None): + + if type == 1: + noise = self.getNoisebyHildebrand(channel) + + return noise + + def getPower(self, channel=None): + + if channel != None: + data = self.data[channel] + else: + data = self.data + + power = data * numpy.conjugate(data) + powerdB = 10 * numpy.log10(power.real) + powerdB = numpy.squeeze(powerdB) + + return powerdB + + def getTimeInterval(self): + + timeInterval = self.ippSeconds * self.nCohInt + + return timeInterval + + noise = property(getNoise, "I'm the 'nHeights' property.") + timeInterval = property(getTimeInterval, "I'm the 'timeInterval' property") + ''' + def getTimeInterval(self): + + timeInterval = self.ippSeconds * self.nCohInt + + return timeInterval + + + class Spectra(JROData): def __init__(self): @@ -458,7 +557,7 @@ class Spectra(JROData): self.ippFactor = 1 self.beacon_heiIndexList = [] self.noise_estimation = None - self.metadata_list = ['type', 
'heightList', 'timeZone', 'pairsList', 'channelList', 'nCohInt', + self.metadata_list = ['type', 'heightList', 'timeZone', 'pairsList', 'channelList', 'nCohInt', 'code', 'nCode', 'nBaud', 'ippSeconds', 'ipp','nIncohInt', 'nFFTPoints', 'nProfiles'] def getNoisebyHildebrand(self, xmin_index=None, xmax_index=None, ymin_index=None, ymax_index=None): @@ -608,7 +707,7 @@ class Spectra(JROData): print("This property should not be initialized") return - + noise = property(getNoise, setValue, "I'm the 'nHeights' property.") @@ -705,7 +804,7 @@ class Fits(JROData): return self.ipp_sec noise = property(getNoise, "I'm the 'nHeights' property.") - + class Correlation(JROData): @@ -886,6 +985,7 @@ class Parameters(Spectra): else: return self.paramInterval + def setValue(self, value): print("This property should not be initialized") @@ -928,6 +1028,10 @@ class PlotterData(object): self.plottypes = ['cspc', 'spc', 'noise', 'rti'] elif code == 'rti': self.plottypes = ['noise', 'rti'] + elif code == 'crossprod': + self.plottypes = ['crossprod', 'kay'] + elif code == 'spectrogram': + self.plottypes = ['spc', 'spectrogram'] else: self.plottypes = [code] @@ -976,9 +1080,11 @@ class PlotterData(object): plot = 'snr' elif 'spc_moments' == plot: plot = 'moments' + elif 'spc_oblique' == plot: + plot = 'oblique' self.data[plot] = {} - if 'spc' in self.data or 'rti' in self.data or 'cspc' in self.data or 'moments' in self.data: + if 'spc' in self.data or 'rti' in self.data or 'cspc' in self.data or 'moments' in self.data or 'oblique' in self.data: self.data['noise'] = {} self.data['rti'] = {} if 'noise' not in self.plottypes: @@ -1020,16 +1126,33 @@ class PlotterData(object): self.__heights.append(dataOut.heightList) self.__all_heights.update(dataOut.heightList) + + for plot in self.plottypes: - if plot in ('spc', 'spc_moments', 'spc_cut'): + if plot in ('spc', 'spc_moments', 'spc_cut', 'spc_oblique'): + + + self.shift1 = dataOut.Oblique_params[0][1] + self.shift2 = dataOut.Oblique_params[0][4] + self.shift1_error = dataOut.Oblique_param_errors[0][1] + self.shift2_error = dataOut.Oblique_param_errors[0][4] + z = dataOut.data_spc/dataOut.normFactor + #print(dataOut.normFactor) + #print(z[0,3,15]) + #print("here") + #print(dataOut.data_spc[0,0,0]) + #exit() buffer = 10*numpy.log10(z) if plot == 'cspc': buffer = (dataOut.data_spc, dataOut.data_cspc) + self.nFactor=dataOut.normFactor if plot == 'noise': buffer = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor) if plot in ('rti', 'spcprofile'): buffer = dataOut.getPower() + #print(buffer[0,0]) + #exit() if plot == 'snr_db': buffer = dataOut.data_SNR if plot == 'snr': @@ -1048,6 +1171,277 @@ class PlotterData(object): buffer = dataOut.data_output if plot == 'param': buffer = dataOut.data_param + if plot == 'spectrogram': + maxHei = 1350 #11 + #maxHei = 2500 + maxHei = 0 + #maxHei = 990 #12 + ###maxHei = 990 + indb = numpy.where(dataOut.heightList <= maxHei) + hei = indb[0][-1] + #hei = 19 + print(hei) + #hei = 0 + factor = dataOut.nIncohInt + #print(factor) + + #exit(1) + z = dataOut.data_spc[:,:,hei] / factor + + #for j in range(z.shape[1]): + #z[:,j] = z[:,j]/hildebrand_sekhon(z[], self.nCohInt) + + ##z = z/hildebrand_sekhon(z, factor) + noise = numpy.zeros(dataOut.nChannels) + for i in range(dataOut.nChannels): + #daux = numpy.sort(pair0[i,:,:],axis= None) + noise[i]=hildebrand_sekhon( z[i,:] ,dataOut.nIncohInt) + #for j in range(z.shape[1]): + #z[:,j] = z[:,j]/noise + + #print(z.shape[1]) + norm_factor = 
numpy.copy(z[:,int(z.shape[1]/2)])#/z[:,int(z.shape[1]/2)])*8000 + #print(norm_factor) + #print(z[0,315:325]) + #norm_factor = norm_factor.reshape((z.shape[0],z.shape[1])) + #print(norm_factor) + #exit(1) + #print(z.shape[1]) + + #for j in range(z.shape[1]): + #z[:,j] = z[:,j]/norm_factor + + #print(z[0,315:325]) + #exit(1) + + #z = numpy.mean(dataOut.data_spc[:,:,:],axis=2) / factor + z = numpy.where(numpy.isfinite(z), z, numpy.NAN) + #avg = numpy.average(z, axis=1) + #print((dataOut.data_spc.shape)) + #exit(1) + self.hei = hei + self.heightList = dataOut.heightList + self.DH = (dataOut.heightList[1] - dataOut.heightList[0])/dataOut.step + self.nProfiles = dataOut.nProfiles + #print(dataOut.heightList) + + + buffer = 10 * numpy.log10(z) + + + ###buffer = z + import matplotlib.pyplot as plt + fig, axes = plt.subplots(figsize=(14, 10)) + x = numpy.linspace(0,20,numpy.shape(buffer)[1]) + x = numpy.fft.fftfreq(numpy.shape(buffer)[1],0.00001) + x = numpy.fft.fftshift(x) + + plt.plot(x,buffer[0,:]) + axes = plt.gca() + axes.set_xlim([-10000,10000]) + + #axes.set_xlim([0,30000]) + #axes.set_ylim([-100,0.0025*1e10]) + plt.show() + import time + #time.sleep(20) + #exit(1) + + + + #if dataOut.profileIndex + + if plot == 'xmit': + y_1=numpy.arctan2(dataOut.output_LP[:,0,2].imag,dataOut.output_LP[:,0,2].real)* 180 / (numpy.pi*10) + y_2=numpy.abs(dataOut.output_LP[:,0,2]) + norm=numpy.max(y_2) + norm=max(norm,0.1) + y_2=y_2/norm + + buffer = numpy.vstack((y_1,y_2)) + self.NLAG = dataOut.NLAG + + if plot == 'crossprod': + buffer = dataOut.crossprods + self.NDP = dataOut.NDP + + if plot == 'crossprodlp': + buffer = 10*numpy.log10(numpy.abs(dataOut.output_LP)) + self.NRANGE = dataOut.NRANGE + self.NLAG = dataOut.NLAG + + + if plot == 'noisedp': + buffer = 10*numpy.log10(dataOut.noise_final) + #print(buffer) + + if plot == 'FaradayAngle': + buffer = numpy.degrees(dataOut.phi) + #print(buffer) + + if plot == 'RTIDP': + buffer = dataOut.data_for_RTI_DP + self.NDP = dataOut.NDP + + if plot == 'RTILP': + buffer = dataOut.data_for_RTI_LP + self.NRANGE = dataOut.NRANGE + + + if plot == 'denrti': + buffer = dataOut.DensityFinal + + + if plot == 'denrtiLP': + + #buffer = numpy.reshape(numpy.concatenate((dataOut.ph2[:dataOut.cut],dataOut.ne[dataOut.cut:dataOut.NACF])),(1,-1)) + buffer = dataOut.DensityFinal + #self.flagDataAsBlock = dataOut.flagDataAsBlock + #self.NDP = dataOut.NDP + if plot == 'den': + buffer = dataOut.ph2[:dataOut.NSHTS] + self.dphi=dataOut.dphi[:dataOut.NSHTS] + self.sdp2=dataOut.sdp2[:dataOut.NSHTS] + self.sdn1=dataOut.sdn1[:dataOut.NSHTS]#/self.dphi + self.NSHTS=dataOut.NSHTS + ''' + flag1=False + flag0=True + for i in range(12,dataOut.NSHTS): + print("H: ",i*15) + print(abs((dataOut.sdn1[i]/(dataOut.dphi[i]**2))*100)) + if flag0: + if abs((dataOut.sdn1[i]/dataOut.dphi[i]))<0.0005*abs(dataOut.dphi[i]): + print("***************************** FIRST: ",(i)*15,"*****************************") + flag1=True + flag0=False + #pass + #print("****************************************GOOD****************************************") + #else: + #print("****************************************",(i-1)*15,"****************************************") + #break + if flag1: + if abs((dataOut.sdn1[i]/dataOut.dphi[i]))>0.0005*abs(dataOut.dphi[i]): + print("***************************** LAST: ",(i-1)*15,"*****************************") + break + #print("H: ",i*15) + #print(dataOut.sdn1[i]) + ''' + if plot == 'denLP': + buffer = dataOut.ph2[:dataOut.NSHTS] + self.dphi=dataOut.dphi[:dataOut.NSHTS] + 
self.sdp2=dataOut.sdp2[:dataOut.NSHTS] + self.ne=dataOut.ne[:dataOut.NACF] + self.ene=dataOut.ene[:dataOut.NACF]*dataOut.ne[:dataOut.NACF]*0.434 + #self.ene=10**dataOut.ene[:dataOut.NACF] + self.NSHTS=dataOut.NSHTS + self.cut=dataOut.cut + + if plot == 'ETemp': + #buffer = dataOut.ElecTempClean + buffer = dataOut.ElecTempFinal + if plot == 'ITemp': + #buffer = dataOut.IonTempClean + buffer = dataOut.IonTempFinal + if plot == 'ETempLP': + #buffer = dataOut.IonTempClean + #buffer = numpy.reshape(numpy.concatenate((dataOut.te2[:dataOut.cut],dataOut.te[dataOut.cut:])),(1,-1)) + buffer = dataOut.ElecTempFinal + #print(buffer) + if plot == 'ITempLP': + #buffer = dataOut.IonTempClean + #buffer = numpy.reshape(numpy.concatenate((dataOut.ti2[:dataOut.cut],dataOut.ti[dataOut.cut:])),(1,-1)) + buffer = dataOut.IonTempFinal + + if plot == 'HFracLP': + #buffer = dataOut.IonTempClean + #buffer = numpy.reshape(numpy.concatenate((dataOut.phy2[:dataOut.cut],dataOut.ph[dataOut.cut:])),(1,-1)) + buffer = dataOut.PhyFinal + if plot == 'HeFracLP': + #buffer = dataOut.IonTempClean + #nan_array=numpy.empty((dataOut.cut)) + #nan_array[:]=numpy.nan + #buffer = numpy.reshape(numpy.concatenate((nan_array,dataOut.phe[dataOut.cut:])),(1,-1)) + buffer = dataOut.PheFinal + + + + + + if plot =='acfs': + buffer = dataOut.acfs_to_plot + self.acfs_error_to_plot=dataOut.acfs_error_to_plot + self.lags_to_plot=dataOut.lags_to_plot + self.x_igcej_to_plot=dataOut.x_igcej_to_plot + self.x_ibad_to_plot=dataOut.x_ibad_to_plot + self.y_igcej_to_plot=dataOut.y_igcej_to_plot + self.y_ibad_to_plot=dataOut.y_ibad_to_plot + self.NSHTS = dataOut.NSHTS + self.DPL = dataOut.DPL + if plot =='acfs_LP': + + aux=numpy.zeros((dataOut.NACF,dataOut.IBITS),'float32') + self.errors=numpy.zeros((dataOut.NACF,dataOut.IBITS),'float32') + self.lags_LP_to_plot=numpy.zeros((dataOut.NACF,dataOut.IBITS),'float32') + ''' + for i in range(dataOut.NACF): + for j in range(dataOut.IBITS): + aux[i,j]=dataOut.fit_array_real[i,j]/dataOut.fit_array_real[i,0] + aux[i,j]=max(min(aux[i,j],1.0),-1.0)*dataOut.DH+dataOut.heightList[i] + ''' + for i in range(dataOut.NACF): + for j in range(dataOut.IBITS): + if numpy.abs(dataOut.errors[j,i]/dataOut.output_LP_integrated.real[0,i,0])<1.0: + aux[i,j]=dataOut.output_LP_integrated.real[j,i,0]/dataOut.output_LP_integrated.real[0,i,0] + aux[i,j]=max(min(aux[i,j],1.0),-1.0)*dataOut.DH+dataOut.heightList[i] + self.lags_LP_to_plot[i,j]=dataOut.lags_LP[j] + self.errors[i,j]=dataOut.errors[j,i]/dataOut.output_LP_integrated.real[0,i,0]*dataOut.DH + else: + aux[i,j]=numpy.nan + self.lags_LP_to_plot[i,j]=numpy.nan + self.errors[i,j]=numpy.nan + + + + buffer = aux + + #self.lags_LP_to_plot=dataOut.lags_LP + + self.NACF = dataOut.NACF + self.NLAG = dataOut.NLAG + + if plot == 'tempsDP': + + buffer = dataOut.te2 + self.ete2 = dataOut.ete2 + self.ti2 = dataOut.ti2 + self.eti2 = dataOut.eti2 + + self.NSHTS = dataOut.NSHTS + + if plot == 'temps_LP': + + buffer = numpy.concatenate((dataOut.te2[:dataOut.cut],dataOut.te[dataOut.cut:])) + self.ete = numpy.concatenate((dataOut.ete2[:dataOut.cut],dataOut.ete[dataOut.cut:])) + self.ti = numpy.concatenate((dataOut.ti2[:dataOut.cut],dataOut.ti[dataOut.cut:])) + self.eti = numpy.concatenate((dataOut.eti2[:dataOut.cut],dataOut.eti[dataOut.cut:])) + + self.NACF = dataOut.NACF + + + if plot == 'fracs_LP': + + aux_nan=numpy.zeros(dataOut.cut,'float32') + aux_nan[:]=numpy.nan + buffer = numpy.concatenate((aux_nan,dataOut.ph[dataOut.cut:])) + self.eph = numpy.concatenate((aux_nan,dataOut.eph[dataOut.cut:])) + 
self.phe = dataOut.phe[dataOut.cut:] + self.ephe = dataOut.ephe[dataOut.cut:] + + self.NACF = dataOut.NACF + self.cut = dataOut.cut + + if plot == 'scope': buffer = dataOut.data self.flagDataAsBlock = dataOut.flagDataAsBlock @@ -1076,6 +1470,10 @@ class PlotterData(object): elif plot == 'spc_moments': self.data['spc'][tm] = buffer self.data['moments'][tm] = dataOut.moments + elif plot == 'spc_oblique': + self.data['spc'][tm] = buffer + self.data['shift1'][tm] = dataOut.Oblique_params[0] + self.data['shift2'][tm] = dataOut.Oblique_params[3] else: if self.buffering: self.data[plot][tm] = buffer @@ -1141,6 +1539,7 @@ class PlotterData(object): meta['interval'] = float(self.interval) meta['localtime'] = self.localtime meta['yrange'] = self.roundFloats(self.heights[::dy].tolist()) + if 'spc' in self.data or 'cspc' in self.data: meta['xrange'] = self.roundFloats(self.xrange[2][::dx].tolist()) else: diff --git a/schainpy/model/data/jroheaderIO.py b/schainpy/model/data/jroheaderIO.py index 4d1eeca..8648942 100644 --- a/schainpy/model/data/jroheaderIO.py +++ b/schainpy/model/data/jroheaderIO.py @@ -137,7 +137,7 @@ class BasicHeader(Header): timeZone = None dstFlag = None errorCount = None - datatime = None + F = None structure = BASIC_STRUCTURE __LOCALTIME = None @@ -363,6 +363,7 @@ class RadarControllerHeader(Header): self.expType = int(header['nExpType'][0]) self.nTx = int(header['nNTx'][0]) self.ipp = float(header['fIpp'][0]) + #print(self.ipp) self.txA = float(header['fTxA'][0]) self.txB = float(header['fTxB'][0]) self.nWindows = int(header['nNumWindows'][0]) @@ -534,6 +535,7 @@ class RadarControllerHeader(Header): def get_ippSeconds(self): ''' ''' + ippSeconds = 2.0 * 1000 * self.ipp / SPEED_OF_LIGHT return ippSeconds @@ -640,6 +642,7 @@ class ProcessingHeader(Header): self.nWindows = int(header['nNumWindows'][0]) self.processFlags = header['nProcessFlags'] self.nCohInt = int(header['nCoherentIntegrations'][0]) + self.nIncohInt = int(header['nIncoherentIntegrations'][0]) self.totalSpectra = int(header['nTotalSpectra'][0]) @@ -903,4 +906,4 @@ def get_procflag_dtype(index): def get_dtype_width(index): - return DTYPE_WIDTH[index] \ No newline at end of file + return DTYPE_WIDTH[index] diff --git a/schainpy/model/graphics/__init__.py b/schainpy/model/graphics/__init__.py index a0d1295..4369da6 100644 --- a/schainpy/model/graphics/__init__.py +++ b/schainpy/model/graphics/__init__.py @@ -3,3 +3,4 @@ from .jroplot_spectra import * from .jroplot_heispectra import * from .jroplot_correlation import * from .jroplot_parameters import * +from .jroplot_voltage_lags import * diff --git a/schainpy/model/graphics/jroplot_base.py b/schainpy/model/graphics/jroplot_base.py index 2525faf..0644a1f 100644 --- a/schainpy/model/graphics/jroplot_base.py +++ b/schainpy/model/graphics/jroplot_base.py @@ -221,6 +221,10 @@ class Plot(Operation): self.zmin = kwargs.get('zmin', None) self.zmax = kwargs.get('zmax', None) self.zlimits = kwargs.get('zlimits', None) + self.xlimits = kwargs.get('xlimits', None) + self.xstep_given = kwargs.get('xstep_given', None) + self.ystep_given = kwargs.get('ystep_given', None) + self.autoxticks = kwargs.get('autoxticks', True) self.xmin = kwargs.get('xmin', None) self.xmax = kwargs.get('xmax', None) self.xrange = kwargs.get('xrange', 12) @@ -253,7 +257,7 @@ class Plot(Operation): self.__throttle_plot = apply_throttle(self.throttle) self.data = PlotterData( self.CODE, self.throttle, self.exp_code, self.localtime, self.buffering, snr=self.showSNR) - + if self.server: if not 
self.server.startswith('tcp://'): self.server = 'tcp://{}'.format(self.server) @@ -269,7 +273,7 @@ class Plot(Operation): self.setup() - self.time_label = 'LT' if self.localtime else 'UTC' + self.time_label = 'LT' if self.localtime else 'UTC' if self.width is None: self.width = 8 @@ -374,7 +378,7 @@ class Plot(Operation): ''' Set min and max values, labels, ticks and titles ''' - + for n, ax in enumerate(self.axes): if ax.firsttime: if self.xaxis != 'time': @@ -457,14 +461,14 @@ class Plot(Operation): self.plot() self.format() - + for n, fig in enumerate(self.figures): if self.nrows == 0 or self.nplots == 0: log.warning('No data', self.name) fig.text(0.5, 0.5, 'No Data', fontsize='large', ha='center') fig.canvas.manager.set_window_title(self.CODE) continue - + fig.canvas.manager.set_window_title('{} - {}'.format(self.title, self.getDateTime(self.data.max_time).strftime('%Y/%m/%d'))) fig.canvas.draw() @@ -474,7 +478,7 @@ class Plot(Operation): if self.save: self.save_figure(n) - + if self.server: self.send_to_server() @@ -492,7 +496,7 @@ class Plot(Operation): figname = os.path.join( self.save, self.save_code, - '{}_{}.png'.format( + '{}_{}.png'.format( self.save_code, self.getDateTime(self.data.max_time).strftime( '%Y%m%d_%H%M%S' @@ -525,7 +529,7 @@ class Plot(Operation): return self.sender_time = self.data.tm - + attrs = ['titles', 'zmin', 'zmax', 'tag', 'ymin', 'ymax'] for attr in attrs: value = getattr(self, attr) @@ -546,7 +550,7 @@ class Plot(Operation): except: tm = self.sender_queue.get() self.sender_queue.put(self.data.tm) - + while True: if self.sender_queue.empty(): break @@ -588,7 +592,7 @@ class Plot(Operation): self.ncols: number of cols self.nplots: number of plots (channels or pairs) self.ylabel: label for Y axes - self.titles: list of axes title + self.titles: list of axes title ''' raise NotImplementedError @@ -598,12 +602,13 @@ class Plot(Operation): Must be defined in the child class ''' raise NotImplementedError - + def run(self, dataOut, **kwargs): ''' Main plotting routine ''' - + print("time_inside_plot: ",dataOut.datatime) + print(dataOut.flagNoData) if self.isConfig is False: self.__setup(**kwargs) @@ -622,8 +627,8 @@ class Plot(Operation): self.poll.register(self.socket, zmq.POLLIN) tm = getattr(dataOut, self.attr_time) - - if self.data and 'time' in self.xaxis and (tm - self.tmin) >= self.xrange*60*60: + + if self.data and 'time' in self.xaxis and (tm - self.tmin) >= self.xrange*60*60: self.save_time = tm self.__plot() self.tmin += self.xrange*60*60 @@ -639,7 +644,7 @@ class Plot(Operation): dt = self.getDateTime(tm) if self.xmin is None: self.tmin = tm - self.xmin = dt.hour + self.xmin = dt.hour minutes = (self.xmin-int(self.xmin)) * 60 seconds = (minutes - int(minutes)) * 60 self.tmin = (dt.replace(hour=int(self.xmin), minute=int(minutes), second=int(seconds)) - @@ -662,4 +667,3 @@ class Plot(Operation): self.__plot() if self.data and not self.data.flagNoData and self.pause: figpause(10) - diff --git a/schainpy/model/graphics/jroplot_parameters.py b/schainpy/model/graphics/jroplot_parameters.py index f2311aa..916117e 100644 --- a/schainpy/model/graphics/jroplot_parameters.py +++ b/schainpy/model/graphics/jroplot_parameters.py @@ -38,6 +38,15 @@ class SpectralMomentsPlot(SpectraPlot): colormap = 'jet' plot_type = 'pcolor' +class SpectralFitObliquePlot(SpectraPlot): + ''' + Plot for Spectral Oblique + ''' + CODE = 'spc_moments' + colormap = 'jet' + plot_type = 'pcolor' + + class SnrPlot(RTIPlot): ''' @@ -137,10 +146,10 @@ class ParametersPlot(RTIPlot): self.nrows 
= self.data.shape(self.CODE)[0] self.nplots = self.nrows self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.08, 'right':0.95, 'top': 0.95}) - + if not self.xlabel: self.xlabel = 'Time' - + if self.showSNR: self.nrows += 1 self.nplots += 1 @@ -336,4 +345,3 @@ class PolarMapPlot(Plot): self.save_labels = ['{}-{}'.format(lbl, label) for lbl in self.labels] self.titles = ['{} {}'.format( self.data.parameters[x], title) for x in self.channels] - diff --git a/schainpy/model/graphics/jroplot_spectra.py b/schainpy/model/graphics/jroplot_spectra.py index df9ce3b..fe78703 100644 --- a/schainpy/model/graphics/jroplot_spectra.py +++ b/schainpy/model/graphics/jroplot_spectra.py @@ -22,6 +22,7 @@ class SpectraPlot(Plot): plot_type = 'pcolor' def setup(self): + self.nplots = len(self.data.channels) self.ncols = int(numpy.sqrt(self.nplots) + 0.9) self.nrows = int((1.0 * self.nplots / self.ncols) + 0.9) @@ -31,10 +32,13 @@ class SpectraPlot(Plot): self.width = 4 * self.ncols else: self.width = 3.5 * self.ncols - self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08}) + self.plots_adjust.update({'wspace': 0.8, 'hspace':0.2, 'left': 0.2, 'right': 0.9, 'bottom': 0.18}) self.ylabel = 'Range [km]' def plot(self): + + #print(self.xaxis) + #exit(1) if self.xaxis == "frequency": x = self.data.xrange[0] self.xlabel = "Frequency (kHz)" @@ -51,19 +55,25 @@ class SpectraPlot(Plot): self.titles = [] + y = self.data.heights self.y = y z = self.data['spc'] + self.CODE2 = 'spc_oblique' + + for n, ax in enumerate(self.axes): noise = self.data['noise'][n][-1] if self.CODE == 'spc_moments': mean = self.data['moments'][n, :, 1, :][-1] + if ax.firsttime: self.xmax = self.xmax if self.xmax else numpy.nanmax(x) self.xmin = self.xmin if self.xmin else -self.xmax self.zmin = self.zmin if self.zmin else numpy.nanmin(z) self.zmax = self.zmax if self.zmax else numpy.nanmax(z) + #print(numpy.shape(x)) ax.plt = ax.pcolormesh(x, y, z[n].T, vmin=self.zmin, vmax=self.zmax, @@ -77,15 +87,122 @@ class SpectraPlot(Plot): color="k", linestyle="dashed", lw=1)[0] if self.CODE == 'spc_moments': ax.plt_mean = ax.plot(mean, y, color='k')[0] + else: + ax.plt.set_array(z[n].T.ravel()) if self.showprofile: ax.plt_profile.set_data(self.data['rti'][n][-1], y) ax.plt_noise.set_data(numpy.repeat(noise, len(y)), y) if self.CODE == 'spc_moments': ax.plt_mean.set_data(mean, y) + self.titles.append('CH {}: {:3.2f}dB'.format(n, noise)) +class SpectraObliquePlot(Plot): + ''' + Plot for Spectra data + ''' + + CODE = 'spc' + colormap = 'jet' + plot_type = 'pcolor' + + def setup(self): + self.xaxis = "oblique" + self.nplots = len(self.data.channels) + self.ncols = int(numpy.sqrt(self.nplots) + 0.9) + self.nrows = int((1.0 * self.nplots / self.ncols) + 0.9) + self.height = 2.6 * self.nrows + self.cb_label = 'dB' + if self.showprofile: + self.width = 4 * self.ncols + else: + self.width = 3.5 * self.ncols + self.plots_adjust.update({'wspace': 0.8, 'hspace':0.2, 'left': 0.2, 'right': 0.9, 'bottom': 0.18}) + self.ylabel = 'Range [km]' + + def plot(self): + + #print(self.xaxis) + #exit(1) + if self.xaxis == "frequency": + x = self.data.xrange[0] + self.xlabel = "Frequency (kHz)" + elif self.xaxis == "time": + x = self.data.xrange[1] + self.xlabel = "Time (ms)" + else: + x = self.data.xrange[2] + self.xlabel = "Velocity (m/s)" + + if self.CODE == 'spc_moments': + x = self.data.xrange[2] + self.xlabel = "Velocity (m/s)" + + self.titles = [] + #self.xlabel = "Velocidad (m/s)" + #self.ylabel = 'Rango (km)' + + + y = 
self.data.heights + self.y = y + z = self.data['spc'] + + self.CODE2 = 'spc_oblique' + + + for n, ax in enumerate(self.axes): + noise = self.data['noise'][n][-1] + if self.CODE == 'spc_moments': + mean = self.data['moments'][n, :, 1, :][-1] + if self.CODE2 == 'spc_oblique': + shift1 = self.data.shift1 + shift2 = self.data.shift2 + if ax.firsttime: + self.xmax = self.xmax if self.xmax else numpy.nanmax(x) + self.xmin = self.xmin if self.xmin else -self.xmax + self.zmin = self.zmin if self.zmin else numpy.nanmin(z) + self.zmax = self.zmax if self.zmax else numpy.nanmax(z) + #print(numpy.shape(x)) + ax.plt = ax.pcolormesh(x, y, z[n].T, + vmin=self.zmin, + vmax=self.zmax, + cmap=plt.get_cmap(self.colormap) + ) + + if self.showprofile: + ax.plt_profile = self.pf_axes[n].plot( + self.data['rti'][n][-1], y)[0] + ax.plt_noise = self.pf_axes[n].plot(numpy.repeat(noise, len(y)), y, + color="k", linestyle="dashed", lw=1)[0] + if self.CODE == 'spc_moments': + ax.plt_mean = ax.plot(mean, y, color='k')[0] + + if self.CODE2 == 'spc_oblique': + #ax.plt_shift1 = ax.plot(shift1, y, color='k', marker='x', linestyle='None', markersize=0.5)[0] + #ax.plt_shift2 = ax.plot(shift2, y, color='m', marker='x', linestyle='None', markersize=0.5)[0] + self.ploterr1 = ax.errorbar(shift1, y, xerr=self.data.shift1_error,fmt='k^',elinewidth=0.2,marker='x',linestyle='None',markersize=0.5,capsize=0.3,markeredgewidth=0.2) + self.ploterr2 = ax.errorbar(shift2, y, xerr=self.data.shift2_error,fmt='m^',elinewidth=0.2,marker='x',linestyle='None',markersize=0.5,capsize=0.3,markeredgewidth=0.2) + + else: + self.ploterr1.remove() + self.ploterr2.remove() + ax.plt.set_array(z[n].T.ravel()) + if self.showprofile: + ax.plt_profile.set_data(self.data['rti'][n][-1], y) + ax.plt_noise.set_data(numpy.repeat(noise, len(y)), y) + if self.CODE == 'spc_moments': + ax.plt_mean.set_data(mean, y) + if self.CODE2 == 'spc_oblique': + #ax.plt_shift1.set_data(shift1, y) + #ax.plt_shift2.set_data(shift2, y) + #ax.clf() + self.ploterr1 = ax.errorbar(shift1, y, xerr=self.data.shift1_error,fmt='k^',elinewidth=0.2,marker='x',linestyle='None',markersize=0.5,capsize=0.3,markeredgewidth=0.2) + self.ploterr2 = ax.errorbar(shift2, y, xerr=self.data.shift2_error,fmt='m^',elinewidth=0.2,marker='x',linestyle='None',markersize=0.5,capsize=0.3,markeredgewidth=0.2) + + self.titles.append('CH {}: {:3.2f}dB'.format(n, noise)) + #self.titles.append('{}'.format('Velocidad Doppler')) class CrossSpectraPlot(Plot): @@ -103,7 +220,7 @@ class CrossSpectraPlot(Plot): self.nrows = len(self.data.pairs) self.nplots = self.nrows * 4 self.width = 3.1 * self.ncols - self.height = 2.6 * self.nrows + self.height = 5 * self.nrows self.ylabel = 'Range [km]' self.showprofile = False self.plots_adjust.update({'left': 0.08, 'right': 0.92, 'wspace': 0.5, 'hspace':0.4, 'top':0.95, 'bottom': 0.08}) @@ -119,30 +236,44 @@ class CrossSpectraPlot(Plot): else: x = self.data.xrange[2] self.xlabel = "Velocity (m/s)" - + self.titles = [] + y = self.data.heights self.y = y nspc = self.data['spc'] + #print(numpy.shape(self.data['spc'])) spc = self.data['cspc'][0] + #print(numpy.shape(spc)) + #exit() cspc = self.data['cspc'][1] + #print(numpy.shape(cspc)) + #exit() for n in range(self.nrows): noise = self.data['noise'][:,-1] pair = self.data.pairs[n] + #print(pair) + #exit() ax = self.axes[4 * n] - if ax.firsttime: - self.xmax = self.xmax if self.xmax else numpy.nanmax(x) - self.xmin = self.xmin if self.xmin else -self.xmax - self.zmin = self.zmin if self.zmin else numpy.nanmin(nspc) - self.zmax = 
self.zmax if self.zmax else numpy.nanmax(nspc) + if ax.firsttime: + self.xmax = self.xmax if self.xmax else numpy.nanmax(x) + #self.xmin = self.xmin if self.xmin else -self.xmax + self.xmin = self.xmin if self.xmin else numpy.nanmin(x) + self.zmin = self.zmin if self.zmin else numpy.nanmin(nspc) + self.zmax = self.zmax if self.zmax else numpy.nanmax(nspc) + #print(numpy.nanmin(x)) + #print(self.xmax) + #print(self.xmin) + #exit() + #self.xmin=-.1 ax.plt = ax.pcolormesh(x , y , nspc[pair[0]].T, vmin=self.zmin, vmax=self.zmax, cmap=plt.get_cmap(self.colormap) - ) - else: + ) + else: ax.plt.set_array(nspc[pair[0]].T.ravel()) self.titles.append('CH {}: {:3.2f}dB'.format(pair[0], noise[pair[0]])) @@ -153,10 +284,10 @@ class CrossSpectraPlot(Plot): vmax=self.zmax, cmap=plt.get_cmap(self.colormap) ) - else: + else: ax.plt.set_array(nspc[pair[1]].T.ravel()) self.titles.append('CH {}: {:3.2f}dB'.format(pair[1], noise[pair[1]])) - + out = cspc[n] / numpy.sqrt(spc[pair[0]] * spc[pair[1]]) coh = numpy.abs(out) phase = numpy.arctan2(out.imag, out.real) * 180 / numpy.pi @@ -172,19 +303,345 @@ class CrossSpectraPlot(Plot): ax.plt.set_array(coh.T.ravel()) self.titles.append( 'Coherence Ch{} * Ch{}'.format(pair[0], pair[1])) - + ax = self.axes[4 * n + 3] if ax.firsttime: ax.plt = ax.pcolormesh(x, y, phase.T, vmin=-180, vmax=180, - cmap=plt.get_cmap(self.colormap_phase) + cmap=plt.get_cmap(self.colormap_phase) ) else: ax.plt.set_array(phase.T.ravel()) self.titles.append('Phase CH{} * CH{}'.format(pair[0], pair[1])) +class CrossSpectra4Plot(Plot): + + CODE = 'cspc' + colormap = 'jet' + plot_type = 'pcolor' + zmin_coh = None + zmax_coh = None + zmin_phase = None + zmax_phase = None + + def setup(self): + + self.ncols = 4 + self.nrows = len(self.data.pairs) + self.nplots = self.nrows * 4 + self.width = 3.1 * self.ncols + self.height = 5 * self.nrows + self.ylabel = 'Range [km]' + self.showprofile = False + self.plots_adjust.update({'left': 0.08, 'right': 0.92, 'wspace': 0.5, 'hspace':0.4, 'top':0.95, 'bottom': 0.08}) + + def plot(self): + + if self.xaxis == "frequency": + x = self.data.xrange[0] + self.xlabel = "Frequency (kHz)" + elif self.xaxis == "time": + x = self.data.xrange[1] + self.xlabel = "Time (ms)" + else: + x = self.data.xrange[2] + self.xlabel = "Velocity (m/s)" + + self.titles = [] + + + y = self.data.heights + self.y = y + nspc = self.data['spc'] + #print(numpy.shape(self.data['spc'])) + spc = self.data['cspc'][0] + #print(numpy.shape(nspc)) + #exit() + #nspc[1,:,:] = numpy.flip(nspc[1,:,:],axis=0) + #print(numpy.shape(spc)) + #exit() + cspc = self.data['cspc'][1] + + #xflip=numpy.flip(x) + #print(numpy.shape(cspc)) + #exit() + + for n in range(self.nrows): + noise = self.data['noise'][:,-1] + pair = self.data.pairs[n] + #print(pair) + #exit() + ax = self.axes[4 * n] + if ax.firsttime: + self.xmax = self.xmax if self.xmax else numpy.nanmax(x) + self.xmin = self.xmin if self.xmin else -self.xmax + self.zmin = self.zmin if self.zmin else numpy.nanmin(nspc) + self.zmax = self.zmax if self.zmax else numpy.nanmax(nspc) + ax.plt = ax.pcolormesh(x , y , nspc[pair[0]].T, + vmin=self.zmin, + vmax=self.zmax, + cmap=plt.get_cmap(self.colormap) + ) + else: + #print(numpy.shape(nspc[pair[0]].T)) + #exit() + ax.plt.set_array(nspc[pair[0]].T.ravel()) + self.titles.append('CH {}: {:3.2f}dB'.format(pair[0], noise[pair[0]])) + + ax = self.axes[4 * n + 1] + + if ax.firsttime: + ax.plt = ax.pcolormesh(x , y, numpy.flip(nspc[pair[1]],axis=0).T, + vmin=self.zmin, + vmax=self.zmax, + cmap=plt.get_cmap(self.colormap) 
+ ) + else: + + ax.plt.set_array(numpy.flip(nspc[pair[1]],axis=0).T.ravel()) + self.titles.append('CH {}: {:3.2f}dB'.format(pair[1], noise[pair[1]])) + + out = cspc[n] / numpy.sqrt(spc[pair[0]] * spc[pair[1]]) + coh = numpy.abs(out) + phase = numpy.arctan2(out.imag, out.real) * 180 / numpy.pi + + ax = self.axes[4 * n + 2] + if ax.firsttime: + ax.plt = ax.pcolormesh(x, y, numpy.flip(coh,axis=0).T, + vmin=0, + vmax=1, + cmap=plt.get_cmap(self.colormap_coh) + ) + else: + ax.plt.set_array(numpy.flip(coh,axis=0).T.ravel()) + self.titles.append( + 'Coherence Ch{} * Ch{}'.format(pair[0], pair[1])) + + ax = self.axes[4 * n + 3] + if ax.firsttime: + ax.plt = ax.pcolormesh(x, y, numpy.flip(phase,axis=0).T, + vmin=-180, + vmax=180, + cmap=plt.get_cmap(self.colormap_phase) + ) + else: + ax.plt.set_array(numpy.flip(phase,axis=0).T.ravel()) + self.titles.append('Phase CH{} * CH{}'.format(pair[0], pair[1])) + + +class CrossSpectra2Plot(Plot): + + CODE = 'cspc' + colormap = 'jet' + plot_type = 'pcolor' + zmin_coh = None + zmax_coh = None + zmin_phase = None + zmax_phase = None + + def setup(self): + + self.ncols = 1 + self.nrows = len(self.data.pairs) + self.nplots = self.nrows * 1 + self.width = 3.1 * self.ncols + self.height = 5 * self.nrows + self.ylabel = 'Range [km]' + self.showprofile = False + self.plots_adjust.update({'left': 0.22, 'right': .90, 'wspace': 0.5, 'hspace':0.4, 'top':0.95, 'bottom': 0.08}) + + def plot(self): + + if self.xaxis == "frequency": + x = self.data.xrange[0] + self.xlabel = "Frequency (kHz)" + elif self.xaxis == "time": + x = self.data.xrange[1] + self.xlabel = "Time (ms)" + else: + x = self.data.xrange[2] + self.xlabel = "Velocity (m/s)" + + self.titles = [] + + + y = self.data.heights + self.y = y + #nspc = self.data['spc'] + #print(numpy.shape(self.data['spc'])) + #spc = self.data['cspc'][0] + #print(numpy.shape(spc)) + #exit() + cspc = self.data['cspc'][1] + #print(numpy.shape(cspc)) + #exit() + + for n in range(self.nrows): + noise = self.data['noise'][:,-1] + pair = self.data.pairs[n] + #print(pair) #exit() + + + + out = cspc[n]# / numpy.sqrt(spc[pair[0]] * spc[pair[1]]) + + #print(out[:,53]) + #exit() + cross = numpy.abs(out) + z = cross/self.data.nFactor + #print("here") + #print(dataOut.data_spc[0,0,0]) + #exit() + + cross = 10*numpy.log10(z) + #print(numpy.shape(cross)) + #print(cross[0,:]) + #print(self.data.nFactor) + #exit() + #phase = numpy.arctan2(out.imag, out.real) * 180 / numpy.pi + + ax = self.axes[1 * n] + if ax.firsttime: + self.xmax = self.xmax if self.xmax else numpy.nanmax(x) + self.xmin = self.xmin if self.xmin else -self.xmax + self.zmin = self.zmin if self.zmin else numpy.nanmin(cross) + self.zmax = self.zmax if self.zmax else numpy.nanmax(cross) + ax.plt = ax.pcolormesh(x, y, cross.T, + vmin=self.zmin, + vmax=self.zmax, + cmap=plt.get_cmap(self.colormap) + ) + else: + ax.plt.set_array(cross.T.ravel()) + self.titles.append( + 'Cross Spectra Power Ch{} * Ch{}'.format(pair[0], pair[1])) + + +class CrossSpectra3Plot(Plot): + + CODE = 'cspc' + colormap = 'jet' + plot_type = 'pcolor' + zmin_coh = None + zmax_coh = None + zmin_phase = None + zmax_phase = None + + def setup(self): + + self.ncols = 3 + self.nrows = len(self.data.pairs) + self.nplots = self.nrows * 3 + self.width = 3.1 * self.ncols + self.height = 5 * self.nrows + self.ylabel = 'Range [km]' + self.showprofile = False + self.plots_adjust.update({'left': 0.22, 'right': .90, 'wspace': 0.5, 'hspace':0.4, 'top':0.95, 'bottom': 0.08}) + + def plot(self): + + if self.xaxis == "frequency": + x = 
self.data.xrange[0] + self.xlabel = "Frequency (kHz)" + elif self.xaxis == "time": + x = self.data.xrange[1] + self.xlabel = "Time (ms)" + else: + x = self.data.xrange[2] + self.xlabel = "Velocity (m/s)" + + self.titles = [] + + + y = self.data.heights + self.y = y + #nspc = self.data['spc'] + #print(numpy.shape(self.data['spc'])) + #spc = self.data['cspc'][0] + #print(numpy.shape(spc)) + #exit() + cspc = self.data['cspc'][1] + #print(numpy.shape(cspc)) + #exit() + + for n in range(self.nrows): + noise = self.data['noise'][:,-1] + pair = self.data.pairs[n] + #print(pair) #exit() + + + + out = cspc[n]# / numpy.sqrt(spc[pair[0]] * spc[pair[1]]) + + #print(out[:,53]) + #exit() + cross = numpy.abs(out) + z = cross/self.data.nFactor + cross = 10*numpy.log10(z) + + out_r= out.real/self.data.nFactor + #out_r = 10*numpy.log10(out_r) + + out_i= out.imag/self.data.nFactor + #out_i = 10*numpy.log10(out_i) + #print(numpy.shape(cross)) + #print(cross[0,:]) + #print(self.data.nFactor) + #exit() + #phase = numpy.arctan2(out.imag, out.real) * 180 / numpy.pi + + ax = self.axes[3 * n] + if ax.firsttime: + self.xmax = self.xmax if self.xmax else numpy.nanmax(x) + self.xmin = self.xmin if self.xmin else -self.xmax + self.zmin = self.zmin if self.zmin else numpy.nanmin(cross) + self.zmax = self.zmax if self.zmax else numpy.nanmax(cross) + ax.plt = ax.pcolormesh(x, y, cross.T, + vmin=self.zmin, + vmax=self.zmax, + cmap=plt.get_cmap(self.colormap) + ) + else: + ax.plt.set_array(cross.T.ravel()) + self.titles.append( + 'Cross Spectra Power Ch{} * Ch{}'.format(pair[0], pair[1])) + + ax = self.axes[3 * n + 1] + if ax.firsttime: + self.xmax = self.xmax if self.xmax else numpy.nanmax(x) + self.xmin = self.xmin if self.xmin else -self.xmax + self.zmin = self.zmin if self.zmin else numpy.nanmin(cross) + self.zmax = self.zmax if self.zmax else numpy.nanmax(cross) + ax.plt = ax.pcolormesh(x, y, out_r.T, + vmin=-1.e6, + vmax=0, + cmap=plt.get_cmap(self.colormap) + ) + else: + ax.plt.set_array(out_r.T.ravel()) + self.titles.append( + 'Cross Spectra Real Ch{} * Ch{}'.format(pair[0], pair[1])) + + ax = self.axes[3 * n + 2] + + + if ax.firsttime: + self.xmax = self.xmax if self.xmax else numpy.nanmax(x) + self.xmin = self.xmin if self.xmin else -self.xmax + self.zmin = self.zmin if self.zmin else numpy.nanmin(cross) + self.zmax = self.zmax if self.zmax else numpy.nanmax(cross) + ax.plt = ax.pcolormesh(x, y, out_i.T, + vmin=-1.e6, + vmax=1.e6, + cmap=plt.get_cmap(self.colormap) + ) + else: + ax.plt.set_array(out_i.T.ravel()) + self.titles.append( + 'Cross Spectra Imag Ch{} * Ch{}'.format(pair[0], pair[1])) + class RTIPlot(Plot): ''' Plot for RTI data @@ -202,7 +659,7 @@ class RTIPlot(Plot): self.ylabel = 'Range [km]' self.xlabel = 'Time' self.cb_label = 'dB' - self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.08, 'right':0.95}) + self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.1, 'right':0.95}) self.titles = ['{} Channel {}'.format( self.CODE.upper(), x) for x in range(self.nrows)] @@ -210,6 +667,78 @@ class RTIPlot(Plot): self.x = self.data.times self.y = self.data.heights self.z = self.data[self.CODE] + + self.z = numpy.ma.masked_invalid(self.z) + + if self.decimation is None: + x, y, z = self.fill_gaps(self.x, self.y, self.z) + else: + x, y, z = self.fill_gaps(*self.decimate()) + + for n, ax in enumerate(self.axes): + self.zmin = self.zmin if self.zmin else numpy.min(self.z) + self.zmax = self.zmax if self.zmax else numpy.max(self.z) + if ax.firsttime: + ax.plt = ax.pcolormesh(x, y, z[n].T, + 
vmin=self.zmin, + vmax=self.zmax, + cmap=plt.get_cmap(self.colormap) + ) + if self.showprofile: + ax.plot_profile = self.pf_axes[n].plot( + self.data['rti'][n][-1], self.y)[0] + ax.plot_noise = self.pf_axes[n].plot(numpy.repeat(self.data['noise'][n][-1], len(self.y)), self.y, + color="k", linestyle="dashed", lw=1)[0] + else: + ax.collections.remove(ax.collections[0]) + ax.plt = ax.pcolormesh(x, y, z[n].T, + vmin=self.zmin, + vmax=self.zmax, + cmap=plt.get_cmap(self.colormap) + ) + if self.showprofile: + ax.plot_profile.set_data(self.data['rti'][n][-1], self.y) + ax.plot_noise.set_data(numpy.repeat( + self.data['noise'][n][-1], len(self.y)), self.y) + + +class SpectrogramPlot(Plot): + ''' + Plot for Spectrogram data + ''' + + CODE = 'spectrogram' + colormap = 'binary' + plot_type = 'pcolorbuffer' + + def setup(self): + self.xaxis = 'time' + self.ncols = 1 + self.nrows = len(self.data.channels) + self.nplots = len(self.data.channels) + #print(self.dataOut.heightList) + #self.ylabel = 'Range [km]' + self.xlabel = 'Time' + self.cb_label = 'dB' + self.plots_adjust.update({'hspace':1.2, 'left': 0.1, 'bottom': 0.12, 'right':0.95}) + self.titles = ['{} Channel {} \n H = {} km ({} - {})'.format( + self.CODE.upper(), x, self.data.heightList[self.data.hei], self.data.heightList[self.data.hei],self.data.heightList[self.data.hei]+(self.data.DH*self.data.nProfiles)) for x in range(self.nrows)] + + def plot(self): + self.x = self.data.times + #self.y = self.data.heights + self.z = self.data[self.CODE] + self.y = self.data.xrange[0] + #import time + #print(time.ctime(self.x)) + + ''' + print(numpy.shape(self.x)) + print(numpy.shape(self.y)) + print(numpy.shape(self.z)) + ''' + self.ylabel = "Frequency (kHz)" + self.z = numpy.ma.masked_invalid(self.z) if self.decimation is None: @@ -280,7 +809,7 @@ class PhasePlot(CoherencePlot): class NoisePlot(Plot): ''' - Plot for noise + Plot for noise ''' CODE = 'noise' @@ -296,6 +825,7 @@ class NoisePlot(Plot): self.xlabel = 'Time' self.titles = ['Noise'] self.colorbar = False + self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.17, 'right':0.95}) def plot(self): @@ -315,7 +845,7 @@ class NoisePlot(Plot): self.axes[0].lines[ch].set_data(x, y) self.ymin = numpy.nanmin(Y) - 5 - self.ymax = numpy.nanmax(Y) + 5 + self.ymax = numpy.nanmax(Y) + 10 class PowerProfilePlot(Plot): @@ -342,10 +872,10 @@ class PowerProfilePlot(Plot): self.y = y x = self.data['spcprofile'] - + if self.xmin is None: self.xmin = numpy.nanmin(x)*0.9 if self.xmax is None: self.xmax = numpy.nanmax(x)*1.1 - + if self.axes[0].firsttime: for ch in self.data.channels: self.axes[0].plot(x[ch], y, lw=1, label='Ch{}'.format(ch)) @@ -498,7 +1028,7 @@ class BeaconPhase(Plot): server=None, folder=None, username=None, password=None, ftp_wei=0, exp_code=0, sub_exp_code=0, plot_pos=0): - if dataOut.flagNoData: + if dataOut.flagNoData: return dataOut if not isTimeInHourRange(dataOut.datatime, xmin, xmax): @@ -638,4 +1168,4 @@ class BeaconPhase(Plot): thisDatetime=thisDatetime, update_figfile=update_figfile) - return dataOut \ No newline at end of file + return dataOut diff --git a/schainpy/model/graphics/jroplot_voltage_lags.py b/schainpy/model/graphics/jroplot_voltage_lags.py new file mode 100644 index 0000000..d831bb4 --- /dev/null +++ b/schainpy/model/graphics/jroplot_voltage_lags.py @@ -0,0 +1,1156 @@ + +import os +import datetime +import numpy +from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator #YONG + +from .jroplot_spectra import RTIPlot, NoisePlot + +from 
schainpy.utils import log +from .plotting_codes import * + +from schainpy.model.graphics.jroplot_base import Plot, plt + +import matplotlib.pyplot as plt +import matplotlib.colors as colors + +import time +import math + + +from matplotlib.ticker import MultipleLocator + + + +class RTIDPPlot(RTIPlot): + + ''' + Plot for RTI Double Pulse Experiment + ''' + + CODE = 'RTIDP' + colormap = 'jro' + plot_name = 'RTI' + + #cb_label = 'Ne Electron Density (1/cm3)' + + def setup(self): + self.xaxis = 'time' + self.ncols = 1 + self.nrows = 3 + self.nplots = self.nrows + #self.height=10 + if self.showSNR: + self.nrows += 1 + self.nplots += 1 + + self.ylabel = 'Height [km]' + self.xlabel = 'Time (LT)' + + self.cb_label = 'Intensity (dB)' + + + #self.cb_label = cb_label + + self.titles = ['{} Channel {}'.format( + self.plot_name.upper(), '0x1'),'{} Channel {}'.format( + self.plot_name.upper(), '0'),'{} Channel {}'.format( + self.plot_name.upper(), '1')] + + + def plot(self): + + self.data.normalize_heights() + self.x = self.data.times + self.y = self.data.heights[0:self.data.NDP] + + if self.showSNR: + self.z = numpy.concatenate( + (self.data[self.CODE], self.data['snr']) + ) + else: + + self.z = self.data[self.CODE] + #print(numpy.max(self.z[0,0:])) + + self.z = numpy.ma.masked_invalid(self.z) + + if self.decimation is None: + x, y, z = self.fill_gaps(self.x, self.y, self.z) + else: + x, y, z = self.fill_gaps(*self.decimate()) + + for n, ax in enumerate(self.axes): + + + self.zmax = self.zmax if self.zmax is not None else numpy.max( + self.z[1][0,12:40]) + self.zmin = self.zmin if self.zmin is not None else numpy.min( + self.z[1][0,12:40]) + + + + if ax.firsttime: + + if self.zlimits is not None: + self.zmin, self.zmax = self.zlimits[n] + + + ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n], + vmin=self.zmin, + vmax=self.zmax, + cmap=self.cmaps[n] + ) + #plt.tight_layout() + else: + if self.zlimits is not None: + self.zmin, self.zmax = self.zlimits[n] + ax.collections.remove(ax.collections[0]) + ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n], + vmin=self.zmin, + vmax=self.zmax, + cmap=self.cmaps[n] + ) + #plt.tight_layout() + + +class RTILPPlot(RTIPlot): + + ''' + Plot for RTI Long Pulse + ''' + + CODE = 'RTILP' + colormap = 'jro' + plot_name = 'RTI LP' + + #cb_label = 'Ne Electron Density (1/cm3)' + + def setup(self): + self.xaxis = 'time' + self.ncols = 1 + self.nrows = 4 + self.nplots = self.nrows + if self.showSNR: + self.nrows += 1 + self.nplots += 1 + + self.ylabel = 'Height [km]' + self.xlabel = 'Time (LT)' + + self.cb_label = 'Intensity (dB)' + + + + #self.cb_label = cb_label + + self.titles = ['{} Channel {}'.format( + self.plot_name.upper(), '0'),'{} Channel {}'.format( + self.plot_name.upper(), '1'),'{} Channel {}'.format( + self.plot_name.upper(), '2'),'{} Channel {}'.format( + self.plot_name.upper(), '3')] + + + def plot(self): + + self.data.normalize_heights() + self.x = self.data.times + self.y = self.data.heights[0:self.data.NRANGE] + + if self.showSNR: + self.z = numpy.concatenate( + (self.data[self.CODE], self.data['snr']) + ) + else: + + self.z = self.data[self.CODE] + #print(numpy.max(self.z[0,0:])) + + self.z = numpy.ma.masked_invalid(self.z) + + if self.decimation is None: + x, y, z = self.fill_gaps(self.x, self.y, self.z) + else: + x, y, z = self.fill_gaps(*self.decimate()) + + for n, ax in enumerate(self.axes): + + + self.zmax = self.zmax if self.zmax is not None else numpy.max( + self.z[1][0,12:40]) + self.zmin = self.zmin if self.zmin is not None else numpy.min( + 
self.z[1][0,12:40]) + + if ax.firsttime: + + if self.zlimits is not None: + self.zmin, self.zmax = self.zlimits[n] + + + ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n], + vmin=self.zmin, + vmax=self.zmax, + cmap=self.cmaps[n] + ) + #plt.tight_layout() + else: + if self.zlimits is not None: + self.zmin, self.zmax = self.zlimits[n] + ax.collections.remove(ax.collections[0]) + ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n], + vmin=self.zmin, + vmax=self.zmax, + cmap=self.cmaps[n] + ) + #plt.tight_layout() + + +class DenRTIPlot(RTIPlot): + + ''' + Plot for Den + ''' + + CODE = 'denrti' + colormap = 'jro' + plot_name = 'Electron Density' + + #cb_label = 'Ne Electron Density (1/cm3)' + + def setup(self): + self.xaxis = 'time' + self.ncols = 1 + self.nrows = self.data.shape(self.CODE)[0] + self.nplots = self.nrows + if self.showSNR: + self.nrows += 1 + self.nplots += 1 + + self.ylabel = 'Height [km]' + self.xlabel = 'Time (LT)' + + self.plots_adjust.update({'wspace': 0.8, 'hspace':0.2, 'left': 0.2, 'right': 0.9, 'bottom': 0.18}) + + if self.CODE == 'denrti' or self.CODE=='denrtiLP': + self.cb_label = r'$\mathrm{N_e}$ Electron Density ($\mathrm{1/cm^3}$)' + + #self.cb_label = cb_label + if not self.titles: + self.titles = self.data.parameters \ + if self.data.parameters else ['{}'.format(self.plot_name)] + if self.showSNR: + self.titles.append('SNR') + + def plot(self): + + self.data.normalize_heights() + self.x = self.data.times + self.y = self.data.heights + + + + if self.showSNR: + self.z = numpy.concatenate( + (self.data[self.CODE], self.data['snr']) + ) + else: + self.z = self.data[self.CODE] + + self.z = numpy.ma.masked_invalid(self.z) + + if self.decimation is None: + x, y, z = self.fill_gaps(self.x, self.y, self.z) + else: + x, y, z = self.fill_gaps(*self.decimate()) + + for n, ax in enumerate(self.axes): + + self.zmax = self.zmax if self.zmax is not None else numpy.max( + self.z[n]) + self.zmin = self.zmin if self.zmin is not None else numpy.min( + self.z[n]) + + if ax.firsttime: + + if self.zlimits is not None: + self.zmin, self.zmax = self.zlimits[n] + if numpy.log10(self.zmin)<0: + self.zmin=1 + ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n], + vmin=self.zmin, + vmax=self.zmax, + cmap=self.cmaps[n], + norm=colors.LogNorm() + ) + #plt.tight_layout() + + else: + if self.zlimits is not None: + self.zmin, self.zmax = self.zlimits[n] + ax.collections.remove(ax.collections[0]) + ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n], + vmin=self.zmin, + vmax=self.zmax, + cmap=self.cmaps[n], + norm=colors.LogNorm() + ) + #plt.tight_layout() + + + +class DenRTILPPlot(DenRTIPlot): + + ''' + Plot for Electron Temperature + ''' + + CODE = 'denrtiLP' + colormap = 'jro' + plot_name = 'Electron Density' + + +class ETempRTIPlot(RTIPlot): + + ''' + Plot for Electron Temperature + ''' + + CODE = 'ETemp' + colormap = 'jet' + plot_name = 'Electron Temperature' + + #cb_label = 'Ne Electron Density (1/cm3)' + + def setup(self): + self.xaxis = 'time' + self.ncols = 1 + self.nrows = self.data.shape(self.CODE)[0] + self.nplots = self.nrows + if self.showSNR: + self.nrows += 1 + self.nplots += 1 + + self.ylabel = 'Height [km]' + self.xlabel = 'Time (LT)' + self.plots_adjust.update({'wspace': 0.8, 'hspace':0.2, 'left': 0.2, 'right': 0.9, 'bottom': 0.18}) + if self.CODE == 'ETemp' or self.CODE == 'ETempLP': + self.cb_label = 'Electron Temperature (K)' + if self.CODE == 'ITemp' or self.CODE == 'ITempLP': + self.cb_label = 'Ion Temperature (K)' + + + if not self.titles: + self.titles = 
self.data.parameters \ + if self.data.parameters else ['{}'.format(self.plot_name)] + if self.showSNR: + self.titles.append('SNR') + + def plot(self): + + self.data.normalize_heights() + self.x = self.data.times + self.y = self.data.heights + + if self.showSNR: + self.z = numpy.concatenate( + (self.data[self.CODE], self.data['snr']) + ) + else: + self.z = self.data[self.CODE] + + self.z = numpy.ma.masked_invalid(self.z) + + if self.decimation is None: + x, y, z = self.fill_gaps(self.x, self.y, self.z) + else: + x, y, z = self.fill_gaps(*self.decimate()) + + for n, ax in enumerate(self.axes): + + self.zmax = self.zmax if self.zmax is not None else numpy.max( + self.z[n]) + self.zmin = self.zmin if self.zmin is not None else numpy.min( + self.z[n]) + + if ax.firsttime: + + if self.zlimits is not None: + self.zmin, self.zmax = self.zlimits[n] + + ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n], + vmin=self.zmin, + vmax=self.zmax, + cmap=self.cmaps[n] + ) + #plt.tight_layout() + + else: + if self.zlimits is not None: + self.zmin, self.zmax = self.zlimits[n] + ax.collections.remove(ax.collections[0]) + ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n], + vmin=self.zmin, + vmax=self.zmax, + cmap=self.cmaps[n] + ) + #plt.tight_layout() + + + +class ITempRTIPlot(ETempRTIPlot): + + ''' + Plot for Ion Temperature + ''' + + CODE = 'ITemp' + colormap = 'jet' + plot_name = 'Ion Temperature' + + +class ElectronTempLPPlot(ETempRTIPlot): + + ''' + Plot for Electron Temperature LP + ''' + + CODE = 'ETempLP' + colormap = 'jet' + plot_name = 'Electron Temperature' + + +class IonTempLPPlot(ETempRTIPlot): + + ''' + Plot for Ion Temperature LP + ''' + + CODE = 'ITempLP' + colormap = 'jet' + plot_name = 'Ion Temperature' + + +class HFracRTIPlot(ETempRTIPlot): + + ''' + Plot for H+ LP + ''' + + CODE = 'HFracLP' + colormap = 'jet' + plot_name = 'H+ Frac' + + +class HeFracRTIPlot(ETempRTIPlot): + + ''' + Plot for He+ LP + ''' + + CODE = 'HeFracLP' + colormap = 'jet' + plot_name = 'He+ Frac' + + +class TempsDPPlot(Plot): + ''' + Plot for Electron - Ion Temperatures + ''' + + CODE = 'tempsDP' + plot_name = 'Temperatures' + plot_type = 'scatterbuffer' + + + def setup(self): + + self.ncols = 1 + self.nrows = 1 + self.nplots = 1 + self.ylabel = 'Range [km]' + self.xlabel = 'Temperature (K)' + self.width = 3.5 + self.height = 5.5 + self.colorbar = False + self.plots_adjust.update({'wspace': 0.8, 'hspace':0.2, 'left': 0.2, 'right': 0.9, 'bottom': 0.18}) + if not self.titles: + self.titles = self.data.parameters \ + if self.data.parameters else ['{}'.format(self.CODE.upper())] + + def plot(self): + + self.x = self.data['tempsDP'][:,-1] + self.y = self.data.heights[0:self.data.NSHTS] + + self.xmin = -100 + self.xmax = 5000 + ax = self.axes[0] + + if ax.firsttime: + + ax.errorbar(self.x, self.y, xerr=self.data.ete2, fmt='r^',elinewidth=1.0,color='b',linewidth=2.0, label='Te') + ax.errorbar(self.data.ti2, self.y, fmt='k^', xerr=self.data.eti2,elinewidth=1.0,color='b',linewidth=2.0, label='Ti') + plt.legend(loc='lower right') + self.ystep_given = 50 + ax.yaxis.set_minor_locator(MultipleLocator(15)) + ax.grid(which='minor') + #plt.tight_layout() + + + else: + self.clear_figures() + ax.errorbar(self.x, self.y, xerr=self.data.ete2, fmt='r^',elinewidth=1.0,color='b',linewidth=2.0, label='Te') + ax.errorbar(self.data.ti2, self.y, fmt='k^', xerr=self.data.eti2,elinewidth=1.0,color='b',linewidth=2.0, label='Ti') + plt.legend(loc='lower right') + ax.yaxis.set_minor_locator(MultipleLocator(15)) + #plt.tight_layout() + + 
+class TempsHPPlot(Plot): + ''' + Plot for Temperatures Hybrid Experiment + ''' + + CODE = 'temps_LP' + plot_name = 'Temperatures' + plot_type = 'scatterbuffer' + + + def setup(self): + + self.ncols = 1 + self.nrows = 1 + self.nplots = 1 + self.ylabel = 'Range [km]' + self.xlabel = 'Temperature (K)' + self.width = 3.5 + self.height = 6.5 + self.colorbar = False + if not self.titles: + self.titles = self.data.parameters \ + if self.data.parameters else ['{}'.format(self.CODE.upper())] + + def plot(self): + + self.x = self.data['temps_LP'][:,-1] + self.y = self.data.heights[0:self.data.NACF] + self.xmin = -100 + self.xmax = 4500 + ax = self.axes[0] + + if ax.firsttime: + + ax.errorbar(self.x, self.y, xerr=self.data.ete, fmt='r^',elinewidth=1.0,color='b',linewidth=2.0, label='Te') + ax.errorbar(self.data.ti, self.y, fmt='k^', xerr=self.data.eti,elinewidth=1.0,color='b',linewidth=2.0, label='Ti') + plt.legend(loc='lower right') + self.ystep_given = 200 + ax.yaxis.set_minor_locator(MultipleLocator(15)) + ax.grid(which='minor') + #plt.tight_layout() + + + else: + self.clear_figures() + ax.errorbar(self.x, self.y, xerr=self.data.ete, fmt='r^',elinewidth=1.0,color='b',linewidth=2.0, label='Te') + ax.errorbar(self.data.ti, self.y, fmt='k^', xerr=self.data.eti,elinewidth=1.0,color='b',linewidth=2.0, label='Ti') + plt.legend(loc='lower right') + ax.yaxis.set_minor_locator(MultipleLocator(15)) + #plt.tight_layout() + + +class FracsHPPlot(Plot): + ''' + Plot for Composition LP + ''' + + CODE = 'fracs_LP' + plot_name = 'Composition' + plot_type = 'scatterbuffer' + + + def setup(self): + + self.ncols = 1 + self.nrows = 1 + self.nplots = 1 + self.ylabel = 'Range [km]' + self.xlabel = 'Frac' + self.width = 3.5 + self.height = 6.5 + self.colorbar = False + if not self.titles: + self.titles = self.data.parameters \ + if self.data.parameters else ['{}'.format(self.CODE.upper())] + + def plot(self): + + self.x = self.data['fracs_LP'][:,-1] + self.y = self.data.heights[0:self.data.NACF] + + self.xmin = 0 + self.xmax = 1 + ax = self.axes[0] + + if ax.firsttime: + + ax.errorbar(self.x, self.y[self.data.cut:], xerr=self.data.eph, fmt='r^',elinewidth=1.0,color='b',linewidth=2.0, label='H+') + ax.errorbar(self.data.phe, self.y[self.data.cut:], fmt='k^', xerr=self.data.ephe,elinewidth=1.0,color='b',linewidth=2.0, label='He+') + plt.legend(loc='lower right') + self.xstep_given = 0.2 + self.ystep_given = 200 + ax.yaxis.set_minor_locator(MultipleLocator(15)) + ax.grid(which='minor') + #plt.tight_layout() + + + else: + self.clear_figures() + ax.errorbar(self.x, self.y[self.data.cut:], xerr=self.data.eph, fmt='r^',elinewidth=1.0,color='b',linewidth=2.0, label='H+') + ax.errorbar(self.data.phe, self.y[self.data.cut:], fmt='k^', xerr=self.data.ephe,elinewidth=1.0,color='b',linewidth=2.0, label='He+') + plt.legend(loc='lower right') + ax.yaxis.set_minor_locator(MultipleLocator(15)) + #plt.tight_layout() + + + +class EDensityPlot(Plot): + ''' + Plot for electron density + ''' + + CODE = 'den' + plot_name = 'Electron Density' + plot_type = 'scatterbuffer' + + + def setup(self): + + self.ncols = 1 + self.nrows = 1 + self.nplots = 1 + self.ylabel = 'Range [km]' + self.xlabel = r'$\mathrm{N_e}$ Electron Density ($\mathrm{1/cm^3}$)' + self.width = 4 + self.height = 6.5 + self.colorbar = False + self.plots_adjust.update({'wspace': 0.8, 'hspace':0.2, 'left': 0.2, 'right': 0.9, 'bottom': 0.18}) + if not self.titles: + self.titles = self.data.parameters \ + if self.data.parameters else ['{}'.format(self.CODE.upper())] + + def 
plot(self): + + + self.x = self.data[self.CODE] + self.y = self.data.heights + self.xmin = 1000 + self.xmax = 10000000 + ax = self.axes[0] + + if ax.firsttime: + self.autoxticks=False + #if self.CODE=='den': + ax.errorbar(self.data.dphi, self.y[:self.data.NSHTS], xerr=1, fmt='h-',elinewidth=1.0,color='g',linewidth=1.0, label='Faraday Profile',markersize=2) + #ax.errorbar(self.data.dphi, self.y[:self.data.NSHTS], xerr=self.data.sdn1, fmt='h-',elinewidth=1.0,color='g',linewidth=1.0, label='Faraday Profile',markersize=2) + + ax.errorbar(self.x[:,-1], self.y[:self.data.NSHTS], fmt='k^-', xerr=self.data.sdp2,elinewidth=1.0,color='b',linewidth=1.0, label='Power Profile',markersize=2) + #else: + #ax.errorbar(self.data.dphi[:self.data.cut], self.y[:self.data.cut], xerr=1, fmt='h-',elinewidth=1.0,color='g',linewidth=1.0, label='Faraday Profile',markersize=2) + #ax.errorbar(self.x[:self.data.cut,-1], self.y[:self.data.cut], fmt='k^-', xerr=self.data.sdp2[:self.data.cut],elinewidth=1.0,color='b',linewidth=1.0, label='Power Profile',markersize=2) + + if self.CODE=='denLP': + ax.errorbar(self.data.ne[self.data.cut:], self.y[self.data.cut:], xerr=self.data.ene[self.data.cut:], fmt='r^-',elinewidth=1.0,color='r',linewidth=1.0, label='LP Profile',markersize=2) + + plt.legend(loc='upper right') + ax.set_xscale("log", nonposx='clip') + grid_y_ticks=numpy.arange(numpy.nanmin(self.y),numpy.nanmax(self.y),50) + self.ystep_given=100 + if self.CODE=='denLP': + self.ystep_given=200 + ax.set_yticks(grid_y_ticks,minor=True) + ax.grid(which='minor') + #plt.tight_layout() + + + + else: + + self.clear_figures() + #if self.CODE=='den': + ax.errorbar(self.data.dphi, self.y[:self.data.NSHTS], xerr=1, fmt='h-',elinewidth=1.0,color='g',linewidth=1.0, label='Faraday Profile',markersize=2) + #ax.errorbar(self.data.dphi, self.y[:self.data.NSHTS], xerr=self.data.sdn1, fmt='h-',elinewidth=1.0,color='g',linewidth=1.0, label='Faraday Profile',markersize=2) + + ax.errorbar(self.x[:,-1], self.y[:self.data.NSHTS], fmt='k^-', xerr=self.data.sdp2,elinewidth=1.0,color='b',linewidth=1.0, label='Power Profile',markersize=2) + ax.errorbar(self.x[:,-2], self.y[:self.data.NSHTS], elinewidth=1.0,color='r',linewidth=0.5,linestyle="dashed") + #else: + #ax.errorbar(self.data.dphi[:self.data.cut], self.y[:self.data.cut], xerr=1, fmt='h-',elinewidth=1.0,color='g',linewidth=1.0, label='Faraday Profile',markersize=2) + #ax.errorbar(self.x[:self.data.cut,-1], self.y[:self.data.cut], fmt='k^-', xerr=self.data.sdp2[:self.data.cut],elinewidth=1.0,color='b',linewidth=1.0, label='Power Profile',markersize=2) + #ax.errorbar(self.x[:self.data.cut,-2], self.y[:self.data.cut], elinewidth=1.0,color='r',linewidth=0.5,linestyle="dashed") + + if self.CODE=='denLP': + ax.errorbar(self.data.ne[self.data.cut:], self.y[self.data.cut:], fmt='r^-', xerr=self.data.ene[self.data.cut:],elinewidth=1.0,color='r',linewidth=1.0, label='LP Profile',markersize=2) + + ax.set_xscale("log", nonposx='clip') + grid_y_ticks=numpy.arange(numpy.nanmin(self.y),numpy.nanmax(self.y),50) + ax.set_yticks(grid_y_ticks,minor=True) + ax.grid(which='minor') + plt.legend(loc='upper right') + #plt.tight_layout() + +class FaradayAnglePlot(Plot): + ''' + Plot for electron density + ''' + + CODE = 'FaradayAngle' + plot_name = 'Faraday Angle' + plot_type = 'scatterbuffer' + + + def setup(self): + + self.ncols = 1 + self.nrows = 1 + self.nplots = 1 + self.ylabel = 'Range [km]' + self.xlabel = 'Faraday Angle (º)' + self.width = 4 + self.height = 6.5 + self.colorbar = False + if not self.titles: + 
self.titles = self.data.parameters \ + if self.data.parameters else ['{}'.format(self.CODE.upper())] + + def plot(self): + + + self.x = self.data[self.CODE] + self.y = self.data.heights + self.xmin = -180 + self.xmax = 180 + ax = self.axes[0] + + if ax.firsttime: + self.autoxticks=False + #if self.CODE=='den': + ax.plot(self.x, self.y,marker='o',color='g',linewidth=1.0,markersize=2) + + grid_y_ticks=numpy.arange(numpy.nanmin(self.y),numpy.nanmax(self.y),50) + self.ystep_given=100 + if self.CODE=='denLP': + self.ystep_given=200 + ax.set_yticks(grid_y_ticks,minor=True) + ax.grid(which='minor') + #plt.tight_layout() + else: + + self.clear_figures() + #if self.CODE=='den': + #print(numpy.shape(self.x)) + ax.plot(self.x[:,-1], self.y, marker='o',color='g',linewidth=1.0, markersize=2) + + grid_y_ticks=numpy.arange(numpy.nanmin(self.y),numpy.nanmax(self.y),50) + ax.set_yticks(grid_y_ticks,minor=True) + ax.grid(which='minor') + +class EDensityHPPlot(EDensityPlot): + + ''' + Plot for Electron Density Hybrid Experiment + ''' + + CODE = 'denLP' + plot_name = 'Electron Density' + plot_type = 'scatterbuffer' + + +class ACFsPlot(Plot): + ''' + Plot for ACFs Double Pulse Experiment + ''' + + CODE = 'acfs' + plot_name = 'ACF' + plot_type = 'scatterbuffer' + + + def setup(self): + #self.xaxis = 'time' + self.ncols = 1 + self.nrows = 1 + self.nplots = 1 + self.ylabel = 'Range [km]' + self.xlabel = 'lags (ms)' + self.width = 3.5 + self.height = 6 + self.colorbar = False + self.plots_adjust.update({'wspace': 0.8, 'hspace':0.2, 'left': 0.2, 'right': 0.9, 'bottom': 0.18}) + if not self.titles: + self.titles = self.data.parameters \ + if self.data.parameters else ['{}'.format(self.CODE.upper())] + + def plot(self): + + self.x = self.data.lags_to_plot + self.y = self.data['acfs'][:,-1] + + + self.xmin = 0.0 + self.xmax = 2.0 + + ax = self.axes[0] + + if ax.firsttime: + + for i in range(self.data.NSHTS): + x_aux = numpy.isfinite(self.x[i,:]) + y_aux = numpy.isfinite(self.y[i,:]) + yerr_aux = numpy.isfinite(self.data.acfs_error_to_plot[i,:]) + x_igcej_aux = numpy.isfinite(self.data.x_igcej_to_plot[i,:]) + y_igcej_aux = numpy.isfinite(self.data.y_igcej_to_plot[i,:]) + x_ibad_aux = numpy.isfinite(self.data.x_ibad_to_plot[i,:]) + y_ibad_aux = numpy.isfinite(self.data.y_ibad_to_plot[i,:]) + if self.x[i,:][~numpy.isnan(self.x[i,:])].shape[0]>2: + ax.errorbar(self.x[i,x_aux], self.y[i,y_aux], yerr=self.data.acfs_error_to_plot[i,x_aux],color='b',marker='o',linewidth=1.0,markersize=2) + ax.plot(self.data.x_igcej_to_plot[i,x_igcej_aux],self.data.y_igcej_to_plot[i,y_igcej_aux],'x',color='red',markersize=2) + ax.plot(self.data.x_ibad_to_plot[i,x_ibad_aux],self.data.y_ibad_to_plot[i,y_ibad_aux],'X',color='red',markersize=2) + + self.xstep_given = (self.xmax-self.xmin)/(self.data.DPL-1) + self.ystep_given = 50 + ax.yaxis.set_minor_locator(MultipleLocator(15)) + ax.grid(which='minor') + + + + else: + self.clear_figures() + + for i in range(self.data.NSHTS): + x_aux = numpy.isfinite(self.x[i,:]) + y_aux = numpy.isfinite(self.y[i,:]) + yerr_aux = numpy.isfinite(self.data.acfs_error_to_plot[i,:]) + x_igcej_aux = numpy.isfinite(self.data.x_igcej_to_plot[i,:]) + y_igcej_aux = numpy.isfinite(self.data.y_igcej_to_plot[i,:]) + x_ibad_aux = numpy.isfinite(self.data.x_ibad_to_plot[i,:]) + y_ibad_aux = numpy.isfinite(self.data.y_ibad_to_plot[i,:]) + if self.x[i,:][~numpy.isnan(self.x[i,:])].shape[0]>2: + ax.errorbar(self.x[i,x_aux], self.y[i,y_aux], yerr=self.data.acfs_error_to_plot[i,x_aux],linewidth=1.0,markersize=2,color='b',marker='o') 
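                # Editor's note (descriptive comment, not part of this patch): each
                # height i is drawn independently.  The numpy.isfinite masks
                # (x_aux, y_aux, ...) strip NaN lag entries so errorbar()/plot() only
                # receive finite points, and any height with fewer than 3 finite lag
                # values is skipped by the shape[0] > 2 guard.  The red 'x'/'X'
                # markers below overlay the *_igcej_* and *_ibad_* arrays, which
                # appear to carry lags flagged/rejected upstream by the processing
                # units rather than anything computed in this plot class.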
+ ax.plot(self.data.x_igcej_to_plot[i,x_igcej_aux],self.data.y_igcej_to_plot[i,y_igcej_aux],'x',color='red',markersize=2) + ax.plot(self.data.x_ibad_to_plot[i,x_ibad_aux],self.data.y_ibad_to_plot[i,y_ibad_aux],'X',color='red',markersize=2) + ax.yaxis.set_minor_locator(MultipleLocator(15)) + + + + +class ACFsLPPlot(Plot): + ''' + Plot for ACFs Double Pulse Experiment + ''' + + CODE = 'acfs_LP' + plot_name = 'ACF' + plot_type = 'scatterbuffer' + + + def setup(self): + #self.xaxis = 'time' + self.ncols = 1 + self.nrows = 1 + self.nplots = 1 + self.ylabel = 'Range [km]' + self.xlabel = 'lags (ms)' + self.width = 3.5 + self.height = 7 + self.colorbar = False + if not self.titles: + self.titles = self.data.parameters \ + if self.data.parameters else ['{}'.format(self.CODE.upper())] + + + + def plot(self): + + self.x = self.data.lags_LP_to_plot + self.y = self.data['acfs_LP'][:,-1] + + self.xmin = 0.0 + self.xmax = 1.5 + + ax = self.axes[0] + + if ax.firsttime: + + for i in range(self.data.NACF): + x_aux = numpy.isfinite(self.x[i,:]) + y_aux = numpy.isfinite(self.y[i,:]) + yerr_aux = numpy.isfinite(self.data.errors[i,:]) + + if self.x[i,:][~numpy.isnan(self.x[i,:])].shape[0]>2: + ax.errorbar(self.x[i,x_aux], self.y[i,y_aux], yerr=self.data.errors[i,x_aux],color='b',linewidth=1.0,markersize=2,ecolor='r') + + #self.xstep_given = (self.xmax-self.xmin)/(self.data.NLAG-1) + self.xstep_given=0.3 + self.ystep_given = 200 + ax.yaxis.set_minor_locator(MultipleLocator(15)) + ax.grid(which='minor') + + else: + self.clear_figures() + + for i in range(self.data.NACF): + x_aux = numpy.isfinite(self.x[i,:]) + y_aux = numpy.isfinite(self.y[i,:]) + yerr_aux = numpy.isfinite(self.data.errors[i,:]) + + if self.x[i,:][~numpy.isnan(self.x[i,:])].shape[0]>2: + ax.errorbar(self.x[i,x_aux], self.y[i,y_aux], yerr=self.data.errors[i,x_aux],color='b',linewidth=1.0,markersize=2,ecolor='r') + + ax.yaxis.set_minor_locator(MultipleLocator(15)) + + +class CrossProductsPlot(Plot): + ''' + Plot for cross products + ''' + + CODE = 'crossprod' + plot_name = 'Cross Products' + plot_type = 'scatterbuffer' + + + def setup(self): + + self.ncols = 3 + self.nrows = 1 + self.nplots = 3 + self.ylabel = 'Range [km]' + + self.width = 3.5*self.nplots + self.height = 5.5 + self.colorbar = False + self.titles = [] + + def plot(self): + + self.x = self.data['crossprod'][:,-1,:,:,:,:] + + + + + self.y = self.data.heights[0:self.data.NDP] + + + + for n, ax in enumerate(self.axes): + + self.xmin=numpy.min(numpy.concatenate((self.x[n][0,20:30,0,0],self.x[n][1,20:30,0,0],self.x[n][2,20:30,0,0],self.x[n][3,20:30,0,0]))) + self.xmax=numpy.max(numpy.concatenate((self.x[n][0,20:30,0,0],self.x[n][1,20:30,0,0],self.x[n][2,20:30,0,0],self.x[n][3,20:30,0,0]))) + + + if ax.firsttime: + + self.autoxticks=False + if n==0: + label1='kax' + label2='kay' + label3='kbx' + label4='kby' + self.xlimits=[(self.xmin,self.xmax)] + elif n==1: + label1='kax2' + label2='kay2' + label3='kbx2' + label4='kby2' + self.xlimits.append((self.xmin,self.xmax)) + elif n==2: + label1='kaxay' + label2='kbxby' + label3='kaxbx' + label4='kaxby' + self.xlimits.append((self.xmin,self.xmax)) + + + ax.plotline1 = ax.plot(self.x[n][0,:,0,0], self.y, color='r',linewidth=2.0, label=label1) + ax.plotline2 = ax.plot(self.x[n][1,:,0,0], self.y, color='k',linewidth=2.0, label=label2) + ax.plotline3 = ax.plot(self.x[n][2,:,0,0], self.y, color='b',linewidth=2.0, label=label3) + ax.plotline4 = ax.plot(self.x[n][3,:,0,0], self.y, color='m',linewidth=2.0, label=label4) + ax.legend(loc='upper right') + 
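                # Editor's note (not part of this patch): the firsttime branch keeps
                # the handles returned by ax.plot() on the axes (ax.plotline1..4) so
                # later calls can refresh the same artists instead of re-plotting.
                # This is the incremental-update idiom shared by these realtime plot
                # classes; a minimal sketch of it:
                #
                #     if ax.firsttime:
                #         ax.myline = ax.plot(x, y)        # create once, keep handle
                #     else:
                #         ax.myline[0].set_data(x, y)      # cheap update on later frames
                #
                # Because ax.plot() returns a list and is stored without unpacking,
                # the update branch further down indexes the handles as plotline1[0].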
ax.set_xlim(self.xmin, self.xmax) + self.titles.append('{}'.format(self.plot_name.upper())) + #plt.tight_layout() + + + else: + + if n==0: + self.xlimits=[(self.xmin,self.xmax)] + else: + self.xlimits.append((self.xmin,self.xmax)) + + ax.set_xlim(self.xmin, self.xmax) + + + ax.plotline1[0].set_data(self.x[n][0,:,0,0],self.y) + ax.plotline2[0].set_data(self.x[n][1,:,0,0],self.y) + ax.plotline3[0].set_data(self.x[n][2,:,0,0],self.y) + ax.plotline4[0].set_data(self.x[n][3,:,0,0],self.y) + self.titles.append('{}'.format(self.plot_name.upper())) + #plt.tight_layout() + + + +class CrossProductsLPPlot(Plot): + ''' + Plot for cross products LP + ''' + + CODE = 'crossprodlp' + plot_name = 'Cross Products LP' + plot_type = 'scatterbuffer' + + + def setup(self): + + self.ncols = 2 + self.nrows = 1 + self.nplots = 2 + self.ylabel = 'Range [km]' + self.xlabel = 'dB' + self.width = 3.5*self.nplots + self.height = 5.5 + self.colorbar = False + self.titles = [] + self.plotline_array=numpy.zeros((2,self.data.NLAG),dtype=object) + def plot(self): + + + self.x = self.data[self.CODE][:,-1,:,:] + + + self.y = self.data.heights[0:self.data.NRANGE] + + + label_array=numpy.array(['lag '+ str(x) for x in range(self.data.NLAG)]) + color_array=['r','k','g','b','c','m','y','orange','steelblue','purple','peru','darksalmon','grey','limegreen','olive','midnightblue'] + + + for n, ax in enumerate(self.axes): + + self.xmin=30 + self.xmax=70 + #print(self.x[0,12:15,n]) + #input() + #self.xmin=numpy.min(numpy.concatenate((self.x[0,:,n],self.x[1,:,n]))) + #self.xmax=numpy.max(numpy.concatenate((self.x[0,:,n],self.x[1,:,n]))) + + #print("before",self.plotline_array) + + if ax.firsttime: + + self.autoxticks=False + + + for i in range(self.data.NLAG): + #print(i) + #print(numpy.shape(self.x)) + self.plotline_array[n,i], = ax.plot(self.x[i,:,n], self.y, color=color_array[i],linewidth=1.0, label=label_array[i]) + #ax.plotline1 = ax.plot(self.x[0,:,n], self.y, color='r',linewidth=2.0, label=label_array[0]) + #ax.plotline2 = ax.plot(self.x[n][1,:,0,0], self.y, color='k',linewidth=2.0, label=label2) + #ax.plotline3 = ax.plot(self.x[n][2,:,0,0], self.y, color='b',linewidth=2.0, label=label3) + #ax.plotline4 = ax.plot(self.x[n][3,:,0,0], self.y, color='m',linewidth=2.0, label=label4) + + + #print(self.plotline_array) + + + + ax.legend(loc='upper right') + ax.set_xlim(self.xmin, self.xmax) + if n==0: + self.titles.append('{} CH0'.format(self.plot_name.upper())) + if n==1: + self.titles.append('{} CH1'.format(self.plot_name.upper())) + + #plt.tight_layout() + + else: + #print(self.plotline_array) + for i in range(self.data.NLAG): + + self.plotline_array[n,i].set_data(self.x[i,:,n],self.y) + + + + #ax.plotline1[0].set_data(self.x[n][0,:,0,0],self.y) + #ax.plotline2[0].set_data(self.x[n][1,:,0,0],self.y) + #ax.plotline3[0].set_data(self.x[n][2,:,0,0],self.y) + #ax.plotline4[0].set_data(self.x[n][3,:,0,0],self.y) + + if n==0: + self.titles.append('{} CH0'.format(self.plot_name.upper())) + if n==1: + self.titles.append('{} CH1'.format(self.plot_name.upper())) + + #plt.tight_layout() + + +class NoiseDPPlot(NoisePlot): + ''' + Plot for noise Double Pulse + ''' + + CODE = 'noisedp' + plot_name = 'Noise' + plot_type = 'scatterbuffer' + + +class XmitWaveformPlot(Plot): + ''' + Plot for xmit waveform + ''' + + CODE = 'xmit' + plot_name = 'Xmit Waveform' + plot_type = 'scatterbuffer' + + + def setup(self): + + self.ncols = 1 + self.nrows = 1 + self.nplots = 1 + self.ylabel = '' + self.xlabel = 'Number of Lag' + self.width = 5.5 + self.height = 3.5 
+ self.colorbar = False + if not self.titles: + self.titles = self.data.parameters \ + if self.data.parameters else ['{}'.format(self.plot_name.upper())] + + def plot(self): + + self.x = numpy.arange(0,self.data.NLAG,1,'float32') + self.y = self.data['xmit'][:,-1,:] + + self.xmin = 0 + self.xmax = self.data.NLAG-1 + self.ymin = -1.0 + self.ymax = 1.0 + ax = self.axes[0] + + if ax.firsttime: + ax.plotline0=ax.plot(self.x,self.y[0,:],color='blue') + ax.plotline1=ax.plot(self.x,self.y[1,:],color='red') + secax=ax.secondary_xaxis(location=0.5) + secax.xaxis.tick_bottom() + secax.tick_params( labelleft=False, labeltop=False, + labelright=False, labelbottom=False) + + self.xstep_given = 3 + self.ystep_given = .25 + secax.set_xticks(numpy.linspace(self.xmin, self.xmax, 6)) #only works on matplotlib.version>3.2 + + else: + ax.plotline0[0].set_data(self.x,self.y[0,:]) + ax.plotline1[0].set_data(self.x,self.y[1,:]) diff --git a/schainpy/model/io/__init__.py b/schainpy/model/io/__init__.py index f6c6768..05f4a73 100644 --- a/schainpy/model/io/__init__.py +++ b/schainpy/model/io/__init__.py @@ -21,4 +21,9 @@ from .jroIO_mira35c import * from .julIO_param import * from .pxIO_param import * -from .jroIO_simulator import * \ No newline at end of file +from .jroIO_simulator import * + +############DP############ +from .jroIO_dat import * + +############DP############ diff --git a/schainpy/model/io/jroIO_base.py b/schainpy/model/io/jroIO_base.py index 7d17366..d0777f4 100644 --- a/schainpy/model/io/jroIO_base.py +++ b/schainpy/model/io/jroIO_base.py @@ -78,6 +78,7 @@ def isFileInEpoch(filename, startUTSeconds, endUTSeconds): basicHeaderObj = BasicHeader(LOCALTIME) try: + fp = open(filename, 'rb') except IOError: print("The file %s can't be opened" % (filename)) @@ -140,6 +141,7 @@ def isFileInTimeRange(filename, startDate, endDate, startTime, endTime): firstBasicHeaderObj = BasicHeader(LOCALTIME) systemHeaderObj = SystemHeader() + radarControllerHeaderObj = RadarControllerHeader() processingHeaderObj = ProcessingHeader() @@ -384,7 +386,7 @@ def isRadarFolder(folder): def isRadarFile(file): - try: + try: year = int(file[1:5]) doy = int(file[5:8]) set = int(file[8:11]) @@ -395,10 +397,10 @@ def isRadarFile(file): def getDateFromRadarFile(file): - try: + try: year = int(file[1:5]) doy = int(file[5:8]) - set = int(file[8:11]) + set = int(file[8:11]) except: return None @@ -417,7 +419,7 @@ def getDateFromRadarFolder(folder): return thisDate def parse_format(s, fmt): - + for i in range(fmt.count('%')): x = fmt.index('%') d = DT_DIRECTIVES[fmt[x:x+2]] @@ -484,7 +486,7 @@ class Reader(object): def run(self): - raise NotImplementedError + raise NotImplementedError def getAllowedArgs(self): if hasattr(self, '__attrs__'): @@ -496,19 +498,19 @@ class Reader(object): for key, value in kwargs.items(): setattr(self, key, value) - + def find_folders(self, path, startDate, endDate, folderfmt, last=False): - folders = [x for f in path.split(',') + folders = [x for f in path.split(',') for x in os.listdir(f) if os.path.isdir(os.path.join(f, x))] folders.sort() if last: folders = [folders[-1]] - for folder in folders: - try: - dt = datetime.datetime.strptime(parse_format(folder, folderfmt), folderfmt).date() + for folder in folders: + try: + dt = datetime.datetime.strptime(parse_format(folder, folderfmt), folderfmt).date() if dt >= startDate and dt <= endDate: yield os.path.join(path, folder) else: @@ -517,38 +519,44 @@ class Reader(object): log.log('Skiping folder {}'.format(folder), self.name) continue return - - def 
find_files(self, folders, ext, filefmt, startDate=None, endDate=None, + + def find_files(self, folders, ext, filefmt, startDate=None, endDate=None, expLabel='', last=False): - - for path in folders: + + for path in folders: files = glob.glob1(path, '*{}'.format(ext)) files.sort() if last: - if files: + if files: fo = files[-1] - try: + try: dt = datetime.datetime.strptime(parse_format(fo, filefmt), filefmt).date() - yield os.path.join(path, expLabel, fo) - except Exception as e: + yield os.path.join(path, expLabel, fo) + except Exception as e: pass return else: return for fo in files: - try: - dt = datetime.datetime.strptime(parse_format(fo, filefmt), filefmt).date() + try: + dt = datetime.datetime.strptime(parse_format(fo, filefmt), filefmt).date() + #print(dt) + #print(startDate) + #print(endDate) if dt >= startDate and dt <= endDate: + yield os.path.join(path, expLabel, fo) + else: + log.log('Skiping file {}'.format(fo), self.name) except Exception as e: log.log('Skiping file {}'.format(fo), self.name) - continue + continue def searchFilesOffLine(self, path, startDate, endDate, - expLabel, ext, walk, + expLabel, ext, walk, filefmt, folderfmt): """Search files in offline mode for the given arguments @@ -561,12 +569,12 @@ class Reader(object): path, startDate, endDate, folderfmt) else: folders = path.split(',') - + return self.find_files( - folders, ext, filefmt, startDate, endDate, expLabel) + folders, ext, filefmt, startDate, endDate, expLabel) def searchFilesOnLine(self, path, startDate, endDate, - expLabel, ext, walk, + expLabel, ext, walk, filefmt, folderfmt): """Search for the last file of the last folder @@ -579,40 +587,54 @@ class Reader(object): Return: generator with the full path of last filename """ - + if walk: folders = self.find_folders( path, startDate, endDate, folderfmt, last=True) else: folders = path.split(',') - + return self.find_files( folders, ext, filefmt, startDate, endDate, expLabel, last=True) def setNextFile(self): """Set the next file to be readed open it and parse de file header""" + #print("fp: ",self.fp) while True: + + #print(self.fp) if self.fp != None: - self.fp.close() + self.fp.close() + #print("setNextFile") + #print("BEFORE OPENING",self.filename) if self.online: newFile = self.setNextFileOnline() + else: + newFile = self.setNextFileOffline() - + + #print("newFile: ",newFile) if not(newFile): + if self.online: raise schainpy.admin.SchainError('Time to wait for new files reach') else: if self.fileIndex == -1: + #print("OKK") raise schainpy.admin.SchainWarning('No files found in the given path') else: + raise schainpy.admin.SchainWarning('No more files to read') - + if self.verifyFile(self.filename): + break - + + ##print("BEFORE OPENING",self.filename) + log.log('Opening file: %s' % self.filename, self.name) self.readFirstHeader() @@ -625,15 +647,16 @@ class Reader(object): self.filename self.fp self.filesize - + Return: boolean """ + nextFile = True nextDay = False - for nFiles in range(self.nFiles+1): + for nFiles in range(self.nFiles+1): for nTries in range(self.nTries): fullfilename, filename = self.checkForRealPath(nextFile, nextDay) if fullfilename is not None: @@ -643,18 +666,18 @@ class Reader(object): self.name) time.sleep(self.delay) nextFile = False - continue - + continue + if fullfilename is not None: break - - self.nTries = 1 - nextFile = True + + #self.nTries = 1 + nextFile = True if nFiles == (self.nFiles - 1): log.log('Trying with next day...', self.name) nextDay = True - self.nTries = 3 + self.nTries = 3 if fullfilename: 
self.fileSize = os.path.getsize(fullfilename) @@ -662,45 +685,48 @@ class Reader(object): self.flagIsNewFile = 1 if self.fp != None: self.fp.close() + #print(fullfilename) self.fp = self.open_file(fullfilename, self.open_mode) + self.flagNoMoreFiles = 0 self.fileIndex += 1 return 1 - else: + else: return 0 - + def setNextFileOffline(self): """Open the next file to be readed in offline mode""" - + try: filename = next(self.filenameList) self.fileIndex +=1 except StopIteration: self.flagNoMoreFiles = 1 - return 0 - + return 0 + #print(self.fileIndex) + #print(filename) self.filename = filename self.fileSize = os.path.getsize(filename) self.fp = self.open_file(filename, self.open_mode) self.flagIsNewFile = 1 return 1 - + @staticmethod def isDateTimeInRange(dt, startDate, endDate, startTime, endTime): """Check if the given datetime is in range""" - + if startDate <= dt.date() <= endDate: if startTime <= dt.time() <= endTime: return True return False - + def verifyFile(self, filename): """Check for a valid file - + Arguments: filename -- full path filename - + Return: boolean """ @@ -711,10 +737,11 @@ class Reader(object): """Check if the next file to be readed exists""" raise NotImplementedError - + def readFirstHeader(self): """Parse the file header""" + pass def waitDataBlock(self, pointer_location, blocksize=None): @@ -783,8 +810,8 @@ class JRODataReader(Reader): Return: str -- fullpath of the file """ - - + + if nextFile: self.set += 1 if nextDay: @@ -796,7 +823,15 @@ class JRODataReader(Reader): prefixFileList = ['d', 'D'] elif self.ext.lower() == ".pdata": # spectra prefixFileList = ['p', 'P'] - + + ##############DP############## + + elif self.ext.lower() == ".dat": # dat + prefixFileList = ['z', 'Z'] + + + + ##############DP############## # barrido por las combinaciones posibles for prefixDir in prefixDirList: thispath = self.path @@ -816,9 +851,9 @@ class JRODataReader(Reader): if os.path.exists(fullfilename): return fullfilename, filename - - return None, filename - + + return None, filename + def __waitNewBlock(self): """ Return 1 si se encontro un nuevo bloque de datos, 0 de otra forma. @@ -853,6 +888,7 @@ class JRODataReader(Reader): return 0 print("[Reading] Waiting %0.2f seconds for the next block, try %03d ..." % (self.delay, nTries + 1)) + #print(self.filename) time.sleep(self.delay) return 0 @@ -860,9 +896,9 @@ class JRODataReader(Reader): def __setNewBlock(self): if self.fp == None: - return 0 - - if self.flagIsNewFile: + return 0 + + if self.flagIsNewFile: self.lastUTTime = self.basicHeaderObj.utc return 1 @@ -875,12 +911,12 @@ class JRODataReader(Reader): currentSize = self.fileSize - self.fp.tell() neededSize = self.processingHeaderObj.blockSize + self.basicHeaderSize - + if (currentSize >= neededSize): self.basicHeaderObj.read(self.fp) self.lastUTTime = self.basicHeaderObj.utc return 1 - + if self.__waitNewBlock(): self.lastUTTime = self.basicHeaderObj.utc return 1 @@ -921,6 +957,10 @@ class JRODataReader(Reader): print("[Reading] Block No. 
%d/%d -> %s" % (self.nReadBlocks, self.processingHeaderObj.dataBlocksPerFile, self.dataOut.datatime.ctime())) + #################DP################# + self.dataOut.TimeBlockDate=self.dataOut.datatime.ctime() + self.dataOut.TimeBlockSeconds=time.mktime(time.strptime(self.dataOut.datatime.ctime())) + #################DP################# return 1 def readFirstHeader(self): @@ -966,10 +1006,10 @@ class JRODataReader(Reader): except IOError: log.error("File {} can't be opened".format(filename), self.name) return False - + if self.online and self.waitDataBlock(0): pass - + basicHeaderObj = BasicHeader(LOCALTIME) systemHeaderObj = SystemHeader() radarControllerHeaderObj = RadarControllerHeader() @@ -996,7 +1036,7 @@ class JRODataReader(Reader): dt2 = basicHeaderObj.datatime if not self.isDateTimeInRange(dt1, self.startDate, self.endDate, self.startTime, self.endTime) and not \ self.isDateTimeInRange(dt2, self.startDate, self.endDate, self.startTime, self.endTime): - flag = False + flag = False fp.close() return flag @@ -1105,11 +1145,11 @@ class JRODataReader(Reader): return dateList def setup(self, **kwargs): - + self.set_kwargs(**kwargs) if not self.ext.startswith('.'): self.ext = '.{}'.format(self.ext) - + if self.server is not None: if 'tcp://' in self.server: address = server @@ -1131,36 +1171,36 @@ class JRODataReader(Reader): for nTries in range(self.nTries): fullpath = self.searchFilesOnLine(self.path, self.startDate, - self.endDate, self.expLabel, self.ext, self.walk, + self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt) try: fullpath = next(fullpath) except: fullpath = None - + if fullpath: break log.warning( 'Waiting {} sec for a valid file in {}: try {} ...'.format( - self.delay, self.path, nTries + 1), + self.delay, self.path, nTries + 1), self.name) time.sleep(self.delay) if not(fullpath): raise schainpy.admin.SchainError( - 'There isn\'t any valid file in {}'.format(self.path)) + 'There isn\'t any valid file in {}'.format(self.path)) pathname, filename = os.path.split(fullpath) self.year = int(filename[1:5]) self.doy = int(filename[5:8]) - self.set = int(filename[8:11]) - 1 + self.set = int(filename[8:11]) - 1 else: log.log("Searching files in {}".format(self.path), self.name) - self.filenameList = self.searchFilesOffLine(self.path, self.startDate, + self.filenameList = self.searchFilesOffLine(self.path, self.startDate, self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt) - + self.setNextFile() return @@ -1181,7 +1221,7 @@ class JRODataReader(Reader): self.dataOut.useLocalTime = self.basicHeaderObj.useLocalTime self.dataOut.ippSeconds = self.radarControllerHeaderObj.ippSeconds / self.nTxs - + def getFirstHeader(self): raise NotImplementedError @@ -1214,8 +1254,8 @@ class JRODataReader(Reader): """ Arguments: - path : - startDate : + path : + startDate : endDate : startTime : endTime : @@ -1284,7 +1324,7 @@ class JRODataWriter(Reader): dtype_width = get_dtype_width(dtype_index) return dtype_width - + def getProcessFlags(self): processFlags = 0 @@ -1322,9 +1362,9 @@ class JRODataWriter(Reader): self.basicHeaderObj.size = self.basicHeaderSize # bytes self.basicHeaderObj.version = self.versionFile - self.basicHeaderObj.dataBlock = self.nTotalBlocks + self.basicHeaderObj.dataBlock = self.nTotalBlocks utc = numpy.floor(self.dataOut.utctime) - milisecond = (self.dataOut.utctime - utc) * 1000.0 + milisecond = (self.dataOut.utctime - utc) * 1000.0 self.basicHeaderObj.utc = utc self.basicHeaderObj.miliSecond = milisecond 
self.basicHeaderObj.timeZone = self.dataOut.timeZone @@ -1465,9 +1505,9 @@ class JRODataWriter(Reader): if self.dataOut.datatime.date() > self.fileDate: setFile = 0 self.nTotalBlocks = 0 - + filen = '{}{:04d}{:03d}{:03d}{}'.format( - self.optchar, timeTuple.tm_year, timeTuple.tm_yday, setFile, ext) + self.optchar, timeTuple.tm_year, timeTuple.tm_yday, setFile, ext) filename = os.path.join(path, subfolder, filen) @@ -1515,11 +1555,11 @@ class JRODataWriter(Reader): self.ext = ext.lower() self.path = path - + if set is None: self.setFile = -1 else: - self.setFile = set - 1 + self.setFile = set - 1 self.blocksPerFile = blocksPerFile self.profilesPerBlock = profilesPerBlock diff --git a/schainpy/model/io/jroIO_dat.py b/schainpy/model/io/jroIO_dat.py new file mode 100644 index 0000000..d86bd39 --- /dev/null +++ b/schainpy/model/io/jroIO_dat.py @@ -0,0 +1,683 @@ +''' +Created on Jun 9, 2020 + +@author: Roberto Flores +''' + +import os +import sys +import time + +import struct + + +import datetime + +import numpy + + +import schainpy.admin +from schainpy.model.io.jroIO_base import LOCALTIME, Reader +from schainpy.model.data.jroheaderIO import BasicHeader, SystemHeader, RadarControllerHeader, ProcessingHeader +from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator +from schainpy.model.data.jrodata import Voltage, Parameters +from schainpy.utils import log + + +class DatReader(Reader, ProcessingUnit): + + def __init__(self): + + ProcessingUnit.__init__(self) + self.basicHeaderObj = BasicHeader(LOCALTIME) + self.systemHeaderObj = SystemHeader() + self.radarControllerHeaderObj = RadarControllerHeader() + self.processingHeaderObj = ProcessingHeader() + self.dataOut = Parameters() + #print(self.basicHeaderObj.timezone) + #self.counter_block=0 + self.format='dat' + self.flagNoMoreFiles = 0 + self.filename = None + self.intervals = set() + #self.datatime = datetime.datetime(1900,1,1) + + self.filefmt = "***%Y%m%d*******" + + self.padding=numpy.zeros(1,'int32') + self.hsize=numpy.zeros(1,'int32') + self.bufsize=numpy.zeros(1,'int32') + self.nr=numpy.zeros(1,'int32') + self.ngates=numpy.zeros(1,'int32') ### ### ### 2 + self.time1=numpy.zeros(1,'uint64') # pos 3 + self.time2=numpy.zeros(1,'uint64') # pos 4 + self.lcounter=numpy.zeros(1,'int32') + self.groups=numpy.zeros(1,'int32') + self.system=numpy.zeros(4,'int8') # pos 7 + self.h0=numpy.zeros(1,'float32') + self.dh=numpy.zeros(1,'float32') + self.ipp=numpy.zeros(1,'float32') + self.process=numpy.zeros(1,'int32') + self.tx=numpy.zeros(1,'int32') + + self.ngates1=numpy.zeros(1,'int32') ### ### ### 13 + self.time0=numpy.zeros(1,'uint64') # pos 14 + self.nlags=numpy.zeros(1,'int32') + self.nlags1=numpy.zeros(1,'int32') + self.txb=numpy.zeros(1,'float32') ### ### ### 17 + self.time3=numpy.zeros(1,'uint64') # pos 18 + self.time4=numpy.zeros(1,'uint64') # pos 19 + self.h0_=numpy.zeros(1,'float32') + self.dh_=numpy.zeros(1,'float32') + self.ipp_=numpy.zeros(1,'float32') + self.txa_=numpy.zeros(1,'float32') + + self.pad=numpy.zeros(100,'int32') + + self.nbytes=numpy.zeros(1,'int32') + self.limits=numpy.zeros(1,'int32') + self.ngroups=numpy.zeros(1,'int32') ### ### ### 27 + #Make the header list + #header=[hsize,bufsize,nr,ngates,time1,time2,lcounter,groups,system,h0,dh,ipp,process,tx,padding,ngates1,time0,nlags,nlags1,padding,txb,time3,time4,h0_,dh_,ipp_,txa_,pad,nbytes,limits,padding,ngroups] + 
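        # Editor's note (descriptive comment, not part of this patch): parseHeader()
        # and read_header() below walk this list element by element and choose the
        # struct format from each field's numpy dtype: int32 -> 'i', uint64 -> 'q',
        # int8 -> 'B', float32 -> 'f', so the order and dtypes here must match the
        # on-disk header layout exactly; DatWriter.writeBlock() serializes
        # dataOut.header back with the same ordering and dtypes.  Note that
        # self.padding is referenced at three positions of the list, so those three
        # slots alias the same one-element array; that is harmless only while the
        # padding values are never used.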
self.header=[self.hsize,self.bufsize,self.nr,self.ngates,self.time1,self.time2,self.lcounter,self.groups,self.system,self.h0,self.dh,self.ipp,self.process,self.tx,self.ngates1,self.padding,self.time0,self.nlags,self.nlags1,self.padding,self.txb,self.time3,self.time4,self.h0_,self.dh_,self.ipp_,self.txa_,self.pad,self.nbytes,self.limits,self.padding,self.ngroups] + + + + def setup(self, **kwargs): + + self.set_kwargs(**kwargs) + + + if self.path is None: + raise ValueError('The path is not valid') + + self.open_file = open + self.open_mode = 'rb' + + + + if self.format is None: + raise ValueError('The format is not valid') + elif self.format.lower() in ('dat'): + self.ext = '.dat' + elif self.format.lower() in ('out'): + self.ext = '.out' + + + log.log("Searching files in {}".format(self.path), self.name) + self.filenameList = self.searchFilesOffLine(self.path, self.startDate, + self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt) + #print(self.path) + #print(self.filenameList) + #input() + + + self.setNextFile() + + def readFirstHeader(self): + '''Read header and data''' + + #self.flag_same_file=1 + self.counter_block=0 + self.parseHeader() + self.parseData() + self.blockIndex = 0 + + return + + def parseHeader(self): + ''' + ''' + + for i in range(len(self.header)): + for j in range(len(self.header[i])): + #print("len(header[i]) ",len(header[i])) + #input() + temp=self.fp.read(int(self.header[i].itemsize)) + if isinstance(self.header[i][0], numpy.int32): + #print(struct.unpack('i', temp)[0]) + self.header[i][0]=struct.unpack('i', temp)[0] + if isinstance(self.header[i][0], numpy.uint64): + self.header[i][0]=struct.unpack('q', temp)[0] + if isinstance(self.header[i][0], numpy.int8): + self.header[i][0]=struct.unpack('B', temp)[0] + if isinstance(self.header[i][0], numpy.float32): + self.header[i][0]=struct.unpack('f', temp)[0] + + self.fp.seek(0,0) + if int(self.header[1][0])==int(81864): + self.experiment='DP' + + elif int(self.header[1][0])==int(185504): + self.experiment='HP' + + + self.total_blocks=os.stat(self.filename).st_size//self.header[1][0] + + + def parseData(self): + ''' + ''' + if self.experiment=='DP': + self.header[15][0]=66 + self.header[18][0]=16 + self.header[17][0]=11 + self.header[2][0]=2 + + + self.noise=numpy.zeros(self.header[2][0],'float32') #self.header[2][0] + #tmpx=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kax=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kay=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kbx=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kby=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kax2=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kay2=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kbx2=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kby2=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kaxbx=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kaxby=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kaybx=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kayby=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kaxay=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + self.kbxby=numpy.zeros((self.header[15][0],self.header[17][0],2),'float32') + 
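        # Editor's note (worked size check, not part of this patch): with the DP
        # constants hard-coded in readBlock() (header[15] = 66 heights,
        # header[17] = 11 lags, header[2] = 2 channels, 2 flip states), one DP block
        # on disk amounts to
        #     header            : 544 bytes  (25 four-byte scalars + 5 uint64
        #                                     + 4 int8 'system' + 100 int32 padding)
        #     14 cross products : 14 * 66 * 11 * 2 * 4 = 81312 bytes (float32)
        #     noise             : 2 * 4 = 8 bytes
        #     total             : 544 + 81312 + 8 = 81864 bytes
        # which is exactly the bufsize value parseHeader() uses to tag a file as the
        # 'DP' experiment; the 185504-byte HP layout also carries the long-pulse
        # buffers read by long_pulse_products(), with its dimensions taken from the
        # file header itself.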
self.output_LP_real=numpy.zeros((self.header[18][0],200,self.header[2][0]),'float32') + self.output_LP_imag=numpy.zeros((self.header[18][0],200,self.header[2][0]),'float32') + self.final_cross_products=[self.kax,self.kay,self.kbx,self.kby,self.kax2,self.kay2,self.kbx2,self.kby2,self.kaxbx,self.kaxby,self.kaybx,self.kayby,self.kaxay,self.kbxby] + #self.final_cross_products=[tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx] + + #print("pos: ",self.fp.tell()) + + + def readNextBlock(self): + + while True: + self.flagDiscontinuousBlock = 0 + #print(os.stat(self.filename).st_size) + #print(os.stat(self.filename).st_size//self.header[1][0]) + #os.stat(self.fp) + if self.counter_block == self.total_blocks: + + self.setNextFile() + + self.readBlock() + #self.counter_block+=1 + + if (self.datatime < datetime.datetime.combine(self.startDate, self.startTime)) or \ + (self.datatime > datetime.datetime.combine(self.endDate, self.endTime)): + + #print(self.datatime) + #print(datetime.datetime.combine(self.startDate, self.startTime)) + #print(datetime.datetime.combine(self.endDate, self.endTime)) + #print("warning") + log.warning( + 'Reading Block No. {}/{} -> {} [Skipping]'.format( + self.counter_block, + self.total_blocks, + self.datatime.ctime()), + 'DATReader') + continue + break + + log.log( + 'Reading Block No. {}/{} -> {}'.format( + self.counter_block, + self.total_blocks, + self.datatime.ctime()), + 'DATReader') + + return 1 + + def readBlock(self): + ''' + ''' + + self.npos=self.counter_block*self.header[1][0] + #print(self.counter_block) + self.fp.seek(self.npos, 0) + self.counter_block+=1 + #print("fpos1: ",self.fp.tell()) + + self.read_header() + + #put by hand because old files didn't save it in the header + if self.experiment=='DP': + self.header[15][0]=66 + self.header[18][0]=16 + self.header[17][0]=11 + self.header[2][0]=2 + ######################################### + + if self.experiment=="HP": + self.long_pulse_products() + + self.read_cross_products() + + + self.read_noise() + + + return + + + + def read_header(self): + + + for i in range(len(self.header)): + for j in range(len(self.header[i])): + #print("len(header[i]) ",len(header[i])) + #input() + temp=self.fp.read(int(self.header[i].itemsize)) + #if(b''==temp): + # self.setNextFile() + # self.flag_same_file=0 + if isinstance(self.header[i][0], numpy.int32): + #print(struct.unpack('i', temp)[0]) + self.header[i][0]=struct.unpack('i', temp)[0] + if isinstance(self.header[i][0], numpy.uint64): + self.header[i][0]=struct.unpack('q', temp)[0] + if isinstance(self.header[i][0], numpy.int8): + self.header[i][0]=struct.unpack('B', temp)[0] + if isinstance(self.header[i][0], numpy.float32): + self.header[i][0]=struct.unpack('f', temp)[0] + #else: + # continue + #self.fp.seek(self.npos_aux, 0) + # break + + #print("fpos2: ",self.fp.tell()) + #log.success('Parameters found: {}'.format(self.parameters), + # 'DATReader') + #print("Success") + #self.TimeBlockSeconds_for_dp_power = self.header[4][0]#-((self.dataOut.nint-1)*self.dataOut.NAVG*2) + #print(dataOut.TimeBlockSeconds_for_dp_power) + + #self.datatime=datetime.datetime.fromtimestamp(self.header[4][0]).strftime("%Y-%m-%d %H:%M:%S") + #print(self.header[4][0]) + self.datatime=datetime.datetime.fromtimestamp(self.header[4][0]) + #print(self.header[1][0]) + + def long_pulse_products(self): + temp=self.fp.read(self.header[18][0]*self.header[2][0]*200*8) + ii=0 + + for l in range(self.header[18][0]): #lag + for r in range(self.header[2][0]): # channels + for k in 
range(200): #RANGE## generalizar + self.output_LP_real[l,k,r]=struct.unpack('f', temp[ii:ii+4])[0] + ii=ii+4 + self.output_LP_imag[l,k,r]=struct.unpack('f', temp[ii:ii+4])[0] + ii=ii+4 + + #print(self.output_LP_real[1,1,1]) + #print(self.output_LP_imag[1,1,1]) + def read_cross_products(self): + + for ind in range(len(self.final_cross_products)): #final cross products + temp=self.fp.read(self.header[17][0]*2*self.header[15][0]*4) #*4 bytes + #if(b''==temp): + # self.setNextFile() + # self.flag_same_file=0 + ii=0 + #print("kabxys.shape ",kabxys.shape) + #print(kabxys) + #print("fpos3: ",self.fp.tell()) + for l in range(self.header[17][0]): #lag + #print("fpos3: ",self.fp.tell()) + for fl in range(2): # unflip and flip + for k in range(self.header[15][0]): #RANGE + #print("fpos3: ",self.fp.tell()) + self.final_cross_products[ind][k,l,fl]=struct.unpack('f', temp[ii:ii+4])[0] + ii=ii+4 + #print("fpos2: ",self.fp.tell()) + + + + def read_noise(self): + + temp=self.fp.read(self.header[2][0]*4) #*4 bytes self.header[2][0] + for ii in range(self.header[2][0]): #self.header[2][0] + self.noise[ii]=struct.unpack('f', temp[ii*4:(ii+1)*4])[0] + + #print("fpos5: ",self.fp.tell()) + + + + def set_output(self): + ''' + Storing data from buffer to dataOut object + ''' + #print("fpos2: ",self.fp.tell()) + ##self.dataOut.header = self.header + #this is put by hand because it isn't saved in the header + if self.experiment=='DP': + self.dataOut.NRANGE=0 + self.dataOut.NSCAN=132 + self.dataOut.heightList=self.header[10][0]*(numpy.arange(self.header[15][0])) + elif self.experiment=='HP': + self.dataOut.output_LP=self.output_LP_real+1.j*self.output_LP_imag + self.dataOut.NRANGE=200 + self.dataOut.NSCAN=128 + self.dataOut.heightList=self.header[10][0]*(numpy.arange(90)) #NEEEDS TO BE GENERALIZED + ######################################### + #print(self.dataOut.output_LP[1,1,1]) + self.dataOut.MAXNRANGENDT=self.header[3][0] + self.dataOut.NDP=self.header[15][0] + self.dataOut.DPL=self.header[17][0] + self.dataOut.DH=self.header[10][0] + self.dataOut.NAVG=self.header[7][0] + self.dataOut.H0=self.header[9][0] + self.dataOut.NR=self.header[2][0] + self.dataOut.NLAG=self.header[18][0] + #self.dataOut.tmpx=self.tmpx + #self.dataOut.timeZone = 5 + #self.dataOut.final_cross_products=self.final_cross_products + self.dataOut.kax=self.kax + #print(self.dataOut.kax[1,1,1]) + self.dataOut.kay=self.kay + self.dataOut.kbx=self.kbx + self.dataOut.kby=self.kby + self.dataOut.kax2=self.kax2 + self.dataOut.kay2=self.kay2 + self.dataOut.kbx2=self.kbx2 + self.dataOut.kby2=self.kby2 + self.dataOut.kaxbx=self.kaxbx + self.dataOut.kaxby=self.kaxby + self.dataOut.kaybx=self.kaybx + self.dataOut.kayby=self.kayby + self.dataOut.kaxay=self.kaxay + self.dataOut.kbxby=self.kbxby + self.dataOut.noise_final=self.noise + #print("NOISE",self.noise) + + + self.dataOut.useLocalTime=True + + #self.dataOut.experiment=self.experiment + #print(self.datatime) + #print(self.dataOut.datatime) + + + #self.dataOut.utctime = (self.datatime - datetime.datetime(1970, 1, 1)).total_seconds() + #self.dataOut.utctimeInit = self.dataOut.utctime + + + + self.dataOut.lt=self.datatime.hour + + + #print(RadarControllerHeader().ippSeconds) + #print(RadarControllerHeader().ipp) + #self.dataOut.utctime=time.gmtime(self.header[4][0])- datetime.datetime(1970, 1, 1) + #self.dataOut.utctime=self.dataOut.utctime.total_seconds() + #time1 = self.header[4][0] # header.time1 + #print("time1: ",time1) + #print(self.header[4][0]) + #date = time.ctime(time1) + 
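        # Editor's note (descriptive comment, not part of this patch): header[4] is
        # the time1 field, an epoch timestamp in whole seconds.  read_header() turns
        # it into self.datatime with datetime.datetime.fromtimestamp(), i.e. in local
        # time (consistent with the useLocalTime=True set above), while just below
        # dataOut.utctime is assigned the same raw epoch value and
        # dataOut.TimeBlockSeconds is rebuilt by round-tripping dataOut.datatime
        # through ctime()/strptime()/mktime().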
#print("DADSADA",time.strptime(date)) + #print("date_before: ",date) + #bd_time=time.gmtime(time1) + #print(time.mktime(bd_time)) + #self.dataOut.utctime=time.mktime(bd_time) + self.dataOut.utctime = self.header[4][0] + #self.dataOut.datatime=a + #print(datetime.datetime.utcfromtimestamp(self.dataOut.utctime)) + #self.dataOut.TimeBlockDate=self.datatime.ctime() + self.dataOut.TimeBlockSeconds=time.mktime(time.strptime(self.dataOut.datatime.ctime())) + + #self.dataOut.heightList = self.ranges + #self.dataOut.utctime = (self.datatime - datetime.datetime(1970, 1, 1)).total_seconds() + #self.dataOut.utctimeInit = self.dataOut.utctime + #self.dataOut.paramInterval = min(self.intervals) + #self.dataOut.useLocalTime = False + self.dataOut.flagNoData = False + self.dataOut.flagDiscontinuousBlock = self.flagDiscontinuousBlock + #print(self.dataOut.channelIndexList) + self.dataOut.channelList=list(range(0,self.header[2][0])) + #print(self.dataOut.channelList) + #print(self.datatime) + #print(self.dataOut.final_cross_products[0]) + + + #self.dataOut.heightList=self.header[10][0]*(numpy.arange(self.header[15][0])) + + #print(numpy.shape(self.dataOut.heightList)) + + + def getData(self): + ''' + Storing data from databuffer to dataOut object + ''' + + if not self.readNextBlock(): + self.dataOut.flagNoData = True + return 0 + + self.set_output() + + return 1 + + def run(self, **kwargs): + + if not(self.isConfig): + self.setup(**kwargs) + self.isConfig = True + #print("fpos1: ",self.fp.tell()) + self.getData() + + return + +@MPDecorator +class DatWriter(Operation): + + + def __init__(self): + + Operation.__init__(self) + #self.dataOut = Voltage() + self.counter = 0 + self.path = None + self.fp = None + return + #self.ext= '.dat' + + def run(self, dataOut, path, format='dat', experiment=None, **kwargs): + print(dataOut.flagNoData) + print(dataOut.datatime.ctime()) + print(dataOut.TimeBlockDate) + input() + #if dataOut.flag_save: + self.experiment=experiment + self.path=path + if self.experiment=='DP': + dataOut.header[1][0]=81864 + elif self.experiment=='HP': + dataOut.header[1][0]=185504#173216 + #dataOut.header[1][0]=bufsize + self.dataOut = dataOut + #print(self.dataOut.nint) + #self.bufsize=bufsize + if format == 'dat': + self.ext = '.dat' + if format == 'out': + self.ext = '.out' + self.putData() + + return + + + + def setFile(self): + ''' + Create new out file object + ''' + + #self.dataOut.TimeBlockSeconds=time.mktime(time.strptime(self.dataOut.TimeBlockDate)) + date = datetime.datetime.fromtimestamp(self.dataOut.TimeBlockSeconds) + + #print("date",date) + + filename = '{}{}{}'.format('jro', + date.strftime('%Y%m%d_%H%M%S'), + self.ext) + #print(filename) + #print(self.path) + + self.fullname = os.path.join(self.path, filename) + + if os.path.isfile(self.fullname) : + log.warning( + 'Destination file {} already exists, previous file deleted.'.format( + self.fullname), + 'DatWriter') + os.remove(self.fullname) + + try: + log.success( + 'Creating file: {}'.format(self.fullname), + 'DatWriter') + if not os.path.exists(self.path): + os.makedirs(self.path) + #self.fp = madrigal.cedar.MadrigalCedarFile(self.fullname, True) + self.fp = open(self.fullname,'wb') + + except ValueError as e: + log.error( + 'Impossible to create *.out file', + 'DatWriter') + return + + return 1 + + def writeBlock(self): + + #self.dataOut.paramInterval=2 + #startTime = datetime.datetime.utcfromtimestamp(self.dataOut.utctime) + #print(startTime) + #endTime = startTime + datetime.timedelta(seconds=self.dataOut.paramInterval) + + 
self.dataOut.header[0].astype('int32').tofile(self.fp) + self.dataOut.header[1].astype('int32').tofile(self.fp) + self.dataOut.header[2].astype('int32').tofile(self.fp) + self.dataOut.header[3].astype('int32').tofile(self.fp) + self.dataOut.header[4].astype('uint64').tofile(self.fp) + self.dataOut.header[5].astype('uint64').tofile(self.fp) + self.dataOut.header[6].astype('int32').tofile(self.fp) + self.dataOut.header[7].astype('int32').tofile(self.fp) + #print(dataOut.header[7]) + self.dataOut.header[8].astype('int8').tofile(self.fp) + self.dataOut.header[9].astype('float32').tofile(self.fp) + self.dataOut.header[10].astype('float32').tofile(self.fp) + self.dataOut.header[11].astype('float32').tofile(self.fp) + self.dataOut.header[12].astype('int32').tofile(self.fp) + self.dataOut.header[13].astype('int32').tofile(self.fp) + self.dataOut.header[14].astype('int32').tofile(self.fp) + self.dataOut.header[15].astype('int32').tofile(self.fp) + self.dataOut.header[16].astype('uint64').tofile(self.fp) + self.dataOut.header[17].astype('int32').tofile(self.fp) + self.dataOut.header[18].astype('int32').tofile(self.fp) + self.dataOut.header[19].astype('int32').tofile(self.fp) + self.dataOut.header[20].astype('float32').tofile(self.fp) + self.dataOut.header[21].astype('uint64').tofile(self.fp) + self.dataOut.header[22].astype('uint64').tofile(self.fp) + self.dataOut.header[23].astype('float32').tofile(self.fp) + self.dataOut.header[24].astype('float32').tofile(self.fp) + self.dataOut.header[25].astype('float32').tofile(self.fp) + self.dataOut.header[26].astype('float32').tofile(self.fp) + self.dataOut.header[27].astype('int32').tofile(self.fp) + self.dataOut.header[28].astype('int32').tofile(self.fp) + self.dataOut.header[29].astype('int32').tofile(self.fp) + self.dataOut.header[30].astype('int32').tofile(self.fp) + self.dataOut.header[31].astype('int32').tofile(self.fp) + #print("tell before 1 ",self.fp.tell()) + #input() + + if self.experiment=="HP": + #print("INSIDE") + #tmp=numpy.zeros(1,dtype='complex64') + #print("tmp ",tmp) + #input() + #print(dataOut.NLAG) + #print(dataOut.NR) + #print(dataOut.NRANGE) + for l in range(self.dataOut.NLAG): #lag + for r in range(self.dataOut.NR): # unflip and flip + for k in range(self.dataOut.NRANGE): #RANGE + self.dataOut.output_LP.real[l,k,r].astype('float32').tofile(self.fp) + self.dataOut.output_LP.imag[l,k,r].astype('float32').tofile(self.fp) + + + #print("tell before 2 ",self.outputfile.tell()) + + + + + + #print(self.dataOut.output_LP[1,1,1]) + + #print(self.dataOut.kax) + final_cross_products=[self.dataOut.kax,self.dataOut.kay,self.dataOut.kbx,self.dataOut.kby, + self.dataOut.kax2,self.dataOut.kay2,self.dataOut.kbx2,self.dataOut.kby2, + self.dataOut.kaxbx,self.dataOut.kaxby,self.dataOut.kaybx,self.dataOut.kayby, + self.dataOut.kaxay,self.dataOut.kbxby] + + #print(self.dataOut.kax) + #print("tell before crossp saving ",self.outputfile.tell()) + for kabxys in final_cross_products: + + for l in range(self.dataOut.DPL): #lag + for fl in range(2): # unflip and flip + for k in range(self.dataOut.NDT): #RANGE + kabxys[k,l,fl].astype('float32').tofile(self.fp) + + + #print("tell before noise saving ",self.outputfile.tell()) + + + for nch in range(self.dataOut.NR): + self.dataOut.noise_final[nch].astype('float32').tofile(self.fp) + + #print("tell before noise saving ",self.fp.tell()) + #input() + + + + + log.log( + 'Writing {} blocks'.format( + self.counter+1), + 'DatWriter') + + + + + + + def putData(self): + #print("flagNoData",self.dataOut.flagNoData) + 
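        # Editor's note (descriptive comment, not part of this patch): putData()
        # drops the block when flagNoData is set, resets the counter on a
        # discontinuous block, opens a fresh output file via setFile() whenever the
        # counter is 0, and otherwise just appends the block and increments the
        # counter.  Unlike MADWriter there is no blocksPerFile limit here, so the
        # file only rotates on a discontinuity or when close() is called.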
#print("flagDiscontinuousBlock",self.dataOut.flagDiscontinuousBlock) + #print(self.dataOut.flagNoData) + + if self.dataOut.flagNoData: + return 0 + + if self.dataOut.flagDiscontinuousBlock: + + self.counter = 0 + + if self.counter == 0: + self.setFile() + #if self.experiment=="HP": + #if self.dataOut.debris_activated==0: + #self.writeBlock() + #self.counter += 1 + #else: + self.writeBlock() + self.counter += 1 + + def close(self): + + if self.counter > 0: + self.fp.close() + log.success('Closing file {}'.format(self.fullname), 'DatWriter') diff --git a/schainpy/model/io/jroIO_madrigal.py b/schainpy/model/io/jroIO_madrigal.py index 485428c..2127a11 100644 --- a/schainpy/model/io/jroIO_madrigal.py +++ b/schainpy/model/io/jroIO_madrigal.py @@ -38,7 +38,7 @@ DEF_CATALOG = { 'sciRemarks': '', 'instRemarks': '' } - + DEF_HEADER = { 'kindatDesc': '', 'analyst': 'Jicamarca User', @@ -75,7 +75,7 @@ def load_json(obj): for k, v in list(iterable.items())} elif isinstance(iterable, (list, tuple)): return [str(v) if isinstance(v, basestring) else v for v in iterable] - + return iterable @@ -85,18 +85,18 @@ class MADReader(Reader, ProcessingUnit): ProcessingUnit.__init__(self) - self.dataOut = Parameters() + self.dataOut = Parameters() self.counter_records = 0 self.nrecords = None self.flagNoMoreFiles = 0 - self.filename = None + self.filename = None self.intervals = set() self.datatime = datetime.datetime(1900,1,1) self.format = None self.filefmt = "***%Y%m%d*******" - + def setup(self, **kwargs): - + self.set_kwargs(**kwargs) self.oneDDict = load_json(self.oneDDict) self.twoDDict = load_json(self.twoDDict) @@ -125,32 +125,32 @@ class MADReader(Reader, ProcessingUnit): for nTries in range(self.nTries): fullpath = self.searchFilesOnLine(self.path, self.startDate, - self.endDate, self.expLabel, self.ext, self.walk, + self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt) try: fullpath = next(fullpath) except: fullpath = None - + if fullpath: break log.warning( 'Waiting {} sec for a valid file in {}: try {} ...'.format( - self.delay, self.path, nTries + 1), + self.delay, self.path, nTries + 1), self.name) time.sleep(self.delay) if not(fullpath): raise schainpy.admin.SchainError( - 'There isn\'t any valid file in {}'.format(self.path)) - + 'There isn\'t any valid file in {}'.format(self.path)) + else: log.log("Searching files in {}".format(self.path), self.name) - self.filenameList = self.searchFilesOffLine(self.path, self.startDate, + self.filenameList = self.searchFilesOffLine(self.path, self.startDate, self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt) - + self.setNextFile() def readFirstHeader(self): @@ -159,8 +159,8 @@ class MADReader(Reader, ProcessingUnit): self.parseHeader() self.parseData() self.blockIndex = 0 - - return + + return def parseHeader(self): ''' @@ -183,7 +183,7 @@ class MADReader(Reader, ProcessingUnit): if s_parameters: log.success('Spatial parameters found: {}'.format(s_parameters), 'MADReader') - + for param in list(self.oneDDict.keys()): if param.lower() not in self.parameters: log.warning( @@ -191,7 +191,7 @@ class MADReader(Reader, ProcessingUnit): param), 'MADReader') self.oneDDict.pop(param, None) - + for param, value in list(self.twoDDict.items()): if param.lower() not in self.parameters: log.warning( @@ -226,10 +226,10 @@ class MADReader(Reader, ProcessingUnit): while True: self.flagDiscontinuousBlock = 0 if self.counter_records == self.nrecords: - self.setNextFile() + self.setNextFile() self.readBlock() - + if 
(self.datatime < datetime.datetime.combine(self.startDate, self.startTime)) or \ (self.datatime > datetime.datetime.combine(self.endDate, self.endTime)): log.warning( @@ -268,7 +268,7 @@ class MADReader(Reader, ProcessingUnit): if self.counter_records == self.nrecords: break continue - self.intervals.add((datatime-self.datatime).seconds) + self.intervals.add((datatime-self.datatime).seconds) break elif self.ext == '.hdf5': datatime = datetime.datetime.utcfromtimestamp( @@ -278,27 +278,27 @@ class MADReader(Reader, ProcessingUnit): if datatime.date()>self.datatime.date(): self.flagDiscontinuousBlock = 1 self.datatime = datatime - self.counter_records += 1 - + self.counter_records += 1 + self.buffer = numpy.array(dum) return def set_output(self): ''' Storing data from buffer to dataOut object - ''' + ''' parameters = [None for __ in self.parameters] - for param, attr in list(self.oneDDict.items()): + for param, attr in list(self.oneDDict.items()): x = self.parameters.index(param.lower()) setattr(self.dataOut, attr, self.buffer[0][x]) for param, value in list(self.twoDDict.items()): - dummy = numpy.zeros(self.ranges.shape) + numpy.nan + dummy = numpy.zeros(self.ranges.shape) + numpy.nan if self.ext == '.txt': x = self.parameters.index(param.lower()) - y = self.parameters.index(self.independentParam.lower()) + y = self.parameters.index(self.independentParam.lower()) ranges = self.buffer[:,y] #if self.ranges.size == ranges.size: # continue @@ -308,23 +308,23 @@ class MADReader(Reader, ProcessingUnit): ranges = self.buffer[self.independentParam.lower()] index = numpy.where(numpy.in1d(self.ranges, ranges))[0] dummy[index] = self.buffer[param.lower()] - + if isinstance(value, str): - if value not in self.independentParam: + if value not in self.independentParam: setattr(self.dataOut, value, dummy.reshape(1,-1)) - elif isinstance(value, list): + elif isinstance(value, list): self.output[value[0]][value[1]] = dummy parameters[value[1]] = param for key, value in list(self.output.items()): setattr(self.dataOut, key, numpy.array(value)) - + self.dataOut.parameters = [s for s in parameters if s] self.dataOut.heightList = self.ranges self.dataOut.utctime = (self.datatime - datetime.datetime(1970, 1, 1)).total_seconds() - self.dataOut.utctimeInit = self.dataOut.utctime + self.dataOut.utctimeInit = self.dataOut.utctime self.dataOut.paramInterval = min(self.intervals) - self.dataOut.useLocalTime = False - self.dataOut.flagNoData = False + self.dataOut.useLocalTime = False + self.dataOut.flagNoData = False self.dataOut.nrecords = self.nrecords self.dataOut.flagDiscontinuousBlock = self.flagDiscontinuousBlock @@ -354,7 +354,7 @@ class MADReader(Reader, ProcessingUnit): @MPDecorator class MADWriter(Operation): '''Writing module for Madrigal files - + type: external Inputs: @@ -384,7 +384,7 @@ Inputs: __attrs__ = ['path', 'oneDDict', 'ind2DList', 'twoDDict','metadata', 'format', 'blocks'] missing = -32767 - + def __init__(self): Operation.__init__(self) @@ -395,27 +395,31 @@ Inputs: def run(self, dataOut, path, oneDDict, ind2DList='[]', twoDDict='{}', metadata='{}', format='cedar', **kwargs): - + + + #if dataOut.AUX==1: #Modified + if not self.isConfig: self.setup(path, oneDDict, ind2DList, twoDDict, metadata, format, **kwargs) self.isConfig = True - - self.dataOut = dataOut - self.putData() + + self.dataOut = dataOut + self.putData() + return 1 - + def setup(self, path, oneDDict, ind2DList, twoDDict, metadata, format, **kwargs): ''' - Configure Operation + Configure Operation ''' - + self.path = path 
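        # Editor's note (illustrative, not part of this patch): oneDDict and twoDDict
        # are JSON mappings from CEDAR/Madrigal parameter mnemonics to dataOut
        # attributes.  A hypothetical shape, with param0/param1/someScalarAttr as
        # placeholders (only 'gdalt' and 'heightList' appear in this module):
        #
        #     oneDDict = '{"param0": "someScalarAttr"}'         # one value per record
        #     twoDDict = '{"gdalt":  "heightList",              # profile attribute
        #                  "param1": ["data_param", 0]}'        # [attribute, row index]
        #
        # String values appear to be read straight from the named dataOut attribute
        # (flattened and truncated to the height grid), while a two-element list
        # selects row x of a 2-D attribute, as handled in writeBlock() below.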
self.blocks = kwargs.get('blocks', None) self.counter = 0 self.oneDDict = load_json(oneDDict) self.twoDDict = load_json(twoDDict) self.ind2DList = load_json(ind2DList) - meta = load_json(metadata) + meta = load_json(metadata) self.kinst = meta.get('kinst') self.kindat = meta.get('kindat') self.catalog = meta.get('catalog', DEF_CATALOG) @@ -426,8 +430,8 @@ Inputs: elif format == 'hdf5': self.ext = '.hdf5' self.extra_args = {'ind2DList': self.ind2DList} - - self.keys = [k.lower() for k in self.twoDDict] + + self.keys = [k.lower() for k in self.twoDDict] if 'range' in self.keys: self.keys.remove('range') if 'gdalt' in self.keys: @@ -440,20 +444,23 @@ Inputs: self.mnemonic = MNEMONICS[self.kinst] #TODO get mnemonic from madrigal date = datetime.datetime.utcfromtimestamp(self.dataOut.utctime) + #if self.dataOut.input_dat_type: + #date=datetime.datetime.fromtimestamp(self.dataOut.TimeBlockSeconds_for_dp_power) + #print("date",date) filename = '{}{}{}'.format(self.mnemonic, date.strftime('%Y%m%d_%H%M%S'), self.ext) - + self.fullname = os.path.join(self.path, filename) - - if os.path.isfile(self.fullname) : + + if os.path.isfile(self.fullname) : log.warning( 'Destination file {} already exists, previous file deleted.'.format( self.fullname), 'MADWriter') os.remove(self.fullname) - + try: log.success( 'Creating file: {}'.format(self.fullname), @@ -461,6 +468,8 @@ Inputs: if not os.path.exists(self.path): os.makedirs(self.path) self.fp = madrigal.cedar.MadrigalCedarFile(self.fullname, True) + + except ValueError as e: log.error( 'Impossible to create a cedar object with "madrigal.cedar.MadrigalCedarFile"', @@ -475,11 +484,26 @@ Inputs: attributes. Allowed parameters in: parcodes.tab ''' - + #self.dataOut.paramInterval=2 startTime = datetime.datetime.utcfromtimestamp(self.dataOut.utctime) + endTime = startTime + datetime.timedelta(seconds=self.dataOut.paramInterval) + + #if self.dataOut.input_dat_type: + #if self.dataOut.experiment=="DP": + #startTime=datetime.datetime.fromtimestamp(self.dataOut.TimeBlockSeconds_for_dp_power) + #endTime = startTime + datetime.timedelta(seconds=self.dataOut.paramInterval) + + + #print("2: ",startTime) + #print(endTime) heights = self.dataOut.heightList + #print(self.blocks) + #print(startTime) + #print(endTime) + #print(heights) + #input() if self.ext == '.dat': for key, value in list(self.twoDDict.items()): if isinstance(value, str): @@ -505,13 +529,21 @@ Inputs: out[key] = tmp.flatten()[:len(heights)] elif isinstance(value, (tuple, list)): attr, x = value - data = getattr(self.dataOut, attr) + data = getattr(self.dataOut, attr) + #print(x) + #print(len(heights)) + #print(data[int(x)][:len(heights)]) + #print(numpy.shape(out)) + #print(numpy.shape(data)) + out[key] = data[int(x)][:len(heights)] - + a = numpy.array([out[k] for k in self.keys]) + #print(a) nrows = numpy.array([numpy.isnan(a[:, x]).all() for x in range(len(heights))]) index = numpy.where(nrows == False)[0] + #print(startTime.minute) rec = madrigal.cedar.MadrigalDataRecord( self.kinst, self.kindat, @@ -534,22 +566,24 @@ Inputs: len(index), **self.extra_args ) - - # Setting 1d values + #print("rec",rec) + # Setting 1d values for key in self.oneDDict: rec.set1D(key, getattr(self.dataOut, self.oneDDict[key])) # Setting 2d values nrec = 0 - for n in index: + for n in index: for key in out: rec.set2D(key, nrec, out[key][n]) - nrec += 1 + nrec += 1 self.fp.append(rec) - if self.ext == '.hdf5' and self.counter % 500 == 0 and self.counter > 0: + if self.ext == '.hdf5' and self.counter %2 == 0 and self.counter > 
0: + #print("here") self.fp.dump() if self.counter % 20 == 0 and self.counter > 0: + #self.fp.write() log.log( 'Writing {} records'.format( self.counter), @@ -558,8 +592,8 @@ Inputs: def setHeader(self): ''' Create an add catalog and header to cedar file - ''' - + ''' + log.success('Closing file {}'.format(self.fullname), 'MADWriter') if self.ext == '.dat': @@ -567,17 +601,17 @@ Inputs: else: self.fp.dump() self.fp.close() - - header = madrigal.cedar.CatalogHeaderCreator(self.fullname) + + header = madrigal.cedar.CatalogHeaderCreator(self.fullname) header.createCatalog(**self.catalog) header.createHeader(**self.header) header.write() - + def putData(self): if self.dataOut.flagNoData: - return 0 - + return 0 + if self.dataOut.flagDiscontinuousBlock or self.counter == self.blocks: if self.counter > 0: self.setHeader() @@ -585,11 +619,11 @@ Inputs: if self.counter == 0: self.setFile() - + self.writeBlock() - self.counter += 1 - + self.counter += 1 + def close(self): - - if self.counter > 0: - self.setHeader() \ No newline at end of file + + if self.counter > 0: + self.setHeader() diff --git a/schainpy/model/io/jroIO_param.py b/schainpy/model/io/jroIO_param.py index 1eaa55b..b087b64 100644 --- a/schainpy/model/io/jroIO_param.py +++ b/schainpy/model/io/jroIO_param.py @@ -17,7 +17,7 @@ class HDFReader(Reader, ProcessingUnit): This unit reads HDF5 files created with `HDFWriter` operation contains by default two groups Data and Metadata all variables would be saved as `dataOut` - attributes. + attributes. It is possible to read any HDF5 file by given the structure in the `description` parameter, also you can add extra values to metadata with the parameter `extras`. @@ -37,10 +37,10 @@ class HDFReader(Reader, ProcessingUnit): Dictionary with the description of the HDF5 file extras : dict, optional Dictionary with extra metadata to be be added to `dataOut` - + Examples -------- - + desc = { 'Data': { 'data_output': ['u', 'v', 'w'], @@ -64,7 +64,7 @@ class HDFReader(Reader, ProcessingUnit): extras = { 'timeZone': 300 } - + reader = project.addReadUnit( name='HDFReader', path='/path/to/files', @@ -98,42 +98,42 @@ class HDFReader(Reader, ProcessingUnit): self.set_kwargs(**kwargs) if not self.ext.startswith('.'): - self.ext = '.{}'.format(self.ext) + self.ext = '.{}'.format(self.ext) if self.online: log.log("Searching files in online mode...", self.name) for nTries in range(self.nTries): fullpath = self.searchFilesOnLine(self.path, self.startDate, - self.endDate, self.expLabel, self.ext, self.walk, + self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt) try: fullpath = next(fullpath) except: fullpath = None - + if fullpath: break log.warning( 'Waiting {} sec for a valid file in {}: try {} ...'.format( - self.delay, self.path, nTries + 1), + self.delay, self.path, nTries + 1), self.name) time.sleep(self.delay) if not(fullpath): raise schainpy.admin.SchainError( - 'There isn\'t any valid file in {}'.format(self.path)) + 'There isn\'t any valid file in {}'.format(self.path)) pathname, filename = os.path.split(fullpath) self.year = int(filename[1:5]) self.doy = int(filename[5:8]) - self.set = int(filename[8:11]) - 1 + self.set = int(filename[8:11]) - 1 else: log.log("Searching files in {}".format(self.path), self.name) - self.filenameList = self.searchFilesOffLine(self.path, self.startDate, + self.filenameList = self.searchFilesOffLine(self.path, self.startDate, self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt) - + self.setNextFile() return @@ -141,18 
+141,18 @@ class HDFReader(Reader, ProcessingUnit): def readFirstHeader(self): '''Read metadata and data''' - self.__readMetadata() + self.__readMetadata() self.__readData() self.__setBlockList() - + if 'type' in self.meta: self.dataOut = eval(self.meta['type'])() - + for attr in self.meta: setattr(self.dataOut, attr, self.meta[attr]) - + self.blockIndex = 0 - + return def __setBlockList(self): @@ -200,7 +200,7 @@ class HDFReader(Reader, ProcessingUnit): else: grp = self.fp['Metadata'] for name in grp: - meta[name] = grp[name].value + meta[name] = grp[name].value if self.extras: for key, value in self.extras.items(): @@ -212,7 +212,7 @@ class HDFReader(Reader, ProcessingUnit): def __readData(self): data = {} - + if self.description: for key, value in self.description['Data'].items(): if isinstance(value, str): @@ -240,7 +240,7 @@ class HDFReader(Reader, ProcessingUnit): array = numpy.array(array) else: log.warning('Unknown type: {}'.format(name)) - + if name in self.description: key = self.description[name] else: @@ -249,7 +249,7 @@ class HDFReader(Reader, ProcessingUnit): self.data = data return - + def getData(self): for attr in self.data: @@ -288,8 +288,8 @@ class HDFWriter(Operation): The HDF5 file contains by default two groups Data and Metadata where you can save any `dataOut` attribute specified by `dataList` and `metadataList` parameters, data attributes are normaly time dependent where the metadata - are not. - It is possible to customize the structure of the HDF5 file with the + are not. + It is possible to customize the structure of the HDF5 file with the optional description parameter see the examples. Parameters: @@ -306,10 +306,10 @@ class HDFWriter(Operation): If True the name of the files corresponds to the timestamp of the data description : dict, optional Dictionary with the desired description of the HDF5 file - + Examples -------- - + desc = { 'data_output': {'winds': ['z', 'w', 'v']}, 'utctime': 'timestamps', @@ -329,7 +329,7 @@ class HDFWriter(Operation): 'heightList': 'heights' } } - + writer = proc_unit.addOperation(name='HDFWriter') writer.addParameter(name='path', value='/path/to/file') writer.addParameter(name='blocksPerFile', value='32') @@ -357,7 +357,7 @@ class HDFWriter(Operation): lastTime = None def __init__(self): - + Operation.__init__(self) return @@ -393,7 +393,7 @@ class HDFWriter(Operation): dsDict['shape'] = dataAux.shape dsDict['dsNumber'] = dataAux.shape[0] dsDict['dtype'] = dataAux.dtype - + dsList.append(dsDict) self.dsList = dsList @@ -408,7 +408,7 @@ class HDFWriter(Operation): self.lastTime = currentTime self.currentDay = dataDay return False - + timeDiff = currentTime - self.lastTime #Si el dia es diferente o si la diferencia entre un dato y otro supera la hora @@ -424,10 +424,11 @@ class HDFWriter(Operation): def run(self, dataOut, path, blocksPerFile=10, metadataList=None, dataList=[], setType=None, description={}): - + print("hdf",dataOut.flagNoData) + print(dataOut.datatime.ctime()) self.dataOut = dataOut if not(self.isConfig): - self.setup(path=path, blocksPerFile=blocksPerFile, + self.setup(path=path, blocksPerFile=blocksPerFile, metadataList=metadataList, dataList=dataList, setType=setType, description=description) @@ -436,9 +437,9 @@ class HDFWriter(Operation): self.putData() return - + def setNextFile(self): - + ext = self.ext path = self.path setFile = self.setFile @@ -523,7 +524,7 @@ class HDFWriter(Operation): return 'pair{:02d}'.format(x) else: return 'channel{:02d}'.format(x) - + def writeMetadata(self, fp): if self.description: 
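For reference, a minimal sketch (illustrative names and sizes, not taken from the patch) of the default on-disk layout that HDFWriter produces block by block and HDFReader later maps onto dataOut attributes: a Data group holding one dataset per time-dependent attribute, with the first axis sized blocksPerFile, and a Metadata group holding the static values.

import h5py
import numpy

with h5py.File('example_sch.hdf5', 'w') as fp:
    data = fp.create_group('Data')
    data.create_dataset('utctime', (32,), chunks=True, dtype=numpy.float64)          # one value per block
    data.create_dataset('data_output', (32, 100), chunks=True, dtype=numpy.float32)  # blocks x heights
    meta = fp.create_group('Metadata')
    meta.create_dataset('heightList', data=numpy.arange(100, dtype=numpy.float32))
    meta.create_dataset('timeZone', data=300)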
@@ -548,7 +549,7 @@ class HDFWriter(Operation): return def writeData(self, fp): - + if self.description: if 'Data' in self.description: grp = fp.create_group('Data') @@ -559,13 +560,13 @@ class HDFWriter(Operation): dtsets = [] data = [] - + for dsInfo in self.dsList: if dsInfo['nDim'] == 0: ds = grp.create_dataset( - self.getLabel(dsInfo['variable']), + self.getLabel(dsInfo['variable']), (self.blocksPerFile, ), - chunks=True, + chunks=True, dtype=numpy.float64) dtsets.append(ds) data.append((dsInfo['variable'], -1)) @@ -577,7 +578,7 @@ class HDFWriter(Operation): sgrp = grp for i in range(dsInfo['dsNumber']): ds = sgrp.create_dataset( - self.getLabel(dsInfo['variable'], i), + self.getLabel(dsInfo['variable'], i), (self.blocksPerFile, ) + dsInfo['shape'][1:], chunks=True, dtype=dsInfo['dtype']) @@ -586,7 +587,7 @@ class HDFWriter(Operation): fp.flush() log.log('Creating file: {}'.format(fp.filename), self.name) - + self.ds = dtsets self.data = data self.firsttime = True diff --git a/schainpy/model/io/jroIO_voltage.py b/schainpy/model/io/jroIO_voltage.py index cb484db..65f4521 100644 --- a/schainpy/model/io/jroIO_voltage.py +++ b/schainpy/model/io/jroIO_voltage.py @@ -73,27 +73,28 @@ class VoltageReader(JRODataReader, ProcessingUnit): """ ProcessingUnit.__init__(self) - + self.ext = ".r" self.optchar = "D" self.basicHeaderObj = BasicHeader(LOCALTIME) self.systemHeaderObj = SystemHeader() self.radarControllerHeaderObj = RadarControllerHeader() + self.processingHeaderObj = ProcessingHeader() self.lastUTTime = 0 - self.profileIndex = 2**32 - 1 + self.profileIndex = 2**32 - 1 self.dataOut = Voltage() self.selBlocksize = None self.selBlocktime = None - + ##print("1--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") def createObjByDefault(self): - + ##print("2--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") dataObj = Voltage() return dataObj def __hasNotDataInBuffer(self): - + ##print("3--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") if self.profileIndex >= self.processingHeaderObj.profilesPerBlock * self.nTxs: return 1 @@ -109,11 +110,13 @@ class VoltageReader(JRODataReader, ProcessingUnit): Return: None """ + ##print("4--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") pts2read = self.processingHeaderObj.profilesPerBlock * \ self.processingHeaderObj.nHeights * self.systemHeaderObj.nChannels self.blocksize = pts2read def readBlock(self): + """ readBlock lee el bloque de datos desde la posicion actual del puntero del archivo (self.fp) y actualiza todos los parametros relacionados al bloque de datos @@ -133,10 +136,10 @@ class VoltageReader(JRODataReader, ProcessingUnit): self.flagIsNewBlock self.nTotalBlocks - Exceptions: + Exceptions: Si un bloque leido no es un bloque valido """ - + ##print("5--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") # if self.server is not None: # self.zBlock = self.receiver.recv() # self.zHeader = self.zBlock[:24] @@ -177,6 +180,7 @@ class VoltageReader(JRODataReader, ProcessingUnit): return 1 def getFirstHeader(self): + ##print("6--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") self.getBasicHeader() @@ -186,8 +190,12 @@ class VoltageReader(JRODataReader, ProcessingUnit): self.dataOut.radarControllerHeaderObj = self.radarControllerHeaderObj.copy() + #self.dataOut.ippSeconds_general=self.radarControllerHeaderObj.ippSeconds + #print(self.nTxs) if self.nTxs > 1: + #print(self.radarControllerHeaderObj.ippSeconds) self.dataOut.radarControllerHeaderObj.ippSeconds = self.radarControllerHeaderObj.ippSeconds / self.nTxs + #print(self.radarControllerHeaderObj.ippSeconds) # Time interval and code are 
propierties of dataOut. Its value depends of radarControllerHeaderObj. # self.dataOut.timeInterval = self.radarControllerHeaderObj.ippSeconds * self.processingHeaderObj.nCohInt @@ -220,7 +228,7 @@ class VoltageReader(JRODataReader, ProcessingUnit): self.dataOut.flagShiftFFT = self.processingHeaderObj.shif_fft def reshapeData(self): - + ##print("7--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") if self.nTxs < 0: return @@ -247,6 +255,7 @@ class VoltageReader(JRODataReader, ProcessingUnit): def readFirstHeaderFromServer(self): + ##print("8--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") self.getFirstHeader() self.firstHeaderSize = self.basicHeaderObj.size @@ -278,6 +287,7 @@ class VoltageReader(JRODataReader, ProcessingUnit): self.getBlockDimension() def getFromServer(self): + ##print("9--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") self.flagDiscontinuousBlock = 0 self.profileIndex = 0 self.flagIsNewBlock = 1 @@ -382,6 +392,8 @@ class VoltageReader(JRODataReader, ProcessingUnit): self.flagDiscontinuousBlock self.flagIsNewBlock """ + + ##print("10--OKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKKK") if self.flagNoMoreFiles: self.dataOut.flagNoData = True return 0 @@ -410,6 +422,7 @@ class VoltageReader(JRODataReader, ProcessingUnit): self.dataOut.data = self.datablock[:, self.profileIndex, :] self.dataOut.profileIndex = self.profileIndex + self.profileIndex += 1 else: @@ -458,9 +471,13 @@ class VoltageReader(JRODataReader, ProcessingUnit): self.dataOut.flagDataAsBlock = True self.dataOut.nProfiles = self.dataOut.data.shape[1] +#######################DP####################### + self.dataOut.CurrentBlock=self.nReadBlocks + self.dataOut.LastBlock=self.processingHeaderObj.dataBlocksPerFile +#######################DP####################### self.dataOut.flagNoData = False - self.getBasicHeader() + #self.getBasicHeader() self.dataOut.realtime = self.online @@ -673,4 +690,3 @@ class VoltageWriter(JRODataWriter, Operation): self.processingHeaderObj.processFlags = self.getProcessFlags() self.setBasicHeader() - \ No newline at end of file diff --git a/schainpy/model/proc/__init__.py b/schainpy/model/proc/__init__.py index 2e2937e..c780bb2 100644 --- a/schainpy/model/proc/__init__.py +++ b/schainpy/model/proc/__init__.py @@ -14,3 +14,9 @@ from .jroproc_spectra_lags import * from .jroproc_spectra_acf import * from .bltrproc_parameters import * from .pxproc_parameters import * + + +###########DP########### +from .jroproc_voltage_lags import * +###########DP########### +from .jroproc_spectra_lags_faraday import * diff --git a/schainpy/model/proc/jroproc_base.py b/schainpy/model/proc/jroproc_base.py index b359d8a..cf5a2ce 100644 --- a/schainpy/model/proc/jroproc_base.py +++ b/schainpy/model/proc/jroproc_base.py @@ -169,7 +169,7 @@ def MPDecorator(BaseClass): self.op_type = 'external' self.name = BaseClass.__name__ self.__doc__ = BaseClass.__doc__ - + if 'plot' in self.name.lower() and not self.name.endswith('_'): self.name = '{}{}'.format(self.CODE.upper(), 'Plot') diff --git a/schainpy/model/proc/jroproc_parameters.py b/schainpy/model/proc/jroproc_parameters.py old mode 100755 new mode 100644 index 9b03052..c5a6274 --- a/schainpy/model/proc/jroproc_parameters.py +++ b/schainpy/model/proc/jroproc_parameters.py @@ -302,6 +302,12 @@ class SpectralFilters(Operation): dataOut.spcparam_range[0]=FrecRange return dataOut + +from scipy.optimize import fmin +import itertools +from scipy.optimize import curve_fit + + class GaussianFit(Operation): ''' @@ -321,135 +327,198 @@ class GaussianFit(Operation): self.i=0 - def run(self, dataOut, 
num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points + # def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points + def run(self, dataOut, SNRdBlimit=-9, method='generalized'): """This routine will find a couple of generalized Gaussians to a power spectrum + methods: generalized, squared input: spc output: - Amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1,noise + noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1 """ - + print ('Entering ',method,' double Gaussian fit') self.spc = dataOut.data_pre[0].copy() self.Num_Hei = self.spc.shape[2] self.Num_Bin = self.spc.shape[1] self.Num_Chn = self.spc.shape[0] - Vrange = dataOut.abscissaList - - GauSPC = numpy.empty([self.Num_Chn,self.Num_Bin,self.Num_Hei]) - SPC_ch1 = numpy.empty([self.Num_Bin,self.Num_Hei]) - SPC_ch2 = numpy.empty([self.Num_Bin,self.Num_Hei]) - SPC_ch1[:] = numpy.NaN - SPC_ch2[:] = numpy.NaN - start_time = time.time() - noise_ = dataOut.spc_noise[0].copy() - - pool = Pool(processes=self.Num_Chn) - args = [(Vrange, Ch, pnoise, noise_, num_intg, SNRlimit) for Ch in range(self.Num_Chn)] + args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)] objs = [self for __ in range(self.Num_Chn)] attrs = list(zip(objs, args)) - gauSPC = pool.map(target, attrs) - dataOut.SPCparam = numpy.asarray(SPCparam) - - ''' Parameters: - 1. Amplitude - 2. Shift - 3. Width - 4. Power - ''' + DGauFitParam = pool.map(target, attrs) + # Parameters: + # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power + dataOut.DGauFitParams = numpy.asarray(DGauFitParam) + + # Double Gaussian Curves + gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei]) + gau0[:] = numpy.NaN + gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei]) + gau1[:] = numpy.NaN + x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1))) + for iCh in range(self.Num_Chn): + N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin)) + N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin)) + A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin)) + A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin)) + v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin)) + v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin)) + s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin)) + s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin)) + if method == 'generalized': + p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin)) + p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin)) + elif method == 'squared': + p0 = 2. + p1 = 2. 
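As a reading aid (illustrative only, not part of the patch): the curves reconstructed here follow the generalized Gaussian model N + A*exp(-0.5*|(v - v0)/s|**p) evaluated on the velocity axis, and with method='squared' the exponent p is pinned to 2, which reduces it to an ordinary Gaussian. A minimal standalone version of that model:

import numpy

def generalized_gaussian(v, noise, amplitude, shift, width, p=2.0):
    # N + A*exp(-0.5*|(v - v0)/s|**p) on the velocity axis v
    return noise + amplitude * numpy.exp(-0.5 * numpy.abs((v - shift) / width) ** p)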
+ gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0 + gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1 + dataOut.GaussFit0 = gau0 + dataOut.GaussFit1 = gau1 + print(numpy.shape(gau0)) + hei = 26 + print(dataOut.heightList[hei]) + #import matplotlib.pyplot as plt + plt.plot(self.spc[0,:,hei]) + plt.plot(dataOut.GaussFit0[0,:,hei]) + plt.plot(dataOut.GaussFit1[0,:,hei]) + plt.plot(dataOut.GaussFit0[0,:,hei]+dataOut.GaussFit1[0,:,hei]) + + plt.show() + time.sleep(60) + #print(gau0) + + print('Leaving ',method ,' double Gaussian fit') + return dataOut def FitGau(self, X): - - Vrange, ch, pnoise, noise_, num_intg, SNRlimit = X - - SPCparam = [] - SPC_ch1 = numpy.empty([self.Num_Bin,self.Num_Hei]) - SPC_ch2 = numpy.empty([self.Num_Bin,self.Num_Hei]) - SPC_ch1[:] = 0#numpy.NaN - SPC_ch2[:] = 0#numpy.NaN - - - + # print('Entering FitGau') + # Assigning the variables + Vrange, ch, wnoise, num_intg, SNRlimit = X + # Noise Limits + noisebl = wnoise * 0.9 + noisebh = wnoise * 1.1 + # Radar Velocity + Va = max(Vrange) + deltav = Vrange[1] - Vrange[0] + x = numpy.arange(self.Num_Bin) + + # print ('stop 0') + + # 5 parameters, 2 Gaussians + DGauFitParam = numpy.zeros([5, self.Num_Hei,2]) + DGauFitParam[:] = numpy.NaN + + # SPCparam = [] + # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei]) + # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei]) + # SPC_ch1[:] = 0 #numpy.NaN + # SPC_ch2[:] = 0 #numpy.NaN + # print ('stop 1') for ht in range(self.Num_Hei): - - + # print (ht) + # print ('stop 2') + # Spectra at each range spc = numpy.asarray(self.spc)[ch,:,ht] + snr = ( spc.mean() - wnoise ) / wnoise + snrdB = 10.*numpy.log10(snr) + #print ('stop 3') + if snrdB < SNRlimit : + # snr = numpy.NaN + # SPC_ch1[:,ht] = 0#numpy.NaN + # SPC_ch1[:,ht] = 0#numpy.NaN + # SPCparam = (SPC_ch1,SPC_ch2) + # print ('SNR less than SNRth') + continue + # wnoise = hildebrand_sekhon(spc,num_intg) + # print ('stop 2.01') ############################################# # normalizing spc and noise # This part differs from gg1 - spc_norm_max = max(spc) + # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021 #spc = spc / spc_norm_max - pnoise = pnoise #/ spc_norm_max + # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021 ############################################# + # print ('stop 2.1') fatspectra=1.0 + # noise per channel.... we might want to use the noise at each range - wnoise = noise_ #/ spc_norm_max + # wnoise = noise_ #/ spc_norm_max #commented by D. 
Scipión 19.03.2021 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used #if wnoise>1.1*pnoise: # to be tested later # wnoise=pnoise - noisebl=wnoise*0.9; - noisebh=wnoise*1.1 - spc=spc-wnoise + # noisebl = wnoise*0.9 + # noisebh = wnoise*1.1 + spc = spc - wnoise # signal - minx=numpy.argmin(spc) + # print ('stop 2.2') + minx = numpy.argmin(spc) #spcs=spc.copy() - spcs=numpy.roll(spc,-minx) - cum=numpy.cumsum(spcs) - tot_noise=wnoise * self.Num_Bin #64; - - snr = sum(spcs)/tot_noise - snrdB=10.*numpy.log10(snr) - - if snrdB < SNRlimit : - snr = numpy.NaN - SPC_ch1[:,ht] = 0#numpy.NaN - SPC_ch1[:,ht] = 0#numpy.NaN - SPCparam = (SPC_ch1,SPC_ch2) - continue + spcs = numpy.roll(spc,-minx) + cum = numpy.cumsum(spcs) + # tot_noise = wnoise * self.Num_Bin #64; + + # print ('stop 2.3') + # snr = sum(spcs) / tot_noise + # snrdB = 10.*numpy.log10(snr) + #print ('stop 3') + # if snrdB < SNRlimit : + # snr = numpy.NaN + # SPC_ch1[:,ht] = 0#numpy.NaN + # SPC_ch1[:,ht] = 0#numpy.NaN + # SPCparam = (SPC_ch1,SPC_ch2) + # print ('SNR less than SNRth') + # continue #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4: # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None - - cummax=max(cum); - epsi=0.08*fatspectra # cumsum to narrow down the energy region - cumlo=cummax*epsi; - cumhi=cummax*(1-epsi) - powerindex=numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cumcumlo, cum-12: # when SNR is strong pick the peak with least shift (LOS velocity) error if oneG: - choice=0 + choice = 0 else: - w1=lsq2[0][1]; w2=lsq2[0][5] - a1=lsq2[0][2]; a2=lsq2[0][6] - p1=lsq2[0][3]; p2=lsq2[0][7] - s1=(2**(1+1./p1))*scipy.special.gamma(1./p1)/p1; - s2=(2**(1+1./p2))*scipy.special.gamma(1./p2)/p2; - gp1=a1*w1*s1; gp2=a2*w2*s2 # power content of each ggaussian with proper p scaling + w1 = lsq2[0][1]; w2 = lsq2[0][5] + a1 = lsq2[0][2]; a2 = lsq2[0][6] + p1 = lsq2[0][3]; p2 = lsq2[0][7] + s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1 + s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2 + gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each ggaussian with proper p scaling if gp1>gp2: if a1>0.7*a2: - choice=1 + choice = 1 else: - choice=2 + choice = 2 elif gp2>gp1: if a2>0.7*a1: - choice=2 + choice = 2 else: - choice=1 + choice = 1 else: - choice=numpy.argmax([a1,a2])+1 + choice = numpy.argmax([a1,a2])+1 #else: #choice=argmin([std2a,std2b])+1 else: # with low SNR go to the most energetic peak - choice=numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]]) - - - shift0=lsq2[0][0]; - vel0=Vrange[0] + shift0*(Vrange[1]-Vrange[0]) - shift1=lsq2[0][4]; - vel1=Vrange[0] + shift1*(Vrange[1]-Vrange[0]) - - max_vel = 1.0 - + choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]]) + + # print ('stop 14') + shift0 = lsq2[0][0] + vel0 = Vrange[0] + shift0 * deltav + shift1 = lsq2[0][4] + # vel1=Vrange[0] + shift1 * deltav + + # max_vel = 1.0 + # Va = max(Vrange) + # deltav = Vrange[1]-Vrange[0] + # print ('stop 15') #first peak will be 0, second peak will be 1 - if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range - shift0=lsq2[0][0] - width0=lsq2[0][1] - Amplitude0=lsq2[0][2] - p0=lsq2[0][3] - - shift1=lsq2[0][4] - width1=lsq2[0][5] - Amplitude1=lsq2[0][6] - p1=lsq2[0][7] - noise=lsq2[0][8] + # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D.Scipión 19.03.2021 + if vel0 > -Va and vel0 < Va : #first peak is in the correct range + shift0 = 
lsq2[0][0] + width0 = lsq2[0][1] + Amplitude0 = lsq2[0][2] + p0 = lsq2[0][3] + + shift1 = lsq2[0][4] + width1 = lsq2[0][5] + Amplitude1 = lsq2[0][6] + p1 = lsq2[0][7] + noise = lsq2[0][8] else: - shift1=lsq2[0][0] - width1=lsq2[0][1] - Amplitude1=lsq2[0][2] - p1=lsq2[0][3] + shift1 = lsq2[0][0] + width1 = lsq2[0][1] + Amplitude1 = lsq2[0][2] + p1 = lsq2[0][3] - shift0=lsq2[0][4] - width0=lsq2[0][5] - Amplitude0=lsq2[0][6] - p0=lsq2[0][7] - noise=lsq2[0][8] + shift0 = lsq2[0][4] + width0 = lsq2[0][5] + Amplitude0 = lsq2[0][6] + p0 = lsq2[0][7] + noise = lsq2[0][8] if Amplitude0<0.05: # in case the peak is noise - shift0,width0,Amplitude0,p0 = [0,0,0,0]#4*[numpy.NaN] + shift0,width0,Amplitude0,p0 = 4*[numpy.NaN] if Amplitude1<0.05: - shift1,width1,Amplitude1,p1 = [0,0,0,0]#4*[numpy.NaN] + shift1,width1,Amplitude1,p1 = 4*[numpy.NaN] + + # print ('stop 16 ') + # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0) + # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1) + # SPCparam = (SPC_ch1,SPC_ch2) + + DGauFitParam[0,ht,0] = noise + DGauFitParam[0,ht,1] = noise + DGauFitParam[1,ht,0] = Amplitude0 + DGauFitParam[1,ht,1] = Amplitude1 + DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav + DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav + DGauFitParam[3,ht,0] = width0 * deltav + DGauFitParam[3,ht,1] = width1 * deltav + DGauFitParam[4,ht,0] = p0 + DGauFitParam[4,ht,1] = p1 + + # print (DGauFitParam.shape) + # print ('Leaving FitGau') + return DGauFitParam + # return SPCparam + # return GauSPC + def y_model1(self,x,state): + shift0, width0, amplitude0, power0, noise = state + model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0) + model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0) + model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0) + return model0 + model0u + model0d + noise - SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0))/width0)**p0 - SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1))/width1)**p1 - SPCparam = (SPC_ch1,SPC_ch2) + def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist + shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state + model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0) + model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0) + model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0) + model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1) + model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1) + model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1) + return model0 + model0u + model0d + model1 + model1u + model1d + noise - return GauSPC + def misfit1(self,state,y_data,x,num_intg): # This function compares how close real data is with the model data, the close it is, the better it is. - def y_model1(self,x,state): - shift0,width0,amplitude0,power0,noise=state - model0=amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0) + return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented - model0u=amplitude0*numpy.exp(-0.5*abs((x-shift0- self.Num_Bin )/width0)**power0) + def misfit2(self,state,y_data,x,num_intg): + return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.) 
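A short aside (not patch content): misfit1 and misfit2 are log-domain least-squares costs, scaled by the number of incoherent integrations, that measure how far the measured spectrum is from the one- and two-Gaussian models. A cost of this shape can be minimized with scipy.optimize.fmin, which this module already imports; the state vector and model callable below are placeholders for illustration only.

import numpy
from scipy.optimize import fmin

def log_misfit(state, y_data, x, num_intg, model):
    # num_intg * sum( (ln(data) - ln(model))**2 ), as in misfit1/misfit2
    return num_intg * numpy.sum((numpy.log(y_data) - numpy.log(model(x, state))) ** 2)

# Hypothetical usage, with state0 = [shift, width, amplitude, power, noise]:
# best_state = fmin(log_misfit, state0, args=(spc, x, num_intg, some_y_model), disp=False)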
- model0d=amplitude0*numpy.exp(-0.5*abs((x-shift0+ self.Num_Bin )/width0)**power0) - return model0+model0u+model0d+noise - def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist - shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,noise=state - model0=amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0) +class Oblique_Gauss_Fit(Operation): + + def __init__(self): + Operation.__init__(self) - model0u=amplitude0*numpy.exp(-0.5*abs((x-shift0- self.Num_Bin )/width0)**power0) - model0d=amplitude0*numpy.exp(-0.5*abs((x-shift0+ self.Num_Bin )/width0)**power0) - model1=amplitude1*numpy.exp(-0.5*abs((x-shift1)/width1)**power1) - model1u=amplitude1*numpy.exp(-0.5*abs((x-shift1- self.Num_Bin )/width1)**power1) + def Gauss_fit(self,spc,x,nGauss): - model1d=amplitude1*numpy.exp(-0.5*abs((x-shift1+ self.Num_Bin )/width1)**power1) - return model0+model0u+model0d+model1+model1u+model1d+noise - def misfit1(self,state,y_data,x,num_intg): # This function compares how close real data is with the model data, the close it is, the better it is. + def gaussian(x, a, b, c, d): + val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d + return val - return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented + if nGauss == 'first': + spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1]) + spc_2_aux = numpy.flip(spc_1_aux) + spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:])) + + len_dif = len(x)-len(spc_3_aux) + + spc_zeros = numpy.ones(len_dif)*spc_1_aux[0] + + spc_new = numpy.concatenate((spc_3_aux,spc_zeros)) + + y = spc_new + + elif nGauss == 'second': + y = spc + + + # estimate starting values from the data + a = y.max() + b = x[numpy.argmax(y)] + if nGauss == 'first': + c = 1.#b#b#numpy.std(spc) + elif nGauss == 'second': + c = b + else: + print("ERROR") + + d = numpy.mean(y[-100:]) + + # define a least squares function to optimize + def minfunc(params): + return sum((y-gaussian(x,params[0],params[1],params[2],params[3]))**2) + + # fit + popt = fmin(minfunc,[a,b,c,d],disp=False) + #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d]) + + + return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3] + + + def Gauss_fit_2(self,spc,x,nGauss): + + + def gaussian(x, a, b, c, d): + val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d + return val + + if nGauss == 'first': + spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1]) + spc_2_aux = numpy.flip(spc_1_aux) + spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:])) + + len_dif = len(x)-len(spc_3_aux) + + spc_zeros = numpy.ones(len_dif)*spc_1_aux[0] + + spc_new = numpy.concatenate((spc_3_aux,spc_zeros)) + + y = spc_new + + elif nGauss == 'second': + y = spc + + + # estimate starting values from the data + a = y.max() + b = x[numpy.argmax(y)] + if nGauss == 'first': + c = 1.#b#b#numpy.std(spc) + elif nGauss == 'second': + c = b + else: + print("ERROR") + + d = numpy.mean(y[-100:]) + + # define a least squares function to optimize + popt,pcov = curve_fit(gaussian,x,y,p0=[a,b,c,d]) + #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d]) + + + #return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3] + return gaussian(x, popt[0], popt[1], popt[2], popt[3]),popt[0], popt[1], popt[2], popt[3] + + def Double_Gauss_fit(self,spc,x,A1,B1,C1,A2,B2,C2,D): + + def double_gaussian(x, a1, b1, c1, a2, b2, c2, d): + val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d + return val + + + y = spc + 
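A brief illustration (not part of the patch) of the fitting pattern used by the *_fit_2 variants: scipy.optimize.curve_fit returns both the optimal parameters and their covariance matrix, and the one-sigma parameter errors are the square roots of its diagonal, which is how Double_Gauss_fit_2 below fills Oblique_param_errors. Synthetic data, for demonstration only:

import numpy
from scipy.optimize import curve_fit

def gaussian(x, a, b, c, d):
    return a * numpy.exp(-(x - b) ** 2 / (2 * c ** 2)) + d

x = numpy.linspace(-10.0, 10.0, 200)
y = gaussian(x, 2.0, 1.5, 1.0, 0.3) + 0.05 * numpy.random.randn(x.size)
popt, pcov = curve_fit(gaussian, x, y, p0=[1.0, 0.0, 2.0, 0.0])
perr = numpy.sqrt(numpy.diag(pcov))   # one-sigma errors, analogous to Oblique_param_errors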
+ # estimate starting values from the data + a1 = A1 + b1 = B1 + c1 = C1#numpy.std(spc) + + a2 = A2#y.max() + b2 = B2#x[numpy.argmax(y)] + c2 = C2#numpy.std(spc) + d = D + + # define a least squares function to optimize + def minfunc(params): + return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2) + + # fit + popt = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],disp=False) + + return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6] + + def Double_Gauss_fit_2(self,spc,x,A1,B1,C1,A2,B2,C2,D): + + def double_gaussian(x, a1, b1, c1, a2, b2, c2, d): + val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d + return val + + + y = spc + + # estimate starting values from the data + a1 = A1 + b1 = B1 + c1 = C1#numpy.std(spc) + + a2 = A2#y.max() + b2 = B2#x[numpy.argmax(y)] + c2 = C2#numpy.std(spc) + d = D + + # fit + + popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d]) + + error = numpy.sqrt(numpy.diag(pcov)) + + return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6] + + + + + def run(self, dataOut): + + pwcode = 1 + + if dataOut.flagDecodeData: + pwcode = numpy.sum(dataOut.code[0]**2) + #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter + normFactor = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * pwcode * dataOut.windowOfFilter + factor = normFactor + z = dataOut.data_spc / factor + z = numpy.where(numpy.isfinite(z), z, numpy.NAN) + dataOut.power = numpy.average(z, axis=1) + dataOut.powerdB = 10 * numpy.log10(dataOut.power) + + + x = dataOut.getVelRange(0) + #print(aux) + #print(numpy.shape(aux)) + #exit(1) + + #print(numpy.shape(dataOut.data_spc)) + + dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN + dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN + + dataOut.VelRange = x + + + l1=range(22,36) + l2=range(58,99) + + for hei in itertools.chain(l1, l2): + #print("INIT") + #print(hei) + + try: + spc = dataOut.data_spc[0,:,hei] + + spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first') + + spc_diff = spc - spc_fit + spc_diff[spc_diff < 0] = 0 + + spc_fit_diff, A2, B2, C2, D2 = self.Gauss_fit_2(spc_diff,x,'second') + + D = (D1+D2) + + dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,0,hei],dataOut.Oblique_param_errors[0,1,hei],dataOut.Oblique_param_errors[0,2,hei],dataOut.Oblique_param_errors[0,3,hei],dataOut.Oblique_param_errors[0,4,hei],dataOut.Oblique_param_errors[0,5,hei],dataOut.Oblique_param_errors[0,6,hei] = self.Double_Gauss_fit_2(spc,x,A1,B1,C1,A2,B2,C2,D) + #spc_double_fit,dataOut.Oblique_params = self.Double_Gauss_fit(spc,x,A1,B1,C1,A2,B2,C2,D) + #print(dataOut.Oblique_params) + except: + ###dataOut.Oblique_params[0,:,hei] = dataOut.Oblique_params[0,:,hei]*numpy.NAN + pass + #print("DONE") + ''' + print(dataOut.Oblique_params[1]) + print(dataOut.Oblique_params[4]) + import matplotlib.pyplot as plt + plt.plot(x,spc_double_fit) + plt.show() + import time + time.sleep(5) + plt.close() + ''' + + + + + + return dataOut - def misfit2(self,state,y_data,x,num_intg): - return 
num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.) @@ -3998,3 +4312,55 @@ class SMOperations(): # error[indInvalid1] = 13 # # return heights, error + + + +class IGRFModel(Operation): + """Operation to calculate Geomagnetic parameters. + + Parameters: + ----------- + None + + Example + -------- + + op = proc_unit.addOperation(name='IGRFModel', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.aux=1 + + def run(self,dataOut): + + try: + from schainpy.model.proc import mkfact_short_2020 + except: + log.warning('You should install "mkfact_short_2020" module to process IGRF Model') + + if self.aux==1: + + #dataOut.TimeBlockSeconds_First_Time=time.mktime(time.strptime(dataOut.TimeBlockDate)) + #### we do not use dataOut.datatime.ctime() because it's the time of the second (next) block + dataOut.TimeBlockSeconds_First_Time=dataOut.TimeBlockSeconds + dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_First_Time) + dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0 + dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0 + + self.aux=0 + + dataOut.h=numpy.arange(0.0,15.0*dataOut.MAXNRANGENDT,15.0,dtype='float32') + dataOut.bfm=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32') + dataOut.bfm=numpy.array(dataOut.bfm,order='F') + dataOut.thb=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32') + dataOut.thb=numpy.array(dataOut.thb,order='F') + dataOut.bki=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32') + dataOut.bki=numpy.array(dataOut.bki,order='F') + + mkfact_short_2020.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT) + + return dataOut diff --git a/schainpy/model/proc/jroproc_spectra.py b/schainpy/model/proc/jroproc_spectra.py index db44b3d..f2fa663 100644 --- a/schainpy/model/proc/jroproc_spectra.py +++ b/schainpy/model/proc/jroproc_spectra.py @@ -873,4 +873,4 @@ class IncohInt(Operation): dataOut.utctime = avgdatatime dataOut.flagNoData = False - return dataOut \ No newline at end of file + return dataOut diff --git a/schainpy/model/proc/jroproc_spectra_lags.py b/schainpy/model/proc/jroproc_spectra_lags.py index 90b88e5..d7419d3 100644 --- a/schainpy/model/proc/jroproc_spectra_lags.py +++ b/schainpy/model/proc/jroproc_spectra_lags.py @@ -736,4 +736,4 @@ class SpectraLagsProc(ProcessingUnit): self.dataOut.noise_estimation = noise.copy() - return 1 \ No newline at end of file + return 1 diff --git a/schainpy/model/proc/jroproc_voltage.py b/schainpy/model/proc/jroproc_voltage.py index 7a62196..9539b4e 100644 --- a/schainpy/model/proc/jroproc_voltage.py +++ b/schainpy/model/proc/jroproc_voltage.py @@ -4,8 +4,8 @@ from scipy import interpolate from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator from schainpy.model.data.jrodata import Voltage,hildebrand_sekhon from schainpy.utils import log -from time import time - +from time import time, mktime, strptime, gmtime, ctime +import os class VoltageProc(ProcessingUnit): @@ -17,8 +17,12 @@ class VoltageProc(ProcessingUnit): self.dataOut = Voltage() self.flip = 1 self.setupReq = False + #self.dataOut.test=1 + def run(self): + #import time + #time.sleep(3) if self.dataIn.type == 'AMISR': self.__updateObjFromAmisrInput() @@ -26,6 +30,31 @@ class VoltageProc(ProcessingUnit): if self.dataIn.type == 'Voltage': self.dataOut.copy(self.dataIn) + + #self.dataOut.flagNoData=True + #print(self.dataOut.data[-1,:]) + #print(ctime(self.dataOut.utctime)) + 
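Relating to the IGRFModel operation added earlier in this patch (illustrative only): the decimal year and UT hour handed to mkfact_short_2020.mkfact are derived from the block's epoch time through time.gmtime. The same conversion as a standalone helper:

import time

def epoch_to_igrf_inputs(epoch_seconds):
    bd = time.gmtime(epoch_seconds)
    year = bd.tm_year + (bd.tm_yday - 1) / 364.0               # decimal year, as in IGRFModel
    ut = bd.tm_hour + bd.tm_min / 60.0 + bd.tm_sec / 3600.0    # UT in hours
    return year, ut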
#print(self.dataOut.heightList) + #print(self.dataOut.nHeights) + #exit(1) + #print(self.dataOut.data[6,:32]) + #print(self.dataOut.data[0,320-5:320+5-5]) + ##print(self.dataOut.heightList[-20:]) + #print(numpy.shape(self.dataOut.data)) + #print(self.dataOut.code) + #print(numpy.shape(self.dataOut.code)) + #exit(1) + #print(self.dataOut.CurrentBlock) + #print(self.dataOut.data[0,:,0]) + + #print(numpy.shape(self.dataOut.data)) + #print(self.dataOut.data[0,:,1666:1666+320]) + #exit(1) + + #print(self.dataOut.utctime) + #self.dataOut.test+=1 + + def __updateObjFromAmisrInput(self): self.dataOut.timeZone = self.dataIn.timeZone @@ -52,11 +81,13 @@ class VoltageProc(ProcessingUnit): self.dataOut.beam.azimuthList = self.dataIn.beam.azimuthList self.dataOut.beam.zenithList = self.dataIn.beam.zenithList - class selectChannels(Operation): def run(self, dataOut, channelList): + + + channelIndexList = [] self.dataOut = dataOut for channel in channelList: @@ -66,8 +97,10 @@ class selectChannels(Operation): index = self.dataOut.channelList.index(channel) channelIndexList.append(index) self.selectChannelsByIndex(channelIndexList) + return self.dataOut + def selectChannelsByIndex(self, channelIndexList): """ Selecciona un bloque de datos en base a canales segun el channelIndexList @@ -194,6 +227,8 @@ class selectHeights(Operation): maxIndex = len(heights) self.selectHeightsByIndex(minIndex, maxIndex) + #print(self.dataOut.nHeights) + return self.dataOut @@ -318,11 +353,21 @@ class setH0(Operation): class deFlip(Operation): + def __init__(self): + + self.flip = 1 def run(self, dataOut, channelList = []): data = dataOut.data.copy() + #print(dataOut.channelList) + #exit() + + if channelList==1: #PARCHE + channelList=[1] + + dataOut.FlipChannels=channelList if dataOut.flagDataAsBlock: flip = self.flip profileList = list(range(dataOut.nProfiles)) @@ -342,10 +387,15 @@ class deFlip(Operation): self.flip = flip + + + else: if not channelList: data[:,:] = data[:,:]*self.flip else: + channelList=[1] + #print(self.flip) for thisChannel in channelList: if thisChannel not in dataOut.channelList: continue @@ -354,6 +404,7 @@ class deFlip(Operation): self.flip *= -1. + dataOut.data = data return dataOut @@ -418,879 +469,5459 @@ class interpolateHeights(Operation): return dataOut -class CohInt(Operation): +class LagsReshape(Operation): + """Operation to reshape input data into (Channels,Profiles(with same lag),Heights,Lags) and heights reconstruction. + + Parameters: + ----------- + + + Example + -------- + + op = proc_unit.addOperation(name='LagsReshape') - isConfig = False - __profIndex = 0 - __byTime = False - __initime = None - __lastdatatime = None - __integrationtime = None - __buffer = None - __bufferStride = [] - __dataReady = False - __profIndexStride = 0 - __dataToPutStride = False - n = None + + """ def __init__(self, **kwargs): Operation.__init__(self, **kwargs) - def setup(self, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False): - """ - Set the parameters of the integration class. + self.buffer=None + self.buffer_HR=None + self.buffer_HRonelag=None - Inputs: + def LagDistribution(self,dataOut): - n : Number of coherent integrations - timeInterval : Time of integration. 
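A side note on the LagsReshape operation above (not patch content): LagDistribution regroups the NSCAN profiles of a block into (channels, profiles per lag, heights, DPL), where the profiles arrive in groups of 2*DPL and the i-th pair of each group belongs to lag i. An equivalent vectorized restatement, assuming NSCAN is an exact multiple of 2*DPL (the original loop also absorbs any trailing remainder into the last lag):

import numpy

def group_profiles_by_lag(datapure, dpl):
    # datapure: (channels, nscan, heights); returns (channels, nscan // dpl, heights, dpl)
    nch, nscan, nhei = datapure.shape
    ngroups = nscan // (2 * dpl)
    tmp = datapure.reshape(nch, ngroups, dpl, 2, nhei)   # (ch, group, lag, pair, hei)
    tmp = tmp.transpose(0, 1, 3, 4, 2)                   # (ch, group, pair, hei, lag)
    return tmp.reshape(nch, 2 * ngroups, nhei, dpl)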
If the parameter "n" is selected this one does not work - overlapping : - """ + dataOut.datapure=numpy.copy(dataOut.data[:,0:dataOut.NSCAN,:]) + self.buffer = numpy.zeros((dataOut.nChannels, + int(dataOut.NSCAN/dataOut.DPL), + dataOut.nHeights,dataOut.DPL), + dtype='complex') - self.__initime = None - self.__lastdatatime = 0 - self.__buffer = None - self.__dataReady = False - self.byblock = byblock - self.stride = stride + for j in range(int(self.buffer.shape[1]/2)): + for i in range(dataOut.DPL): + if j+1==int(self.buffer.shape[1]/2) and i+1==dataOut.DPL: + self.buffer[:,2*j:,:,i]=dataOut.datapure[:,2*i+int(2*j*dataOut.DPL):,:] + else: + self.buffer[:,2*j:2*(j+1),:,i]=dataOut.datapure[:,2*i+int(2*j*dataOut.DPL):2*(i+1)+int(2*j*dataOut.DPL),:] - if n == None and timeInterval == None: - raise ValueError("n or timeInterval should be specified ...") + return self.buffer - if n != None: - self.n = n - self.__byTime = False - else: - self.__integrationtime = timeInterval #* 60. #if (type(timeInterval)!=integer) -> change this line - self.n = 9999 - self.__byTime = True + def HeightReconstruction(self,dataOut): - if overlapping: - self.__withOverlapping = True - self.__buffer = None - else: - self.__withOverlapping = False - self.__buffer = 0 + self.buffer_HR = numpy.zeros((int(dataOut.NSCAN/dataOut.DPL), + dataOut.nHeights,dataOut.DPL), + dtype='complex') - self.__profIndex = 0 + #self.buffer_HR[0,:,:,:]=dataOut.datalags[0,:,:,:] #No Lags - def putData(self, data): + for i in range(int(dataOut.DPL)): #Only channel B + if i==0: + self.buffer_HR[:,:,i]=dataOut.datalags[1,:,:,i] + else: + self.buffer_HR[:,:,i]=self.HRonelag(dataOut,i) - """ - Add a profile to the __buffer and increase in one the __profileIndex + return self.buffer_HR - """ - if not self.__withOverlapping: - self.__buffer += data.copy() - self.__profIndex += 1 - return + def HRonelag(self,dataOut,whichlag): + self.buffer_HRonelag = numpy.zeros((int(dataOut.NSCAN/dataOut.DPL), + dataOut.nHeights), + dtype='complex') - #Overlapping data - nChannels, nHeis = data.shape - data = numpy.reshape(data, (1, nChannels, nHeis)) + for i in range(self.buffer_HRonelag.shape[0]): + for j in range(dataOut.nHeights): + if j+int(2*whichlag)=nkill/2 and k 2): + nums_min= int(ndata/divider) + else: + nums_min=2 + sump=0.0 + sumq=0.0 + j=0 + cont=1 + while ( (cont==1) and (j nums_min): + rtest= float(j/(j-1)) +1.0/ndata + if( (sumq*j) > (rtest*sump*sump ) ): + j=j-1 + sump-= data[j] + sumq-=data[j]*data[j] + cont= 0 + noise= (sump/j) + + return noise + + + + def run(self, dataOut, NLAG=16, NRANGE=0, NCAL=0, DPL=11, + NDN=0, NDT=66, NDP=66, NSCAN=132, + flags_array=(0, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300), NAVG=16, nkill=6, **kwargs): + + dataOut.NLAG=NLAG + dataOut.NR=len(dataOut.channelList) + dataOut.NRANGE=NRANGE + dataOut.NCAL=NCAL + dataOut.DPL=DPL + dataOut.NDN=NDN + dataOut.NDT=NDT + dataOut.NDP=NDP + dataOut.NSCAN=NSCAN + dataOut.DH=dataOut.heightList[1]-dataOut.heightList[0] + dataOut.H0=int(dataOut.heightList[0]) + dataOut.flags_array=flags_array + dataOut.NAVG=NAVG + dataOut.nkill=nkill + dataOut.flagNoData = True + + self.get_dc(dataOut) + self.get_products_cabxys(dataOut) + self.cabxys_navg(dataOut) + self.noise_estimation4x_DP(dataOut) + self.kabxys(dataOut) - return avgdata + return dataOut - def byTime(self, data, datatime): - self.__dataReady = False - avgdata = None - n = None - self.putData(data) +class IntegrationDP(Operation): + """Operation to integrate the Double Pulse data. 
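One note on the noise routine used by the cross-products operation above, and again by NoisePower further below (illustrative restatement, not patch content): it is the Hildebrand and Sekhon criterion, which sorts the power samples, grows the candidate noise set while the ratio of second to first moment stays consistent with averaged noise, and returns the mean of the accepted samples. The two copies in the patch differ in small details (minimum sample count, acceptance threshold); a compact version of the core test:

import numpy

def hildebrand_sekhon_floor(power, navg):
    sorted_power = numpy.sort(power, axis=None)
    sump = 0.0
    sumq = 0.0
    j = 0
    for value in sorted_power:
        sump += value
        sumq += value * value
        j += 1
        if j > 1:
            rtest = float(j) / (j - 1) + 1.0 / navg
            if sumq * j > rtest * sump * sump:   # statistics no longer noise-like
                sump -= value
                sumq -= value * value
                j -= 1
                break
    return sump / j   # estimated noise level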
- if (datatime - self.__initime) >= self.__integrationtime: - avgdata, n = self.pushData() - self.n = n - self.__dataReady = True + Parameters: + ----------- + nint : int + Number of integrations. - return avgdata + Example + -------- - def integrateByStride(self, data, datatime): - # print data - if self.__profIndex == 0: - self.__buffer = [[data.copy(), datatime]] - else: - self.__buffer.append([data.copy(),datatime]) - self.__profIndex += 1 - self.__dataReady = False + op = proc_unit.addOperation(name='IntegrationDP', optype='other') + op.addParameter(name='nint', value='30', format='int') - if self.__profIndex == self.n * self.stride : - self.__dataToPutStride = True - self.__profIndexStride = 0 - self.__profIndex = 0 - self.__bufferStride = [] - for i in range(self.stride): - current = self.__buffer[i::self.stride] - data = numpy.sum([t[0] for t in current], axis=0) - avgdatatime = numpy.average([t[1] for t in current]) - # print data - self.__bufferStride.append((data, avgdatatime)) + """ - if self.__dataToPutStride: - self.__dataReady = True - self.__profIndexStride += 1 - if self.__profIndexStride == self.stride: - self.__dataToPutStride = False - # print self.__bufferStride[self.__profIndexStride - 1] - # raise - return self.__bufferStride[self.__profIndexStride - 1] + def __init__(self, **kwargs): + Operation.__init__(self, **kwargs) - return None, None + self.counter=0 + self.aux=0 + self.init_time=None - def integrate(self, data, datatime=None): + def integration_for_double_pulse(self,dataOut): + #print("inside") + #print(self.aux) + if self.aux==1: + #print("CurrentBlockBBBBB: ",dataOut.CurrentBlock) + #print(dataOut.datatime) - if self.__initime == None: - self.__initime = datatime + #dataOut.TimeBlockDate_for_dp_power=dataOut.TimeBlockDate + ########dataOut.TimeBlockSeconds_for_dp_power=dataOut.LastAVGDate + #print("Date: ",dataOut.TimeBlockDate_for_dp_power) - if self.__byTime: - avgdata = self.byTime(data, datatime) - else: - avgdata = self.byProfiles(data) + #dataOut.TimeBlockSeconds_for_dp_power=mktime(strptime(dataOut.TimeBlockDate_for_dp_power)) + dataOut.TimeBlockSeconds_for_dp_power=dataOut.utctime#dataOut.TimeBlockSeconds-18000 + #dataOut.TimeBlockSeconds_for_dp_power=dataOut.LastAVGDate + #print("Seconds: ",dataOut.TimeBlockSeconds_for_dp_power) + dataOut.bd_time=gmtime(dataOut.TimeBlockSeconds_for_dp_power) + #print(dataOut.bd_time) + #exit() + dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0 + dataOut.ut_Faraday=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0 + #print("date: ", dataOut.TimeBlockDate) - self.__lastdatatime = datatime + self.aux=0 - if avgdata is None: - return None, None + #print("after") - avgdatatime = self.__initime + if self.counter==0: - deltatime = datatime - self.__lastdatatime + tmpx=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kabxys_integrated=[tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx] + self.init_time=dataOut.utctime - if not self.__withOverlapping: - self.__initime = datatime - else: - self.__initime += deltatime + if self.counter < dataOut.nint: + #print("HERE") - return avgdata, avgdatatime + dataOut.final_cross_products=[dataOut.kax,dataOut.kay,dataOut.kbx,dataOut.kby,dataOut.kax2,dataOut.kay2,dataOut.kbx2,dataOut.kby2,dataOut.kaxbx,dataOut.kaxby,dataOut.kaybx,dataOut.kayby,dataOut.kaxay,dataOut.kbxby] - def integrateByBlock(self, dataOut): + for ind in range(len(dataOut.kabxys_integrated)): #final cross products + 
dataOut.kabxys_integrated[ind]=dataOut.kabxys_integrated[ind]+dataOut.final_cross_products[ind] - times = int(dataOut.data.shape[1]/self.n) - avgdata = numpy.zeros((dataOut.nChannels, times, dataOut.nHeights), dtype=numpy.complex) + self.counter+=1 - id_min = 0 - id_max = self.n + if self.counter==dataOut.nint-1: + self.aux=1 + #dataOut.TimeBlockDate_for_dp_power=dataOut.TimeBlockDate + if self.counter==dataOut.nint: - for i in range(times): - junk = dataOut.data[:,id_min:id_max,:] - avgdata[:,i,:] = junk.sum(axis=1) - id_min += self.n - id_max += self.n + dataOut.flagNoData=False + dataOut.utctime=self.init_time + self.counter=0 - timeInterval = dataOut.ippSeconds*self.n - avgdatatime = (times - 1) * timeInterval + dataOut.utctime - self.__dataReady = True - return avgdata, avgdatatime - def run(self, dataOut, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False, **kwargs): + def run(self,dataOut,nint=20): - if not self.isConfig: - self.setup(n=n, stride=stride, timeInterval=timeInterval, overlapping=overlapping, byblock=byblock, **kwargs) - self.isConfig = True + dataOut.flagNoData=True + dataOut.nint=nint + dataOut.paramInterval=0#int(dataOut.nint*dataOut.header[7][0]*2 ) + dataOut.lat=-11.95 + dataOut.lon=-76.87 - if dataOut.flagDataAsBlock: - """ - Si la data es leida por bloques, dimension = [nChannels, nProfiles, nHeis] - """ - avgdata, avgdatatime = self.integrateByBlock(dataOut) - dataOut.nProfiles /= self.n - else: - if stride is None: - avgdata, avgdatatime = self.integrate(dataOut.data, dataOut.utctime) - else: - avgdata, avgdatatime = self.integrateByStride(dataOut.data, dataOut.utctime) + self.integration_for_double_pulse(dataOut) + return dataOut - # dataOut.timeInterval *= n - dataOut.flagNoData = True - if self.__dataReady: - dataOut.data = avgdata - if not dataOut.flagCohInt: - dataOut.nCohInt *= self.n - dataOut.flagCohInt = True - dataOut.utctime = avgdatatime - # print avgdata, avgdatatime - # raise - # dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt - dataOut.flagNoData = False - return dataOut +class SumFlips(Operation): + """Operation to sum the flip and unflip part of certain cross products of the Double Pulse. 
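To make the buffering pattern of IntegrationDP above easier to follow (names below are illustrative, not patch content): the operation keeps dataOut.flagNoData set while it sums the fourteen cross-product arrays block by block, stamps the eventual output with the time of the first accumulated block, and only releases a result every nint blocks. The same idea in miniature:

import numpy

class BlockAccumulator(object):
    # Hypothetical miniature of the IntegrationDP buffering logic.
    def __init__(self, nint, shape):
        self.nint = nint
        self.counter = 0
        self.buffer = numpy.zeros(shape, dtype='float32')
        self.first_time = None

    def push(self, block, utctime):
        if self.counter == 0:
            self.first_time = utctime
        self.buffer += block
        self.counter += 1
        if self.counter == self.nint:
            result, when = self.buffer.copy(), self.first_time
            self.buffer[:] = 0.0
            self.counter = 0
            return result, when        # integrated block ready
        return None, None              # still integrating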
-class Decoder(Operation): + Parameters: + ----------- + None - isConfig = False - __profIndex = 0 + Example + -------- - code = None + op = proc_unit.addOperation(name='SumFlips', optype='other') - nCode = None - nBaud = None + """ def __init__(self, **kwargs): Operation.__init__(self, **kwargs) - self.times = None - self.osamp = None - # self.__setValues = False - self.isConfig = False - self.setupReq = False - def setup(self, code, osamp, dataOut): - self.__profIndex = 0 + def rint2DP(self,dataOut): - self.code = code + dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32') - self.nCode = len(code) - self.nBaud = len(code[0]) + for l in range(dataOut.DPL): - if (osamp != None) and (osamp >1): - self.osamp = osamp - self.code = numpy.repeat(code, repeats=self.osamp, axis=1) - self.nBaud = self.nBaud*self.osamp + dataOut.rnint2[l]=1.0/(dataOut.nint*dataOut.NAVG*12.0) - self.__nChannels = dataOut.nChannels - self.__nProfiles = dataOut.nProfiles - self.__nHeis = dataOut.nHeights - if self.__nHeis < self.nBaud: - raise ValueError('Number of heights (%d) should be greater than number of bauds (%d)' %(self.__nHeis, self.nBaud)) + def SumLags(self,dataOut): - #Frequency - __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex) + for l in range(dataOut.DPL): - __codeBuffer[:,0:self.nBaud] = self.code + dataOut.kabxys_integrated[4][:,l,0]=(dataOut.kabxys_integrated[4][:,l,0]+dataOut.kabxys_integrated[4][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[5][:,l,0]=(dataOut.kabxys_integrated[5][:,l,0]+dataOut.kabxys_integrated[5][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[6][:,l,0]=(dataOut.kabxys_integrated[6][:,l,0]+dataOut.kabxys_integrated[6][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[7][:,l,0]=(dataOut.kabxys_integrated[7][:,l,0]+dataOut.kabxys_integrated[7][:,l,1])*dataOut.rnint2[l] - self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1)) + dataOut.kabxys_integrated[8][:,l,0]=(dataOut.kabxys_integrated[8][:,l,0]-dataOut.kabxys_integrated[8][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[9][:,l,0]=(dataOut.kabxys_integrated[9][:,l,0]-dataOut.kabxys_integrated[9][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[10][:,l,0]=(dataOut.kabxys_integrated[10][:,l,0]-dataOut.kabxys_integrated[10][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[11][:,l,0]=(dataOut.kabxys_integrated[11][:,l,0]-dataOut.kabxys_integrated[11][:,l,1])*dataOut.rnint2[l] - if dataOut.flagDataAsBlock: - self.ndatadec = self.__nHeis #- self.nBaud + 1 + def run(self,dataOut): - self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex) + self.rint2DP(dataOut) + self.SumLags(dataOut) - else: + return dataOut - #Time - self.ndatadec = self.__nHeis #- self.nBaud + 1 - self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex) +class FlagBadHeights(Operation): + """Operation to flag bad heights (bad data) of the Double Pulse. 
- def __convolutionInFreq(self, data): + Parameters: + ----------- + None - fft_code = self.fft_code[self.__profIndex].reshape(1,-1) + Example + -------- - fft_data = numpy.fft.fft(data, axis=1) + op = proc_unit.addOperation(name='FlagBadHeights', optype='other') - conv = fft_data*fft_code + """ - data = numpy.fft.ifft(conv,axis=1) + def __init__(self, **kwargs): - return data + Operation.__init__(self, **kwargs) - def __convolutionInFreqOpt(self, data): + def run(self,dataOut): - raise NotImplementedError + dataOut.ibad=numpy.zeros((dataOut.NDP,dataOut.DPL),'int32') - def __convolutionInTime(self, data): + for j in range(dataOut.NDP): + for l in range(dataOut.DPL): + ip1=j+dataOut.NDP*(0+2*l) - code = self.code[self.__profIndex] - for i in range(self.__nChannels): - self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:] + if( (dataOut.kabxys_integrated[5][j,l,0] <= 0.) or (dataOut.kabxys_integrated[4][j,l,0] <= 0.) or (dataOut.kabxys_integrated[7][j,l,0] <= 0.) or (dataOut.kabxys_integrated[6][j,l,0] <= 0.)): + dataOut.ibad[j][l]=1 + else: + dataOut.ibad[j][l]=0 - return self.datadecTime + return dataOut - def __convolutionByBlockInTime(self, data): +class FlagBadHeightsSpectra(Operation): + """Operation to flag bad heights (bad data) of the Double Pulse. - repetitions = int(self.__nProfiles / self.nCode) - junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize)) - junk = junk.flatten() - code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud)) - profilesList = range(self.__nProfiles) + Parameters: + ----------- + None - for i in range(self.__nChannels): - for j in profilesList: - self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:] - return self.datadecTime + Example + -------- - def __convolutionByBlockInFreq(self, data): + op = proc_unit.addOperation(name='FlagBadHeightsSpectra', optype='other') - raise NotImplementedError("Decoder by frequency fro Blocks not implemented") + """ + def __init__(self, **kwargs): - fft_code = self.fft_code[self.__profIndex].reshape(1,-1) + Operation.__init__(self, **kwargs) - fft_data = numpy.fft.fft(data, axis=2) + def run(self,dataOut): - conv = fft_data*fft_code + dataOut.ibad=numpy.zeros((dataOut.NDP,dataOut.DPL),'int32') - data = numpy.fft.ifft(conv,axis=2) + for j in range(dataOut.NDP): + for l in range(dataOut.DPL): + ip1=j+dataOut.NDP*(0+2*l) - return data + if( (dataOut.kabxys_integrated[4][j,l,0] <= 0.) or (dataOut.kabxys_integrated[6][j,l,0] <= 0.)): + dataOut.ibad[j][l]=1 + else: + dataOut.ibad[j][l]=0 + return dataOut - def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None): +class NoisePower(Operation): + """Operation to get noise power from the integrated data of the Double Pulse. - if dataOut.flagDecodeData: - print("This data is already decoded, recoding again ...") + Parameters: + ----------- + None - if not self.isConfig: + Example + -------- - if code is None: - if dataOut.code is None: - raise ValueError("Code could not be read from %s instance. 
Enter a value in Code parameter" %dataOut.type) + op = proc_unit.addOperation(name='NoisePower', optype='other') - code = dataOut.code + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + def hildebrand(self,dataOut,data): + + divider=10 # divider was originally 10 + noise=0.0 + n1=0 + n2=int(dataOut.NDP/2) + sorts= sorted(data) + nums_min= dataOut.NDP/divider + if((dataOut.NDP/divider)> 2): + nums_min= int(dataOut.NDP/divider) + + else: + nums_min=2 + sump=0.0 + sumq=0.0 + j=0 + cont=1 + while( (cont==1) and (j nums_min): + rtest= float(j/(j-1)) +1.0/dataOut.NAVG + t1= (sumq*j) + t2=(rtest*sump*sump) + if( (t1/t2) > 0.990): + j=j-1 + sump-= sorts[j+n1] + sumq-=sorts[j+n1]*sorts[j+n1] + cont= 0 + + noise= sump/j + stdv=numpy.sqrt((sumq- noise*noise)/(j-1)) + return noise + + def run(self,dataOut): + + p=numpy.zeros((dataOut.NR,dataOut.NDP,dataOut.DPL),'float32') + av=numpy.zeros(dataOut.NDP,'float32') + dataOut.pnoise=numpy.zeros(dataOut.NR,'float32') + + p[0,:,:]=dataOut.kabxys_integrated[4][:,:,0]+dataOut.kabxys_integrated[5][:,:,0] #total power for channel 0, just pulse with non-flip + p[1,:,:]=dataOut.kabxys_integrated[6][:,:,0]+dataOut.kabxys_integrated[7][:,:,0] #total power for channel 1 + + for i in range(dataOut.NR): + dataOut.pnoise[i]=0.0 + for k in range(dataOut.DPL): + dataOut.pnoise[i]+= self.hildebrand(dataOut,p[i,:,k]) + + dataOut.pnoise[i]=dataOut.pnoise[i]/dataOut.DPL + + + dataOut.pan=1.0*dataOut.pnoise[0] # weights could change + dataOut.pbn=1.0*dataOut.pnoise[1] # weights could change + + return dataOut + + +class DoublePulseACFs(Operation): + """Operation to get the ACFs of the Double Pulse. + + Parameters: + ----------- + None + + Example + -------- + + op = proc_unit.addOperation(name='DoublePulseACFs', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.aux=1 + + def run(self,dataOut): + + dataOut.igcej=numpy.zeros((dataOut.NDP,dataOut.DPL),'int32') + + if self.aux==1: + dataOut.rhor=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) + dataOut.rhoi=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) + dataOut.sdp=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) + dataOut.sd=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) + dataOut.p=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) + dataOut.alag=numpy.zeros(dataOut.NDP,'float32') + for l in range(dataOut.DPL): + dataOut.alag[l]=l*dataOut.DH*2.0/150.0 + self.aux=0 + sn4=dataOut.pan*dataOut.pbn + rhorn=0 + rhoin=0 + panrm=numpy.zeros((dataOut.NDP,dataOut.DPL), dtype=float) + + for i in range(dataOut.NDP): + for j in range(dataOut.DPL): + ################# Total power + pa=numpy.abs(dataOut.kabxys_integrated[4][i,j,0]+dataOut.kabxys_integrated[5][i,j,0]) + pb=numpy.abs(dataOut.kabxys_integrated[6][i,j,0]+dataOut.kabxys_integrated[7][i,j,0]) + st4=pa*pb + dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn) + dataOut.sdp[i,j]=2*dataOut.rnint2[j]*((pa+pb)*(pa+pb)) + ## ACF + rhorp=dataOut.kabxys_integrated[8][i,j,0]+dataOut.kabxys_integrated[11][i,j,0] + rhoip=dataOut.kabxys_integrated[10][i,j,0]-dataOut.kabxys_integrated[9][i,j,0] + if ((pa>dataOut.pan)&(pb>dataOut.pbn)): + + ss4=numpy.abs((pa-dataOut.pan)*(pb-dataOut.pbn)) + panrm[i,j]=math.sqrt(ss4) + rnorm=1/panrm[i,j] + ## ACF + dataOut.rhor[i,j]=rhorp*rnorm + dataOut.rhoi[i,j]=rhoip*rnorm + ############# Compute standard error for ACF + stoss4=st4/ss4 + snoss4=sn4/ss4 + rp2=((rhorp*rhorp)+(rhoip*rhoip))/st4 + rn2=((rhorn*rhorn)+(rhoin*rhoin))/sn4 + 
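An annotation on the estimate formed in this loop (not part of the patch): the lag ACF takes the summed cross products kaxbx+kayby as its real part and kaybx-kaxby as its imaginary part, and normalizes them by the geometric mean of the noise-subtracted channel powers, sqrt((pa - pan)*(pb - pbn)). A minimal sketch of that normalization, valid when both powers exceed their noise levels:

import numpy

def normalized_acf(rhorp, rhoip, pa, pb, pan, pbn):
    # rho = (real + j*imag) / sqrt((pa - pan)*(pb - pbn))
    norm = numpy.sqrt((pa - pan) * (pb - pbn))
    return (rhorp + 1j * rhoip) / norm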
rs2=(dataOut.rhor[i,j]*dataOut.rhor[i,j])+(dataOut.rhoi[i,j]*dataOut.rhoi[i,j]) + st=1.0+rs2*(stoss4-(2*math.sqrt(stoss4*snoss4))) + stn=1.0+rs2*(snoss4-(2*math.sqrt(stoss4*snoss4))) + dataOut.sd[i,j]=((stoss4*((1.0+rp2)*st+(2.0*rp2*rs2*snoss4)-4.0*math.sqrt(rs2*rp2)))+(0.25*snoss4*((1.0+rn2)*stn+(2.0*rn2*rs2*stoss4)-4.0*math.sqrt(rs2*rn2))))*dataOut.rnint2[j] + dataOut.sd[i,j]=numpy.abs(dataOut.sd[i,j]) + + else: #default values for bad points + rnorm=1/math.sqrt(st4) + dataOut.sd[i,j]=1.e30 + dataOut.ibad[i,j]=4 + dataOut.rhor[i,j]=rhorp*rnorm + dataOut.rhoi[i,j]=rhoip*rnorm + + if ((pa/dataOut.pan-1.0)>2.25*(pb/dataOut.pbn-1.0)): + dataOut.igcej[i,j]=1 + + return dataOut + + +class FaradayAngleAndDPPower(Operation): + """Operation to calculate Faraday angle and Double Pulse power. + + Parameters: + ----------- + None + + Example + -------- + + op = proc_unit.addOperation(name='FaradayAngleAndDPPower', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.aux=1 + + def run(self,dataOut): + + if self.aux==1: + dataOut.h2=numpy.zeros(dataOut.MAXNRANGENDT,'float32') + dataOut.range1=numpy.zeros(dataOut.MAXNRANGENDT,order='F',dtype='float32') + dataOut.sdn2=numpy.zeros(dataOut.NDP,'float32') + dataOut.ph2=numpy.zeros(dataOut.NDP,'float32') + dataOut.sdp2=numpy.zeros(dataOut.NDP,'float32') + dataOut.ibd=numpy.zeros(dataOut.NDP,'float32') + dataOut.phi=numpy.zeros(dataOut.NDP,'float32') + + self.aux=0 + + for i in range(dataOut.MAXNRANGENDT): + dataOut.range1[i]=dataOut.H0 + i*dataOut.DH + dataOut.h2[i]=dataOut.range1[i]**2 + + for j in range(dataOut.NDP): + dataOut.ph2[j]=0. + dataOut.sdp2[j]=0. + ri=dataOut.rhoi[j][0]/dataOut.sd[j][0] + rr=dataOut.rhor[j][0]/dataOut.sd[j][0] + dataOut.sdn2[j]=1./dataOut.sd[j][0] + + pt=0.# // total power + st=0.# // total signal + ibt=0# // bad lags + ns=0# // no. good lags + for l in range(dataOut.DPL): + #add in other lags if outside of e-jet contamination + if( (dataOut.igcej[j][l] == 0) and (dataOut.ibad[j][l] == 0) ): + + dataOut.ph2[j]+=dataOut.p[j][l]/dataOut.sdp[j][l] + dataOut.sdp2[j]=dataOut.sdp2[j]+1./dataOut.sdp[j][l] + ns+=1 + + + pt+=dataOut.p[j][l]/dataOut.sdp[j][l] + st+=1./dataOut.sdp[j][l] + ibt|=dataOut.ibad[j][l]; + if(ns!= 0): + dataOut.ibd[j]=0 + dataOut.ph2[j]=dataOut.ph2[j]/dataOut.sdp2[j] + dataOut.sdp2[j]=1./dataOut.sdp2[j] else: - code = numpy.array(code).reshape(nCode,nBaud) - self.setup(code, osamp, dataOut) + dataOut.ibd[j]=ibt + dataOut.ph2[j]=pt/st + dataOut.sdp2[j]=1./st + + dataOut.ph2[j]=dataOut.ph2[j]*dataOut.h2[j] + dataOut.sdp2[j]=numpy.sqrt(dataOut.sdp2[j])*dataOut.h2[j] + rr=rr/dataOut.sdn2[j] + ri=ri/dataOut.sdn2[j] + #rm[j]=np.sqrt(rr*rr + ri*ri) it is not used in c program + dataOut.sdn2[j]=1./(dataOut.sdn2[j]*(rr*rr + ri*ri)) + if( (ri == 0.) and (rr == 0.) ): + dataOut.phi[j]=0. + else: + dataOut.phi[j]=math.atan2( ri , rr ) - self.isConfig = True + return dataOut - if mode == 3: - sys.stderr.write("Decoder Warning: mode=%d is not valid, using mode=0\n" %mode) - if times != None: - sys.stderr.write("Decoder Warning: Argument 'times' in not used anymore\n") +class ElectronDensityFaraday(Operation): + """Operation to calculate electron density from Faraday angle. 
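One remark on the combination step in FaradayAngleAndDPPower above (illustrative only): across the lags that survive the e-jet and bad-height flags, the power profile is an inverse-variance weighted mean, sum(p/sdp)/sum(1/sdp) with combined variance 1/sum(1/sdp), which is then scaled by the squared range. The same weighting in isolation:

import numpy

def inverse_variance_mean(values, variances):
    w = 1.0 / numpy.asarray(variances, dtype=float)
    estimate = numpy.sum(numpy.asarray(values, dtype=float) * w) / numpy.sum(w)
    combined_variance = 1.0 / numpy.sum(w)
    return estimate, combined_variance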
- if self.code is None: - print("Fail decoding: Code is not defined.") - return + Parameters: + ----------- + NSHTS : int + .* + RATE : float + .* - self.__nProfiles = dataOut.nProfiles - datadec = None + Example + -------- - if mode == 3: - mode = 0 + op = proc_unit.addOperation(name='ElectronDensityFaraday', optype='other') + op.addParameter(name='NSHTS', value='50', format='int') + op.addParameter(name='RATE', value='1.8978873e-6', format='float') - if dataOut.flagDataAsBlock: - """ - Decoding when data have been read as block, - """ + """ - if mode == 0: - datadec = self.__convolutionByBlockInTime(dataOut.data) - if mode == 1: - datadec = self.__convolutionByBlockInFreq(dataOut.data) - else: - """ - Decoding when data have been read profile by profile - """ - if mode == 0: - datadec = self.__convolutionInTime(dataOut.data) + def __init__(self, **kwargs): - if mode == 1: - datadec = self.__convolutionInFreq(dataOut.data) + Operation.__init__(self, **kwargs) + self.aux=1 + + def run(self,dataOut,NSHTS=50,RATE=1.8978873e-6): + + #print(ctime(dataOut.utctime)) + #3print("Faraday Angle",dataOut.phi) + + dataOut.NSHTS=NSHTS + dataOut.RATE=RATE + + if self.aux==1: + dataOut.dphi=numpy.zeros(dataOut.NDP,'float32') + dataOut.sdn1=numpy.zeros(dataOut.NDP,'float32') + self.aux=0 + theta=numpy.zeros(dataOut.NDP,dtype=numpy.complex_) + thetai=numpy.zeros(dataOut.NDP,dtype=numpy.complex_) + # use complex numbers for phase + for i in range(dataOut.NSHTS): + theta[i]=math.cos(dataOut.phi[i])+math.sin(dataOut.phi[i])*1j + thetai[i]=-math.sin(dataOut.phi[i])+math.cos(dataOut.phi[i])*1j + + # differentiate and convert to number density + ndphi=dataOut.NSHTS-4 + for i in range(2,dataOut.NSHTS-2): + fact=(-0.5/(dataOut.RATE*dataOut.DH))*dataOut.bki[i] + #four-point derivative, no phase unwrapping necessary + ####dataOut.dphi[i]=((((theta[i+1]-theta[i-1])+(2.0*(theta[i+2]-theta[i-2])))/thetai[i])).real/10.0 + dataOut.dphi[i]=((((theta[i-2]-theta[i+2])+(8.0*(theta[i+1]-theta[i-1])))/thetai[i])).real/12.0 + + dataOut.dphi[i]=abs(dataOut.dphi[i]*fact) + dataOut.sdn1[i]=(4.*(dataOut.sdn2[i-2]+dataOut.sdn2[i+2])+dataOut.sdn2[i-1]+dataOut.sdn2[i+1]) + dataOut.sdn1[i]=numpy.sqrt(dataOut.sdn1[i])*fact - if mode == 2: - datadec = self.__convolutionInFreqOpt(dataOut.data) + return dataOut - if datadec is None: - raise ValueError("Codification mode selected is not valid: mode=%d. Try selecting 0 or 1" %mode) +class ElectronDensityRobertoTestFaraday(Operation): + """Operation to calculate electron density from Faraday angle. 
- dataOut.code = self.code - dataOut.nCode = self.nCode - dataOut.nBaud = self.nBaud + Parameters: + ----------- + NSHTS : int + .* + RATE : float + .* - dataOut.data = datadec + Example + -------- - dataOut.heightList = dataOut.heightList[0:datadec.shape[-1]] + op = proc_unit.addOperation(name='ElectronDensityFaraday', optype='other') + op.addParameter(name='NSHTS', value='50', format='int') + op.addParameter(name='RATE', value='1.8978873e-6', format='float') - dataOut.flagDecodeData = True #asumo q la data esta decodificada + """ - if self.__profIndex == self.nCode-1: - self.__profIndex = 0 - return dataOut + def __init__(self, **kwargs): - self.__profIndex += 1 + Operation.__init__(self, **kwargs) + self.aux=1 + + def run(self,dataOut,NSHTS=50,RATE=1.8978873e-6): + + #print(ctime(dataOut.utctime)) + #print("Faraday Angle",dataOut.phi) + + dataOut.NSHTS=NSHTS + dataOut.RATE=RATE + + if self.aux==1: + dataOut.dphi=numpy.zeros(dataOut.NDP,'float32') + dataOut.sdn1=numpy.zeros(dataOut.NDP,'float32') + self.aux=0 + theta=numpy.zeros(dataOut.NDP,dtype=numpy.complex_) + thetai=numpy.zeros(dataOut.NDP,dtype=numpy.complex_) + # use complex numbers for phase + ''' + for i in range(dataOut.NSHTS): + theta[i]=math.cos(dataOut.phi[i])+math.sin(dataOut.phi[i])*1j + thetai[i]=-math.sin(dataOut.phi[i])+math.cos(dataOut.phi[i])*1j + ''' + + # differentiate and convert to number density + ndphi=dataOut.NSHTS-4 + + dataOut.phi=numpy.unwrap(dataOut.phi) + + for i in range(2,dataOut.NSHTS-2): + fact=(-0.5/(dataOut.RATE*dataOut.DH))*dataOut.bki[i] + #four-point derivative, no phase unwrapping necessary + ####dataOut.dphi[i]=((((theta[i+1]-theta[i-1])+(2.0*(theta[i+2]-theta[i-2])))/thetai[i])).real/10.0 + ##dataOut.dphi[i]=((((theta[i-2]-theta[i+2])+(8.0*(theta[i+1]-theta[i-1])))/thetai[i])).real/12.0 + dataOut.dphi[i]=((dataOut.phi[i+1]-dataOut.phi[i-1])+(2.0*(dataOut.phi[i+2]-dataOut.phi[i-2])))/10.0 + + dataOut.dphi[i]=abs(dataOut.dphi[i]*fact) + dataOut.sdn1[i]=(4.*(dataOut.sdn2[i-2]+dataOut.sdn2[i+2])+dataOut.sdn2[i-1]+dataOut.sdn2[i+1]) + dataOut.sdn1[i]=numpy.sqrt(dataOut.sdn1[i])*fact return dataOut - # dataOut.flagDeflipData = True #asumo q la data no esta sin flip +class ElectronDensityRobertoTest2Faraday(Operation): + """Operation to calculate electron density from Faraday angle. 
-class ProfileConcat(Operation): + Parameters: + ----------- + NSHTS : int + .* + RATE : float + .* - isConfig = False - buffer = None + Example + -------- + + op = proc_unit.addOperation(name='ElectronDensityFaraday', optype='other') + op.addParameter(name='NSHTS', value='50', format='int') + op.addParameter(name='RATE', value='1.8978873e-6', format='float') + + """ def __init__(self, **kwargs): Operation.__init__(self, **kwargs) - self.profileIndex = 0 + self.aux=1 - def reset(self): - self.buffer = numpy.zeros_like(self.buffer) - self.start_index = 0 - self.times = 1 + def run(self,dataOut,NSHTS=50,RATE=1.8978873e-6): - def setup(self, data, m, n=1): - self.buffer = numpy.zeros((data.shape[0],data.shape[1]*m),dtype=type(data[0,0])) - self.nHeights = data.shape[1]#.nHeights - self.start_index = 0 - self.times = 1 + #print(ctime(dataOut.utctime)) + #print("Faraday Angle",dataOut.phi) - def concat(self, data): + dataOut.NSHTS=NSHTS + dataOut.RATE=RATE - self.buffer[:,self.start_index:self.nHeights*self.times] = data.copy() - self.start_index = self.start_index + self.nHeights + if self.aux==1: + dataOut.dphi=numpy.zeros(dataOut.NDP,'float32') + dataOut.sdn1=numpy.zeros(dataOut.NDP,'float32') + self.aux=0 + theta=numpy.zeros(dataOut.NDP,dtype=numpy.complex_) + thetai=numpy.zeros(dataOut.NDP,dtype=numpy.complex_) + # use complex numbers for phase + ''' + for i in range(dataOut.NSHTS): + theta[i]=math.cos(dataOut.phi[i])+math.sin(dataOut.phi[i])*1j + thetai[i]=-math.sin(dataOut.phi[i])+math.cos(dataOut.phi[i])*1j + ''' - def run(self, dataOut, m): - dataOut.flagNoData = True + # differentiate and convert to number density + ndphi=dataOut.NSHTS-4 - if not self.isConfig: - self.setup(dataOut.data, m, 1) - self.isConfig = True + #dataOut.phi=numpy.unwrap(dataOut.phi) + f1=numpy.exp((dataOut.phi*1.j)/10) + f2=numpy.exp((dataOut.phi*2.j)/10) - if dataOut.flagDataAsBlock: - raise ValueError("ProfileConcat can only be used when voltage have been read profile by profile, getBlock = False") + for i in range(2,dataOut.NSHTS-2): + fact=(-0.5/(dataOut.RATE*dataOut.DH))*dataOut.bki[i] + #four-point derivative, no phase unwrapping necessary + ####dataOut.dphi[i]=((((theta[i+1]-theta[i-1])+(2.0*(theta[i+2]-theta[i-2])))/thetai[i])).real/10.0 + ##dataOut.dphi[i]=((((theta[i-2]-theta[i+2])+(8.0*(theta[i+1]-theta[i-1])))/thetai[i])).real/12.0 + ##dataOut.dphi[i]=((dataOut.phi[i+1]-dataOut.phi[i-1])+(2.0*(dataOut.phi[i+2]-dataOut.phi[i-2])))/10.0 + dataOut.dphi[i]=numpy.angle(f1[i+1]*numpy.conjugate(f1[i-1])*f2[i+2]*numpy.conjugate(f2[i-2])) + + + dataOut.dphi[i]=abs(dataOut.dphi[i]*fact) + dataOut.sdn1[i]=(4.*(dataOut.sdn2[i-2]+dataOut.sdn2[i+2])+dataOut.sdn2[i-1]+dataOut.sdn2[i+1]) + dataOut.sdn1[i]=numpy.sqrt(dataOut.sdn1[i])*fact + + return dataOut + +class NormalizeDPPower(Operation): + """Operation to normalize relative electron density from power with total electron density from Farday angle. 
+ + Parameters: + ----------- + None + + Example + -------- + + op = proc_unit.addOperation(name='NormalizeDPPower', optype='other') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.aux=1 + + def normal(self,a,b,n,m): + chmin=1.0e30 + chisq=numpy.zeros(150,'float32') + temp=numpy.zeros(150,'float32') + + for i in range(2*m-1): + an=al=be=chisq[i]=0.0 + for j in range(int(n/m)): + k=int(j+i*n/(2*m)) + if(a[k]>0.0 and b[k]>0.0): + al+=a[k]*b[k] + be+=b[k]*b[k] + + if(be>0.0): + temp[i]=al/be + else: + temp[i]=1.0 + + for j in range(int(n/m)): + k=int(j+i*n/(2*m)) + if(a[k]>0.0 and b[k]>0.0): + chisq[i]+=(numpy.log10(b[k]*temp[i]/a[k]))**2 + an=an+1 + + if(chisq[i]>0.0): + chisq[i]/=an + + for i in range(int(2*m-1)): + if(chisq[i]1.0e-6): + chmin=chisq[i] + cf=temp[i] + return cf + + def normalize(self,dataOut): + + if self.aux==1: + dataOut.cf=numpy.zeros(1,'float32') + dataOut.cflast=numpy.zeros(1,'float32') + self.aux=0 + + night_first=300.0 + night_first1= 310.0 + night_end= 450.0 + day_first=250.0 + day_end=400.0 + day_first_sunrise=190.0 + day_end_sunrise=280.0 + + print(dataOut.ut_Faraday) + if(dataOut.ut_Faraday>4.0 and dataOut.ut_Faraday<11.0): #early + print("EARLY") + i2=(night_end-dataOut.range1[0])/dataOut.DH + i1=(night_first -dataOut.range1[0])/dataOut.DH + elif (dataOut.ut_Faraday>0.0 and dataOut.ut_Faraday<4.0): #night + print("NIGHT") + i2=(night_end-dataOut.range1[0])/dataOut.DH + i1=(night_first1 -dataOut.range1[0])/dataOut.DH + elif (dataOut.ut_Faraday>=11.0 and dataOut.ut_Faraday<13.5): #sunrise + print("SUNRISE") + i2=( day_end_sunrise-dataOut.range1[0])/dataOut.DH + i1=(day_first_sunrise - dataOut.range1[0])/dataOut.DH else: - self.concat(dataOut.data) - self.times += 1 - if self.times > m: - dataOut.data = self.buffer - self.reset() - dataOut.flagNoData = False - # se deben actualizar mas propiedades del header y del objeto dataOut, por ejemplo, las alturas - deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] - xf = dataOut.heightList[0] + dataOut.nHeights * deltaHeight * m - dataOut.heightList = numpy.arange(dataOut.heightList[0], xf, deltaHeight) - dataOut.ippSeconds *= m + print("ELSE") + i2=(day_end-dataOut.range1[0])/dataOut.DH + i1=(day_first -dataOut.range1[0])/dataOut.DH + #print(i1*dataOut.DH) + #print(i2*dataOut.DH) + + i1=int(i1) + i2=int(i2) + + try: + dataOut.cf=self.normal(dataOut.dphi[i1::], dataOut.ph2[i1::], i2-i1, 1) + except: + pass + + #print(dataOut.ph2) + #input() + # in case of spread F, normalize much higher + if(dataOut.cf0.0 and b[k]>0.0): + al+=a[k]*b[k] + be+=b[k]*b[k] + + if(be>0.0): + temp[i]=al/be + else: + temp[i]=1.0 + + for j in range(int(n/m)): + k=int(j+i*n/(2*m)) + if(a[k]>0.0 and b[k]>0.0): + chisq[i]+=(numpy.log10(b[k]*temp[i]/a[k]))**2 + an=an+1 + + if(chisq[i]>0.0): + chisq[i]/=an + + for i in range(int(2*m-1)): + if(chisq[i]1.0e-6): + chmin=chisq[i] + cf=temp[i] + return cf + + def normalize(self,dataOut): + + if self.aux==1: + dataOut.cf=numpy.zeros(1,'float32') + dataOut.cflast=numpy.zeros(1,'float32') + self.aux=0 + + night_first=300.0 + night_first1= 310.0 + night_end= 450.0 + day_first=250.0 + day_end=400.0 + day_first_sunrise=190.0 + day_end_sunrise=350.0 + + print(dataOut.ut_Faraday) + ''' + if(dataOut.ut_Faraday>4.0 and dataOut.ut_Faraday<11.0): #early + print("EARLY") + i2=(night_end-dataOut.range1[0])/dataOut.DH + i1=(night_first -dataOut.range1[0])/dataOut.DH + elif (dataOut.ut_Faraday>0.0 and dataOut.ut_Faraday<4.0): #night + print("NIGHT") + 
i2=(night_end-dataOut.range1[0])/dataOut.DH + i1=(night_first1 -dataOut.range1[0])/dataOut.DH + elif (dataOut.ut_Faraday>=11.0 and dataOut.ut_Faraday<13.5): #sunrise + print("SUNRISE") + i2=( day_end_sunrise-dataOut.range1[0])/dataOut.DH + i1=(day_first_sunrise - dataOut.range1[0])/dataOut.DH + else: + print("ELSE") + i2=(day_end-dataOut.range1[0])/dataOut.DH + i1=(day_first -dataOut.range1[0])/dataOut.DH + ''' + i2=(420-dataOut.range1[0])/dataOut.DH + i1=(200 -dataOut.range1[0])/dataOut.DH + print(i1*dataOut.DH) + print(i2*dataOut.DH) + + i1=int(i1) + i2=int(i2) + + try: + dataOut.cf=self.normal(dataOut.dphi[i1::], dataOut.ph2[i1::], i2-i1, 1) + except: + pass + + #print(dataOut.ph2) + #input() + # in case of spread F, normalize much higher + if(dataOut.cf10 and l1>=0: + if l1==0: + l1=1 + + dataOut.cov=numpy.reshape(dataOut.cov,l1*l1) + dataOut.cov=numpy.resize(dataOut.cov,dataOut.DPL*dataOut.DPL) + dataOut.covinv=numpy.reshape(dataOut.covinv,l1*l1) + dataOut.covinv=numpy.resize(dataOut.covinv,dataOut.DPL*dataOut.DPL) + + for l in range(dataOut.DPL*dataOut.DPL): + dataOut.cov[l]=0.0 + acfm= (dataOut.rhor[i][0])**2 + (dataOut.rhoi[i][0])**2 + if acfm> 0.0: + cc=dataOut.rhor[i][0]/acfm + ss=dataOut.rhoi[i][0]/acfm + else: + cc=1. + ss=0. + # keep only uncontaminated data, don't pass zero lag to fitter + l1=0 + for l in range(0+1,dataOut.DPL): + if dataOut.igcej[i][l]==0 and dataOut.ibad[i][l]==0: + y[l1]=dataOut.rhor[i][l]*cc + dataOut.rhoi[i][l]*ss + x[l1]=dataOut.alag[l]*1.0e-3 + dataOut.sd[i][l]=dataOut.sd[i][l]/((acfm)**2)# important + e[l1]=dataOut.sd[i][l] #this is the variance, not the st. dev. + l1=l1+1 + + for l in range(l1*(l1+1)): + dataOut.cov[l]=0.0 + for l in range(l1): + dataOut.cov[l*(1+l1)]=e[l] + angle=dataOut.thb[i]*0.01745 + bm=dataOut.bfm[i] + dataOut.params[0]=1.0 #norm + dataOut.params[1]=1000.0 #te + dataOut.params[2]=800.0 #ti + dataOut.params[3]=0.00 #ph + dataOut.params[4]=0.00 #phe + + if l1!=0: + x=numpy.resize(x,l1) + y=numpy.resize(y,l1) + else: + x=numpy.resize(x,1) + y=numpy.resize(y,1) + + if True: #len(y)!=0: + + fitacf_guess.guess(y,x,zero,depth,t1,t2,len(y)) + t2=t1/t2 + + + + + if (t1<5000.0 and t1> 600.0): + dataOut.params[1]=t1 + dataOut.params[2]=min(t2,t1) + dataOut.ifit[1]=dataOut.ifit[2]=1 + dataOut.ifit[0]=dataOut.ifit[3]=dataOut.ifit[4]=0 + #print(dataOut.ut_Faraday) + if dataOut.ut_Faraday<10.0 and dataOut.ut_Faraday>=0.5: + dataOut.ifit[2]=0 + + den=dataOut.ph2[i] + + if l1!=0: + dataOut.covinv=dataOut.covinv[0:l1*l1].reshape((l1,l1)) + dataOut.cov=dataOut.cov[0:l1*l1].reshape((l1,l1)) + e=numpy.resize(e,l1) + else: + dataOut.covinv=numpy.resize(dataOut.covinv,1) + dataOut.cov=numpy.resize(dataOut.cov,1) + e=numpy.resize(e,1) + + eb=numpy.resize(eb,10) + dataOut.ifit=numpy.resize(dataOut.ifit,10) + + + + dataOut.covinv,e,dataOut.params,eb,dataOut.m=fitacf_fit_short.fit(wl,x,y,dataOut.cov,dataOut.covinv,e,dataOut.params,bm,angle,den,dataOut.range1[i],dataOut.year,dataOut.ifit,dataOut.m,l1) # + + #exit() + + if dataOut.params[2]>dataOut.params[1]*1.05: + dataOut.ifit[2]=0 + dataOut.params[1]=dataOut.params[2]=t1 + dataOut.covinv,e,dataOut.params,eb,dataOut.m=fitacf_fit_short.fit(wl,x,y,dataOut.cov,dataOut.covinv,e,dataOut.params,bm,angle,den,dataOut.range1[i],dataOut.year,dataOut.ifit,dataOut.m,l1) # + + if (dataOut.ifit[2]==0): + dataOut.params[2]=dataOut.params[1] + if (dataOut.ifit[3]==0 and iflag==0): + dataOut.params[3]=0.0 + if (dataOut.ifit[4]==0): + dataOut.params[4]=0.0 + dataOut.te2[i]=dataOut.params[1] + 
dataOut.ti2[i]=dataOut.params[2] + dataOut.ete2[i]=eb[1] + dataOut.eti2[i]=eb[2] + + if dataOut.eti2[i]==0: + dataOut.eti2[i]=dataOut.ete2[i] + + dataOut.phy2[i]=dataOut.params[3] + dataOut.ephy2[i]=eb[3] + if(iflag==1): + dataOut.ephy2[i]=0.0 + + if (dataOut.m<=3 and dataOut.m!= 0 and dataOut.te2[i]>400.0): + dataOut.info2[i]=1 + else: + dataOut.info2[i]=0 + + def run(self,dataOut,IBITS=16): + + dataOut.IBITS = IBITS + + self.Estimation(dataOut) + + + return dataOut + +class NeTeTiRecal(NormalizeDPPower,DPTemperaturesEstimation): + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.aux=0 + + def run(self,dataOut): + + for i in range(dataOut.NSHTS): + print("H: ",i*15) + print(1+(dataOut.te2[i]/dataOut.ti2[i])) + dataOut.ph2[i]*=1+(dataOut.te2[i]/dataOut.ti2[i]) + + self.normalize(dataOut) + self.Estimation(dataOut) + + + return dataOut + + +from schainpy.model.proc import fitacf_acf2 +class DenCorrection(Operation): + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + def run(self,dataOut): + + y=numpy.zeros(dataOut.DPL,order='F',dtype='float32') + #y_aux = numpy.zeros(1,,dtype='float32') + for i in range(dataOut.NSHTS): + y[0]=y[1]=dataOut.range1[i] + + y = y.astype(dtype='float64',order='F') + three=int(3) + wl = 3.0 + tion=numpy.zeros(three,order='F',dtype='float32') + fion=numpy.zeros(three,order='F',dtype='float32') + nui=numpy.zeros(three,order='F',dtype='float32') + wion=numpy.zeros(three,order='F',dtype='int32') + bline=0.0 + #bline=numpy.zeros(1,order='F',dtype='float32') + + + #print("**** ACF2 WRAPPER ***** ",fitacf_acf2.acf2.__doc__ ) + print("BEFORE",dataOut.ph2[10:35]) + for i in range(dataOut.NSHTS): + if dataOut.info2[i]==1: + angle=dataOut.thb[i]*0.01745 + nue=nui[0]=nui[1]=nui[2]=0.0#nui[3]=0.0 + wion[0]=16 + wion[1]=1 + wion[2]=4 + tion[0]=tion[1]=tion[2]=dataOut.ti2[i] + fion[0]=1.0-dataOut.phy2[i] + fion[1]=dataOut.phy2[i] + fion[2]=0.0 + for j in range(dataOut.DPL): + tau=dataOut.alag[j]*1.0e-3 + + ''' + print("**** input from acf2 ***** ") + print("wl ",wl) + print("tau ",tau) + print("te2[i] ",dataOut.te2[i]) + print("tion ",tion) + print("fion ",fion) + print("nue ",nue) + print("nui ",nui) + print("wion ",wion) + print("angle ",angle) + print("ph2[i] ",dataOut.ph2[i]) + print("bfm[i] ",dataOut.bfm[i]) + print("y[j] ",y[j]) + ''' + print("Before y[j] ",y[j]) + #with suppress_stdout_stderr(): + y[j]=fitacf_acf2.acf2(wl,tau,dataOut.te2[i],tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],y[j],three) + #print("l",l) + print("After y[j] ",y[j]) + ''' + print("**** output from acf2 ***** ") + print("wl ",wl) + print("tau ",tau) + print("te2[i] ",dataOut.te2[i]) + print("tion ",tion) + print("fion ",fion) + print("nue ",nue) + print("nui ",nui) + print("wion ",wion) + print("angle ",angle) + print("ph2[i] ",dataOut.ph2[i]) + print("bfm[i] ",dataOut.bfm[i]) + print("y[j] ",y[j]) + print("i ",i , " j ",j , "y[j] ",y[j]) + ''' + + + #exit(1) + if dataOut.ut_Faraday>11.0 and dataOut.range1[i]>150.0 and dataOut.range1[i]<400.0: + tau=0.0 + #with suppress_stdout_stderr(): + bline=fitacf_acf2.acf2(wl,tau,tion,tion,fion,nue,nui,wion,angle,dataOut.ph2[i],dataOut.bfm[i],bline,three) + cf=min(1.2,max(1.0,bline/y[0])) + print("bline: ",bline) + if cf != 1.0: + print("bline: ",bline) + print("cf: ",cf) + #exit(1) + #print("cf: ",cf) + dataOut.ph2[i]=cf*dataOut.ph2[i] + dataOut.sdp2[i]=cf*dataOut.sdp2[i] + for j in range(1,dataOut.DPL): + y[j]=(y[j]/y[0])*dataOut.DH+dataOut.range1[i] + y[0]=dataOut.range1[i]+dataOut.DH + 
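                    # Note: a hedged reading of the correction above; for the daytime
                    # window (ut_Faraday > 11, gates between 150 and 400 km) the relative
                    # density is rescaled by cf = min(1.2, max(1.0, bline/y[0])), so ph2
                    # and sdp2 are raised by at most 20% toward the theoretical zero-lag
                    # ACF that fitacf_acf2.acf2 returns with Te set equal to Ti; the lags
                    # in y are then remapped to range1[i] + DH*y[j]/y[0] purely for plotting.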
#exit(1) + + + #exit(1) + print("AFTER",dataOut.ph2[10:35]) + #exit(1) + + + + + + return dataOut + +class DataPlotCleaner(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + def run(self,dataOut): + + + THRESH_MIN_POW=10000 + THRESH_MAX_POW=10000000 + THRESH_MIN_TEMP=500 + THRESH_MAX_TEMP=4000 + dataOut.DensityClean=numpy.zeros((1,dataOut.NDP)) + dataOut.EDensityClean=numpy.zeros((1,dataOut.NDP)) + dataOut.ElecTempClean=numpy.zeros((1,dataOut.NDP)) + dataOut.EElecTempClean=numpy.zeros((1,dataOut.NDP)) + dataOut.IonTempClean=numpy.zeros((1,dataOut.NDP)) + dataOut.EIonTempClean=numpy.zeros((1,dataOut.NDP)) + + dataOut.DensityClean[0]=numpy.copy(dataOut.ph2) + dataOut.EDensityClean[0]=numpy.copy(dataOut.sdp2) + dataOut.ElecTempClean[0,:dataOut.NSHTS]=numpy.copy(dataOut.te2) + dataOut.EElecTempClean[0,:dataOut.NSHTS]=numpy.copy(dataOut.ete2) + dataOut.IonTempClean[0,:dataOut.NSHTS]=numpy.copy(dataOut.ti2) + dataOut.EIonTempClean[0,:dataOut.NSHTS]=numpy.copy(dataOut.eti2) + + for i in range(dataOut.NDP): + if dataOut.DensityClean[0,i]THRESH_MAX_POW: + dataOut.DensityClean[0,i]=THRESH_MAX_POW + + for i in range(dataOut.NSHTS): + dataOut.ElecTempClean[0,i]=(max(1.0, dataOut.ElecTempClean[0,i])) + dataOut.IonTempClean[0,i]=(max(1.0, dataOut.IonTempClean[0,i])) + for i in range(dataOut.NSHTS): + if dataOut.ElecTempClean[0,i]THRESH_MAX_TEMP: + dataOut.ElecTempClean[0,i]=THRESH_MAX_TEMP + if dataOut.IonTempClean[0,i]>THRESH_MAX_TEMP: + dataOut.IonTempClean[0,i]=THRESH_MAX_TEMP + for i in range(dataOut.NSHTS): + if dataOut.EElecTempClean[0,i]>500:# + dataOut.ElecTempClean[0,i]=500 + if dataOut.EIonTempClean[0,i]>500:# + dataOut.IonTempClean[0,i]=500 + + missing=numpy.nan + + for i in range(dataOut.NSHTS,dataOut.NDP): + + dataOut.ElecTempClean[0,i]=missing + dataOut.EElecTempClean[0,i]=missing + dataOut.IonTempClean[0,i]=missing + dataOut.EIonTempClean[0,i]=missing + + return dataOut + + +class DataSaveCleaner(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + def run(self,dataOut): + + dataOut.DensityFinal=numpy.zeros((1,dataOut.NDP)) + dataOut.EDensityFinal=numpy.zeros((1,dataOut.NDP)) + dataOut.ElecTempFinal=numpy.zeros((1,dataOut.NDP)) + dataOut.EElecTempFinal=numpy.zeros((1,dataOut.NDP)) + dataOut.IonTempFinal=numpy.zeros((1,dataOut.NDP)) + dataOut.EIonTempFinal=numpy.zeros((1,dataOut.NDP)) + dataOut.PhyFinal=numpy.zeros((1,dataOut.NDP)) + dataOut.EPhyFinal=numpy.zeros((1,dataOut.NDP)) + + dataOut.DensityFinal[0]=numpy.copy(dataOut.ph2) + dataOut.EDensityFinal[0]=numpy.copy(dataOut.sdp2) + dataOut.ElecTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.te2) + dataOut.EElecTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.ete2) + dataOut.IonTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.ti2) + dataOut.EIonTempFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.eti2) + dataOut.PhyFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.phy2) + dataOut.EPhyFinal[0,:dataOut.NSHTS]=numpy.copy(dataOut.ephy2) + + missing=numpy.nan + + temp_min=100.0 + temp_max=3000.0#6000.0e + + for i in range(dataOut.NSHTS): + + if dataOut.info2[i]!=1: + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + if dataOut.ElecTempFinal[0,i]<=temp_min or dataOut.ElecTempFinal[0,i]>temp_max or dataOut.EElecTempFinal[0,i]>temp_max: + + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing + + + if dataOut.IonTempFinal[0,i]<=temp_min or dataOut.IonTempFinal[0,i]>temp_max or dataOut.EIonTempFinal[0,i]>temp_max: + 
dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + if dataOut.lags_to_plot[i,:][~numpy.isnan(dataOut.lags_to_plot[i,:])].shape[0]<6: + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + if dataOut.ut_Faraday>4 and dataOut.ut_Faraday<11: + if numpy.nanmax(dataOut.acfs_error_to_plot[i,:])>=10: + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + if dataOut.EPhyFinal[0,i]<0.0 or dataOut.EPhyFinal[0,i]>1.0: + dataOut.PhyFinal[0,i]=dataOut.EPhyFinal[0,i]=missing + if dataOut.EDensityFinal[0,i]>0.0 and dataOut.DensityFinal[0,i]>0.0 and dataOut.DensityFinal[0,i]<9.9e6: + dataOut.EDensityFinal[0,i]=max(dataOut.EDensityFinal[0,i],1000.0) + else: + dataOut.DensityFinal[0,i]=dataOut.EDensityFinal[0,i]=missing + if dataOut.PhyFinal[0,i]==0 or dataOut.PhyFinal[0,i]>0.4: + dataOut.PhyFinal[0,i]=dataOut.EPhyFinal[0,i]=missing + if dataOut.ElecTempFinal[0,i]==dataOut.IonTempFinal[0,i]: + dataOut.EElecTempFinal[0,i]=dataOut.EIonTempFinal[0,i] + if numpy.isnan(dataOut.ElecTempFinal[0,i]): + dataOut.EElecTempFinal[0,i]=missing + if numpy.isnan(dataOut.IonTempFinal[0,i]): + dataOut.EIonTempFinal[0,i]=missing + if numpy.isnan(dataOut.ElecTempFinal[0,i]) or numpy.isnan(dataOut.EElecTempFinal[0,i]): + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + for i in range(12,dataOut.NSHTS-1): + + if numpy.isnan(dataOut.ElecTempFinal[0,i-1]) and numpy.isnan(dataOut.ElecTempFinal[0,i+1]): + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing + + if numpy.isnan(dataOut.IonTempFinal[0,i-1]) and numpy.isnan(dataOut.IonTempFinal[0,i+1]): + dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + if dataOut.ut_Faraday>4 and dataOut.ut_Faraday<11: + + if numpy.isnan(dataOut.ElecTempFinal[0,i-1]) and numpy.isnan(dataOut.ElecTempFinal[0,i-2]) and numpy.isnan(dataOut.ElecTempFinal[0,i+2]) and numpy.isnan(dataOut.ElecTempFinal[0,i+3]): #and numpy.isnan(dataOut.ElecTempFinal[0,i-5]): + + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing + if numpy.isnan(dataOut.IonTempFinal[0,i-1]) and numpy.isnan(dataOut.IonTempFinal[0,i-2]) and numpy.isnan(dataOut.IonTempFinal[0,i+2]) and numpy.isnan(dataOut.IonTempFinal[0,i+3]): #and numpy.isnan(dataOut.IonTempFinal[0,i-5]): + + dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + + + if i>25: + if numpy.isnan(dataOut.ElecTempFinal[0,i-1]) and numpy.isnan(dataOut.ElecTempFinal[0,i-2]) and numpy.isnan(dataOut.ElecTempFinal[0,i-3]) and numpy.isnan(dataOut.ElecTempFinal[0,i-4]): #and numpy.isnan(dataOut.ElecTempFinal[0,i-5]): + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing + if numpy.isnan(dataOut.IonTempFinal[0,i-1]) and numpy.isnan(dataOut.IonTempFinal[0,i-2]) and numpy.isnan(dataOut.IonTempFinal[0,i-3]) and numpy.isnan(dataOut.IonTempFinal[0,i-4]): #and numpy.isnan(dataOut.IonTempFinal[0,i-5]): + + dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + if numpy.isnan(dataOut.ElecTempFinal[0,i]) or numpy.isnan(dataOut.EElecTempFinal[0,i]): + + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + for i in range(12,dataOut.NSHTS-1): + + if numpy.isnan(dataOut.ElecTempFinal[0,i-1]) and numpy.isnan(dataOut.ElecTempFinal[0,i+1]): + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=missing + + if numpy.isnan(dataOut.IonTempFinal[0,i-1]) and 
numpy.isnan(dataOut.IonTempFinal[0,i+1]): + dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + + if numpy.isnan(dataOut.ElecTempFinal[0,i]) or numpy.isnan(dataOut.EElecTempFinal[0,i]): + + dataOut.ElecTempFinal[0,i]=dataOut.EElecTempFinal[0,i]=dataOut.IonTempFinal[0,i]=dataOut.EIonTempFinal[0,i]=missing + + if numpy.count_nonzero(~numpy.isnan(dataOut.ElecTempFinal[0,12:50]))<5: + dataOut.ElecTempFinal[0,:]=dataOut.EElecTempFinal[0,:]=missing + if numpy.count_nonzero(~numpy.isnan(dataOut.IonTempFinal[0,12:50]))<5: + dataOut.IonTempFinal[0,:]=dataOut.EIonTempFinal[0,:]=missing + + for i in range(dataOut.NSHTS,dataOut.NDP): + + dataOut.ElecTempFinal[0,i]=missing + dataOut.EElecTempFinal[0,i]=missing + dataOut.IonTempFinal[0,i]=missing + dataOut.EIonTempFinal[0,i]=missing + dataOut.PhyFinal[0,i]=missing + dataOut.EPhyFinal[0,i]=missing + + return dataOut + + +class DataSaveCleanerHP(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + def run(self,dataOut): + + dataOut.Density_DP=numpy.zeros(dataOut.cut) + dataOut.EDensity_DP=numpy.zeros(dataOut.cut) + dataOut.ElecTemp_DP=numpy.zeros(dataOut.cut) + dataOut.EElecTemp_DP=numpy.zeros(dataOut.cut) + dataOut.IonTemp_DP=numpy.zeros(dataOut.cut) + dataOut.EIonTemp_DP=numpy.zeros(dataOut.cut) + dataOut.Phy_DP=numpy.zeros(dataOut.cut) + dataOut.EPhy_DP=numpy.zeros(dataOut.cut) + dataOut.Phe_DP=numpy.empty(dataOut.cut) + dataOut.EPhe_DP=numpy.empty(dataOut.cut) + + dataOut.Density_DP[:]=numpy.copy(dataOut.ph2[:dataOut.cut]) + dataOut.EDensity_DP[:]=numpy.copy(dataOut.sdp2[:dataOut.cut]) + dataOut.ElecTemp_DP[:]=numpy.copy(dataOut.te2[:dataOut.cut]) + dataOut.EElecTemp_DP[:]=numpy.copy(dataOut.ete2[:dataOut.cut]) + dataOut.IonTemp_DP[:]=numpy.copy(dataOut.ti2[:dataOut.cut]) + dataOut.EIonTemp_DP[:]=numpy.copy(dataOut.eti2[:dataOut.cut]) + dataOut.Phy_DP[:]=numpy.copy(dataOut.phy2[:dataOut.cut]) + dataOut.EPhy_DP[:]=numpy.copy(dataOut.ephy2[:dataOut.cut]) + dataOut.Phe_DP[:]=numpy.nan + dataOut.EPhe_DP[:]=numpy.nan + + missing=numpy.nan + temp_min=100.0 + temp_max_dp=3000.0 + + for i in range(dataOut.cut): + if dataOut.info2[i]!=1: + dataOut.ElecTemp_DP[i]=dataOut.EElecTemp_DP[i]=dataOut.IonTemp_DP[i]=dataOut.EIonTemp_DP[i]=missing + + if dataOut.ElecTemp_DP[i]<=temp_min or dataOut.ElecTemp_DP[i]>temp_max_dp or dataOut.EElecTemp_DP[i]>temp_max_dp: + + dataOut.ElecTemp_DP[i]=dataOut.EElecTemp_DP[i]=missing + + if dataOut.IonTemp_DP[i]<=temp_min or dataOut.IonTemp_DP[i]>temp_max_dp or dataOut.EIonTemp_DP[i]>temp_max_dp: + dataOut.IonTemp_DP[i]=dataOut.EIonTemp_DP[i]=missing + +####################################################################################### CHECK THIS + if dataOut.lags_to_plot[i,:][~numpy.isnan(dataOut.lags_to_plot[i,:])].shape[0]<6: + dataOut.ElecTemp_DP[i]=dataOut.EElecTemp_DP[i]=dataOut.IonTemp_DP[i]=dataOut.EIonTemp_DP[i]=missing + + if dataOut.ut_Faraday>4 and dataOut.ut_Faraday<11: + if numpy.nanmax(dataOut.acfs_error_to_plot[i,:])>=10: + dataOut.ElecTemp_DP[i]=dataOut.EElecTemp_DP[i]=dataOut.IonTemp_DP[i]=dataOut.EIonTemp_DP[i]=missing +####################################################################################### + + if dataOut.EPhy_DP[i]<0.0 or dataOut.EPhy_DP[i]>1.0: + dataOut.Phy_DP[i]=dataOut.EPhy_DP[i]=missing + if dataOut.EDensity_DP[i]>0.0 and dataOut.Density_DP[i]>0.0 and dataOut.Density_DP[i]<9.9e6: + dataOut.EDensity_DP[i]=max(dataOut.EDensity_DP[i],1000.0) + else: + dataOut.Density_DP[i]=dataOut.EDensity_DP[i]=missing + if dataOut.Phy_DP[i]==0 or 
dataOut.Phy_DP[i]>0.4: + dataOut.Phy_DP[i]=dataOut.EPhy_DP[i]=missing + if dataOut.ElecTemp_DP[i]==dataOut.IonTemp_DP[i]: + dataOut.EElecTemp_DP[i]=dataOut.EIonTemp_DP[i] + if numpy.isnan(dataOut.ElecTemp_DP[i]): + dataOut.EElecTemp_DP[i]=missing + if numpy.isnan(dataOut.IonTemp_DP[i]): + dataOut.EIonTemp_DP[i]=missing + if numpy.isnan(dataOut.ElecTemp_DP[i]) or numpy.isnan(dataOut.EElecTemp_DP[i]): + dataOut.ElecTemp_DP[i]=dataOut.EElecTemp_DP[i]=dataOut.IonTemp_DP[i]=dataOut.EIonTemp_DP[i]=missing + + + + dataOut.Density_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + dataOut.EDensity_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + dataOut.ElecTemp_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + dataOut.EElecTemp_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + dataOut.IonTemp_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + dataOut.EIonTemp_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + dataOut.Phy_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + dataOut.EPhy_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + dataOut.Phe_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + dataOut.EPhe_LP=numpy.zeros(dataOut.NACF-dataOut.cut) + + dataOut.Density_LP[:]=numpy.copy(dataOut.ne[dataOut.cut:dataOut.NACF]) + dataOut.EDensity_LP[:]=numpy.copy(dataOut.ene[dataOut.cut:dataOut.NACF]) + dataOut.ElecTemp_LP[:]=numpy.copy(dataOut.te[dataOut.cut:dataOut.NACF]) + dataOut.EElecTemp_LP[:]=numpy.copy(dataOut.ete[dataOut.cut:dataOut.NACF]) + dataOut.IonTemp_LP[:]=numpy.copy(dataOut.ti[dataOut.cut:dataOut.NACF]) + dataOut.EIonTemp_LP[:]=numpy.copy(dataOut.eti[dataOut.cut:dataOut.NACF]) + dataOut.Phy_LP[:]=numpy.copy(dataOut.ph[dataOut.cut:dataOut.NACF]) + dataOut.EPhy_LP[:]=numpy.copy(dataOut.eph[dataOut.cut:dataOut.NACF]) + dataOut.Phe_LP[:]=numpy.copy(dataOut.phe[dataOut.cut:dataOut.NACF]) + dataOut.EPhe_LP[:]=numpy.copy(dataOut.ephe[dataOut.cut:dataOut.NACF]) + + temp_max_lp=6000.0 + + for i in range(dataOut.NACF-dataOut.cut): + + if dataOut.ElecTemp_LP[i]<=temp_min or dataOut.ElecTemp_LP[i]>temp_max_lp or dataOut.EElecTemp_LP[i]>temp_max_lp: + + dataOut.ElecTemp_LP[i]=dataOut.EElecTemp_LP[i]=missing + + if dataOut.IonTemp_LP[i]<=temp_min or dataOut.IonTemp_LP[i]>temp_max_lp or dataOut.EIonTemp_LP[i]>temp_max_lp: + dataOut.IonTemp_LP[i]=dataOut.EIonTemp_LP[i]=missing + if dataOut.EPhy_LP[i]<0.0 or dataOut.EPhy_LP[i]>1.0: + dataOut.Phy_LP[i]=dataOut.EPhy_LP[i]=missing + + if dataOut.EPhe_LP[i]<0.0 or dataOut.EPhe_LP[i]>1.0: + dataOut.Phe_LP[i]=dataOut.EPhe_LP[i]=missing + if dataOut.EDensity_LP[i]>0.0 and dataOut.Density_LP[i]>0.0 and dataOut.Density_LP[i]<9.9e6 and dataOut.EDensity_LP[i]*dataOut.Density_LP[i]<9.9e6: + dataOut.EDensity_LP[i]=max(dataOut.EDensity_LP[i],1000.0/dataOut.Density_LP[i]) + else: + dataOut.Density_LP[i]=missing + dataOut.EDensity_LP[i]=1.0 + + if numpy.isnan(dataOut.Phy_LP[i]): + dataOut.EPhy_LP[i]=missing + + if numpy.isnan(dataOut.Phe_LP[i]): + dataOut.EPhe_LP[i]=missing + + + if dataOut.ElecTemp_LP[i]==dataOut.IonTemp_LP[i]: + dataOut.EElecTemp_LP[i]=dataOut.EIonTemp_LP[i] + if numpy.isnan(dataOut.ElecTemp_LP[i]): + dataOut.EElecTemp_LP[i]=missing + if numpy.isnan(dataOut.IonTemp_LP[i]): + dataOut.EIonTemp_LP[i]=missing + if numpy.isnan(dataOut.ElecTemp_LP[i]) or numpy.isnan(dataOut.EElecTemp_LP[i]): + dataOut.ElecTemp_LP[i]=dataOut.EElecTemp_LP[i]=dataOut.IonTemp_LP[i]=dataOut.EIonTemp_LP[i]=missing + + + dataOut.DensityFinal=numpy.reshape(numpy.concatenate((dataOut.Density_DP,dataOut.Density_LP)),(1,-1)) + dataOut.EDensityFinal=numpy.reshape(numpy.concatenate((dataOut.EDensity_DP,dataOut.EDensity_LP)),(1,-1)) + 
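        # Note: the *_DP arrays hold the double-pulse gates below dataOut.cut and the
        # *_LP arrays the long-pulse gates from cut to NACF, so each of these Final
        # arrays is just their concatenation reshaped to (1, NACF).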
dataOut.ElecTempFinal=numpy.reshape(numpy.concatenate((dataOut.ElecTemp_DP,dataOut.ElecTemp_LP)),(1,-1)) + dataOut.EElecTempFinal=numpy.reshape(numpy.concatenate((dataOut.EElecTemp_DP,dataOut.EElecTemp_LP)),(1,-1)) + dataOut.IonTempFinal=numpy.reshape(numpy.concatenate((dataOut.IonTemp_DP,dataOut.IonTemp_LP)),(1,-1)) + dataOut.EIonTempFinal=numpy.reshape(numpy.concatenate((dataOut.EIonTemp_DP,dataOut.EIonTemp_LP)),(1,-1)) + dataOut.PhyFinal=numpy.reshape(numpy.concatenate((dataOut.Phy_DP,dataOut.Phy_LP)),(1,-1)) + dataOut.EPhyFinal=numpy.reshape(numpy.concatenate((dataOut.EPhy_DP,dataOut.EPhy_LP)),(1,-1)) + dataOut.PheFinal=numpy.reshape(numpy.concatenate((dataOut.Phe_DP,dataOut.Phe_LP)),(1,-1)) + dataOut.EPheFinal=numpy.reshape(numpy.concatenate((dataOut.EPhe_DP,dataOut.EPhe_LP)),(1,-1)) + + nan_array_2=numpy.empty(dataOut.NACF-dataOut.NDP) + nan_array_2[:]=numpy.nan + + dataOut.acfs_DP=numpy.zeros((dataOut.NACF,dataOut.DPL),'float32') + dataOut.acfs_error_DP=numpy.zeros((dataOut.NACF,dataOut.DPL),'float32') + acfs_dp_aux=dataOut.acfs_to_save.transpose() + acfs_error_dp_aux=dataOut.acfs_error_to_save.transpose() + for i in range(dataOut.DPL): + dataOut.acfs_DP[:,i]=numpy.concatenate((acfs_dp_aux[:,i],nan_array_2)) + dataOut.acfs_error_DP[:,i]=numpy.concatenate((acfs_error_dp_aux[:,i],nan_array_2)) + dataOut.acfs_DP=dataOut.acfs_DP.transpose() + dataOut.acfs_error_DP=dataOut.acfs_error_DP.transpose() + + dataOut.acfs_LP=numpy.zeros((dataOut.NACF,dataOut.IBITS),'float32') + dataOut.acfs_error_LP=numpy.zeros((dataOut.NACF,dataOut.IBITS),'float32') + + for i in range(dataOut.NACF): + for j in range(dataOut.IBITS): + if numpy.abs(dataOut.errors[j,i]/dataOut.output_LP_integrated.real[0,i,0])<1.0: + dataOut.acfs_LP[i,j]=dataOut.output_LP_integrated.real[j,i,0]/dataOut.output_LP_integrated.real[0,i,0] + dataOut.acfs_LP[i,j]=max(min(dataOut.acfs_LP[i,j],1.0),-1.0) + + dataOut.acfs_error_LP[i,j]=dataOut.errors[j,i]/dataOut.output_LP_integrated.real[0,i,0] + else: + dataOut.acfs_LP[i,j]=numpy.nan + + dataOut.acfs_error_LP[i,j]=numpy.nan + + dataOut.acfs_LP=dataOut.acfs_LP.transpose() + dataOut.acfs_error_LP=dataOut.acfs_error_LP.transpose() + + return dataOut + + +class ACFs(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.aux=1 + + def run(self,dataOut): + + if self.aux: + self.taup=numpy.zeros(dataOut.DPL,'float32') + self.pacf=numpy.zeros(dataOut.DPL,'float32') + self.sacf=numpy.zeros(dataOut.DPL,'float32') + + self.taup_full=numpy.zeros(dataOut.DPL,'float32') + self.pacf_full=numpy.zeros(dataOut.DPL,'float32') + self.sacf_full=numpy.zeros(dataOut.DPL,'float32') + self.x_igcej=numpy.zeros(dataOut.DPL,'float32') + self.y_igcej=numpy.zeros(dataOut.DPL,'float32') + self.x_ibad=numpy.zeros(dataOut.DPL,'float32') + self.y_ibad=numpy.zeros(dataOut.DPL,'float32') + self.aux=0 + + dataOut.acfs_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32') + dataOut.acfs_to_save=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32') + dataOut.acfs_error_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32') + dataOut.acfs_error_to_save=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32') + dataOut.lags_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32') + dataOut.x_igcej_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32') + dataOut.x_ibad_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32') + dataOut.y_igcej_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32') + dataOut.y_ibad_to_plot=numpy.zeros((dataOut.NDP,dataOut.DPL),'float32') + + for i in 
range(dataOut.NSHTS): + + acfm=dataOut.rhor[i][0]**2+dataOut.rhoi[i][0]**2 + + if acfm>0: + cc=dataOut.rhor[i][0]/acfm + ss=dataOut.rhoi[i][0]/acfm + else: + cc=1. + ss=0. + + # keep only uncontaminated data + for l in range(dataOut.DPL): + fact=dataOut.DH + if (dataOut.igcej[i][l]==0 and dataOut.ibad[i][l]==0): + + self.pacf_full[l]=min(1.0,max(-1.0,(dataOut.rhor[i][l]*cc + dataOut.rhoi[i][l]*ss)))*fact+dataOut.range1[i] + self.sacf_full[l]=min(1.0,numpy.sqrt(dataOut.sd[i][l]))*fact + self.taup_full[l]=dataOut.alag[l] + self.x_igcej[l]=numpy.nan + self.y_igcej[l]=numpy.nan + self.x_ibad[l]=numpy.nan + self.y_ibad[l]=numpy.nan + + else: + self.pacf_full[l]=numpy.nan + self.sacf_full[l]=numpy.nan + self.taup_full[l]=numpy.nan + + if dataOut.igcej[i][l]: + self.x_igcej[l]=dataOut.alag[l] + self.y_igcej[l]=dataOut.range1[i] + self.x_ibad[l]=numpy.nan + self.y_ibad[l]=numpy.nan + + if dataOut.ibad[i][l]: + self.x_igcej[l]=numpy.nan + self.y_igcej[l]=numpy.nan + self.x_ibad[l]=dataOut.alag[l] + self.y_ibad[l]=dataOut.range1[i] + + pacf_new=numpy.copy((self.pacf_full-dataOut.range1[i])/dataOut.DH) + sacf_new=numpy.copy(self.sacf_full/dataOut.DH) + dataOut.acfs_to_save[i,:]=numpy.copy(pacf_new) + dataOut.acfs_error_to_save[i,:]=numpy.copy(sacf_new) + dataOut.acfs_to_plot[i,:]=numpy.copy(self.pacf_full) + dataOut.acfs_error_to_plot[i,:]=numpy.copy(self.sacf_full) + dataOut.lags_to_plot[i,:]=numpy.copy(self.taup_full) + dataOut.x_igcej_to_plot[i,:]=numpy.copy(self.x_igcej) + dataOut.x_ibad_to_plot[i,:]=numpy.copy(self.x_ibad) + dataOut.y_igcej_to_plot[i,:]=numpy.copy(self.y_igcej) + dataOut.y_ibad_to_plot[i,:]=numpy.copy(self.y_ibad) + + missing=numpy.nan#-32767 + + for i in range(dataOut.NSHTS,dataOut.NDP): + for j in range(dataOut.DPL): + dataOut.acfs_to_save[i,j]=missing + dataOut.acfs_error_to_save[i,j]=missing + dataOut.acfs_to_plot[i,j]=missing + dataOut.acfs_error_to_plot[i,j]=missing + dataOut.lags_to_plot[i,j]=missing + dataOut.x_igcej_to_plot[i,j]=missing + dataOut.x_ibad_to_plot[i,j]=missing + dataOut.y_igcej_to_plot[i,j]=missing + dataOut.y_ibad_to_plot[i,j]=missing + + dataOut.acfs_to_save=dataOut.acfs_to_save.transpose() + dataOut.acfs_error_to_save=dataOut.acfs_error_to_save.transpose() + + return dataOut + + +class CohInt(Operation): + + isConfig = False + __profIndex = 0 + __byTime = False + __initime = None + __lastdatatime = None + __integrationtime = None + __buffer = None + __bufferStride = [] + __dataReady = False + __profIndexStride = 0 + __dataToPutStride = False + n = None + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + # self.isConfig = False + + def setup(self, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False): + """ + Set the parameters of the integration class. + + Inputs: + + n : Number of coherent integrations + timeInterval : Time of integration. If the parameter "n" is selected this one does not work + overlapping : + """ + + self.__initime = None + self.__lastdatatime = 0 + self.__buffer = None + self.__dataReady = False + self.byblock = byblock + self.stride = stride + + if n == None and timeInterval == None: + raise ValueError("n or timeInterval should be specified ...") + + if n != None: + self.n = n + self.__byTime = False + else: + self.__integrationtime = timeInterval #* 60. 
#if (type(timeInterval)!=integer) -> change this line + self.n = 9999 + self.__byTime = True + + if overlapping: + self.__withOverlapping = True + self.__buffer = None + else: + self.__withOverlapping = False + self.__buffer = 0 + + self.__profIndex = 0 + + def putData(self, data): + + """ + Add a profile to the __buffer and increase in one the __profileIndex + + """ + + if not self.__withOverlapping: + self.__buffer += data.copy() + self.__profIndex += 1 + return + + #Overlapping data + nChannels, nHeis = data.shape + data = numpy.reshape(data, (1, nChannels, nHeis)) + + #If the buffer is empty then it takes the data value + if self.__buffer is None: + self.__buffer = data + self.__profIndex += 1 + return + + #If the buffer length is lower than n then stakcing the data value + if self.__profIndex < self.n: + self.__buffer = numpy.vstack((self.__buffer, data)) + self.__profIndex += 1 + return + + #If the buffer length is equal to n then replacing the last buffer value with the data value + self.__buffer = numpy.roll(self.__buffer, -1, axis=0) + self.__buffer[self.n-1] = data + self.__profIndex = self.n + return + + + def pushData(self): + """ + Return the sum of the last profiles and the profiles used in the sum. + + Affected: + + self.__profileIndex + + """ + + if not self.__withOverlapping: + data = self.__buffer + n = self.__profIndex + + self.__buffer = 0 + self.__profIndex = 0 + + return data, n + + #Integration with Overlapping + data = numpy.sum(self.__buffer, axis=0) + # print data + # raise + n = self.__profIndex + + return data, n + + def byProfiles(self, data): + + self.__dataReady = False + avgdata = None + # n = None + # print data + # raise + self.putData(data) + + if self.__profIndex == self.n: + avgdata, n = self.pushData() + self.__dataReady = True + + return avgdata + + def byTime(self, data, datatime): + + self.__dataReady = False + avgdata = None + n = None + + self.putData(data) + + if (datatime - self.__initime) >= self.__integrationtime: + avgdata, n = self.pushData() + self.n = n + self.__dataReady = True + + return avgdata + + def integrateByStride(self, data, datatime): + # print data + if self.__profIndex == 0: + self.__buffer = [[data.copy(), datatime]] + else: + self.__buffer.append([data.copy(),datatime]) + self.__profIndex += 1 + self.__dataReady = False + + if self.__profIndex == self.n * self.stride : + self.__dataToPutStride = True + self.__profIndexStride = 0 + self.__profIndex = 0 + self.__bufferStride = [] + for i in range(self.stride): + current = self.__buffer[i::self.stride] + data = numpy.sum([t[0] for t in current], axis=0) + avgdatatime = numpy.average([t[1] for t in current]) + # print data + self.__bufferStride.append((data, avgdatatime)) + + if self.__dataToPutStride: + self.__dataReady = True + self.__profIndexStride += 1 + if self.__profIndexStride == self.stride: + self.__dataToPutStride = False + # print self.__bufferStride[self.__profIndexStride - 1] + # raise + return self.__bufferStride[self.__profIndexStride - 1] + + + return None, None + + def integrate(self, data, datatime=None): + + if self.__initime == None: + self.__initime = datatime + + if self.__byTime: + avgdata = self.byTime(data, datatime) + else: + avgdata = self.byProfiles(data) + + + self.__lastdatatime = datatime + + if avgdata is None: + return None, None + + avgdatatime = self.__initime + + deltatime = datatime - self.__lastdatatime + + if not self.__withOverlapping: + self.__initime = datatime + else: + self.__initime += deltatime + + return avgdata, avgdatatime + + 
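For reference, the non-overlapping path of CohInt boils down to summing every n consecutive complex profiles (putData accumulates into __buffer, pushData returns the sum plus the profile count and resets). A minimal, self-contained sketch of that idea follows; coherent_integration and its arguments are illustrative names only, not part of the class.

    import numpy

    def coherent_integration(profiles, n):
        # profiles: sequence of complex arrays, each of shape (nChannels, nHeights)
        # returns one integrated profile per group of n consecutive input profiles
        profiles = list(profiles)
        usable = len(profiles) - len(profiles) % n
        return [numpy.sum(profiles[k:k + n], axis=0) for k in range(0, usable, n)]

For a target that stays phase-coherent over the n profiles this raises the signal-to-noise ratio by roughly a factor of n, which is why run() scales dataOut.nCohInt by self.n (guarded by flagCohInt) once integrated data is ready.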
def integrateByBlock(self, dataOut): + + times = int(dataOut.data.shape[1]/self.n) + avgdata = numpy.zeros((dataOut.nChannels, times, dataOut.nHeights), dtype=numpy.complex) + + id_min = 0 + id_max = self.n + + for i in range(times): + junk = dataOut.data[:,id_min:id_max,:] + avgdata[:,i,:] = junk.sum(axis=1) + id_min += self.n + id_max += self.n + + timeInterval = dataOut.ippSeconds*self.n + avgdatatime = (times - 1) * timeInterval + dataOut.utctime + self.__dataReady = True + return avgdata, avgdatatime + + def run(self, dataOut, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False, **kwargs): + + if not self.isConfig: + self.setup(n=n, stride=stride, timeInterval=timeInterval, overlapping=overlapping, byblock=byblock, **kwargs) + self.isConfig = True + print("inside") + if dataOut.flagDataAsBlock: + """ + Si la data es leida por bloques, dimension = [nChannels, nProfiles, nHeis] + """ + + avgdata, avgdatatime = self.integrateByBlock(dataOut) + dataOut.nProfiles /= self.n + else: + if stride is None: + avgdata, avgdatatime = self.integrate(dataOut.data, dataOut.utctime) + else: + avgdata, avgdatatime = self.integrateByStride(dataOut.data, dataOut.utctime) + + + # dataOut.timeInterval *= n + dataOut.flagNoData = True + + if self.__dataReady: + dataOut.data = avgdata + if not dataOut.flagCohInt: + dataOut.nCohInt *= self.n + dataOut.flagCohInt = True + dataOut.utctime = avgdatatime + # print avgdata, avgdatatime + # raise + # dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt + dataOut.flagNoData = False + return dataOut + +class TimesCode(Operation): + """ + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + + def run(self,dataOut,code): + + #code = numpy.repeat(code, repeats=osamp, axis=1) + nCodes = numpy.shape(code)[1] + #nprofcode = dataOut.nProfiles//nCodes + code = numpy.array(code) + #print("nHeights",dataOut.nHeights) + #print("nheicode",nheicode) + #print("Code.Shape",numpy.shape(code)) + #print("Code",code[0,:]) + nheicode = dataOut.nHeights//nCodes + res = dataOut.nHeights%nCodes + ''' + buffer = numpy.zeros((dataOut.nChannels, + nprofcode, + nCodes, + ndataOut.nHeights), + dtype='complex') + ''' + #exit(1) + #for ipr in range(dataOut.nProfiles): + #print(dataOut.nHeights) + #print(dataOut.data[0,384-2:]) + #print(dataOut.profileIndex) + #print(dataOut.data[0,:2]) + #print(dataOut.data[0,0:64]) + #print(dataOut.data[0,64:64+64]) + #exit(1) + for ich in range(dataOut.nChannels): + for ihe in range(nheicode): + #print(ihe*nCodes) + #print((ihe+1)*nCodes) + #dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)] + #code[ipr,:] + #print("before",dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)]) + #dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)] = numpy.prod([dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)],code[ipr,:]],axis=0) + dataOut.data[ich,ihe*nCodes:nCodes*(ihe+1)] = numpy.prod([dataOut.data[ich,ihe*nCodes:nCodes*(ihe+1)],code[dataOut.profileIndex,:]],axis=0) + + #print("after",dataOut.data[ich,ipr,ihe*nCodes:nCodes*(ihe+1)]) + #exit(1) + #print(dataOut.data[0,:2]) + #exit(1) + #print(nheicode) + #print((nheicode)*nCodes) + #print(((nheicode)*nCodes)+res) + if res != 0: + for ich in range(dataOut.nChannels): + dataOut.data[ich,nheicode*nCodes:] = numpy.prod([dataOut.data[ich,nheicode*nCodes:],code[dataOut.profileIndex,:res]],axis=0) + + #pass + #print(dataOut.data[0,384-2:]) + #exit(1) + #dataOut.data = numpy.mean(buffer,axis=1) + #print(numpy.shape(dataOut.data)) + #print(dataOut.nHeights) + #dataOut.heightList = 
dataOut.heightList[0:nheicode] + #print(dataOut.nHeights) + #dataOut.nHeights = numpy.shape(dataOut.data)[2] + #print(numpy.shape(dataOut.data)) + #exit(1) + + return dataOut + +''' +class Spectrogram(Operation): + """ + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + + def run(self,dataOut): + + import scipy + + + + fs = 3200*1e-6 + fs = fs/64 + fs = 1/fs + + nperseg=64 + noverlap=48 + + f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False, nperseg=nperseg, noverlap=noverlap, mode='complex') + + + for ich in range(dataOut.nChannels): + for ihe in range(nheicode): + + + return dataOut +''' + + +class RemoveDcHae(Operation): + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.DcCounter = 0 + + def run(self, dataOut): + + if self.DcCounter == 0: + dataOut.DcHae = numpy.zeros((dataOut.data.shape[0],320),dtype='complex') + #dataOut.DcHae = [] + self.DcCounter = 1 + + dataOut.dataaux = numpy.copy(dataOut.data) + + #dataOut.DcHae += dataOut.dataaux[:,1666:1666+320] + dataOut.DcHae += dataOut.dataaux[:,0:0+320] + hei = 1666 + hei = 2000 + hei = 1000 + hei = 0 + #dataOut.DcHae = numpy.concatenate([dataOut.DcHae,dataOut.dataaux[0,hei]],axis = None) + + + + return dataOut + + +class SSheightProfiles(Operation): + + step = None + nsamples = None + bufferShape = None + profileShape = None + sshProfiles = None + profileIndex = None + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.isConfig = False + + def setup(self,dataOut ,step = None , nsamples = None): + + if step == None and nsamples == None: + #pass + raise ValueError("step or nheights should be specified ...") + + self.step = step + self.nsamples = nsamples + self.__nChannels = dataOut.nChannels + self.__nProfiles = dataOut.nProfiles + self.__nHeis = dataOut.nHeights + shape = dataOut.data.shape #nchannels, nprofiles, nsamples + ''' + print "input nChannels",self.__nChannels + print "input nProfiles",self.__nProfiles + print "input nHeis",self.__nHeis + print "input Shape",shape + ''' + + + residue = (shape[1] - self.nsamples) % self.step + if residue != 0: + print("The residue is %d, step=%d should be multiple of %d to avoid loss of %d samples"%(residue,step,shape[1] - self.nsamples,residue)) + + deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + numberProfile = self.nsamples + numberSamples = (shape[1] - self.nsamples)/self.step + ''' + print "new numberProfile",numberProfile + print "new numberSamples",numberSamples + + print "New number of profile: %d, number of height: %d, Resolution %f Km"%(numberProfile,numberSamples,deltaHeight*self.step) + ''' + self.bufferShape = int(shape[0]), int(numberSamples), int(numberProfile) # nchannels, nsamples , nprofiles + self.profileShape = int(shape[0]), int(numberProfile), int(numberSamples) # nchannels, nprofiles, nsamples + + self.buffer = numpy.zeros(self.bufferShape , dtype=numpy.complex) + self.sshProfiles = numpy.zeros(self.profileShape, dtype=numpy.complex) + + def run(self, dataOut, step, nsamples, code = None, repeat = None): + #print(dataOut.profileIndex) + dataOut.flagNoData = True + dataOut.flagDataAsBlock = False + profileIndex = None + + #code = numpy.array(code) + #print(dataOut.data[0,:]) + #exit(1) + + + if not self.isConfig: + #print("STEP",step) + self.setup(dataOut, step=step , nsamples=nsamples) + self.isConfig = True + #print(code[dataOut.profileIndex,:]) + + #DC_Hae = numpy.array([0.398+0.588j, -0.926+0.306j, -0.536-0.682j, -0.072+0.53j, 0.368-0.356j, 0.996+0.362j]) + 
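        # Note: DC_Hae below is a hard-coded per-channel DC offset subtracted from the
        # raw voltages (dataOut.data - DC_Hae[:,None]); three candidate vectors are
        # assigned in a row, so only the last assignment takes effect.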
DC_Hae = numpy.array([ 0.001025 +0.0516375j, 0.03485 +0.20923125j, -0.168 -0.02720625j, + -0.1105375 +0.0707125j, -0.20309375-0.09670625j, 0.189775 +0.02716875j])*(-3.5) + + DC_Hae = numpy.array([ -32.26 +8.66j, -32.26 +8.66j]) + + DC_Hae = numpy.array([-2.78500000e-01 -1.39175j, -6.63237294e+02+210.4268625j]) + + + + + + + #print(dataOut.data[0,13:15]) + dataOut.data = dataOut.data - DC_Hae[:,None] + #print(dataOut.data[0,13:15]) + #exit(1) + + + + code = numpy.array(code) + roll = 0 + code = numpy.roll(code,roll,axis=0) + code = numpy.reshape(code,(5,100,64)) + block = dataOut.CurrentBlock%5 + #print(block) + + #code_block = code[block-1-2,:,:] + day_dif = 1 #day_12 + code_block = code[block-1-0,:,:] + + if repeat is not None: + code_block = numpy.repeat(code_block, repeats=repeat, axis=1) + + + + #print(dataOut.data[0:2,13]) + for i in range(self.buffer.shape[1]): + #self.buffer[:,i] = numpy.flip(dataOut.data[:,i*self.step:i*self.step + self.nsamples]) + ''' + print(dataOut.profileIndex) + print(code[dataOut.profileIndex,:]) + print("before",dataOut.data[:,i*self.step:i*self.step + self.nsamples]) + print("after",dataOut.data[:,i*self.step:i*self.step + self.nsamples]*code[dataOut.profileIndex,:]) + exit(1) + ''' + + #dif = numpy.copy(code) + if code is not None: + ''' + code = numpy.array(code) + #print(code[0,:]) + + #print("There is Code") + #exit(1) + #code = dataOut.code + #print(code[0,:]) + #exit(1) + + roll = 0 + code = numpy.roll(code,roll,axis=0) + code = numpy.reshape(code,(5,100,64)) + block = dataOut.CurrentBlock%5 + #print(block) + + #code_block = code[block-1-2,:,:] + day_dif = 1 #day_12 + code_block = code[block-1-0,:,:] + + + + if repeat is not None: + code_block = numpy.repeat(code_block, repeats=repeat, axis=1) + ''' + + + + #code_block = code[0,:,:] + + #print(code_block[2,:]) + #for l in range(dataOut.data.shape[1]): + #dataOut.data[:,l] = dataOut.data[:,l] - numpy.array([0.398+0.588j, -0.926+0.306j, -0.536-0.682j, -0.072+0.53j, 0.368-0.356j, 0.996+0.362j]) + + ##DC_Hae = numpy.array([0.398+0.588j, -0.926+0.306j, -0.536-0.682j, -0.072+0.53j, 0.368-0.356j, 0.996+0.362j]) + + #print(dataOut.data[0:2,13]) + ##dataOut.data = dataOut.data - DC_Hae[:,None] + #print(dataOut.data[0:2,13]) + #exit(1) + #print(dataOut.data[0,i*self.step:i*self.step + self.nsamples]) + #print(dataOut.data[1,i*self.step:i*self.step + self.nsamples]) + #print(code_block[dataOut.profileIndex,:]) + #print(numpy.shape(code_block[dataOut.profileIndex,:])) + #exit(1) + ###aux = numpy.mean(dataOut.data[:,i*self.step:i*self.step + self.nsamples],axis=1) + ###self.buffer[:,i] = (dataOut.data[:,i*self.step:i*self.step + self.nsamples]-aux[:,None])*code_block[dataOut.profileIndex,:] + ''' + if i == 18: + buffer = dataOut.data[0,i*self.step:i*self.step + self.nsamples] + import matplotlib.pyplot as plt + fig, axes = plt.subplots(figsize=(14, 10)) + x = numpy.linspace(0,20,numpy.shape(buffer)[0]) + x = numpy.fft.fftfreq(numpy.shape(buffer)[0],0.00005) + x = numpy.fft.fftshift(x) + + plt.plot(x,buffer) + plt.show() + import time + time.sleep(50) + ''' + #for k in range(dataOut.nChannels): + self.buffer[:,i] = dataOut.data[:,i*self.step:i*self.step + self.nsamples]*code_block[dataOut.profileIndex,:] + #print(dataOut.data[0,:]) + #print(code_block[0,:]) + #print(self.buffer[1,i]) + #exit(1) + else: + #print("There is no Code") + #exit(1) + self.buffer[:,i] = dataOut.data[:,i*self.step:i*self.step + self.nsamples]#*code[dataOut.profileIndex,:] + + #self.buffer[:,j,self.__nHeis-j*self.step - 
self.nheights:self.__nHeis-j*self.step] = numpy.flip(dataOut.data[:,j*self.step:j*self.step + self.nheights]) + + for j in range(self.buffer.shape[0]): + self.sshProfiles[j] = numpy.transpose(self.buffer[j]) + + profileIndex = self.nsamples + deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + ippSeconds = (deltaHeight*1.0e-6)/(0.15) + #print "ippSeconds",ippSeconds + try: + if dataOut.concat_m is not None: + ippSeconds= ippSeconds/float(dataOut.concat_m) + #print "Profile concat %d"%dataOut.concat_m + except: + pass + + + + dataOut.data = self.sshProfiles + dataOut.flagNoData = False + dataOut.heightList = numpy.arange(self.buffer.shape[1]) *self.step*deltaHeight + dataOut.heightList[0] + dataOut.nProfiles = int(dataOut.nProfiles*self.nsamples) + + ''' + print(dataOut.profileIndex) + if dataOut.profileIndex == 0: + dataOut.data = dataOut.data*1.e5 + + buffer_prom = + ''' + #dataOut.utctime = dataOut.utctime - dataOut.profileIndex + #print(dataOut.profileIndex) + #print(dataOut.data[0,0,0]) + ''' + if dataOut.profileIndex == 0: + self.buffer_prom = numpy.copy(dataOut.data) + + else: + self.buffer_prom = dataOut.data+self.buffer_prom + if dataOut.profileIndex == 99: + dataOut.data = self.buffer_prom/100 + ''' + + #print(dataOut.data[0,0,0]) + #print(dataOut.profileIndex) + dataOut.profileIndex = profileIndex + dataOut.flagDataAsBlock = True + dataOut.ippSeconds = ippSeconds + dataOut.step = self.step + #print(dataOut.profileIndex) + #print(dataOut.heightList) + #exit(1) + + #print(dataOut.times) + + return dataOut + +class Decoder(Operation): + + isConfig = False + __profIndex = 0 + + code = None + + nCode = None + nBaud = None + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.times = None + self.osamp = None + # self.__setValues = False + self.isConfig = False + self.setupReq = False + def setup(self, code, osamp, dataOut): + + self.__profIndex = 0 + + self.code = code + + self.nCode = len(code) + self.nBaud = len(code[0]) + + if (osamp != None) and (osamp >1): + self.osamp = osamp + self.code = numpy.repeat(code, repeats=self.osamp, axis=1) + self.nBaud = self.nBaud*self.osamp + + self.__nChannels = dataOut.nChannels + self.__nProfiles = dataOut.nProfiles + self.__nHeis = dataOut.nHeights + + if self.__nHeis < self.nBaud: + raise ValueError('Number of heights (%d) should be greater than number of bauds (%d)' %(self.__nHeis, self.nBaud)) + + #Frequency + __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex) + + __codeBuffer[:,0:self.nBaud] = self.code + + self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1)) + + if dataOut.flagDataAsBlock: + + self.ndatadec = self.__nHeis #- self.nBaud + 1 + + self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex) + + else: + + #Time + self.ndatadec = self.__nHeis #- self.nBaud + 1 + + + self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex) + + def __convolutionInFreq(self, data): + + fft_code = self.fft_code[self.__profIndex].reshape(1,-1) + + fft_data = numpy.fft.fft(data, axis=1) + + conv = fft_data*fft_code + + data = numpy.fft.ifft(conv,axis=1) + + return data + + def __convolutionInFreqOpt(self, data): + + raise NotImplementedError + + def __convolutionInTime(self, data): + + code = self.code[self.__profIndex] + for i in range(self.__nChannels): + #aux=numpy.correlate(data[i,:], code, mode='full') + #print(numpy.shape(aux)) + #print(numpy.shape(data[i,:])) + #print(numpy.shape(code)) + #exit(1) + 
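            # Decode by pulse compression: numpy.correlate in 'full' mode returns
            # nHeights + nBaud - 1 lags (conjugating the code for complex input), and
            # slicing from self.nBaud-1 keeps one decoded sample per original height,
            # i.e. decoded[h] = sum_j data[h+j] * conj(code[j]).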
self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:] + + return self.datadecTime + + def __convolutionByBlockInTime(self, data): + + repetitions = int(self.__nProfiles / self.nCode) + junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize)) + junk = junk.flatten() + code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud)) + profilesList = range(self.__nProfiles) + #print(numpy.shape(self.datadecTime)) + #print(numpy.shape(data)) + for i in range(self.__nChannels): + for j in profilesList: + self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:] + return self.datadecTime + + def __convolutionByBlockInFreq(self, data): + + raise NotImplementedError("Decoder by frequency fro Blocks not implemented") + + + fft_code = self.fft_code[self.__profIndex].reshape(1,-1) + + fft_data = numpy.fft.fft(data, axis=2) + + conv = fft_data*fft_code + + data = numpy.fft.ifft(conv,axis=2) + + return data + + + def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None): + + if dataOut.flagDecodeData: + print("This data is already decoded, recoding again ...") + + if not self.isConfig: + + if code is None: + if dataOut.code is None: + raise ValueError("Code could not be read from %s instance. Enter a value in Code parameter" %dataOut.type) + + code = dataOut.code + else: + code = numpy.array(code).reshape(nCode,nBaud) + self.setup(code, osamp, dataOut) + + self.isConfig = True + + if mode == 3: + sys.stderr.write("Decoder Warning: mode=%d is not valid, using mode=0\n" %mode) + + if times != None: + sys.stderr.write("Decoder Warning: Argument 'times' in not used anymore\n") + + if self.code is None: + print("Fail decoding: Code is not defined.") + return + + self.__nProfiles = dataOut.nProfiles + datadec = None + + if mode == 3: + mode = 0 + + if dataOut.flagDataAsBlock: + """ + Decoding when data have been read as block, + """ + + if mode == 0: + datadec = self.__convolutionByBlockInTime(dataOut.data) + if mode == 1: + datadec = self.__convolutionByBlockInFreq(dataOut.data) + else: + """ + Decoding when data have been read profile by profile + """ + if mode == 0: + datadec = self.__convolutionInTime(dataOut.data) + + if mode == 1: + datadec = self.__convolutionInFreq(dataOut.data) + + if mode == 2: + datadec = self.__convolutionInFreqOpt(dataOut.data) + + if datadec is None: + raise ValueError("Codification mode selected is not valid: mode=%d. 
Try selecting 0 or 1" %mode) + + dataOut.code = self.code + dataOut.nCode = self.nCode + dataOut.nBaud = self.nBaud + + dataOut.data = datadec + #print("before",dataOut.heightList) + dataOut.heightList = dataOut.heightList[0:datadec.shape[-1]] + #print("after",dataOut.heightList) + + dataOut.flagDecodeData = True #asumo q la data esta decodificada + + if self.__profIndex == self.nCode-1: + self.__profIndex = 0 + return dataOut + + self.__profIndex += 1 + + #print("SHAPE",numpy.shape(dataOut.data)) + + return dataOut + # dataOut.flagDeflipData = True #asumo q la data no esta sin flip + +class DecoderRoll(Operation): + + isConfig = False + __profIndex = 0 + + code = None + + nCode = None + nBaud = None + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.times = None + self.osamp = None + # self.__setValues = False + self.isConfig = False + self.setupReq = False + def setup(self, code, osamp, dataOut): + + self.__profIndex = 0 + + + self.code = code + + self.nCode = len(code) + self.nBaud = len(code[0]) + + if (osamp != None) and (osamp >1): + self.osamp = osamp + self.code = numpy.repeat(code, repeats=self.osamp, axis=1) + self.nBaud = self.nBaud*self.osamp + + self.__nChannels = dataOut.nChannels + self.__nProfiles = dataOut.nProfiles + self.__nHeis = dataOut.nHeights + + if self.__nHeis < self.nBaud: + raise ValueError('Number of heights (%d) should be greater than number of bauds (%d)' %(self.__nHeis, self.nBaud)) + + #Frequency + __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex) + + __codeBuffer[:,0:self.nBaud] = self.code + + self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1)) + + if dataOut.flagDataAsBlock: + + self.ndatadec = self.__nHeis #- self.nBaud + 1 + + self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex) + + else: + + #Time + self.ndatadec = self.__nHeis #- self.nBaud + 1 + + + self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex) + + def __convolutionInFreq(self, data): + + fft_code = self.fft_code[self.__profIndex].reshape(1,-1) + + fft_data = numpy.fft.fft(data, axis=1) + + conv = fft_data*fft_code + + data = numpy.fft.ifft(conv,axis=1) + + return data + + def __convolutionInFreqOpt(self, data): + + raise NotImplementedError + + def __convolutionInTime(self, data): + + code = self.code[self.__profIndex] + #print("code",code[0,0]) + for i in range(self.__nChannels): + #aux=numpy.correlate(data[i,:], code, mode='full') + #print(numpy.shape(aux)) + #print(numpy.shape(data[i,:])) + #print(numpy.shape(code)) + #exit(1) + self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:] + + return self.datadecTime + + def __convolutionByBlockInTime(self, data): + + repetitions = int(self.__nProfiles / self.nCode) + junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize)) + junk = junk.flatten() + code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud)) + profilesList = range(self.__nProfiles) + #print(numpy.shape(self.datadecTime)) + #print(numpy.shape(data)) + for i in range(self.__nChannels): + for j in profilesList: + self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:] + return self.datadecTime + + def __convolutionByBlockInFreq(self, data): + + raise NotImplementedError("Decoder by frequency fro Blocks not implemented") + + + fft_code = self.fft_code[self.__profIndex].reshape(1,-1) + + fft_data 
= numpy.fft.fft(data, axis=2) + + conv = fft_data*fft_code + + data = numpy.fft.ifft(conv,axis=2) + + return data + + + def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None): + + if dataOut.flagDecodeData: + print("This data is already decoded, recoding again ...") + + + + #print(dataOut.ippSeconds) + #exit(1) + roll = 0 + + if self.isConfig: + code = numpy.array(code) + + #roll = 29 + code = numpy.roll(code,roll,axis=0) + code = numpy.reshape(code,(5,100,64)) + block = dataOut.CurrentBlock%5 + #code = code[block-1,:,:] #NormalizeDPPower + code = code[block-1-1,:,:] #Next Day + self.code = numpy.repeat(code, repeats=self.osamp, axis=1) + + + if not self.isConfig: + + if code is None: + if dataOut.code is None: + raise ValueError("Code could not be read from %s instance. Enter a value in Code parameter" %dataOut.type) + + code = dataOut.code + else: + code = numpy.array(code) + + #roll = 29 + code = numpy.roll(code,roll,axis=0) + code = numpy.reshape(code,(5,100,64)) + block = dataOut.CurrentBlock%5 + code = code[block-1-1,:,:] + #print(code.shape()) + #exit(1) + + code = numpy.array(code).reshape(nCode,nBaud) + self.setup(code, osamp, dataOut) + + self.isConfig = True + + if mode == 3: + sys.stderr.write("Decoder Warning: mode=%d is not valid, using mode=0\n" %mode) + + if times != None: + sys.stderr.write("Decoder Warning: Argument 'times' in not used anymore\n") + + if self.code is None: + print("Fail decoding: Code is not defined.") + return + + self.__nProfiles = dataOut.nProfiles + datadec = None + + if mode == 3: + mode = 0 + + if dataOut.flagDataAsBlock: + """ + Decoding when data have been read as block, + """ + + if mode == 0: + datadec = self.__convolutionByBlockInTime(dataOut.data) + if mode == 1: + datadec = self.__convolutionByBlockInFreq(dataOut.data) + else: + """ + Decoding when data have been read profile by profile + """ + if mode == 0: + datadec = self.__convolutionInTime(dataOut.data) + + if mode == 1: + datadec = self.__convolutionInFreq(dataOut.data) + + if mode == 2: + datadec = self.__convolutionInFreqOpt(dataOut.data) + + if datadec is None: + raise ValueError("Codification mode selected is not valid: mode=%d. 
Try selecting 0 or 1" %mode) + + dataOut.code = self.code + dataOut.nCode = self.nCode + dataOut.nBaud = self.nBaud + + dataOut.data = datadec + #print("before",dataOut.heightList) + dataOut.heightList = dataOut.heightList[0:datadec.shape[-1]] + #print("after",dataOut.heightList) + + dataOut.flagDecodeData = True #asumo q la data esta decodificada + + if self.__profIndex == self.nCode-1: + self.__profIndex = 0 + return dataOut + + self.__profIndex += 1 + + #print("SHAPE",numpy.shape(dataOut.data)) + + return dataOut + + + + + + +class ProfileConcat(Operation): + + isConfig = False + buffer = None + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.profileIndex = 0 + + def reset(self): + self.buffer = numpy.zeros_like(self.buffer) + self.start_index = 0 + self.times = 1 + + def setup(self, data, m, n=1): + self.buffer = numpy.zeros((data.shape[0],data.shape[1]*m),dtype=type(data[0,0])) + self.nHeights = data.shape[1]#.nHeights + self.start_index = 0 + self.times = 1 + + def concat(self, data): + + self.buffer[:,self.start_index:self.nHeights*self.times] = data.copy() + self.start_index = self.start_index + self.nHeights + + def run(self, dataOut, m): + dataOut.flagNoData = True + + if not self.isConfig: + self.setup(dataOut.data, m, 1) + self.isConfig = True + + if dataOut.flagDataAsBlock: + raise ValueError("ProfileConcat can only be used when voltage have been read profile by profile, getBlock = False") + + else: + self.concat(dataOut.data) + self.times += 1 + if self.times > m: + dataOut.data = self.buffer + self.reset() + dataOut.flagNoData = False + # se deben actualizar mas propiedades del header y del objeto dataOut, por ejemplo, las alturas + deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + xf = dataOut.heightList[0] + dataOut.nHeights * deltaHeight * m + dataOut.heightList = numpy.arange(dataOut.heightList[0], xf, deltaHeight) + dataOut.ippSeconds *= m + return dataOut + +class ProfileSelector(Operation): + + profileIndex = None + # Tamanho total de los perfiles + nProfiles = None + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.profileIndex = 0 + + def incProfileIndex(self): + + self.profileIndex += 1 + + if self.profileIndex >= self.nProfiles: + self.profileIndex = 0 + + def isThisProfileInRange(self, profileIndex, minIndex, maxIndex): + + if profileIndex < minIndex: + return False + + if profileIndex > maxIndex: + return False + + return True + + def isThisProfileInList(self, profileIndex, profileList): + + if profileIndex not in profileList: + return False + + return True + + def run(self, dataOut, profileList=None, profileRangeList=None, beam=None, byblock=False, rangeList = None, nProfiles=None): + + """ + ProfileSelector: + + Inputs: + profileList : Index of profiles selected. Example: profileList = (0,1,2,7,8) + + profileRangeList : Minimum and maximum profile indexes. Example: profileRangeList = (4, 30) + + rangeList : List of profile ranges. 
Example: rangeList = ((4, 30), (32, 64), (128, 256)) + + """ + + if rangeList is not None: + if type(rangeList[0]) not in (tuple, list): + rangeList = [rangeList] + + dataOut.flagNoData = True + + if dataOut.flagDataAsBlock: + """ + data dimension = [nChannels, nProfiles, nHeis] + """ + if profileList != None: + dataOut.data = dataOut.data[:,profileList,:] + + if profileRangeList != None: + minIndex = profileRangeList[0] + maxIndex = profileRangeList[1] + profileList = list(range(minIndex, maxIndex+1)) + + dataOut.data = dataOut.data[:,minIndex:maxIndex+1,:] + + if rangeList != None: + + profileList = [] + + for thisRange in rangeList: + minIndex = thisRange[0] + maxIndex = thisRange[1] + + profileList.extend(list(range(minIndex, maxIndex+1))) + + dataOut.data = dataOut.data[:,profileList,:] + + dataOut.nProfiles = len(profileList) + dataOut.profileIndex = dataOut.nProfiles - 1 + dataOut.flagNoData = False + + return dataOut + + """ + data dimension = [nChannels, nHeis] + """ + + if profileList != None: + + if self.isThisProfileInList(dataOut.profileIndex, profileList): + + self.nProfiles = len(profileList) + dataOut.nProfiles = self.nProfiles + dataOut.profileIndex = self.profileIndex + dataOut.flagNoData = False + + self.incProfileIndex() + return dataOut + + if profileRangeList != None: + + minIndex = profileRangeList[0] + maxIndex = profileRangeList[1] + + if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex): + + self.nProfiles = maxIndex - minIndex + 1 + dataOut.nProfiles = self.nProfiles + dataOut.profileIndex = self.profileIndex + dataOut.flagNoData = False + + self.incProfileIndex() + return dataOut + + if rangeList != None: + + nProfiles = 0 + + for thisRange in rangeList: + minIndex = thisRange[0] + maxIndex = thisRange[1] + + nProfiles += maxIndex - minIndex + 1 + + for thisRange in rangeList: + + minIndex = thisRange[0] + maxIndex = thisRange[1] + + if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex): + + self.nProfiles = nProfiles + dataOut.nProfiles = self.nProfiles + dataOut.profileIndex = self.profileIndex + dataOut.flagNoData = False + + self.incProfileIndex() + + break + + return dataOut + + + if beam != None: #beam is only for AMISR data + if self.isThisProfileInList(dataOut.profileIndex, dataOut.beamRangeDict[beam]): + dataOut.flagNoData = False + dataOut.profileIndex = self.profileIndex + + self.incProfileIndex() + + return dataOut + + raise ValueError("ProfileSelector needs profileList, profileRangeList or rangeList parameter") + + #return False + return dataOut + +class Reshaper(Operation): + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.__buffer = None + self.__nitems = 0 + + def __appendProfile(self, dataOut, nTxs): + + if self.__buffer is None: + shape = (dataOut.nChannels, int(dataOut.nHeights/nTxs) ) + self.__buffer = numpy.empty(shape, dtype = dataOut.data.dtype) + + ini = dataOut.nHeights * self.__nitems + end = ini + dataOut.nHeights + + self.__buffer[:, ini:end] = dataOut.data + + self.__nitems += 1 + + return int(self.__nitems*nTxs) + + def __getBuffer(self): + + if self.__nitems == int(1./self.__nTxs): + + self.__nitems = 0 + + return self.__buffer.copy() + + return None + + def __checkInputs(self, dataOut, shape, nTxs): + + if shape is None and nTxs is None: + raise ValueError("Reshaper: shape of factor should be defined") + + if nTxs: + if nTxs < 0: + raise ValueError("nTxs should be greater than 0") + + if nTxs < 1 and dataOut.nProfiles % (1./nTxs) != 0: + raise 
ValueError("nProfiles= %d is not divisibled by (1./nTxs) = %f" %(dataOut.nProfiles, (1./nTxs))) + + shape = [dataOut.nChannels, dataOut.nProfiles*nTxs, dataOut.nHeights/nTxs] + + return shape, nTxs + + if len(shape) != 2 and len(shape) != 3: + raise ValueError("shape dimension should be equal to 2 or 3. shape = (nProfiles, nHeis) or (nChannels, nProfiles, nHeis). Actually shape = (%d, %d, %d)" %(dataOut.nChannels, dataOut.nProfiles, dataOut.nHeights)) + + if len(shape) == 2: + shape_tuple = [dataOut.nChannels] + shape_tuple.extend(shape) + else: + shape_tuple = list(shape) + + nTxs = 1.0*shape_tuple[1]/dataOut.nProfiles + + return shape_tuple, nTxs + + def run(self, dataOut, shape=None, nTxs=None): + + shape_tuple, self.__nTxs = self.__checkInputs(dataOut, shape, nTxs) + + dataOut.flagNoData = True + profileIndex = None + + if dataOut.flagDataAsBlock: + + dataOut.data = numpy.reshape(dataOut.data, shape_tuple) + dataOut.flagNoData = False + + profileIndex = int(dataOut.nProfiles*self.__nTxs) - 1 + + else: + + + if self.__nTxs < 1: + + self.__appendProfile(dataOut, self.__nTxs) + new_data = self.__getBuffer() + + if new_data is not None: + dataOut.data = new_data + dataOut.flagNoData = False + + profileIndex = dataOut.profileIndex*nTxs + + else: + raise ValueError("nTxs should be greater than 0 and lower than 1, or use VoltageReader(..., getblock=True)") + + deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + + dataOut.heightList = numpy.arange(dataOut.nHeights/self.__nTxs) * deltaHeight + dataOut.heightList[0] + + dataOut.nProfiles = int(dataOut.nProfiles*self.__nTxs) + + dataOut.profileIndex = profileIndex + + dataOut.ippSeconds /= self.__nTxs + + return dataOut + +class SplitProfiles(Operation): + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + def run(self, dataOut, n): + + dataOut.flagNoData = True + profileIndex = None + + if dataOut.flagDataAsBlock: + + #nchannels, nprofiles, nsamples + shape = dataOut.data.shape + + if shape[2] % n != 0: + raise ValueError("Could not split the data, n=%d has to be multiple of %d" %(n, shape[2])) + + new_shape = shape[0], shape[1]*n, int(shape[2]/n) + + dataOut.data = numpy.reshape(dataOut.data, new_shape) + dataOut.flagNoData = False + + profileIndex = int(dataOut.nProfiles/n) - 1 + + else: + + raise ValueError("Could not split the data when is read Profile by Profile. 
Use VoltageReader(..., getblock=True)") + + deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + + dataOut.heightList = numpy.arange(dataOut.nHeights/n) * deltaHeight + dataOut.heightList[0] + + dataOut.nProfiles = int(dataOut.nProfiles*n) + + dataOut.profileIndex = profileIndex + + dataOut.ippSeconds /= n + + return dataOut + +class CombineProfiles(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.__remData = None + self.__profileIndex = 0 + + def run(self, dataOut, n): + + dataOut.flagNoData = True + profileIndex = None + + if dataOut.flagDataAsBlock: + + #nchannels, nprofiles, nsamples + shape = dataOut.data.shape + new_shape = shape[0], shape[1]/n, shape[2]*n + + if shape[1] % n != 0: + raise ValueError("Could not split the data, n=%d has to be multiple of %d" %(n, shape[1])) + + dataOut.data = numpy.reshape(dataOut.data, new_shape) + dataOut.flagNoData = False + + profileIndex = int(dataOut.nProfiles*n) - 1 + + else: + + #nchannels, nsamples + if self.__remData is None: + newData = dataOut.data + else: + newData = numpy.concatenate((self.__remData, dataOut.data), axis=1) + + self.__profileIndex += 1 + + if self.__profileIndex < n: + self.__remData = newData + #continue + return + + self.__profileIndex = 0 + self.__remData = None + + dataOut.data = newData + dataOut.flagNoData = False + + profileIndex = dataOut.profileIndex/n + + + deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + + dataOut.heightList = numpy.arange(dataOut.nHeights*n) * deltaHeight + dataOut.heightList[0] + + dataOut.nProfiles = int(dataOut.nProfiles/n) + + dataOut.profileIndex = profileIndex + + dataOut.ippSeconds *= n + + return dataOut +# import collections +# from scipy.stats import mode +# +# class Synchronize(Operation): +# +# isConfig = False +# __profIndex = 0 +# +# def __init__(self, **kwargs): +# +# Operation.__init__(self, **kwargs) +# # self.isConfig = False +# self.__powBuffer = None +# self.__startIndex = 0 +# self.__pulseFound = False +# +# def __findTxPulse(self, dataOut, channel=0, pulse_with = None): +# +# #Read data +# +# powerdB = dataOut.getPower(channel = channel) +# noisedB = dataOut.getNoise(channel = channel)[0] +# +# self.__powBuffer.extend(powerdB.flatten()) +# +# dataArray = numpy.array(self.__powBuffer) +# +# filteredPower = numpy.correlate(dataArray, dataArray[0:self.__nSamples], "same") +# +# maxValue = numpy.nanmax(filteredPower) +# +# if maxValue < noisedB + 10: +# #No se encuentra ningun pulso de transmision +# return None +# +# maxValuesIndex = numpy.where(filteredPower > maxValue - 0.1*abs(maxValue))[0] +# +# if len(maxValuesIndex) < 2: +# #Solo se encontro un solo pulso de transmision de un baudio, esperando por el siguiente TX +# return None +# +# phasedMaxValuesIndex = maxValuesIndex - self.__nSamples +# +# #Seleccionar solo valores con un espaciamiento de nSamples +# pulseIndex = numpy.intersect1d(maxValuesIndex, phasedMaxValuesIndex) +# +# if len(pulseIndex) < 2: +# #Solo se encontro un pulso de transmision con ancho mayor a 1 +# return None +# +# spacing = pulseIndex[1:] - pulseIndex[:-1] +# +# #remover senales que se distancien menos de 10 unidades o muestras +# #(No deberian existir IPP menor a 10 unidades) +# +# realIndex = numpy.where(spacing > 10 )[0] +# +# if len(realIndex) < 2: +# #Solo se encontro un pulso de transmision con ancho mayor a 1 +# return None +# +# #Eliminar pulsos anchos (deja solo la diferencia entre IPPs) +# realPulseIndex = pulseIndex[realIndex] +# +# period = mode(realPulseIndex[1:] - 
realPulseIndex[:-1])[0][0] +# +# print "IPP = %d samples" %period +# +# self.__newNSamples = dataOut.nHeights #int(period) +# self.__startIndex = int(realPulseIndex[0]) +# +# return 1 +# +# +# def setup(self, nSamples, nChannels, buffer_size = 4): +# +# self.__powBuffer = collections.deque(numpy.zeros( buffer_size*nSamples,dtype=numpy.float), +# maxlen = buffer_size*nSamples) +# +# bufferList = [] +# +# for i in range(nChannels): +# bufferByChannel = collections.deque(numpy.zeros( buffer_size*nSamples, dtype=numpy.complex) + numpy.NAN, +# maxlen = buffer_size*nSamples) +# +# bufferList.append(bufferByChannel) +# +# self.__nSamples = nSamples +# self.__nChannels = nChannels +# self.__bufferList = bufferList +# +# def run(self, dataOut, channel = 0): +# +# if not self.isConfig: +# nSamples = dataOut.nHeights +# nChannels = dataOut.nChannels +# self.setup(nSamples, nChannels) +# self.isConfig = True +# +# #Append new data to internal buffer +# for thisChannel in range(self.__nChannels): +# bufferByChannel = self.__bufferList[thisChannel] +# bufferByChannel.extend(dataOut.data[thisChannel]) +# +# if self.__pulseFound: +# self.__startIndex -= self.__nSamples +# +# #Finding Tx Pulse +# if not self.__pulseFound: +# indexFound = self.__findTxPulse(dataOut, channel) +# +# if indexFound == None: +# dataOut.flagNoData = True +# return +# +# self.__arrayBuffer = numpy.zeros((self.__nChannels, self.__newNSamples), dtype = numpy.complex) +# self.__pulseFound = True +# self.__startIndex = indexFound +# +# #If pulse was found ... +# for thisChannel in range(self.__nChannels): +# bufferByChannel = self.__bufferList[thisChannel] +# #print self.__startIndex +# x = numpy.array(bufferByChannel) +# self.__arrayBuffer[thisChannel] = x[self.__startIndex:self.__startIndex+self.__newNSamples] +# +# deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] +# dataOut.heightList = numpy.arange(self.__newNSamples)*deltaHeight +# # dataOut.ippSeconds = (self.__newNSamples / deltaHeight)/1e6 +# +# dataOut.data = self.__arrayBuffer +# +# self.__startIndex += self.__newNSamples +# +# return + + + + + + + +##############################LONG PULSE############################## + + + +class CrossProdHybrid(CrossProdDP): + """Operation to calculate cross products of the Hybrid Experiment. + + Parameters: + ----------- + NLAG : int + Number of lags for Long Pulse. + NRANGE : int + Number of samples (heights) for Long Pulse. + NCAL : int + .* + DPL : int + Number of lags for Double Pulse. + NDN : int + .* + NDT : int + Number of heights for Double Pulse.* + NDP : int + Number of heights for Double Pulse.* + NSCAN : int + Number of profiles when the transmitter is on. + lagind : intlist + .* + lagfirst : intlist + .* + NAVG : int + Number of blocks to be "averaged". + nkill : int + Number of blocks not to be considered when averaging. 
+ + Example + -------- + + op = proc_unit.addOperation(name='CrossProdHybrid', optype='other') + op.addParameter(name='NLAG', value='16', format='int') + op.addParameter(name='NRANGE', value='200', format='int') + op.addParameter(name='NCAL', value='0', format='int') + op.addParameter(name='DPL', value='11', format='int') + op.addParameter(name='NDN', value='0', format='int') + op.addParameter(name='NDT', value='67', format='int') + op.addParameter(name='NDP', value='67', format='int') + op.addParameter(name='NSCAN', value='128', format='int') + op.addParameter(name='lagind', value='(0,1,2,3,4,5,6,7,0,3,4,5,6,8,9,10)', format='intlist') + op.addParameter(name='lagfirst', value='(1,1,1,1,1,1,1,1,0,0,0,0,0,1,1,1)', format='intlist') + op.addParameter(name='NAVG', value='16', format='int') + op.addParameter(name='nkill', value='6', format='int') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.bcounter=0 + self.aux=1 + self.aux_cross_lp=1 + self.lag_products_LP_median_estimates_aux=1 + + def get_products_cabxys_HP(self,dataOut): + + if self.aux==1: + self.set_header_output(dataOut) + self.aux=0 + + self.cax=numpy.zeros((dataOut.NDP,dataOut.DPL,2))# hp:67x11x2 dp: 66x11x2 + self.cay=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.cbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.cby=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.cax2=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.cay2=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.cbx2=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.cby2=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.caxbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.caxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.caybx=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.cayby=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.caxay=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + self.cbxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2)) + for i in range(2): # flipped and unflipped + for j in range(dataOut.NDP): # loop over true ranges # 67 + for k in range(int(dataOut.NSCAN)): # 128 + + n=dataOut.lagind[k%dataOut.NLAG] # 128=16x8 + + ax=dataOut.data[0,k,dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT].real-dataOut.dc.real[0] + ay=dataOut.data[0,k,dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT].imag-dataOut.dc.imag[0] + + if dataOut.NRANGE+dataOut.NCAL+j+i*dataOut.NDT+2*n=dataOut.nkill/2 and k=dataOut.nkill/2 and k=dataOut.nkill/2 and k=dataOut.nkill/2 and k + ((debris[i-12]+debris[i-11]+debris[i-10]+debris[i-9]+ + debris[i+12]+debris[i+11]+debris[i+10]+debris[i+9])/2.0+ + thresh)): + + dataOut.flagNoData=True + print("LP Debris detected at",i*15,"km") + + debris=numpy.zeros(dataOut.NDP,dtype='float32') + Range=numpy.arange(0,3000,15) + for k in range(2): #flip + for i in range(dataOut.NDP): # + debris[i]+=numpy.sqrt((dataOut.kaxbx[i,0,k]+dataOut.kayby[i,0,k])**2+(dataOut.kaybx[i,0,k]-dataOut.kaxby[i,0,k])**2) + + if gmtime(dataOut.utctime).tm_hour > 11: + for i in range(2,dataOut.NDP-2): + if (debris[i]>3.0*debris[i-2] and + debris[i]>3.0*debris[i+2] and + Range[i]>200.0 and Range[i]<=540.0): + dataOut.flagNoData=True + print("DP Debris detected at",i*15,"km") + + print("inside debris",dataOut.flagNoData) + return dataOut + + +class IntegrationHP(IntegrationDP): + """Operation to integrate Double Pulse and Long Pulse data. + + Parameters: + ----------- + nint : int + Number of integrations. 
+ + Example + -------- + + op = proc_unit.addOperation(name='IntegrationHP', optype='other') + op.addParameter(name='nint', value='30', format='int') + + """ + + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + self.counter = 0 + self.aux = 0 + + def integration_noise(self,dataOut): + + if self.counter == 0: + dataOut.tnoise=numpy.zeros((dataOut.NR),dtype='float32') + + dataOut.tnoise+=dataOut.noise_final + + def integration_for_long_pulse(self,dataOut): - def __init__(self, **kwargs): + if self.counter == 0: + dataOut.output_LP_integrated=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NR),order='F',dtype='complex64') - Operation.__init__(self, **kwargs) - self.profileIndex = 0 + dataOut.output_LP_integrated+=dataOut.output_LP - def incProfileIndex(self): + def run(self,dataOut,nint=None): - self.profileIndex += 1 + dataOut.flagNoData=True - if self.profileIndex >= self.nProfiles: - self.profileIndex = 0 + #print("flag_inside",dataOut.flagNoData) + dataOut.nint=nint + dataOut.paramInterval=0#int(dataOut.nint*dataOut.header[7][0]*2 ) + dataOut.lat=-11.95 + dataOut.lon=-76.87 - def isThisProfileInRange(self, profileIndex, minIndex, maxIndex): + self.integration_for_long_pulse(dataOut) - if profileIndex < minIndex: - return False + self.integration_noise(dataOut) - if profileIndex > maxIndex: - return False - return True + if self.counter==dataOut.nint-1: - def isThisProfileInList(self, profileIndex, profileList): + dataOut.tnoise[0]*=0.995 + dataOut.tnoise[1]*=0.995 + dataOut.pan=dataOut.tnoise[0]/float(dataOut.NSCAN*dataOut.nint*dataOut.NAVG) + dataOut.pbn=dataOut.tnoise[1]/float(dataOut.NSCAN*dataOut.nint*dataOut.NAVG) - if profileIndex not in profileList: - return False + self.integration_for_double_pulse(dataOut) - return True - def run(self, dataOut, profileList=None, profileRangeList=None, beam=None, byblock=False, rangeList = None, nProfiles=None): - """ - ProfileSelector: + return dataOut - Inputs: - profileList : Index of profiles selected. Example: profileList = (0,1,2,7,8) - profileRangeList : Minimum and maximum profile indexes. Example: profileRangeList = (4, 30) +class IntegrationLP(Operation): + """Operation to integrate Double Pulse and Long Pulse data. - rangeList : List of profile ranges. Example: rangeList = ((4, 30), (32, 64), (128, 256)) + Parameters: + ----------- + nint : int + Number of integrations. 
- """ + Example + -------- - if rangeList is not None: - if type(rangeList[0]) not in (tuple, list): - rangeList = [rangeList] + op = proc_unit.addOperation(name='IntegrationHP', optype='other') + op.addParameter(name='nint', value='30', format='int') - dataOut.flagNoData = True + """ - if dataOut.flagDataAsBlock: - """ - data dimension = [nChannels, nProfiles, nHeis] - """ - if profileList != None: - dataOut.data = dataOut.data[:,profileList,:] + def __init__(self, **kwargs): - if profileRangeList != None: - minIndex = profileRangeList[0] - maxIndex = profileRangeList[1] - profileList = list(range(minIndex, maxIndex+1)) + Operation.__init__(self, **kwargs) - dataOut.data = dataOut.data[:,minIndex:maxIndex+1,:] + self.counter = 0 + self.aux = 0 - if rangeList != None: + def integration_noise(self,dataOut): - profileList = [] + if self.counter == 0: + dataOut.tnoise=numpy.zeros((dataOut.NR),dtype='float32') - for thisRange in rangeList: - minIndex = thisRange[0] - maxIndex = thisRange[1] + dataOut.tnoise+=dataOut.noise_final + ''' + def integration_for_long_pulse(self,dataOut): - profileList.extend(list(range(minIndex, maxIndex+1))) + if self.counter == 0: + dataOut.output_LP_integrated=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NR),order='F',dtype='complex64') - dataOut.data = dataOut.data[:,profileList,:] + dataOut.output_LP_integrated+=dataOut.output_LP + ''' + def integration_for_long_pulse(self,dataOut): + #print("inside") + #print(self.aux) - dataOut.nProfiles = len(profileList) - dataOut.profileIndex = dataOut.nProfiles - 1 - dataOut.flagNoData = False + if self.counter == 0: + dataOut.output_LP_integrated=numpy.zeros((dataOut.NLAG,dataOut.NRANGE,dataOut.NR),order='F',dtype='complex64') - return dataOut + dataOut.output_LP_integrated+=dataOut.output_LP - """ - data dimension = [nChannels, nHeis] - """ + if self.aux==1: + #print("CurrentBlockBBBBB: ",dataOut.CurrentBlock) + #print(dataOut.datatime) - if profileList != None: + #dataOut.TimeBlockDate_for_dp_power=dataOut.TimeBlockDate + ########dataOut.TimeBlockSeconds_for_dp_power=dataOut.LastAVGDate + #print("Date: ",dataOut.TimeBlockDate_for_dp_power) - if self.isThisProfileInList(dataOut.profileIndex, profileList): + #dataOut.TimeBlockSeconds_for_dp_power=mktime(strptime(dataOut.TimeBlockDate_for_dp_power)) + dataOut.TimeBlockSeconds_for_dp_power=dataOut.utctime#dataOut.TimeBlockSeconds-18000 + #dataOut.TimeBlockSeconds_for_dp_power=dataOut.LastAVGDate + #print("Seconds: ",dataOut.TimeBlockSeconds_for_dp_power) + dataOut.bd_time=gmtime(dataOut.TimeBlockSeconds_for_dp_power) + #print(dataOut.bd_time) + #exit() + dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0 + dataOut.ut_Faraday=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0 + #print("date: ", dataOut.TimeBlockDate) - self.nProfiles = len(profileList) - dataOut.nProfiles = self.nProfiles - dataOut.profileIndex = self.profileIndex - dataOut.flagNoData = False - self.incProfileIndex() - return dataOut + self.aux=0 - if profileRangeList != None: + #print("after") - minIndex = profileRangeList[0] - maxIndex = profileRangeList[1] + self.integration_noise(dataOut) - if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex): + if self.counter==0: - self.nProfiles = maxIndex - minIndex + 1 - dataOut.nProfiles = self.nProfiles - dataOut.profileIndex = self.profileIndex - dataOut.flagNoData = False + self.init_time=dataOut.utctime - self.incProfileIndex() - return dataOut + if self.counter < dataOut.nint: + 
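+            # output_LP_integrated already holds the running sum of the long-pulse
+            # lag products, so only the block counter advances here. Once nint blocks
+            # have been accumulated the integrated data are released (flagNoData=False),
+            # utctime is reset to the first block's time (init_time) and the counter
+            # restarts from zero.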
#print("HERE") - if rangeList != None: - nProfiles = 0 - for thisRange in rangeList: - minIndex = thisRange[0] - maxIndex = thisRange[1] + self.counter+=1 - nProfiles += maxIndex - minIndex + 1 + if self.counter==dataOut.nint-1: + self.aux=1 + #dataOut.TimeBlockDate_for_dp_power=dataOut.TimeBlockDate + if self.counter==dataOut.nint: - for thisRange in rangeList: + dataOut.flagNoData=False + dataOut.utctime=self.init_time + self.counter=0 - minIndex = thisRange[0] - maxIndex = thisRange[1] + def run(self,dataOut,nint=None): - if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex): + dataOut.flagNoData=True - self.nProfiles = nProfiles - dataOut.nProfiles = self.nProfiles - dataOut.profileIndex = self.profileIndex - dataOut.flagNoData = False + #print("flag_inside",dataOut.flagNoData) + dataOut.nint=nint + dataOut.paramInterval=0#int(dataOut.nint*dataOut.header[7][0]*2 ) + dataOut.lat=-11.95 + dataOut.lon=-76.87 - self.incProfileIndex() + self.integration_for_long_pulse(dataOut) - break - return dataOut + if self.counter==dataOut.nint: + dataOut.tnoise[0]*=0.995 + dataOut.tnoise[1]*=0.995 + dataOut.pan=dataOut.tnoise[0]/float(dataOut.NSCAN*dataOut.nint*dataOut.NAVG) + dataOut.pbn=dataOut.tnoise[1]/float(dataOut.NSCAN*dataOut.nint*dataOut.NAVG) - if beam != None: #beam is only for AMISR data - if self.isThisProfileInList(dataOut.profileIndex, dataOut.beamRangeDict[beam]): - dataOut.flagNoData = False - dataOut.profileIndex = self.profileIndex + #self.integration_for_double_pulse(dataOut) + print("HERE2") - self.incProfileIndex() - return dataOut - raise ValueError("ProfileSelector needs profileList, profileRangeList or rangeList parameter") + return dataOut -class Reshaper(Operation): +class SumFlipsHP(SumFlips): + """Operation to sum the flip and unflip part of certain cross products of the Double Pulse. + + Parameters: + ----------- + None + + Example + -------- + + op = proc_unit.addOperation(name='SumFlipsHP', optype='other') + + """ def __init__(self, **kwargs): Operation.__init__(self, **kwargs) - self.__buffer = None - self.__nitems = 0 + def rint2HP(self,dataOut): - def __appendProfile(self, dataOut, nTxs): + dataOut.rnint2=numpy.zeros(dataOut.DPL,'float32') - if self.__buffer is None: - shape = (dataOut.nChannels, int(dataOut.nHeights/nTxs) ) - self.__buffer = numpy.empty(shape, dtype = dataOut.data.dtype) + for l in range(dataOut.DPL): + if(l==0 or (l>=3 and l <=6)): + dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.NAVG*16.0) + else: + dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.NAVG*8.0) - ini = dataOut.nHeights * self.__nitems - end = ini + dataOut.nHeights + def run(self,dataOut): - self.__buffer[:, ini:end] = dataOut.data + self.rint2HP(dataOut) + self.SumLags(dataOut) - self.__nitems += 1 + return dataOut - return int(self.__nitems*nTxs) - def __getBuffer(self): +from schainpy.model.proc import full_profile_profile +from scipy.optimize import nnls +class LongPulseAnalysis(Operation): + """Operation to estimate ACFs, temperatures, total electron density and Hydrogen/Helium fractions from the Long Pulse data. 
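+
+    The operation deconvolves the rectangular pulse shape from the lag-0 power
+    profile using non-negative least squares (scipy.optimize.nnls), merges the
+    result with the rescaled Double Pulse power profile to form the Ne estimate,
+    and propagates the zero-lag error covariance into per-lag ACF errors.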
- if self.__nitems == int(1./self.__nTxs): + Parameters: + ----------- + NACF : int + .* - self.__nitems = 0 + Example + -------- - return self.__buffer.copy() + op = proc_unit.addOperation(name='LongPulseAnalysis', optype='other') + op.addParameter(name='NACF', value='16', format='int') - return None + """ - def __checkInputs(self, dataOut, shape, nTxs): + def __init__(self, **kwargs): - if shape is None and nTxs is None: - raise ValueError("Reshaper: shape of factor should be defined") + Operation.__init__(self, **kwargs) + self.aux=1 + + def run(self,dataOut,NACF): + + dataOut.NACF=NACF + dataOut.heightList=dataOut.DH*(numpy.arange(dataOut.NACF)) + anoise0=dataOut.tnoise[0] + anoise1=anoise0*0.0 #seems to be noise in 1st lag 0.015 before '14 + + if self.aux: + #dataOut.cut=31#26#height=31*15=465 + self.cal=numpy.zeros((dataOut.NLAG),'float32') + self.drift=numpy.zeros((200),'float32') + self.rdrift=numpy.zeros((200),'float32') + self.ddrift=numpy.zeros((200),'float32') + self.sigma=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + self.powera=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + self.powerb=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + self.perror=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + dataOut.ene=numpy.zeros((dataOut.NRANGE),'float32') + self.dpulse=numpy.zeros((dataOut.NACF),'float32') + self.lpulse=numpy.zeros((dataOut.NACF),'float32') + dataOut.lags_LP=numpy.zeros((dataOut.IBITS),order='F',dtype='float32') + self.lagp=numpy.zeros((dataOut.NACF),'float32') + self.u=numpy.zeros((2*dataOut.NACF,2*dataOut.NACF),'float32') + dataOut.ne=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + dataOut.te=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.ete=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.ti=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.eti=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.ph=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.eph=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.phe=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.ephe=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.errors=numpy.zeros((dataOut.IBITS,max(dataOut.NRANGE,dataOut.NSHTS)),order='F',dtype='float32') + dataOut.fit_array_real=numpy.zeros((max(dataOut.NRANGE,dataOut.NSHTS),dataOut.NLAG),order='F',dtype='float32') + dataOut.status=numpy.zeros(1,'float32') + dataOut.tx=240.0 #debería provenir del header #hybrid + + for i in range(dataOut.IBITS): + dataOut.lags_LP[i]=float(i)*(dataOut.tx/150.0)/float(dataOut.IBITS) # (float)i*(header.tx/150.0)/(float)IBITS; + + self.aux=0 + + dataOut.cut=30 + for i in range(30,15,-1): + if numpy.nanmax(dataOut.acfs_error_to_plot[i,:])>=10 or dataOut.info2[i]==0: + dataOut.cut=i-1 + #print(dataOut.cut) + #print(dataOut.info2[:]) + #print(dataOut.te2[:]) + #print(dataOut.ti2[:]) + for i in range(dataOut.NLAG): + self.cal[i]=sum(dataOut.output_LP_integrated[i,:,3].real) + + + self.cal/=float(dataOut.NRANGE) + + for j in range(dataOut.NACF+2*dataOut.IBITS+2): + + dataOut.output_LP_integrated.real[0,j,0]-=anoise0 #lag0 ch0 + dataOut.output_LP_integrated.real[1,j,0]-=anoise1 #lag1 ch0 + + for i in range(1,dataOut.NLAG): #remove cal data from certain lags + dataOut.output_LP_integrated.real[i,j,0]-=self.cal[i] + k=max(j,26) #constant power below range 26 + self.powera[j]=dataOut.output_LP_integrated.real[0,k,0] + + ## examine drifts here - based on 60 'indep.' 
estimates + + nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint*10 + alpha=beta=delta=0.0 + nest=0 + gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[1]*1.0e-3) + beta=gamma*(math.atan2(dataOut.output_LP_integrated.imag[14,0,2],dataOut.output_LP_integrated.real[14,0,2])-math.atan2(dataOut.output_LP_integrated.imag[1,0,2],dataOut.output_LP_integrated.real[1,0,2]))/13.0 + for i in range(1,3): + gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[i]*1.0e-3) + for j in range(34,44): + rho2=numpy.abs(dataOut.output_LP_integrated[i,j,0])/numpy.abs(dataOut.output_LP_integrated[0,j,0]) + dataOut.dphi2=(1.0/rho2-1.0)/(float(2*nis)) + dataOut.dphi2*=gamma**2 + pest=gamma*math.atan(dataOut.output_LP_integrated.imag[i,j,0]/dataOut.output_LP_integrated.real[i,j,0]) + + self.drift[nest]=pest + self.ddrift[nest]=dataOut.dphi2 + self.rdrift[nest]=float(nest) + nest+=1 + + sorted(self.drift[:nest]) + + for j in range(int(nest/4),int(3*nest/4)): + #i=int(self.rdrift[j]) + alpha+=self.drift[j]/self.ddrift[j] + delta+=1.0/self.ddrift[j] + + alpha/=delta + delta=1./numpy.sqrt(delta) + vdrift=alpha-beta + dvdrift=delta + + #need to develop estimate of complete density profile using all + #available data + + #estimate sample variances for long-pulse power profile + + nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint + + self.sigma[:dataOut.NACF+2*dataOut.IBITS+2]=((anoise0+self.powera[:dataOut.NACF+2*dataOut.IBITS+2])**2)/float(nis) + + ioff=1 + + #deconvolve rectangular pulse shape from profile ==> powerb, perror + + + ############# START nnlswrap############# + + if dataOut.ut_Faraday>14.0: + alpha_nnlswrap=20.0 + else: + alpha_nnlswrap=30.0 - if nTxs: - if nTxs < 0: - raise ValueError("nTxs should be greater than 0") + range1_nnls=dataOut.NACF + range2_nnls=dataOut.NACF+dataOut.IBITS-1 - if nTxs < 1 and dataOut.nProfiles % (1./nTxs) != 0: - raise ValueError("nProfiles= %d is not divisibled by (1./nTxs) = %f" %(dataOut.nProfiles, (1./nTxs))) + g_nnlswrap=numpy.zeros((range1_nnls,range2_nnls),'float32') + a_nnlswrap=numpy.zeros((range2_nnls,range2_nnls),'float64') - shape = [dataOut.nChannels, dataOut.nProfiles*nTxs, dataOut.nHeights/nTxs] + for i in range(range1_nnls): + for j in range(range2_nnls): + if j>=i and j16: + self.dpulse[i]+=dataOut.ph2[k]/dataOut.h2[k] + elif k>=36-aux: + self.lpulse[i]+=self.powerb[k] + self.lagp[i]=self.powera[i] - if self.__nTxs < 1: + #find scale factor that best merges profiles - self.__appendProfile(dataOut, self.__nTxs) - new_data = self.__getBuffer() + qi=sum(self.dpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2) + ri=sum((self.dpulse[32:dataOut.NACF]*self.lpulse[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2) + si=sum((self.dpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2) + ui=sum(self.lpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2) + vi=sum((self.lpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2) - if new_data is not None: - dataOut.data = new_data - dataOut.flagNoData = False + alpha=(si*ui-vi*ri)/(qi*ui-ri*ri) + beta=(qi*vi-ri*si)/(qi*ui-ri*ri) - profileIndex = dataOut.profileIndex*nTxs + #form density profile estimate, merging rescaled power profiles - else: - raise ValueError("nTxs should be greater than 0 and lower than 1, or use VoltageReader(..., getblock=True)") + self.powerb[16:36-aux]=alpha*dataOut.ph2[16:36-aux]/dataOut.h2[16:36-aux] + self.powerb[36-aux:dataOut.NACF]*=beta - deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + #form Ne estimate, fill 
in error estimate at low altitudes - dataOut.heightList = numpy.arange(dataOut.nHeights/self.__nTxs) * deltaHeight + dataOut.heightList[0] + dataOut.ene[0:36-aux]=dataOut.sdp2[0:36-aux]/dataOut.ph2[0:36-aux] + dataOut.ne[:dataOut.NACF]=self.powerb[:dataOut.NACF]*dataOut.h2[:dataOut.NACF]/alpha - dataOut.nProfiles = int(dataOut.nProfiles*self.__nTxs) + #now do error propagation: store zero lag error covariance in u - dataOut.profileIndex = profileIndex + nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint/1 # DLH serious debris removal - dataOut.ippSeconds /= self.__nTxs + for i in range(dataOut.NACF): + for j in range(i,dataOut.NACF): + if j-i>=dataOut.IBITS: + self.u[i,j]=0.0 + else: + self.u[i,j]=dataOut.output_LP_integrated.real[j-i,i,0]**2/float(nis) + self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,i,0])/dataOut.output_LP_integrated.real[0,i,0] + self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,j,0])/dataOut.output_LP_integrated.real[0,j,0] + + self.u[j,i]=self.u[i,j] + + #now error analyis for lag product matrix (diag), place in acf_err + + for i in range(dataOut.NACF): + for j in range(dataOut.IBITS): + if j==0: + dataOut.errors[0,i]=numpy.sqrt(self.u[i,i]) + else: + dataOut.errors[j,i]=numpy.sqrt(((dataOut.output_LP_integrated.real[0,i,0]+anoise0)*(dataOut.output_LP_integrated.real[0,i+j,0]+anoise0)+dataOut.output_LP_integrated.real[j,i,0]**2)/float(2*nis)) + + #with suppress_stdout_stderr(): + #full_profile_profile.profile(numpy.transpose(dataOut.output_LP_integrated,(2,1,0)),numpy.transpose(dataOut.errors),self.powerb,dataOut.ne,dataOut.lags_LP,dataOut.thb,dataOut.bfm,dataOut.te,dataOut.ete,dataOut.ti,dataOut.eti,dataOut.ph,dataOut.eph,dataOut.phe,dataOut.ephe,dataOut.range1,dataOut.ut,dataOut.NACF,dataOut.fit_array_real,dataOut.status,dataOut.NRANGE,dataOut.IBITS) + + if dataOut.status>=3.5: + dataOut.te[:]=numpy.nan + dataOut.ete[:]=numpy.nan + dataOut.ti[:]=numpy.nan + dataOut.eti[:]=numpy.nan + dataOut.ph[:]=numpy.nan + dataOut.eph[:]=numpy.nan + dataOut.phe[:]=numpy.nan + dataOut.ephe[:]=numpy.nan return dataOut -class SplitProfiles(Operation): + +class LongPulseAnalysisLP(Operation): + """Operation to estimate ACFs, temperatures, total electron density and Hydrogen/Helium fractions from the Long Pulse data. 
+ + Parameters: + ----------- + NACF : int + .* + + Example + -------- + + op = proc_unit.addOperation(name='LongPulseAnalysis', optype='other') + op.addParameter(name='NACF', value='16', format='int') + + """ def __init__(self, **kwargs): Operation.__init__(self, **kwargs) + self.aux=1 + + def run(self,dataOut,NACF=None): + + + dataOut.IBITS = 64 + dataOut.NACF = dataOut.nHeights# - (2*dataOut.IBITS+5) + #print(dataOut.heightList[int(dataOut.NACF)]) + #exit(1) + + #dataOut.heightList=dataOut.DH*(numpy.arange(dataOut.NACF)) + anoise0=dataOut.tnoise[0] + anoise1=anoise0*0.0 #seems to be noise in 1st lag 0.015 before '14 + + if self.aux: + #dataOut.cut=31#26#height=31*15=465 + self.cal=numpy.zeros((dataOut.NLAG),'float32') + self.drift=numpy.zeros((200),'float32') + self.rdrift=numpy.zeros((200),'float32') + self.ddrift=numpy.zeros((200),'float32') + self.sigma=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + self.powera=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + self.powerb=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + self.perror=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + dataOut.ene=numpy.zeros((dataOut.NRANGE),'float32') + self.dpulse=numpy.zeros((dataOut.NACF),'float32') + self.lpulse=numpy.zeros((dataOut.NACF),'float32') + dataOut.lags_LP=numpy.zeros((dataOut.IBITS),order='F',dtype='float32') + self.lagp=numpy.zeros((dataOut.NACF),'float32') + self.u=numpy.zeros((2*dataOut.NACF,2*dataOut.NACF),'float32') + dataOut.ne=numpy.zeros((dataOut.NRANGE),order='F',dtype='float32') + dataOut.te=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.ete=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.ti=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.eti=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.ph=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.eph=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.phe=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.ephe=numpy.zeros((dataOut.NACF),order='F',dtype='float32') + dataOut.errors=numpy.zeros((dataOut.IBITS,max(dataOut.NRANGE,1)),order='F',dtype='float32') + dataOut.fit_array_real=numpy.zeros((max(dataOut.NRANGE,1),dataOut.NLAG),order='F',dtype='float32') + dataOut.status=numpy.zeros(1,'float32') + dataOut.tx=480.0 #debería provenir del header #HAE + + dataOut.h2=numpy.zeros(dataOut.MAXNRANGENDT,'float32') + dataOut.range1=numpy.zeros(dataOut.MAXNRANGENDT,order='F',dtype='float32') + + + + for i in range(dataOut.IBITS): + dataOut.lags_LP[i]=float(i)*(dataOut.tx/150.0)/float(dataOut.IBITS) # (float)i*(header.tx/150.0)/(float)IBITS; + + self.aux=0 + + + + for i in range(dataOut.MAXNRANGENDT): + dataOut.range1[i]=dataOut.H0 + i*dataOut.DH + dataOut.h2[i]=dataOut.range1[i]**2 + + dataOut.cut=30 + #for i in range(30,15,-1): + # if numpy.nanmax(dataOut.acfs_error_to_plot[i,:])>=10 or dataOut.info2[i]==0: + # dataOut.cut=i-1 + #print(dataOut.cut) + #print(dataOut.info2[:]) + #print(dataOut.te2[:]) + #print(dataOut.ti2[:]) + #for i in range(dataOut.NLAG): + # self.cal[i]=sum(dataOut.output_LP_integrated[i,:,3].real) + + + #self.cal/=float(dataOut.NRANGE) + + for j in range(dataOut.NACF):#+2*dataOut.IBITS+2): + + self.powera[j]=dataOut.output_LP_integrated.real[0,j,0] + + + print(dataOut.heightList[:dataOut.NACF]) + import matplotlib.pyplot as plt + fig, axes = plt.subplots(figsize=(14, 10)) + axes.plot(self.powera[:dataOut.NACF]*dataOut.h2[:dataOut.NACF],dataOut.heightList[:dataOut.NACF]) + 
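+        # Diagnostic plot only: range-corrected lag-0 power (powera * h2, a raw Ne
+        # proxy) versus height on a logarithmic x axis; the method sleeps and exits
+        # right after plt.show(), so this branch is meant for debugging.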
axes.set_xscale("log", nonposx='clip') + #axes.set_xlim(1e18,2e19) + axes.set_ylim(180,470) + import time + + plt.title(time.ctime(dataOut.utctime)) + plt.show() + time.sleep(50) + exit(1) + ''' + for j in range(dataOut.NACF+2*dataOut.IBITS+2): - def run(self, dataOut, n): + dataOut.output_LP_integrated.real[0,j,0]-=anoise0 #lag0 ch0 + dataOut.output_LP_integrated.real[1,j,0]-=anoise1 #lag1 ch0 - dataOut.flagNoData = True - profileIndex = None + #for i in range(1,dataOut.NLAG): #remove cal data from certain lags + # dataOut.output_LP_integrated.real[i,j,0]-=self.cal[i] + k=max(j,26) #constant power below range 26 + self.powera[j]=dataOut.output_LP_integrated.real[0,k,0] - if dataOut.flagDataAsBlock: + ## examine drifts here - based on 60 'indep.' estimates - #nchannels, nprofiles, nsamples - shape = dataOut.data.shape + nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint*10 + alpha=beta=delta=0.0 + nest=0 + gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[1]*1.0e-3) + beta=gamma*(math.atan2(dataOut.output_LP_integrated.imag[14,0,2],dataOut.output_LP_integrated.real[14,0,2])-math.atan2(dataOut.output_LP_integrated.imag[1,0,2],dataOut.output_LP_integrated.real[1,0,2]))/13.0 + for i in range(1,3): + gamma=3.0/(2.0*numpy.pi*dataOut.lags_LP[i]*1.0e-3) + for j in range(34,44): + rho2=numpy.abs(dataOut.output_LP_integrated[i,j,0])/numpy.abs(dataOut.output_LP_integrated[0,j,0]) + dataOut.dphi2=(1.0/rho2-1.0)/(float(2*nis)) + dataOut.dphi2*=gamma**2 + pest=gamma*math.atan(dataOut.output_LP_integrated.imag[i,j,0]/dataOut.output_LP_integrated.real[i,j,0]) - if shape[2] % n != 0: - raise ValueError("Could not split the data, n=%d has to be multiple of %d" %(n, shape[2])) + self.drift[nest]=pest + self.ddrift[nest]=dataOut.dphi2 + self.rdrift[nest]=float(nest) + nest+=1 - new_shape = shape[0], shape[1]*n, int(shape[2]/n) + sorted(self.drift[:nest]) - dataOut.data = numpy.reshape(dataOut.data, new_shape) - dataOut.flagNoData = False + for j in range(int(nest/4),int(3*nest/4)): + #i=int(self.rdrift[j]) + alpha+=self.drift[j]/self.ddrift[j] + delta+=1.0/self.ddrift[j] - profileIndex = int(dataOut.nProfiles/n) - 1 + alpha/=delta + delta=1./numpy.sqrt(delta) + vdrift=alpha-beta + dvdrift=delta - else: + #need to develop estimate of complete density profile using all + #available data - raise ValueError("Could not split the data when is read Profile by Profile. 
Use VoltageReader(..., getblock=True)") + #estimate sample variances for long-pulse power profile - deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint - dataOut.heightList = numpy.arange(dataOut.nHeights/n) * deltaHeight + dataOut.heightList[0] + self.sigma[:dataOut.NACF+2*dataOut.IBITS+2]=((anoise0+self.powera[:dataOut.NACF+2*dataOut.IBITS+2])**2)/float(nis) + ''' + ioff=1 - dataOut.nProfiles = int(dataOut.nProfiles*n) + #deconvolve rectangular pulse shape from profile ==> powerb, perror - dataOut.profileIndex = profileIndex - dataOut.ippSeconds /= n + ############# START nnlswrap############# - return dataOut + if dataOut.ut_Faraday>14.0: + alpha_nnlswrap=20.0 + else: + alpha_nnlswrap=30.0 -class CombineProfiles(Operation): - def __init__(self, **kwargs): + range1_nnls=dataOut.NACF + range2_nnls=dataOut.NACF+dataOut.IBITS-1 - Operation.__init__(self, **kwargs) + g_nnlswrap=numpy.zeros((range1_nnls,range2_nnls),'float32') + a_nnlswrap=numpy.zeros((range2_nnls,range2_nnls),'float64') - self.__remData = None - self.__profileIndex = 0 + for i in range(range1_nnls): + for j in range(range2_nnls): + if j>=i and j16: + self.dpulse[i]+=dataOut.ph2[k]/dataOut.h2[k] + elif k>=36-aux: + self.lpulse[i]+=self.powerb[k] + self.lagp[i]=self.powera[i] - profileIndex = dataOut.profileIndex/n + #find scale factor that best merges profiles + qi=sum(self.dpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2) + ri=sum((self.dpulse[32:dataOut.NACF]*self.lpulse[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2) + si=sum((self.dpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2) + ui=sum(self.lpulse[32:dataOut.NACF]**2/(self.lagp[32:dataOut.NACF]+anoise0)**2) + vi=sum((self.lpulse[32:dataOut.NACF]*self.lagp[32:dataOut.NACF])/(self.lagp[32:dataOut.NACF]+anoise0)**2) - deltaHeight = dataOut.heightList[1] - dataOut.heightList[0] + alpha=(si*ui-vi*ri)/(qi*ui-ri*ri) + beta=(qi*vi-ri*si)/(qi*ui-ri*ri) - dataOut.heightList = numpy.arange(dataOut.nHeights*n) * deltaHeight + dataOut.heightList[0] + #form density profile estimate, merging rescaled power profiles - dataOut.nProfiles = int(dataOut.nProfiles/n) + self.powerb[16:36-aux]=alpha*dataOut.ph2[16:36-aux]/dataOut.h2[16:36-aux] + self.powerb[36-aux:dataOut.NACF]*=beta - dataOut.profileIndex = profileIndex + #form Ne estimate, fill in error estimate at low altitudes - dataOut.ippSeconds *= n + dataOut.ene[0:36-aux]=dataOut.sdp2[0:36-aux]/dataOut.ph2[0:36-aux] + dataOut.ne[:dataOut.NACF]=self.powerb[:dataOut.NACF]*dataOut.h2[:dataOut.NACF]/alpha + #now do error propagation: store zero lag error covariance in u + + nis=dataOut.NSCAN*dataOut.NAVG*dataOut.nint/1 # DLH serious debris removal + + for i in range(dataOut.NACF): + for j in range(i,dataOut.NACF): + if j-i>=dataOut.IBITS: + self.u[i,j]=0.0 + else: + self.u[i,j]=dataOut.output_LP_integrated.real[j-i,i,0]**2/float(nis) + self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,i,0])/dataOut.output_LP_integrated.real[0,i,0] + self.u[i,j]*=(anoise0+dataOut.output_LP_integrated.real[0,j,0])/dataOut.output_LP_integrated.real[0,j,0] + + self.u[j,i]=self.u[i,j] + + #now error analyis for lag product matrix (diag), place in acf_err + + for i in range(dataOut.NACF): + for j in range(dataOut.IBITS): + if j==0: + dataOut.errors[0,i]=numpy.sqrt(self.u[i,i]) + else: + 
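+                    # Variance of lag j at height i on the next line: product of the
+                    # noise-added lag-0 powers at heights i and i+j plus the squared
+                    # lag value, divided by the 2*nis independent estimates.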
dataOut.errors[j,i]=numpy.sqrt(((dataOut.output_LP_integrated.real[0,i,0]+anoise0)*(dataOut.output_LP_integrated.real[0,i+j,0]+anoise0)+dataOut.output_LP_integrated.real[j,i,0]**2)/float(2*nis)) + + #with suppress_stdout_stderr(): + #full_profile_profile.profile(numpy.transpose(dataOut.output_LP_integrated,(2,1,0)),numpy.transpose(dataOut.errors),self.powerb,dataOut.ne,dataOut.lags_LP,dataOut.thb,dataOut.bfm,dataOut.te,dataOut.ete,dataOut.ti,dataOut.eti,dataOut.ph,dataOut.eph,dataOut.phe,dataOut.ephe,dataOut.range1,dataOut.ut,dataOut.NACF,dataOut.fit_array_real,dataOut.status,dataOut.NRANGE,dataOut.IBITS) + ''' + if dataOut.status>=3.5: + dataOut.te[:]=numpy.nan + dataOut.ete[:]=numpy.nan + dataOut.ti[:]=numpy.nan + dataOut.eti[:]=numpy.nan + dataOut.ph[:]=numpy.nan + dataOut.eph[:]=numpy.nan + dataOut.phe[:]=numpy.nan + dataOut.ephe[:]=numpy.nan + ''' return dataOut + class PulsePairVoltage(Operation): ''' Function PulsePair(Signal Power, Velocity) diff --git a/schainpy/model/proc/jroproc_voltage_lags.py b/schainpy/model/proc/jroproc_voltage_lags.py new file mode 100644 index 0000000..4573778 --- /dev/null +++ b/schainpy/model/proc/jroproc_voltage_lags.py @@ -0,0 +1,2561 @@ + +import matplotlib.pyplot as plt + + + +import numpy +import time +import math + +from datetime import datetime + +from schainpy.utils import log + +import struct +import os + +import sys + +from ctypes import * + +from schainpy.model.io.jroIO_voltage import VoltageReader,JRODataReader +from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator +from schainpy.model.data.jrodata import Voltage + + + + +@MPDecorator +class VoltageLagsProc(ProcessingUnit): + + def __init__(self): + + ProcessingUnit.__init__(self) + + self.dataOut = Voltage() + self.bcounter=0 + self.dataOut.kax=None + self.dataOut.kay=None + self.dataOut.kbx=None + self.dataOut.kby=None + self.dataOut.kax2=None + self.dataOut.kay2=None + self.dataOut.kbx2=None + self.dataOut.kby2=None + self.dataOut.kaxbx=None + self.dataOut.kaxby=None + self.dataOut.kaybx=None + self.dataOut.kayby=None + self.dataOut.kaxay=None + self.dataOut.kbxby=None + self.aux=1 + + self.LP_products_aux=0 + self.lag_products_LP_median_estimates_aux=0 + + #self.dataOut.input_dat_type=0 #06/04/2020 + + def get_products_cabxys(self): + + + if self.aux==1: + + + + self.dataOut.read_samples=int(self.dataOut.systemHeaderObj.nSamples/self.dataOut.OSAMP) + if self.dataOut.experiment=="DP": + self.dataOut.nptsfft1=132 #30/03/2020 + self.dataOut.nptsfft2=140 #30/03/2020 + if self.dataOut.experiment=="HP": + self.dataOut.nptsfft1=128 #30/03/2020 + self.dataOut.nptsfft2=150 #30/03/2020 + + + #self.dataOut.noise_final_list=[] #30/03/2020 + + padding=numpy.zeros(1,'int32') + + hsize=numpy.zeros(1,'int32') + bufsize=numpy.zeros(1,'int32') + nr=numpy.zeros(1,'int32') + ngates=numpy.zeros(1,'int32') ### ### ### 2 + time1=numpy.zeros(1,'uint64') # pos 3 + time2=numpy.zeros(1,'uint64') # pos 4 + lcounter=numpy.zeros(1,'int32') + groups=numpy.zeros(1,'int32') + system=numpy.zeros(4,'int8') # pos 7 + h0=numpy.zeros(1,'float32') + dh=numpy.zeros(1,'float32') + ipp=numpy.zeros(1,'float32') + process=numpy.zeros(1,'int32') + tx=numpy.zeros(1,'int32') + + ngates1=numpy.zeros(1,'int32') ### ### ### 13 + time0=numpy.zeros(1,'uint64') # pos 14 + nlags=numpy.zeros(1,'int32') + nlags1=numpy.zeros(1,'int32') + txb=numpy.zeros(1,'float32') ### ### ### 17 + time3=numpy.zeros(1,'uint64') # pos 18 + time4=numpy.zeros(1,'uint64') # pos 19 + h0_=numpy.zeros(1,'float32') + 
dh_=numpy.zeros(1,'float32') + ipp_=numpy.zeros(1,'float32') + txa_=numpy.zeros(1,'float32') + + pad=numpy.zeros(100,'int32') + + nbytes=numpy.zeros(1,'int32') + limits=numpy.zeros(1,'int32') + ngroups=numpy.zeros(1,'int32') ### ### ### 27 + + + self.dataOut.header=[hsize,bufsize,nr,ngates,time1,time2, + lcounter,groups,system,h0,dh,ipp, + process,tx,ngates1,padding,time0,nlags, + nlags1,padding,txb,time3,time4,h0_,dh_, + ipp_,txa_,pad,nbytes,limits,padding,ngroups] + + if self.dataOut.experiment == "DP": + self.dataOut.header[1][0]=81864 + if self.dataOut.experiment == "HP": + self.dataOut.header[1][0]=173216 + + self.dataOut.header[3][0]=max(self.dataOut.NRANGE,self.dataOut.NDT) + self.dataOut.header[7][0]=self.dataOut.NAVG + self.dataOut.header[9][0]=int(self.dataOut.heightList[0]) + self.dataOut.header[10][0]=self.dataOut.DH + self.dataOut.header[17][0]=self.dataOut.DPL + self.dataOut.header[18][0]=self.dataOut.NLAG + #self.header[5][0]=0 + self.dataOut.header[15][0]=self.dataOut.NDP + self.dataOut.header[2][0]=self.dataOut.NR + #time.mktime(time.strptime() + + + + + self.aux=0 + + + + + + + + + + if self.dataOut.experiment=="DP": + + + self.dataOut.lags_array=[x / self.dataOut.DH for x in self.dataOut.flags_array] + self.cax=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.cay=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.cbx=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.cby=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.cax2=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.cay2=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.cbx2=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.cby2=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.caxbx=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.caxby=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.caybx=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.cayby=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.caxay=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + self.cbxby=numpy.zeros((self.dataOut.NDP,self.dataOut.nlags_array,2)) + + for i in range(2): + for j in range(self.dataOut.NDP): + for k in range(int(self.dataOut.NSCAN/2)): + n=k%self.dataOut.nlags_array + ax=self.dataOut.data[0,2*k+i,j].real + ay=self.dataOut.data[0,2*k+i,j].imag + if j+self.dataOut.lags_array[n]=self.dataOut.nkill/2 and k 2): + nums_min= int(ndata/divider) + else: + nums_min=2 + sump=0.0 + sumq=0.0 + j=0 + cont=1 + while ( (cont==1) and (j nums_min): + rtest= float(j/(j-1)) +1.0/ndata + if( (sumq*j) > (rtest*sump*sump ) ): + j=j-1 + sump-= data[j] + sumq-=data[j]*data[j] + cont= 0 + noise= (sump/j) + + return noise + + + + def test(self): + + #print("LP_init") + #self.dataOut.flagNoData=1 + buffer=self.dataOut.data + #self.dataOut.flagNoData=0 + if self.LP_products_aux==0: + + #self.dataOut.nptsfft2=150 + self.cnorm=float((self.dataOut.nptsfft2LP-self.dataOut.NSCAN)/self.dataOut.NSCAN) + + + #print("self.bcounter",self.bcounter) + self.lagp0=numpy.zeros((self.dataOut.NLAG,self.dataOut.NRANGE,self.dataOut.NAVG),'complex64') + self.lagp1=numpy.zeros((self.dataOut.NLAG,self.dataOut.NRANGE,self.dataOut.NAVG),'complex64') + self.lagp2=numpy.zeros((self.dataOut.NLAG,self.dataOut.NRANGE,self.dataOut.NAVG),'complex64') + self.lagp3=numpy.zeros((self.dataOut.NLAG,self.dataOut.NRANGE,self.dataOut.NAVG),'complex64') + 
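The hildebrand() helper above follows the Hildebrand-Sekhon idea: sort the power samples, accumulate running sums, and stop once the sample variance exceeds what pure noise with navg incoherent averages would allow. A cleaned-up sketch of that test (not the exact routine, which also enforces a minimum sample count before applying the criterion):

import numpy

def hildebrand_sekhon_noise(power, navg):
    """Return the mean of the sorted samples that are consistent with white noise."""
    sorted_p = numpy.sort(power, axis=None)
    sump = 0.0
    sumq = 0.0
    j = 0
    for value in sorted_p:
        sump += value
        sumq += value * value
        j += 1
        if j > 1:
            rtest = j / (j - 1.0) + 1.0 / navg
            if sumq * j > rtest * sump * sump:
                # variance test failed: the signal region has been reached
                sump -= value
                sumq -= value * value
                j -= 1
                break
    return sump / j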
self.lagp4=numpy.zeros((self.dataOut.NLAG,self.dataOut.NRANGE,self.dataOut.NAVG),'complex64') + self.LP_products_aux=1 + + #print(self.dataOut.data[0,0,0]) + #self.dataOut.flagNoData =False + for i in range(self.dataOut.NR-1): + #print("inside i",i) + buffer_dc=self.dataOut.dc[i] + for j in range(self.dataOut.NRANGE): + #print("inside j",j) + #print(self.dataOut.read_samples) + #input() + range_for_n=numpy.min((self.dataOut.NRANGE-j,self.dataOut.NLAG)) + for k in range(self.dataOut.nptsfft2LP): + #print(self.dataOut.data[i][k][j]) + #input() + #print(self.dataOut.dc) + #input() + #aux_ac=0 + buffer_aux=numpy.conj(buffer[i][k][j]-buffer_dc) + #self.dataOut.flagNoData=0 + for n in range(range_for_n): + + + #for n in range(numpy.min((self.dataOut.NRANGE-j,self.dataOut.NLAG))): + #print(numpy.shape(self.dataOut.data)) + #input() + #pass + #self.dataOut.flagNoData=1 + #c=2*buffer_aux + #c=(self.dataOut.data[i][k][j]-self.dataOut.dc[i])*(numpy.conj(self.dataOut.data[i][k][j+n]-self.dataOut.dc[i])) + #c=(buffer[i][k][j]-buffer_dc)*(numpy.conj(buffer[i][k][j+n])-buffer_dc) + + c=(buffer_aux)*(buffer[i][k][j+n]-buffer_dc) + #c=(buffer[i][k][j])*(buffer[i][k][j+n]) + #print("first: ",self.dataOut.data[i][k][j]-self.dataOut.dc[i]) + #print("second: ",numpy.conj(self.dataOut.data[i][k][j+n]-self.dataOut.dc[i])) + + #print("c: ",c) + #input() + #print("n: ",n) + #print("aux_ac",aux_ac) + #print("data1:",self.dataOut.data[i][k][j]) + #print("data2:",self.dataOut.data[i][k][j+n]) + #print("dc: ",self.dataOut.dc[i]) + #if aux_ac==2: + #input() + #aux_ac+=1 + #print("GG") + #print("inside n",n) + #pass + + if k=self.dataOut.nkill/2 and k + ((debris[i-12]+debris[i-11]+debris[i-10]+debris[i-9]+ + debris[i+12]+debris[i+11]+debris[i+10]+debris[i+9])/2.0+ + thresh)): + + self.dataOut.debris_activated=1 + #print("LP debris",i) + + + #print("self.debris",debris) + + + def remove_debris_DP(self): + + if self.dataOut.flag_save==1: + debris=numpy.zeros(self.dataOut.NDP,dtype='float32') + Range=numpy.arange(0,3000,15) + for k in range(2): #flip + for i in range(self.dataOut.NDP): # + debris[i]+=numpy.sqrt((self.dataOut.kaxbx[i,0,k]+self.dataOut.kayby[i,0,k])**2+(self.dataOut.kaybx[i,0,k]-self.dataOut.kaxby[i,0,k])**2) + + #print("debris: ",debris) + + if time.gmtime(self.dataOut.utctime).tm_hour > 11: + for i in range(2,self.dataOut.NDP-2): + if (debris[i]>3.0*debris[i-2] and + debris[i]>3.0*debris[i+2] and + Range[i]>200.0 and Range[i]<=540.0): + + self.dataOut.debris_activated=1 + #print("DP debris") + + + + + + + def run(self, experiment="", nlags_array=None, NLAG=None, NR=None, NRANGE=None, NCAL=None, DPL=None, + NDN=None, NDT=None, NDP=None, NLP=None, NSCAN=None, HDR_SIZE=None, DH=15, H0=None, LPMASK=None, + flags_array=None, + NPROFILE1=None, NPROFILE2=None, NPROFILES=None, NPROFILE=None, + lagind=None, lagfirst=None, + nptsfftx1=None): + + #self.dataOut.input_dat_type=input_dat_type + + self.dataOut.experiment=experiment + + #print(self.dataOut.experiment) + self.dataOut.nlags_array=nlags_array + self.dataOut.NLAG=NLAG + self.dataOut.NR=NR + self.dataOut.NRANGE=NRANGE + #print(self.dataOut.NRANGE) + self.dataOut.NCAL=NCAL + self.dataOut.DPL=DPL + self.dataOut.NDN=NDN + self.dataOut.NDT=NDT + self.dataOut.NDP=NDP + self.dataOut.NLP=NLP + self.dataOut.NSCAN=NSCAN + self.dataOut.HDR_SIZE=HDR_SIZE + self.dataOut.DH=float(DH) + self.dataOut.H0=H0 + self.dataOut.LPMASK=LPMASK + self.dataOut.flags_array=flags_array + + self.dataOut.NPROFILE1=NPROFILE1 + self.dataOut.NPROFILE2=NPROFILE2 + 
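The nested loops above accumulate long-pulse lag products, conj(v[j]) * v[j+n] per channel after DC removal, split by flip state and averaging bin. A vectorized single-channel sketch of the same product, assuming a hypothetical (npulses, ngates) complex array volts and a scalar complex DC estimate dc:

import numpy

def lag_products(volts, dc, nlag):
    """Accumulate complex lag products summed over pulses for every range gate."""
    npulses, ngates = volts.shape
    v = volts - dc
    acf = numpy.zeros((nlag, ngates), dtype='complex64')
    for n in range(nlag):
        # conj(v[:, j]) * v[:, j + n], summed over pulses, for every valid gate j
        acf[n, :ngates - n] = numpy.sum(numpy.conj(v[:, :ngates - n]) * v[:, n:], axis=0)
    return acf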
self.dataOut.NPROFILES=NPROFILES + self.dataOut.NPROFILE=NPROFILE + self.dataOut.lagind=lagind + self.dataOut.lagfirst=lagfirst + self.dataOut.nptsfftx1=nptsfftx1 + + + self.dataOut.copy(self.dataIn) + #print(self.dataOut.datatime) + #print(self.dataOut.ippSeconds_general) + #print("Data: ",numpy.shape(self.dataOut.data)) + #print("Data_after: ",self.dataOut.data[0,0,1]) + ## (4, 150, 334) + #print(self.dataOut.channelIndexList) + + #print(self.dataOut.timeInterval) + + ###NEWWWWWWW + self.dataOut.lat=-11.95 + self.dataOut.lon=-7687 + self.dataOut.debris_activated=0 + + #print(time.gmtime(self.dataOut.utctime).tm_hour) + #print(numpy.shape(self.dataOut.heightList)) + + + +class NewData(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + + + + def run(self,dataOut): + + #print("SHAPE",numpy.shape(dataOut.kaxby)) + print("CurrentBlock",dataOut.CurrentBlock) + #print("DATAOUT",dataOut.kaxby) + #print("TRUE OR FALSE",numpy.shape(dataOut.kaxby)==()) + #print("SHAPE",numpy.shape(dataOut.kaxby)) + if numpy.shape(dataOutF.kax)!=(): ############VER SI SE PUEDE TRABAJAR CON dataOut.kaxby==None ##Puede ser cualquier k... + + print("NEWDATA",dataOut.kaxby) + + + + + + return dataOut + + + + + + + + + + + +''' + +class PlotVoltageLag(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + +self.kax=numpy.zeros((self.NDP,self.nlags_array,2),'float32') + def range(self,DH): + Range=numpy.arange(0,990,DH) + return Range + + + + def run(self,dataOut): + + + + #plt.subplot(1, 4, 1) + plt.plot(kax[:,0,0],Range,'r',linewidth=2.0) + plt.xlim(min(limit_min_plot1[12::,0,0]), max(limit_max_plot1[12::,0,0])) + plt.show() + +self.kax=numpy.zeros((self.NDP,self.nlags_array,2),'float32') + + return dataOut +''' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +class Integration(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + + self.counter=0 + self.aux=0 + self.aux2=1 + + def run(self,dataOut,nint=None): + + dataOut.nint=nint + dataOut.AUX=0 + dataOut.paramInterval=dataOut.nint*dataOut.header[7][0]*2 #GENERALIZAR EL 2 + #print("CurrentBlock: ",dataOut.CurrentBlock) + #print("date: ",dataOut.datatime) + #print("self.aux: ",self.aux) + #print("CurrentBlockAAAAAA: ",dataOut.CurrentBlock) + #print(dataOut.input_dat_type) + #print(dataOut.heightList) + + #print(dataOut.blocktime.ctime()) + ''' + if dataOut.input_dat_type: #when .dat data is read + #print(dataOut.realtime) + #print("OKODOKO") + #dataOut.flagNoData = False + #print(dataOut.flagNoData) + if self.aux2: + + self.noise=numpy.zeros(dataOut.NR,'float32') + + + padding=numpy.zeros(1,'int32') + + hsize=numpy.zeros(1,'int32') + bufsize=numpy.zeros(1,'int32') + nr=numpy.zeros(1,'int32') + ngates=numpy.zeros(1,'int32') ### ### ### 2 + time1=numpy.zeros(1,'uint64') # pos 3 + time2=numpy.zeros(1,'uint64') # pos 4 + lcounter=numpy.zeros(1,'int32') + groups=numpy.zeros(1,'int32') + system=numpy.zeros(4,'int8') # pos 7 + h0=numpy.zeros(1,'float32') + dh=numpy.zeros(1,'float32') + ipp=numpy.zeros(1,'float32') + process=numpy.zeros(1,'int32') + tx=numpy.zeros(1,'int32') + + ngates1=numpy.zeros(1,'int32') ### ### ### 13 + time0=numpy.zeros(1,'uint64') # pos 14 + nlags=numpy.zeros(1,'int32') + nlags1=numpy.zeros(1,'int32') + txb=numpy.zeros(1,'float32') ### ### ### 17 + time3=numpy.zeros(1,'uint64') # pos 18 + time4=numpy.zeros(1,'uint64') # pos 19 + h0_=numpy.zeros(1,'float32') + dh_=numpy.zeros(1,'float32') + ipp_=numpy.zeros(1,'float32') + 
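The commented-out .dat branch of the Integration operation below reads each header field by unpacking as many bytes as the field's numpy dtype occupies. A compact sketch of that pattern; read_header_fields, stream and fields are hypothetical names, and the dtype-to-struct mapping is only an assumption about the intended field widths:

import struct
import numpy

_FMT = {numpy.dtype('int32'): 'i',
        numpy.dtype('uint64'): 'Q',
        numpy.dtype('int8'): 'b',
        numpy.dtype('float32'): 'f'}

def read_header_fields(stream, fields):
    """Fill each small numpy array in `fields` from a binary stream, element by element."""
    for field in fields:
        fmt = _FMT[field.dtype]
        for k in range(field.size):
            raw = stream.read(field.itemsize)
            field[k] = struct.unpack(fmt, raw)[0]
    return fields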
txa_=numpy.zeros(1,'float32') + + pad=numpy.zeros(100,'int32') + + nbytes=numpy.zeros(1,'int32') + limits=numpy.zeros(1,'int32') + ngroups=numpy.zeros(1,'int32') ### ### ### 27 + #Make the header list + #header=[hsize,bufsize,nr,ngates,time1,time2,lcounter,groups,system,h0,dh,ipp,process,tx,padding,ngates1,time0,nlags,nlags1,padding,txb,time3,time4,h0_,dh_,ipp_,txa_,pad,nbytes,limits,padding,ngroups] + dataOut.header=[hsize,bufsize,nr,ngates,time1,time2,lcounter,groups,system,h0,dh,ipp,process,tx,ngates1,padding,time0,nlags,nlags1,padding,txb,time3,time4,h0_,dh_,ipp_,txa_,pad,nbytes,limits,padding,ngroups] + + + + dataOut.kax=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kay=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kby=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kax2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kay2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kbx2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kby2=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kaxbx=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kaxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kaybx=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kayby=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kaxay=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kbxby=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + + self.dataOut.final_cross_products=[dataOut.kax,dataOut.kay,dataOut.kbx,dataOut.kby,dataOut.kax2,dataOut.kay2,dataOut.kbx2,dataOut.kby2,dataOut.kaxbx,dataOut.kaxby,dataOut.kaybx,dataOut.kayby,dataOut.kaxay,dataOut.kbxby] + + self.inputfile_DP = open(dataOut.fname,"rb") + + ## read header the header first time + for i in range(len(dataOut.header)): + for j in range(len(dataOut.header[i])): + #print("len(header[i]) ",len(header[i])) + #input() + temp=self.inputfile_DP.read(int(dataOut.header[i].itemsize)) + if isinstance(dataOut.header[i][0], numpy.int32): + #print(struct.unpack('i', temp)[0]) + dataOut.header[i][0]=struct.unpack('i', temp)[0] + if isinstance(dataOut.header[i][0], numpy.uint64): + dataOut.header[i][0]=struct.unpack('q', temp)[0] + if isinstance(dataOut.header[i][0], numpy.int8): + dataOut.header[i][0]=struct.unpack('B', temp)[0] + if isinstance(dataOut.header[i][0], numpy.float32): + dataOut.header[i][0]=struct.unpack('f', temp)[0] + + + + + self.activator_No_Data=1 + + self.inputfile_DP.seek(0,0) + + #print("Repositioning to",self.npos," bytes, bufsize ", self.header[1][0]) + #self.inputfile.seek(self.npos, 0) + #print("inputfile.tell() ",self.inputfile.tell() ," npos : ", self.npos) + + self.npos=0 + + #if dataOut.nint < 0: + # dataOut.nint=-dataOut.nint + # sfile=os.stat(dataOut.fname) + # if (os.path.exists(dataOut.fname)==0): + # print("ERROR on STAT file: %s\n", dataOut.fname) + # self.npos=sfile.st_size - dataOut.nint*dataOut.header[1][0]# sfile.st_size - nint*header.bufsize + + self.start_another_day=False + if dataOut.new_time_date!=" ": + self.start_another_day=True + + + if self.start_another_day: + #print("Starting_at_another_day") + #new_time_date = "16/08/2013 09:51:43" + #new_time_seconds=time.mktime(time.strptime(new_time_date)) + #dataOut.new_time_date = "04/12/2019 09:21:21" + d = datetime.strptime(dataOut.new_time_date, "%d/%m/%Y %H:%M:%S") + new_time_seconds=time.mktime(d.timetuple()) + + d_2 = datetime.strptime(dataOut.new_ending_time, 
"%d/%m/%Y %H:%M:%S") + self.new_ending_time_seconds=time.mktime(d_2.timetuple()) + #print("new_time_seconds: ",new_time_seconds) + #input() + jumper=0 + + #if jumper>0 and nint>0: + while True: + sfile=os.stat(dataOut.fname) + + if (os.path.exists(dataOut.fname)==0): + print("ERROR on STAT file: %s\n",dataOut.fname) + self.npos=jumper*dataOut.nint*dataOut.header[1][0] #jump_blocks*header,bufsize + self.npos_next=(jumper+1)*dataOut.nint*dataOut.header[1][0] + self.inputfile_DP.seek(self.npos, 0) + jumper+=1 + for i in range(len(dataOut.header)): + for j in range(len(dataOut.header[i])): + #print("len(header[i]) ",len(header[i])) + #input() + temp=self.inputfile_DP.read(int(dataOut.header[i].itemsize)) + if isinstance(dataOut.header[i][0], numpy.int32): + #print(struct.unpack('i', temp)[0]) + dataOut.header[i][0]=struct.unpack('i', temp)[0] + if isinstance(dataOut.header[i][0], numpy.uint64): + dataOut.header[i][0]=struct.unpack('q', temp)[0] + if isinstance(dataOut.header[i][0], numpy.int8): + dataOut.header[i][0]=struct.unpack('B', temp)[0] + if isinstance(dataOut.header[i][0], numpy.float32): + dataOut.header[i][0]=struct.unpack('f', temp)[0] + + if self.npos==0: + if new_time_secondsdataOut.header[4][0]: + print("No Data") + self.inputfile_DP.close() + sys.exit(1) + + self.inputfile_DP.seek(self.npos, 0) + + + + + if new_time_seconds==dataOut.header[4][0]: + #print("EQUALS") + break + + self.inputfile_DP.seek(self.npos_next, 0) + + for i in range(len(dataOut.header)): + for j in range(len(dataOut.header[i])): + #print("len(header[i]) ",len(header[i])) + #input() + temp=self.inputfile_DP.read(int(dataOut.header[i].itemsize)) + if isinstance(dataOut.header[i][0], numpy.int32): + #print(struct.unpack('i', temp)[0]) + dataOut.header[i][0]=struct.unpack('i', temp)[0] + if isinstance(dataOut.header[i][0], numpy.uint64): + dataOut.header[i][0]=struct.unpack('q', temp)[0] + if isinstance(dataOut.header[i][0], numpy.int8): + dataOut.header[i][0]=struct.unpack('B', temp)[0] + if isinstance(dataOut.header[i][0], numpy.float32): + dataOut.header[i][0]=struct.unpack('f', temp)[0] + + + if new_time_secondsself.new_ending_time_seconds: + print("EOF \n") + if self.activator_No_Data: + print("No Data") + self.inputfile_DP.close() + #sys.exit(0) + dataOut.error = True + return dataOut + #print(self.activator_No_Data) + self.activator_No_Data=0 + #dataOut.TimeBlockDate_for_dp_power=dataOut.TimeBlockDate + #dataOut.TimeBlockSeconds_for_dp_power=time.mktime(time.strptime(dataOut.TimeBlockDate_for_dp_power)) + dataOut.TimeBlockSeconds_for_dp_power = dataOut.header[4][0]-((dataOut.nint-1)*dataOut.NAVG*2) + #print(dataOut.TimeBlockSeconds_for_dp_power) + dataOut.TimeBlockDate_for_dp_power=datetime.fromtimestamp(dataOut.TimeBlockSeconds_for_dp_power).strftime("%a %b %-d %H:%M:%S %Y") + #print("Date: ",dataOut.TimeBlockDate_for_dp_power) + #print("Seconds: ",dataOut.TimeBlockSeconds_for_dp_power) + dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_for_dp_power) + dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0 + dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0 + + + if dataOut.experiment=="HP": # NRANGE*NLAG*NR # np.zeros([total_samples*nprofiles],dtype='complex64') + temp=self.inputfile_DP.read(dataOut.NLAG*dataOut.NR*176*8) + ii=0 + for l in range(dataOut.NLAG): #lag + for r in range(dataOut.NR): # unflip and flip + for k in range(176): #RANGE## generalizar + struct.unpack('q', temp[ii:ii+8])[0] + ii=ii+8 + + + + #print("A: ",dataOut.kax) + 
for ind in range(len(self.dataOut.final_cross_products)): #final cross products + temp=self.inputfile_DP.read(dataOut.DPL*2*dataOut.NDT*4) #*4 bytes + ii=0 + #print("kabxys.shape ",kabxys.shape) + #print(kabxys) + for l in range(dataOut.DPL): #lag + for fl in range(2): # unflip and flip + for k in range(dataOut.NDT): #RANGE + self.dataOut.final_cross_products[ind][k,l,fl]=struct.unpack('f', temp[ii:ii+4])[0] + ii=ii+4 + #print("DPL*2*NDT*4 es: ", DPL*2*NDT*4) + #print("B: ",dataOut.kax) + ## read noise + temp=self.inputfile_DP.read(dataOut.NR*4) #*4 bytes + for ii in range(dataOut.NR): + self.noise[ii]=struct.unpack('f', temp[ii*4:(ii+1)*4])[0] + #print("NR*4 es: ", NR*4) + + +################################END input_dat_type################################ + ''' + + #if dataOut.input_dat_type==0: + + if self.aux==1: + #print("CurrentBlockBBBBB: ",dataOut.CurrentBlock) + #print(dataOut.datatime) + dataOut.TimeBlockDate_for_dp_power=dataOut.TimeBlockDate + + #print("Date: ",dataOut.TimeBlockDate_for_dp_power) + dataOut.TimeBlockSeconds_for_dp_power=time.mktime(time.strptime(dataOut.TimeBlockDate_for_dp_power)) + #print("Seconds: ",dataOut.TimeBlockSeconds_for_dp_power) + dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_for_dp_power) + dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0 + dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0 + #print("date: ", dataOut.TimeBlockDate) + self.aux=0 + + if numpy.shape(dataOut.kax)!=(): + #print("SELFCOUNTER",self.counter) + #dataOut.flagNoData =True + if self.counter==0: + ''' + dataOut.kax_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kay_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kax2_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kay2_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kbx_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kby_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kbx2_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kby2_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kaxbx_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kaxby_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kaybx_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kayby_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kaxay_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + dataOut.kbxby_integrated=numpy.zeros((dataOut.NDP,dataOut.DPL,2),'float32') + ''' + + tmpx=numpy.zeros((dataOut.header[15][0],dataOut.header[17][0],2),'float32') + dataOut.kabxys_integrated=[tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx,tmpx] + #self.final_cross_products=[dataOut.kax,dataOut.kay,dataOut.kbx,dataOut.kby,dataOut.kax2,dataOut.kay2,dataOut.kbx2,dataOut.kby2,dataOut.kaxbx,dataOut.kaxby,dataOut.kaybx,dataOut.kayby,dataOut.kaxay,dataOut.kbxby] + + #print(numpy.shape(tmpx)) + if self.counter < dataOut.nint: + #if dataOut.input_dat_type==0: + dataOut.final_cross_products=[dataOut.kax,dataOut.kay,dataOut.kbx,dataOut.kby,dataOut.kax2,dataOut.kay2,dataOut.kbx2,dataOut.kby2,dataOut.kaxbx,dataOut.kaxby,dataOut.kaybx,dataOut.kayby,dataOut.kaxay,dataOut.kbxby] + + ''' + dataOut.kax_integrated=dataOut.kax_integrated+dataOut.kax + dataOut.kay_integrated=dataOut.kay_integrated+dataOut.kay + 
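The Integration operation accumulates the fourteen cross-product arrays over nint consecutive blocks and only signals completion (AUX) once the count is reached. A minimal accumulator with the same behaviour; the class and attribute names are illustrative:

import numpy

class CrossProductAccumulator:
    def __init__(self, nint, shape):
        self.nint = nint
        self.count = 0
        self.sums = [numpy.zeros(shape, 'float32') for _ in range(14)]

    def push(self, products):
        """Add one block of 14 cross-product arrays; return True when nint blocks are in."""
        for ind, arr in enumerate(products):
            self.sums[ind] += arr
        self.count += 1
        if self.count == self.nint:
            self.count = 0
            return True          # integrated data ready (the AUX flag in the code above)
        return False

A caller would push dataOut.final_cross_products once per block and hand self.sums to the downstream operations whenever push() returns True.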
dataOut.kax2_integrated=dataOut.kax2_integrated+dataOut.kax2 + dataOut.kay2_integrated=dataOut.kay2_integrated+dataOut.kay2 + dataOut.kbx_integrated=dataOut.kbx_integrated+dataOut.kbx + dataOut.kby_integrated=dataOut.kby_integrated+dataOut.kby + dataOut.kbx2_integrated=dataOut.kbx2_integrated+dataOut.kbx2 + dataOut.kby2_integrated=dataOut.kby2_integrated+dataOut.kby2 + dataOut.kaxbx_integrated=dataOut.kaxbx_integrated+dataOut.kaxbx + dataOut.kaxby_integrated=dataOut.kaxby_integrated+dataOut.kaxby + dataOut.kaybx_integrated=dataOut.kaybx_integrated+dataOut.kaybx + dataOut.kayby_integrated=dataOut.kayby_integrated+dataOut.kayby + dataOut.kaxay_integrated=dataOut.kaxay_integrated+dataOut.kaxbx + dataOut.kbxby_integrated=dataOut.kbxby_integrated+dataOut.kbxby + #print("KAX_BEFORE: ",self.kax_integrated) + ''' + #print("self.final_cross_products[0]: ",self.final_cross_products[0]) + + for ind in range(len(dataOut.kabxys_integrated)): #final cross products + dataOut.kabxys_integrated[ind]=dataOut.kabxys_integrated[ind]+dataOut.final_cross_products[ind] + #print("ataOut.kabxys_integrated[0]: ",dataOut.kabxys_integrated[0]) + + self.counter+=1 + if self.counter==dataOut.nint-1: + self.aux=1 + #dataOut.TimeBlockDate_for_dp_power=dataOut.TimeBlockDate + if self.counter==dataOut.nint: + + #dataOut.flagNoData =False + + self.counter=0 + dataOut.AUX=1 + #self.aux=1 + #print("KAXBY_INTEGRATED: ",dataOut.kaxby_integrated) + + ''' + else : + #dataOut.kax_integrated=self.kax_integrated + self.counter=0 + + + #print("CurrentBlock: ", dataOut.CurrentBlock) + print("KAX_INTEGRATED: ",self.kax_integrated) + #print("nint: ",nint) + ''' + + ##print("CurrentBlock: ", dataOut.CurrentBlock) + ##print("KAX_INTEGRATED: ",dataOut.kax_integrated) + + + return dataOut + + + + + + + + +class SumLagProducts_Old(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + #dataOut.rnint2=numpy.zeros(dataOut.nlags_array,'float32') + + + def run(self,dataOut): + + if dataOut.AUX: #Solo cuando ya hizo la intregacion se ejecuta + + + dataOut.rnint2=numpy.zeros(dataOut.header[17][0],'float32') + #print(dataOut.experiment) + if dataOut.experiment=="DP": + for l in range(dataOut.header[17][0]): + dataOut.rnint2[l]=1.0/(dataOut.nint*dataOut.header[7][0]*12.0) + + + if dataOut.experiment=="HP": + for l in range(dataOut.header[17][0]): + if(l==0 or (l>=3 and l <=6)): + dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.header[7][0]*16.0) + else: + dataOut.rnint2[l]=0.5/float(dataOut.nint*dataOut.header[7][0]*8.0) + #print(dataOut.rnint2) + for l in range(dataOut.header[17][0]): + + dataOut.kabxys_integrated[4][:,l,0]=(dataOut.kabxys_integrated[4][:,l,0]+dataOut.kabxys_integrated[4][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[5][:,l,0]=(dataOut.kabxys_integrated[5][:,l,0]+dataOut.kabxys_integrated[5][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[6][:,l,0]=(dataOut.kabxys_integrated[6][:,l,0]+dataOut.kabxys_integrated[6][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[7][:,l,0]=(dataOut.kabxys_integrated[7][:,l,0]+dataOut.kabxys_integrated[7][:,l,1])*dataOut.rnint2[l] + + dataOut.kabxys_integrated[8][:,l,0]=(dataOut.kabxys_integrated[8][:,l,0]-dataOut.kabxys_integrated[8][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[9][:,l,0]=(dataOut.kabxys_integrated[9][:,l,0]-dataOut.kabxys_integrated[9][:,l,1])*dataOut.rnint2[l] + dataOut.kabxys_integrated[10][:,l,0]=(dataOut.kabxys_integrated[10][:,l,0]-dataOut.kabxys_integrated[10][:,l,1])*dataOut.rnint2[l] + 
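SumLagProducts_Old collapses the two flip states of each integrated lag product: the quadrature self-products are added while the cross terms are differenced (presumably because the phase flip reverses the sign of the coherent cross terms), and each lag is scaled by its normalization rnint2. A per-lag sketch with hypothetical (nheights, 2) arrays and a scalar rnint2 for that lag:

def combine_flip_states(power_terms, cross_terms, rnint2):
    """power_terms/cross_terms: lists of (nheights, 2) arrays -> lists of (nheights,) arrays."""
    combined_power = [(p[:, 0] + p[:, 1]) * rnint2 for p in power_terms]
    combined_cross = [(c[:, 0] - c[:, 1]) * rnint2 for c in cross_terms]
    return combined_power, combined_cross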
dataOut.kabxys_integrated[11][:,l,0]=(dataOut.kabxys_integrated[11][:,l,0]-dataOut.kabxys_integrated[11][:,l,1])*dataOut.rnint2[l] + + + #print("Final Integration: ",dataOut.kabxys_integrated[4][:,l,0]) + + + + + + + return dataOut + + + + + + + + + +class BadHeights_Old(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + + def run(self,dataOut): + + + if dataOut.AUX==1: + dataOut.ibad=numpy.zeros((dataOut.header[15][0],dataOut.header[17][0]),'int32') + + for j in range(dataOut.header[15][0]): + for l in range(dataOut.header[17][0]): + ip1=j+dataOut.header[15][0]*(0+2*l) + if( (dataOut.kabxys_integrated[5][j,l,0] <= 0.) or (dataOut.kabxys_integrated[4][j,l,0] <= 0.) or (dataOut.kabxys_integrated[7][j,l,0] <= 0.) or (dataOut.kabxys_integrated[6][j,l,0] <= 0.)): + dataOut.ibad[j][l]=1 + else: + dataOut.ibad[j][l]=0 + #print("ibad: ",dataOut.ibad) + + + + return dataOut + + + + + + + + + + + + + + + + +class NoisePower_old(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + def hildebrand(self,dataOut,data): + #print("data ",data ) + divider=10 # divider was originally 10 + noise=0.0 + n1=0 + n2=int(dataOut.header[15][0]/2) + sorts= sorted(data) + + nums_min= dataOut.header[15][0]/divider + if((dataOut.header[15][0]/divider)> 2): + nums_min= int(dataOut.header[15][0]/divider) + else: + nums_min=2 + sump=0.0 + sumq=0.0 + j=0 + cont=1 + while( (cont==1) and (j nums_min): + rtest= float(j/(j-1)) +1.0/dataOut.header[7][0] + t1= (sumq*j) + t2=(rtest*sump*sump) + if( (t1/t2) > 0.990): + j=j-1 + sump-= sorts[j+n1] + sumq-=sorts[j+n1]*sorts[j+n1] + cont= 0 + + noise= sump/j + stdv=numpy.sqrt((sumq- noise*noise)/(j-1)) + return noise + + def run(self,dataOut): + + if dataOut.AUX==1: + + #print("ax2 shape ",ax2.shape) + p=numpy.zeros((dataOut.header[2][0],dataOut.header[15][0],dataOut.header[17][0]),'float32') + av=numpy.zeros(dataOut.header[15][0],'float32') + dataOut.pnoise=numpy.zeros(dataOut.header[2][0],'float32') + + p[0,:,:]=dataOut.kabxys_integrated[4][:,:,0]+dataOut.kabxys_integrated[5][:,:,0] #total power for channel 0, just pulse with non-flip + p[1,:,:]=dataOut.kabxys_integrated[6][:,:,0]+dataOut.kabxys_integrated[7][:,:,0] #total power for channel 1 + + #print("p[0,:,:] ",p[0,:,:]) + #print("p[1,:,:] ",p[1,:,:]) + + for i in range(dataOut.header[2][0]): + dataOut.pnoise[i]=0.0 + for k in range(dataOut.header[17][0]): + dataOut.pnoise[i]+= self.hildebrand(dataOut,p[i,:,k]) + #print("dpl ",k, "pnoise[",i,"] ",pnoise[i] ) + dataOut.pnoise[i]=dataOut.pnoise[i]/dataOut.header[17][0] + + + #print("POWERNOISE: ",dataOut.pnoise) + dataOut.pan=1.0*dataOut.pnoise[0] # weights could change + dataOut.pbn=1.0*dataOut.pnoise[1] # weights could change + #print("dataOut.pan ",dataOut.pan, " dataOut.pbn ",dataOut.pbn) + #print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa") + + #print("POWERNOISE: ",dataOut.pnoise) + + + return dataOut + + + + + + + + +class double_pulse_ACFs(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.aux=1 + + def run(self,dataOut): + dataOut.pairsList=None + if dataOut.AUX==1: + dataOut.igcej=numpy.zeros((dataOut.header[15][0],dataOut.header[17][0]),'int32') + + if self.aux==1: + dataOut.rhor=numpy.zeros((dataOut.header[15][0],dataOut.header[17][0]), dtype=float) + dataOut.rhoi=numpy.zeros((dataOut.header[15][0],dataOut.header[17][0]), dtype=float) + dataOut.sdp=numpy.zeros((dataOut.header[15][0],dataOut.header[17][0]), dtype=float) + 
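NoisePower_old builds the total power of each channel from its quadrature self-products, runs a Hildebrand-Sekhon estimate on the height profile of every lag, and averages the per-lag noise levels. A sketch reusing the hildebrand_sekhon_noise() helper sketched earlier; the argument names are illustrative:

import numpy

def channel_noise(kx2, ky2, navg, noise_estimator):
    """kx2, ky2: (nheights, nlags) quadrature self-products for one channel."""
    total_power = kx2 + ky2
    per_lag = [noise_estimator(total_power[:, l], navg)
               for l in range(total_power.shape[1])]
    return float(numpy.mean(per_lag))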
dataOut.sd=numpy.zeros((dataOut.header[15][0],dataOut.header[17][0]), dtype=float) + #dataOut.igcej=numpy.zeros((dataOut.NDP,dataOut.nlags_array),'int32') + dataOut.p=numpy.zeros((dataOut.header[15][0],dataOut.header[17][0]), dtype=float) + dataOut.alag=numpy.zeros(dataOut.header[15][0],'float32') + for l in range(dataOut.header[17][0]): + dataOut.alag[l]=l*dataOut.header[10][0]*2.0/150.0 + self.aux=0 + sn4=dataOut.pan*dataOut.pbn + rhorn=0 + rhoin=0 + #p=np.zeros((ndt,dpl), dtype=float) + panrm=numpy.zeros((dataOut.header[15][0],dataOut.header[17][0]), dtype=float) + + + for i in range(dataOut.header[15][0]): + for j in range(dataOut.header[17][0]): + ################# Total power + pa=numpy.abs(dataOut.kabxys_integrated[4][i,j,0]+dataOut.kabxys_integrated[5][i,j,0]) + pb=numpy.abs(dataOut.kabxys_integrated[6][i,j,0]+dataOut.kabxys_integrated[7][i,j,0]) + #print("PA",pb) + st4=pa*pb + dataOut.p[i,j]=pa+pb-(dataOut.pan+dataOut.pbn) + dataOut.sdp[i,j]=2*dataOut.rnint2[j]*((pa+pb)*(pa+pb)) + ## ACF + rhorp=dataOut.kabxys_integrated[8][i,j,0]+dataOut.kabxys_integrated[11][i,j,0] + rhoip=dataOut.kabxys_integrated[10][i,j,0]-dataOut.kabxys_integrated[9][i,j,0] + if ((pa>dataOut.pan)&(pb>dataOut.pbn)): + #print("dataOut.pnoise[0]: ",dataOut.pnoise[0]) + #print("dataOut.pnoise[1]: ",dataOut.pnoise[1]) + #print("OKKKKKKKKKKKKKKK") + ss4=numpy.abs((pa-dataOut.pan)*(pb-dataOut.pbn)) + #print("ss4: ",ss4) + #print("OKKKKKKKKKKKKKKK") + panrm[i,j]=math.sqrt(ss4) + rnorm=1/panrm[i,j] + #print("rnorm: ",rnorm)get_number_density + #print("OKKKKKKKKKKKKKKK") + + ## ACF + dataOut.rhor[i,j]=rhorp*rnorm + dataOut.rhoi[i,j]=rhoip*rnorm + #print("rhoi: ",dataOut.rhoi) + #print("OKKKKKKKKKKKKKKK") + ############# Compute standard error for ACF + stoss4=st4/ss4 + snoss4=sn4/ss4 + rp2=((rhorp*rhorp)+(rhoip*rhoip))/st4 + rn2=((rhorn*rhorn)+(rhoin*rhoin))/sn4 + rs2=(dataOut.rhor[i,j]*dataOut.rhor[i,j])+(dataOut.rhoi[i,j]*dataOut.rhoi[i,j]) + st=1.0+rs2*(stoss4-(2*math.sqrt(stoss4*snoss4))) + stn=1.0+rs2*(snoss4-(2*math.sqrt(stoss4*snoss4))) + dataOut.sd[i,j]=((stoss4*((1.0+rp2)*st+(2.0*rp2*rs2*snoss4)-4.0*math.sqrt(rs2*rp2)))+(0.25*snoss4*((1.0+rn2)*stn+(2.0*rn2*rs2*stoss4)-4.0*math.sqrt(rs2*rn2))))*dataOut.rnint2[j] + dataOut.sd[i,j]=numpy.abs(dataOut.sd[i,j]) + #print("sd: ",dataOut.sd) + #print("OKKKKKKKKKKKKKKK") + else: #default values for bad points + rnorm=1/math.sqrt(st4) + dataOut.sd[i,j]=1.e30 + dataOut.ibad[i,j]=4 + dataOut.rhor[i,j]=rhorp*rnorm + dataOut.rhoi[i,j]=rhoip*rnorm + if ((pa/dataOut.pan-1.0)>2.25*(pb/dataOut.pbn-1.0)): + dataOut.igcej[i,j]=1 + + #print("sdp",dataOut.sdp) + + return dataOut + + + + + + + +class faraday_angle_and_power_double_pulse(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.aux=1 + + def run(self,dataOut): + #dataOut.NRANGE=NRANGE + #dataOut.H0=H0 + ######### H0 Y NRANGE SON PARAMETROS? 
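double_pulse_ACFs turns the combined cross products into a normalized complex ACF estimate: the real part from kaxbx + kayby, the imaginary part from kaybx - kaxby, both divided by the geometric mean of the noise-corrected channel powers. A per-gate sketch that omits the bad-point and e-jet bookkeeping of the original; all names are illustrative scalars for one height and lag:

import math

def normalized_acf(kaxbx, kayby, kaybx, kaxby, pa, pb, pan, pbn):
    """Return (rho_real, rho_imag), or None when the signal is below the noise level."""
    if pa <= pan or pb <= pbn:
        return None                       # no usable estimate at this gate
    norm = math.sqrt((pa - pan) * (pb - pbn))
    rho_r = (kaxbx + kayby) / norm
    rho_i = (kaybx - kaxby) / norm
    return rho_r, rho_i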
+ + if dataOut.AUX==1: + if self.aux==1: + dataOut.h2=numpy.zeros(dataOut.header[15][0],'float32') + dataOut.range1=numpy.zeros(dataOut.header[15][0],order='F',dtype='float32') + dataOut.sdn2=numpy.zeros(dataOut.header[15][0],'float32') + dataOut.ph2=numpy.zeros(dataOut.header[15][0],'float32') + dataOut.sdp2=numpy.zeros(dataOut.header[15][0],'float32') + dataOut.ibd=numpy.zeros(dataOut.header[15][0],'float32') + dataOut.phi=numpy.zeros(dataOut.header[15][0],'float32') + self.aux=0 + #print("p: ",dataOut.p) + + + for i in range(dataOut.header[15][0]): + dataOut.range1[i]=dataOut.header[9][0] + i*dataOut.header[10][0] # (float) header.h0 + (float)i * header.dh + dataOut.h2[i]=dataOut.range1[i]**2 + + #print("sd: ",dataOut.sd) + #print("OIKKKKKKKKKKKKKKK") + #print("ibad: ",dataOut.ibad) + #print("igcej: ",dataOut.igcej) + for j in range(dataOut.header[15][0]): + dataOut.ph2[j]=0. + dataOut.sdp2[j]=0. + ri=dataOut.rhoi[j][0]/dataOut.sd[j][0] + rr=dataOut.rhor[j][0]/dataOut.sd[j][0] + dataOut.sdn2[j]=1./dataOut.sd[j][0] + #print("sdn2: ",dataOut.sdn2) + #print("OIKKKKKKKKKKKKKKK") + pt=0.# // total power + st=0.# // total signal + ibt=0# // bad lags + ns=0# // no. good lags + for l in range(dataOut.header[17][0]): + #add in other lags if outside of e-jet contamination + if( (dataOut.igcej[j][l] == 0) and (dataOut.ibad[j][l] == 0) ): + #print("dataOut.p[j][l]: ",dataOut.p[j][l]) + dataOut.ph2[j]+=dataOut.p[j][l]/dataOut.sdp[j][l] + dataOut.sdp2[j]=dataOut.sdp2[j]+1./dataOut.sdp[j][l] + ns+=1 + + pt+=dataOut.p[j][l]/dataOut.sdp[j][l] + st+=1./dataOut.sdp[j][l] + ibt|=dataOut.ibad[j][l]; + #print("pt: ",pt) + #print("st: ",st) + if(ns!= 0): + dataOut.ibd[j]=0 + dataOut.ph2[j]=dataOut.ph2[j]/dataOut.sdp2[j] + dataOut.sdp2[j]=1./dataOut.sdp2[j] + else: + dataOut.ibd[j]=ibt + dataOut.ph2[j]=pt/st + #print("ph2: ",dataOut.ph2) + dataOut.sdp2[j]=1./st + #print("ph2: ",dataOut.ph2) + dataOut.ph2[j]=dataOut.ph2[j]*dataOut.h2[j] + dataOut.sdp2[j]=numpy.sqrt(dataOut.sdp2[j])*dataOut.h2[j] + rr=rr/dataOut.sdn2[j] + ri=ri/dataOut.sdn2[j] + #rm[j]=np.sqrt(rr*rr + ri*ri) it is not used in c program + dataOut.sdn2[j]=1./(dataOut.sdn2[j]*(rr*rr + ri*ri)) + if( (ri == 0.) and (rr == 0.) ): + dataOut.phi[j]=0. 
+ else: + dataOut.phi[j]=math.atan2( ri , rr ) + + #print("ph2: ",dataOut.ph2) + #print("sdp2: ",dataOut.sdp2) + #print("sdn2",dataOut.sdn2) + + + return dataOut + + + + + + +class get_number_density(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.aux=1 + + def run(self,dataOut,NSHTS=None,RATE=None): + dataOut.NSHTS=NSHTS + dataOut.RATE=RATE + if dataOut.AUX==1: + #dataOut.TimeBlockSeconds=time.mktime(time.strptime(dataOut.TimeBlockDate)) + #dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds) + #dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0 + if self.aux==1: + dataOut.dphi=numpy.zeros(dataOut.header[15][0],'float32') + dataOut.sdn1=numpy.zeros(dataOut.header[15][0],'float32') + self.aux=0 + theta=numpy.zeros(dataOut.header[15][0],dtype=numpy.complex_) + thetai=numpy.zeros(dataOut.header[15][0],dtype=numpy.complex_) + # use complex numbers for phase + for i in range(dataOut.NSHTS): + theta[i]=math.cos(dataOut.phi[i])+math.sin(dataOut.phi[i])*1j + thetai[i]=-math.sin(dataOut.phi[i])+math.cos(dataOut.phi[i])*1j + + # differentiate and convert to number density + ndphi=dataOut.NSHTS-4 + #print("dataOut.dphiBEFORE: ",dataOut.dphi) + for i in range(2,dataOut.NSHTS-2): + fact=(-0.5/(dataOut.RATE*dataOut.header[10][0]))*dataOut.bki[i] + #four-point derivative, no phase unwrapping necessary + dataOut.dphi[i]=((((theta[i+1]-theta[i-1])+(2.0*(theta[i+2]-theta[i-2])))/thetai[i])).real/10.0 + #print("dataOut.dphi[i]AFTER: ",dataOut.dphi[i]) + dataOut.dphi[i]=abs(dataOut.dphi[i]*fact) + dataOut.sdn1[i]=(4.*(dataOut.sdn2[i-2]+dataOut.sdn2[i+2])+dataOut.sdn2[i-1]+dataOut.sdn2[i+1]) + dataOut.sdn1[i]=numpy.sqrt(dataOut.sdn1[i])*fact + ''' + #print("date: ",dataOut.TimeBlockDate) + #print("CurrentBlock: ", dataOut.CurrentBlock) + #print("NSHTS: ",dataOut.NSHTS) + print("phi: ",dataOut.phi) + #print("header[10][0]: ",dataOut.DH) + print("bkibki: ",dataOut.bki) + #print("RATE: ",dataOut.RATE) + print("sdn2: ",dataOut.sdn2) + print("dphi: ",dataOut.dphi) + print("sdn1: ",dataOut.sdn1) + print("ph2: ",dataOut.ph2) + print("sdp2: ",dataOut.sdp2) + print("sdn1: ",dataOut.sdn1) + ''' + + ''' + Al finallllllllllllllllllllllllllllllllllllllllllllllllllllllllll + for i in range(dataOut.NSHTS): + dataOut.ph2[i]=(max(1.0, dataOut.ph2[i])) + dataOut.dphi[i]=(max(1.0, dataOut.dphi[i])) + #print("dphi ",dphi) + # threshold - values less than 10⁴ + for i in range(dataOut.NSHTS): + if dataOut.ph2[i]<10000: + dataOut.ph2[i]=10000 + + # threshold values more than 10⁷ + for i in range(dataOut.NSHTS): + if dataOut.ph2[i]>10000000:# + dataOut.ph2[i]=10000000 + + ## filter for errors + for i in range(dataOut.NSHTS): + if dataOut.sdp2[i]>100000:# + dataOut.ph2[i]=10000 + ''' + + + + + return dataOut + + + + + + + + + + + +class normalize_dp_power2(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + self.aux=1 + + def normal(self,a,b,n,m): + chmin=1.0e30 + chisq=numpy.zeros(150,'float32') + temp=numpy.zeros(150,'float32') + + for i in range(2*m-1): + an=al=be=chisq[i]=0.0 + for j in range(int(n/m)): + k=int(j+i*n/(2*m)) + if(a[k]>0.0 and b[k]>0.0): + al+=a[k]*b[k] + be+=b[k]*b[k] + + if(be>0.0): + temp[i]=al/be + else: + temp[i]=1.0 + + for j in range(int(n/m)): + k=int(j+i*n/(2*m)) + if(a[k]>0.0 and b[k]>0.0): + chisq[i]+=(numpy.log10(b[k]*temp[i]/a[k]))**2 + an=an+1 + + if(chisq[i]>0.0): + chisq[i]/=an + + + for i in range(int(2*m-1)): + if(chisq[i]1.0e-6): + chmin=chisq[i] + cf=temp[i] + return cf + + + + 
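normal() searches for the factor that best maps the double-pulse power profile onto the Faraday (dphi) density profile: over a set of sub-windows it computes the least-squares scale sum(a*b)/sum(b*b) and keeps the one with the smallest log-ratio chi-square. A compact sketch of that selection using non-overlapping windows and hypothetical 1-D numpy arrays, where reference plays the role of dphi and profile the role of ph2:

import numpy

def best_scale_factor(reference, profile, nwindows=3):
    """Return the scale factor with the smallest log10 misfit over the sub-windows."""
    n = len(reference)
    step = max(1, n // nwindows)
    best_cf, best_chi = 1.0, numpy.inf
    for start in range(0, n - step + 1, step):
        a = reference[start:start + step]
        b = profile[start:start + step]
        good = (a > 0.0) & (b > 0.0)
        if not numpy.any(good) or numpy.sum(b[good] ** 2) == 0.0:
            continue
        cf = numpy.sum(a[good] * b[good]) / numpy.sum(b[good] ** 2)
        if cf <= 0.0:
            continue
        chi = numpy.mean(numpy.log10(b[good] * cf / a[good]) ** 2)
        if chi < best_chi:
            best_chi, best_cf = chi, cf
    return best_cf

In the run() method that follows, the returned factor (cf) multiplies ph2 and sdp2 so the double-pulse power and its error bars are expressed on the Faraday density scale.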
def run(self,dataOut,cut0=None,cut1=None): + dataOut.cut0=float(cut0) + dataOut.cut1=float(cut1) + if dataOut.AUX==1: + #print("dateBefore: ",dataOut.TimeBlockDate_for_dp_power) + #print("dateNow: ",dataOut.TimeBlockDate) + if self.aux==1: + dataOut.cf=numpy.zeros(1,'float32') + dataOut.cflast=numpy.zeros(1,'float32') + self.aux=0 + + night_first=300.0 + night_first1= 310.0 + night_end= 450.0 + day_first=250.0 + day_end=400.0 + day_first_sunrise=190.0 + day_end_sunrise=280.0 + + if(dataOut.ut>4.0 and dataOut.ut<11.0): #early + i2=(night_end-dataOut.range1[0])/dataOut.header[10][0] + i1=(night_first -dataOut.range1[0])/dataOut.header[10][0] + elif (dataOut.ut>0.0 and dataOut.ut<4.0): #night + i2=(night_end-dataOut.range1[0])/dataOut.header[10][0] + i1=(night_first1 -dataOut.range1[0])/dataOut.header[10][0] + elif (dataOut.ut>=11.0 and dataOut.ut<13.5): #sunrise + i2=( day_end_sunrise-dataOut.range1[0])/dataOut.header[10][0] + i1=(day_first_sunrise - dataOut.range1[0])/dataOut.header[10][0] + else: + i2=(day_end-dataOut.range1[0])/dataOut.header[10][0] + i1=(day_first -dataOut.range1[0])/dataOut.header[10][0] + + i1=int(i1) + i2=int(i2) + #print("ph2: ",dataOut.ph2) + dataOut.cf=self.normal(dataOut.dphi[i1::], dataOut.ph2[i1::], i2-i1, 1) + + #print("n in:",i1,"(",dataOut.range1[i1],"), i2=",i2,"(",dataOut.range1[i2],"), ut=",dataOut.ut,", cf=",dataOut.cf,", cf_last=", + #dataOut.cflast) + # in case of spread F, normalize much higher + if(dataOut.cf cf: ",dataOut.cf," cflast: ", dataOut.cflast) + dataOut.cf=self.normal(dataOut.dphi[int(i1)::], dataOut.ph2[int(i1)::], int(i2-i1), 1) + dataOut.cf=dataOut.cflast[0] + + #print(">>>i1=",i1,"(",dataOut.range1[i1],"), i2=",i2,"(",dataOut.range1[i2],"), ut=",dataOut.ut,", cf=",dataOut.cf,", cf_last=", + # dataOut.cflast," (",dataOut.cf/dataOut.cflast,"), cut=",dataOut.cut0," ",dataOut.cut1) + dataOut.cflast[0]=dataOut.cf + + ## normalize double pulse power and error bars to Faraday + for i in range(dataOut.NSHTS): + dataOut.ph2[i]*=dataOut.cf + dataOut.sdp2[i]*=dataOut.cf + #print("******* correction factor: ",dataOut.cf) + + #print(dataOut.ph2) + + for i in range(dataOut.NSHTS): + dataOut.ph2[i]=(max(1.0, dataOut.ph2[i])) + dataOut.dphi[i]=(max(1.0, dataOut.dphi[i])) + #print("dphi ",dphi) + # threshold - values less than 10⁴ + + ''' + for i in range(dataOut.NSHTS): + if dataOut.ph2[i]<10000: + dataOut.ph2[i]=10000 + + # threshold values more than 10⁷ + for i in range(dataOut.NSHTS): + if dataOut.ph2[i]>10000000:# + dataOut.ph2[i]=10000000 + + ## filter for errors + for i in range(dataOut.NSHTS): + if dataOut.sdp2[i]>100000:# + dataOut.ph2[i]=10000 + ''' + + + + + + ''' + #print("date: ",dataOut.TimeBlockDate) + #print("CurrentBlock: ", dataOut.CurrentBlock) + #print("NSHTS: ",dataOut.NSHTS) + print("phi: ",dataOut.phi) + #print("header[10][0]: ",dataOut.DH) + print("bkibki: ",dataOut.bki) + #print("RATE: ",dataOut.RATE) + print("sdn2: ",dataOut.sdn2) + print("dphi: ",dataOut.dphi) + print("sdn1: ",dataOut.sdn1) + print("ph2: ",dataOut.ph2) + print("sdp2: ",dataOut.sdp2) + print("sdn1: ",dataOut.sdn1) + ''' + + + + + + + + return dataOut + + + + + + + + + + + + + + + +''' +from ctypes import * +class IDATE(Structure): + _fields_ = [ + ("year", c_int), + ("moda", c_int), + ("hrmn", c_int), + ("sec", c_int), + ("secs", c_int), + ] +#typedef struct IDATE {int year,moda,hrmn,sec,secs;} idate; +''' + + + + +''' +class get_number_density(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + #self.aux=1 +''' + +''' + def 
IDATE(Structure): + + _fields_ = [ + ("year", c_int), + ("moda", c_int), + ("hrmn", c_int), + ("sec", c_int), + ("secs", c_int), + ] + + ''' + + + + +''' + def run(self,dataOut): +''' +''' + if dataOut.CurrentBlock==1 and self.aux==1: + + #print("CurrentBlock: ",dataOut.CurrentBlock) + + dataOut.TimeBlockSeconds=time.mktime(time.strptime(dataOut.TimeBlockDate)) + #print("time1: ",dataOut.TimeBlockSeconds) + + #print("date: ",dataOut.TimeBlockDate) + dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds) + #print("bd_time: ",dataOut.bd_time) + dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0 + #print("year: ",dataOut.year) + dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0 + #print("ut: ",dataOut.ut) + self.aux=0 + + + + + ''' + #print("CurrentBlock: ",dataOut.CurrentBlock) + #print("date: ",dataOut.firsttime) + #print("bd_time: ",time.strptime(dataOut.datatime.ctime())) + #mkfact_short.mkfact(year,h,bfm,thb,bki,dataOut.NDP) + #print("CurrentBlock: ",dataOut.CurrentBlock) +''' + if dataOut.AUX==1: + ''' +''' + #begin=IDATE() + #begin.year=dataOut.bd_time.tm_year + #begin.moda=100*(dataOut.bd_time.tm_mon)+dataOut.bd_time.tm_mday + #begin.hrmn=100*dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min + #begin.sec=dataOut.bd_time.tm_sec + #begin.secs=dataOut.bd_time.tm_sec+60*(dataOut.bd_time.tm_min+60*(dataOut.bd_time.tm_hour+24*(dataOut.bd_time.tm_yday-1))) + h=numpy.arange(0.0,15.0*dataOut.NDP,15.0,dtype='float32') + bfm=numpy.zeros(dataOut.NDP,dtype='float32') + bfm=numpy.array(bfm,order='F') + thb=numpy.zeros(dataOut.NDP,dtype='float32') + thb=numpy.array(thb,order='F') + bki=numpy.zeros(dataOut.NDP,dtype='float32') + bki=numpy.array(thb,order='F') + #yearmanually=2019.9285714285713 + #print("year manually: ",yearmanually) + #print("year: ",dataOut.year) + mkfact_short.mkfact(dataOut.year,h,bfm,thb,bki,dataOut.NDP) + #print("tm ",tm) + ''' +''' + print("year ",dataOut.year) + print("h ", dataOut.h) + print("bfm ", dataOut.bfm) + print("thb ", dataOut.thb) + print("bki ", dataOut.bki) +''' + + + + +''' + print("CurrentBlock: ",dataOut.CurrentBlock) + + + + + + + + + + + return dataOut +''' + + + + + + +class test(Operation): + def __init__(self, **kwargs): + + Operation.__init__(self, **kwargs) + + + + + def run(self,dataOut,tt=10): + + print("tt: ",tt) + + + + return dataOut diff --git a/schainpy/model/proc/pxproc_parameters.py b/schainpy/model/proc/pxproc_parameters.py index 9ba0611..7f7c971 100644 --- a/schainpy/model/proc/pxproc_parameters.py +++ b/schainpy/model/proc/pxproc_parameters.py @@ -61,4 +61,4 @@ class PXParametersProc(ProcessingUnit): meta[attr] = getattr(self.dataOut, attr) meta['mode'] = mode - self.dataOut.meta = meta \ No newline at end of file + self.dataOut.meta = meta diff --git a/schainpy/scripts/PPD.py b/schainpy/scripts/PPD.py index 0c66244..1e377b1 100644 --- a/schainpy/scripts/PPD.py +++ b/schainpy/scripts/PPD.py @@ -1,6 +1,6 @@ import argparse -from schainpy.controller import Project, multiSchain +from schainpy.controller import Project#, multiSchain desc = "HF_EXAMPLE"