chann attribute in class dopplerFlip
Alexander Valdez -
r1697:40634503f931
@@ -0,0 +1,223
1 '''
2 Base classes to create processing units and operations; the MPDecorator
3 must be used in plotting and writing operations to allow them to run as an
4 external process.
5 '''
6 import os
7 import inspect
8 import zmq
9 import time
10 import pickle
11 import traceback
12 from threading import Thread
13 from multiprocessing import Process, Queue
14 from schainpy.utils import log
15 #isr-jro_proc_base.py
16 import copy
17 QUEUE_SIZE = int(os.environ.get('QUEUE_MAX_SIZE', '10'))
18
19 class ProcessingUnit(object):
20 '''
21 Base class to create Signal Chain Units
22 '''
23
24 proc_type = 'processing'
25
26 def __init__(self):
27
28 self.dataIn = None
29 self.dataOut = None
30 self.isConfig = False
31 self.operations = []
32 self.name = 'Test'
33 self.inputs = []
34
35 def setInput(self, unit):
36
37 attr = 'dataIn'
38 for i, u in enumerate(unit):
39 if i==0:
40 self.dataIn = u.dataOut#.copy()
41 self.inputs.append('dataIn')
42 else:
43 setattr(self, 'dataIn{}'.format(i), u.dataOut)#.copy())
44 self.inputs.append('dataIn{}'.format(i))
45
46 def getAllowedArgs(self):
47 if hasattr(self, '__attrs__'):
48 return self.__attrs__
49 else:
50 return inspect.getargspec(self.run).args
51
52 def addOperation(self, conf, operation):
53 '''Append (operation, conf.type, kwargs) to the list of operations run over dataOut.'''
54
55
56 self.operations.append((operation, conf.type, conf.getKwargs()))
57
58 def getOperationObj(self, objId):
59
60 if objId not in list(self.operations.keys()):
61 return None
62
63 return self.operations[objId]
64
65 def call(self, **kwargs):
66 '''
67 '''
68
69 try:
70 if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error:
71 if self.dataIn.runNextUnit:
72 return not self.dataIn.isReady()
73 else:
74 return self.dataIn.isReady()
75 elif self.dataIn is None or not self.dataIn.error:
76 self.run(**kwargs)
77 elif self.dataIn.error:
78 self.dataOut.error = self.dataIn.error
79 self.dataOut.flagNoData = True
80 except:
81 err = traceback.format_exc()
82 if 'SchainWarning' in err:
83 log.warning(err.split('SchainWarning:')[-1].split('\n')[0].strip(), self.name)
84 elif 'SchainError' in err:
85 log.error(err.split('SchainError:')[-1].split('\n')[0].strip(), self.name)
86 else:
87 log.error(err, self.name)
88 self.dataOut.error = True
89 for op, optype, opkwargs in self.operations:
90 aux = self.dataOut.copy()
91 if optype == 'other' and not self.dataOut.flagNoData:
92 self.dataOut = op.run(self.dataOut, **opkwargs)
93 elif optype == 'external' and not self.dataOut.flagNoData:
94 op.queue.put(aux)
95 elif optype == 'external' and self.dataOut.error:
96 op.queue.put(aux)
97 try:
98 if self.dataOut.runNextUnit:
99 runNextUnit = self.dataOut.runNextUnit
100 else:
101 runNextUnit = self.dataOut.isReady()
102 except:
103 runNextUnit = self.dataOut.isReady()
104 return 'Error' if self.dataOut.error else runNextUnit# self.dataOut.isReady()
105
106 def setup(self):
107
108 raise NotImplementedError
109
110 def run(self):
111
112 raise NotImplementedError
113
114 def close(self):
115
116 return
117
118
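# Sketch: a minimal ProcessingUnit subclass, assuming the setInput()/call()
# flow above; the class name and the pass-through behaviour are illustrative.
class _ExampleUnit(ProcessingUnit):

    def run(self):
        # call() dispatches here; a real unit would build or update
        # self.dataOut from self.dataIn
        self.dataOut = self.dataIn
        self.dataOut.flagNoData = False  # lets the attached operations run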
119 class Operation(object):
120
121 '''
122 '''
123
124 proc_type = 'operation'
125
126 def __init__(self):
127
128 self.id = None
129 self.isConfig = False
130
131 if not hasattr(self, 'name'):
132 self.name = self.__class__.__name__
133
134 def getAllowedArgs(self):
135 if hasattr(self, '__attrs__'):
136 return self.__attrs__
137 else:
138 return inspect.getargspec(self.run).args
139
140 def setup(self):
141
142 self.isConfig = True
143
144 raise NotImplementedError
145
146 def run(self, dataIn, **kwargs):
147 """
148 Performs the necessary operations on dataIn.data and updates the
149 attributes of the dataIn object.
150
151 Input:
152
153 dataIn : object of type JROData
154
155 Return:
156
157 None
158
159 Affected:
160 __buffer : data reception buffer.
161
162 """
163 if not self.isConfig:
164 self.setup(**kwargs)
165
166 raise NotImplementedError
167
168 def close(self):
169
170 return
171
172
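# Sketch: a minimal Operation subclass following the setup()/run() contract
# above; the offset parameter is illustrative only.
class _ExampleOperation(Operation):

    def setup(self, offset=0.0):
        self.offset = offset
        self.isConfig = True

    def run(self, dataOut, **kwargs):
        if not self.isConfig:
            self.setup(**kwargs)
        # 'other'-type operations receive dataOut from ProcessingUnit.call()
        # and must return it
        return dataOut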
173 def MPDecorator(BaseClass):
174 """
175 Multiprocessing class decorator
176
177 This function adds multiprocessing features to a BaseClass.
178 """
179
180 class MPClass(BaseClass, Process):
181
182 def __init__(self, *args, **kwargs):
183 super(MPClass, self).__init__()
184 Process.__init__(self)
185
186 self.args = args
187 self.kwargs = kwargs
188 self.t = time.time()
189 self.op_type = 'external'
190 self.name = BaseClass.__name__
191 self.__doc__ = BaseClass.__doc__
192
193 if 'plot' in self.name.lower() and not self.name.endswith('_'):
194 self.name = '{}{}'.format(self.CODE.upper(), 'Plot')
195
196 self.start_time = time.time()
197 self.err_queue = args[3]
198 self.queue = Queue(maxsize=QUEUE_SIZE)
199 self.myrun = BaseClass.run
200
201 def run(self):
202
203 while True:
204
205 dataOut = self.queue.get()
206
207 if not dataOut.error:
208 try:
209 BaseClass.run(self, dataOut, **self.kwargs)
210 except:
211 err = traceback.format_exc()
212 log.error(err, self.name)
213 else:
214 break
215
216 self.close()
217
218 def close(self):
219
220 BaseClass.close(self)
221 log.success('Done...(Time:{:4.2f} secs)'.format(time.time() - self.start_time), self.name)
222
223 return MPClass
\ No newline at end of file
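# Sketch: wrapping a writing/plotting operation with MPDecorator so it runs
# as an external process; the class name and empty body are illustrative, and
# instantiation (which expects the error queue as the 4th positional
# argument) is not shown.
@MPDecorator
class _ExampleWriter(Operation):

    def run(self, dataOut, **kwargs):
        # data arrives through self.queue inside MPClass.run(), which calls
        # this method for every dataOut put there by the parent unit
        pass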
@@ -0,0 +1,7333
1 import numpy
2 import math
3 from scipy import optimize, interpolate, signal, stats, ndimage
4 import scipy
5 from scipy.optimize import least_squares
6 import re
7 import datetime
8 import copy
9 import sys
10 import importlib
11 import itertools
12 from multiprocessing import Pool, TimeoutError, Process
13 from multiprocessing.pool import ThreadPool
14 import time
15 from threading import Thread
16 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state parameters
17 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
18 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
19 from scipy import asarray as ar,exp
20 from scipy.optimize import fmin, curve_fit
21 from schainpy.utils import log
22 import warnings
23 from numpy import NaN
24 from scipy.optimize.optimize import OptimizeWarning
25 warnings.filterwarnings('ignore')
26
27
28 SPEED_OF_LIGHT = 299792458
29
30 '''solving pickling issue'''
31
32 def _pickle_method(method):
33 func_name = method.__func__.__name__
34 obj = method.__self__
35 cls = method.__self__.__class__
36 return _unpickle_method, (func_name, obj, cls)
37
38 def _unpickle_method(func_name, obj, cls):
39 for cls in cls.mro():
40 try:
41 func = cls.__dict__[func_name]
42 except KeyError:
43 pass
44 else:
45 break
46 return func.__get__(obj, cls)
47
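# Note: the pair above is the usual recipe for making bound methods picklable
# (needed when multiprocessing.Pool ships them to workers), but it only takes
# effect if registered, which this file does not do. A registration would
# look like:
# import copyreg, types
# copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)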
48 # @MPDecorator
49 class ParametersProc(ProcessingUnit):
50
51 METHODS = {}
52 nSeconds = None
53
54 def __init__(self):
55 ProcessingUnit.__init__(self)
56
57 self.buffer = None
58 self.firstdatatime = None
59 self.profIndex = 0
60 self.dataOut = Parameters()
61 self.setupReq = False # Add this to all processing units
62
63 def __updateObjFromInput(self):
64
65 self.dataOut.inputUnit = self.dataIn.type
66
67 self.dataOut.timeZone = self.dataIn.timeZone
68 self.dataOut.dstFlag = self.dataIn.dstFlag
69 self.dataOut.errorCount = self.dataIn.errorCount
70 self.dataOut.useLocalTime = self.dataIn.useLocalTime
71
72 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
73 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
74 self.dataOut.channelList = self.dataIn.channelList
75 self.dataOut.heightList = self.dataIn.heightList
76 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
77 # self.dataOut.nHeights = self.dataIn.nHeights
78 # self.dataOut.nChannels = self.dataIn.nChannels
79 # self.dataOut.nBaud = self.dataIn.nBaud
80 # self.dataOut.nCode = self.dataIn.nCode
81 # self.dataOut.code = self.dataIn.code
82 # self.dataOut.nProfiles = self.dataOut.nFFTPoints
83 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
84 # self.dataOut.utctime = self.firstdatatime
85 self.dataOut.utctime = self.dataIn.utctime
86 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData # assume the data is already decoded
87 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData # assume the data has not been flipped
88 self.dataOut.nCohInt = self.dataIn.nCohInt
89 # self.dataOut.nIncohInt = 1
90 # self.dataOut.ippSeconds = self.dataIn.ippSeconds
91 # self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
92 self.dataOut.timeInterval1 = self.dataIn.timeInterval
93 self.dataOut.heightList = self.dataIn.heightList
94 self.dataOut.frequency = self.dataIn.frequency
95 #self.dataOut.runNextUnit = self.dataIn.runNextUnit
96 #self.dataOut.noise = self.dataIn.noise
97
98 def run(self):
99
100 #---------------------- Voltage Data ---------------------------
101
102 if self.dataIn.type == "Voltage":
103
104 self.__updateObjFromInput()
105 self.dataOut.data_pre = self.dataIn.data.copy()
106 self.dataOut.flagNoData = False
107 self.dataOut.utctimeInit = self.dataIn.utctime
108 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
109 if hasattr(self.dataIn, 'dataPP_POW'):
110 self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
111
112 if hasattr(self.dataIn, 'dataPP_POWER'):
113 self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
114
115 if hasattr(self.dataIn, 'dataPP_DOP'):
116 self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
117
118 if hasattr(self.dataIn, 'dataPP_SNR'):
119 self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
120
121 if hasattr(self.dataIn, 'dataPP_WIDTH'):
122 self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
123 return
124
125 #---------------------- Spectra Data ---------------------------
126
127 if self.dataIn.type == "Spectra":
128
129 self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
130 self.dataOut.data_spc = self.dataIn.data_spc
131 self.dataOut.data_cspc = self.dataIn.data_cspc
132 self.dataOut.nProfiles = self.dataIn.nProfiles
133 self.dataOut.nIncohInt = self.dataIn.nIncohInt
134 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
135 self.dataOut.ippFactor = self.dataIn.ippFactor
136 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
137 self.dataOut.spc_noise = self.dataIn.getNoise()
138 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
139 # self.dataOut.normFactor = self.dataIn.normFactor
140 self.dataOut.pairsList = self.dataIn.pairsList
141 self.dataOut.groupList = self.dataIn.pairsList
142 self.dataOut.flagNoData = False
143
144 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
145 self.dataOut.ChanDist = self.dataIn.ChanDist
146 else: self.dataOut.ChanDist = None
147
148 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
149 # self.dataOut.VelRange = self.dataIn.VelRange
150 #else: self.dataOut.VelRange = None
151
152 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
153 self.dataOut.RadarConst = self.dataIn.RadarConst
154
155 if hasattr(self.dataIn, 'NPW'): #NPW
156 self.dataOut.NPW = self.dataIn.NPW
157
158 if hasattr(self.dataIn, 'COFA'): #COFA
159 self.dataOut.COFA = self.dataIn.COFA
160
161 #self.dataOut.runNextUnit = self.dataIn.runNextUnit
162
163
164 #---------------------- Correlation Data ---------------------------
165
166 if self.dataIn.type == "Correlation":
167 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
168
169 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
170 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
171 self.dataOut.groupList = (acf_pairs, ccf_pairs)
172
173 self.dataOut.abscissaList = self.dataIn.lagRange
174 self.dataOut.noise = self.dataIn.noise
175 self.dataOut.data_snr = self.dataIn.SNR
176 self.dataOut.flagNoData = False
177 self.dataOut.nAvg = self.dataIn.nAvg
178
179 #---------------------- Parameters Data ---------------------------
180
181 if self.dataIn.type == "Parameters":
182 self.dataOut.copy(self.dataIn)
183 self.dataOut.flagNoData = False
184
185 return True
186
187 self.__updateObjFromInput()
188 self.dataOut.utctimeInit = self.dataIn.utctime
189 self.dataOut.paramInterval = self.dataIn.timeInterval
190
191 return
192
193
194 def target(tups):
195
196 obj, args = tups
197
198 return obj.FitGau(args)
199
200 class RemoveWideGC(Operation):
201 ''' This class removes the wide clutter and replaces it with simple interpolated points.
202 This mainly applies to the CLAIRE radar.
203
204 ClutterWidth : Width to look for the clutter peak
205
206 Input:
207
208 self.dataOut.data_pre : SPC and CSPC
209 self.dataOut.spc_range : To select wind and rainfall velocities
210
211 Affected:
212
213 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
214
215 Written by D. Scipión 25.02.2021
216 '''
217 def __init__(self):
218 Operation.__init__(self)
219 self.i = 0
220 self.ich = 0
221 self.ir = 0
222
223 def run(self, dataOut, ClutterWidth=2.5):
224
225 self.spc = dataOut.data_pre[0].copy()
226 self.spc_out = dataOut.data_pre[0].copy()
227 self.Num_Chn = self.spc.shape[0]
228 self.Num_Hei = self.spc.shape[2]
229 VelRange = dataOut.spc_range[2][:-1]
230 dv = VelRange[1]-VelRange[0]
231
232 # Find the velocity bins within ClutterWidth of zero
233 gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))
234
235 # Removing novalid data from the spectra
236 for ich in range(self.Num_Chn) :
237 for ir in range(self.Num_Hei) :
238 # Estimate the noise at each range
239 HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)
240
241 # Removing the noise floor at each range
242 novalid = numpy.where(self.spc[ich,:,ir] < HSn)
243 self.spc[ich,novalid,ir] = HSn
244
245 junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
246 j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
247 j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
248 if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
249 continue
250 junk3 = numpy.squeeze(numpy.diff(j1index))
251 junk4 = numpy.squeeze(numpy.diff(j2index))
252
253 valleyindex = j2index[numpy.where(junk4>1)]
254 peakindex = j1index[numpy.where(junk3>1)]
255
256 isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
257 if numpy.size(isvalid) == 0 :
258 continue
259 if numpy.size(isvalid) >1 :
260 vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
261 isvalid = isvalid[vindex]
262
263 # clutter peak
264 gcpeak = peakindex[isvalid]
265 vl = numpy.where(valleyindex < gcpeak)
266 if numpy.size(vl) == 0:
267 continue
268 gcvl = valleyindex[vl[0][-1]]
269 vr = numpy.where(valleyindex > gcpeak)
270 if numpy.size(vr) == 0:
271 continue
272 gcvr = valleyindex[vr[0][0]]
273
274 # Removing the clutter
275 interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
276 gcindex = gc_values[gcvl+1:gcvr-1]
277 self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])
278
279 dataOut.data_pre[0] = self.spc_out
280
281 return dataOut
282
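# Sketch: the numpy.interp step used above, in isolation; the values and bin
# indices are synthetic.
def _clutter_interp_example():
    vel = numpy.array([-3., -2., -1., 0., 1., 2., 3.])
    spc = numpy.array([ 1., 1., 9., 20., 8., 1., 1.])  # clutter peak at 0 m/s
    gap = [2, 3, 4]                                     # bins to bridge
    spc[gap] = numpy.interp(vel[gap], vel[[1, 5]], spc[[1, 5]])
    return spc  # clutter bins replaced by a straight line between the flanking bins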
283 class SpectralFilters(Operation):
284 ''' This class allows replacing the invalid ("novalid") values with noise for each channel.
285 This applies to the CLAIRE radar.
286
287 PositiveLimit : right limit of the invalid data
288 NegativeLimit : left limit of the invalid data
289
290 Input:
291
292 self.dataOut.data_pre : SPC and CSPC
293 self.dataOut.spc_range : To select wind and rainfall velocities
294
295 Affected:
296
297 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
298
299 Written by D. Scipión 29.01.2021
300 '''
301 def __init__(self):
302 Operation.__init__(self)
303 self.i = 0
304
305 def run(self, dataOut, PositiveLimit=1.5, NegativeLimit=-1.5): # limit defaults are placeholders; see docstring
306
307 self.spc = dataOut.data_pre[0].copy()
308 self.Num_Chn = self.spc.shape[0]
309 VelRange = dataOut.spc_range[2]
310
311 # novalid corresponds to data within NegativeLimit and PositiveLimit
312 # (selection reconstructed from the docstring above)
313 novalid = numpy.where(numpy.logical_and(VelRange[:-1] >= NegativeLimit, VelRange[:-1] <= PositiveLimit))[0]
314 # Replacing the invalid data in the spectra with the channel noise
315 for i in range(self.Num_Chn):
316 self.spc[i,novalid,:] = dataOut.noise[i]
317 dataOut.data_pre[0] = self.spc
318 return dataOut
319
320
321
322 class GaussianFit(Operation):
323
324 '''
325 Function that fits one and two generalized Gaussians (GG) based
326 on the PSD shape across a "power band" identified from a cumulative sum of
327 the measured spectrum minus the noise.
328
329 Input:
330 self.dataOut.data_pre : SelfSpectra
331
332 Output:
333 self.dataOut.SPCparam : SPC_ch1, SPC_ch2
334
335 '''
336 def __init__(self):
337 Operation.__init__(self)
338 self.i=0
339
340
341 # def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points
342 def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
343 """This routine will find a couple of generalized Gaussians to a power spectrum
344 methods: generalized, squared
345 input: spc
346 output:
347 noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1
348 """
349 print ('Entering ',method,' double Gaussian fit')
350 self.spc = dataOut.data_pre[0].copy()
351 self.Num_Hei = self.spc.shape[2]
352 self.Num_Bin = self.spc.shape[1]
353 self.Num_Chn = self.spc.shape[0]
354
355 start_time = time.time()
356
357 pool = Pool(processes=self.Num_Chn)
358 args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
359 objs = [self for __ in range(self.Num_Chn)]
360 attrs = list(zip(objs, args))
361 DGauFitParam = pool.map(target, attrs)
362 # Parameters:
363 # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
364 dataOut.DGauFitParams = numpy.asarray(DGauFitParam)
365
366 # Double Gaussian Curves
367 gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
368 gau0[:] = numpy.NaN
369 gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
370 gau1[:] = numpy.NaN
371 x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
372 for iCh in range(self.Num_Chn):
373 N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
374 N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
375 A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
376 A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
377 v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
378 v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
379 s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
380 s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
381 if method == 'generalized':
382 p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
383 p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
384 elif method == 'squared':
385 p0 = 2.
386 p1 = 2.
387 gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
388 gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
389 dataOut.GaussFit0 = gau0
390 dataOut.GaussFit1 = gau1
391
392 print('Leaving ',method ,' double Gaussian fit')
393 return dataOut
394
395 def FitGau(self, X):
396 # print('Entering FitGau')
397 # Assigning the variables
398 Vrange, ch, wnoise, num_intg, SNRlimit = X
399 # Noise Limits
400 noisebl = wnoise * 0.9
401 noisebh = wnoise * 1.1
402 # Radar Velocity
403 Va = max(Vrange)
404 deltav = Vrange[1] - Vrange[0]
405 x = numpy.arange(self.Num_Bin)
406
407 # print ('stop 0')
408
409 # 5 parameters, 2 Gaussians
410 DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
411 DGauFitParam[:] = numpy.NaN
412
413 # SPCparam = []
414 # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei])
415 # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei])
416 # SPC_ch1[:] = 0 #numpy.NaN
417 # SPC_ch2[:] = 0 #numpy.NaN
418 # print ('stop 1')
419 for ht in range(self.Num_Hei):
420 # print (ht)
421 # print ('stop 2')
422 # Spectra at each range
423 spc = numpy.asarray(self.spc)[ch,:,ht]
424 snr = ( spc.mean() - wnoise ) / wnoise
425 snrdB = 10.*numpy.log10(snr)
426
427 #print ('stop 3')
428 if snrdB < SNRlimit :
429 # snr = numpy.NaN
430 # SPC_ch1[:,ht] = 0#numpy.NaN
431 # SPC_ch1[:,ht] = 0#numpy.NaN
432 # SPCparam = (SPC_ch1,SPC_ch2)
433 # print ('SNR less than SNRth')
434 continue
435 # wnoise = hildebrand_sekhon(spc,num_intg)
436 # print ('stop 2.01')
437 #############################################
438 # normalizing spc and noise
439 # This part differs from gg1
440 # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
441 #spc = spc / spc_norm_max
442 # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021
443 #############################################
444
445 # print ('stop 2.1')
446 fatspectra=1.0
447 # noise per channel.... we might want to use the noise at each range
448
449 # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021
450 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
451 #if wnoise>1.1*pnoise: # to be tested later
452 # wnoise=pnoise
453 # noisebl = wnoise*0.9
454 # noisebh = wnoise*1.1
455 spc = spc - wnoise # signal
456
457 # print ('stop 2.2')
458 minx = numpy.argmin(spc)
459 #spcs=spc.copy()
460 spcs = numpy.roll(spc,-minx)
461 cum = numpy.cumsum(spcs)
462 # tot_noise = wnoise * self.Num_Bin #64;
463
464 # print ('stop 2.3')
465 # snr = sum(spcs) / tot_noise
466 # snrdB = 10.*numpy.log10(snr)
467 #print ('stop 3')
468 # if snrdB < SNRlimit :
469 # snr = numpy.NaN
470 # SPC_ch1[:,ht] = 0#numpy.NaN
471 # SPC_ch1[:,ht] = 0#numpy.NaN
472 # SPCparam = (SPC_ch1,SPC_ch2)
473 # print ('SNR less than SNRth')
474 # continue
475
476
477 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
478 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
479 # print ('stop 4')
480 cummax = max(cum)
481 epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
482 cumlo = cummax * epsi
483 cumhi = cummax * (1-epsi)
484 powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
485
486 # print ('stop 5')
487 if len(powerindex) < 1:# case for powerindex 0
488 # print ('powerindex < 1')
489 continue
490 powerlo = powerindex[0]
491 powerhi = powerindex[-1]
492 powerwidth = powerhi-powerlo
493 if powerwidth <= 1:
494 # print('powerwidth <= 1')
495 continue
496
497 # print ('stop 6')
498 firstpeak = powerlo + powerwidth/10.# first gaussian energy location
499 secondpeak = powerhi - powerwidth/10. #second gaussian energy location
500 midpeak = (firstpeak + secondpeak)/2.
501 firstamp = spcs[int(firstpeak)]
502 secondamp = spcs[int(secondpeak)]
503 midamp = spcs[int(midpeak)]
504
505 y_data = spc + wnoise
506
507 ''' single Gaussian '''
508 shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
509 width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
510 power0 = 2.
511 amplitude0 = midamp
512 state0 = [shift0,width0,amplitude0,power0,wnoise]
513 bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
514 lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)
515 # print ('stop 7.1')
516 # print (bnds)
517
518 chiSq1=lsq1[1]
519
520 # print ('stop 8')
521 if fatspectra<1.0 and powerwidth<4:
522 choice=0
523 Amplitude0=lsq1[0][2]
524 shift0=lsq1[0][0]
525 width0=lsq1[0][1]
526 p0=lsq1[0][3]
527 Amplitude1=0.
528 shift1=0.
529 width1=0.
530 p1=0.
531 noise=lsq1[0][4]
532 #return (numpy.array([shift0,width0,Amplitude0,p0]),
533 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
534 # print ('stop 9')
535 ''' two Gaussians '''
536 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
537 shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
538 shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
539 width0 = powerwidth/6.
540 width1 = width0
541 power0 = 2.
542 power1 = power0
543 amplitude0 = firstamp
544 amplitude1 = secondamp
545 state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
546 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
547 bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
548 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
549
550 # print ('stop 10')
551 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
552
553 # print ('stop 11')
554 chiSq2 = lsq2[1]
555
556 # print ('stop 12')
557
558 oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
559
560 # print ('stop 13')
561 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
562 if oneG:
563 choice = 0
564 else:
565 w1 = lsq2[0][1]; w2 = lsq2[0][5]
566 a1 = lsq2[0][2]; a2 = lsq2[0][6]
567 p1 = lsq2[0][3]; p2 = lsq2[0][7]
568 s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
569 s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
570 gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each ggaussian with proper p scaling
571
572 if gp1>gp2:
573 if a1>0.7*a2:
574 choice = 1
575 else:
576 choice = 2
577 elif gp2>gp1:
578 if a2>0.7*a1:
579 choice = 2
580 else:
581 choice = 1
582 else:
583 choice = numpy.argmax([a1,a2])+1
584 #else:
585 #choice=argmin([std2a,std2b])+1
586
587 else: # with low SNR go to the most energetic peak
588 choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
589
590 # print ('stop 14')
591 shift0 = lsq2[0][0]
592 vel0 = Vrange[0] + shift0 * deltav
593 shift1 = lsq2[0][4]
594 # vel1=Vrange[0] + shift1 * deltav
595
596 # max_vel = 1.0
597 # Va = max(Vrange)
598 # deltav = Vrange[1]-Vrange[0]
599 # print ('stop 15')
600 #first peak will be 0, second peak will be 1
601 # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D.Scipión 19.03.2021
602 if vel0 > -Va and vel0 < Va : #first peak is in the correct range
603 shift0 = lsq2[0][0]
604 width0 = lsq2[0][1]
605 Amplitude0 = lsq2[0][2]
606 p0 = lsq2[0][3]
607
608 shift1 = lsq2[0][4]
609 width1 = lsq2[0][5]
610 Amplitude1 = lsq2[0][6]
611 p1 = lsq2[0][7]
612 noise = lsq2[0][8]
613 else:
614 shift1 = lsq2[0][0]
615 width1 = lsq2[0][1]
616 Amplitude1 = lsq2[0][2]
617 p1 = lsq2[0][3]
618
619 shift0 = lsq2[0][4]
620 width0 = lsq2[0][5]
621 Amplitude0 = lsq2[0][6]
622 p0 = lsq2[0][7]
623 noise = lsq2[0][8]
624
625 if Amplitude0<0.05: # in case the peak is noise
626 shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
627 if Amplitude1<0.05:
628 shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]
629
630 # print ('stop 16 ')
631 # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0)
632 # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1)
633 # SPCparam = (SPC_ch1,SPC_ch2)
634
635 DGauFitParam[0,ht,0] = noise
636 DGauFitParam[0,ht,1] = noise
637 DGauFitParam[1,ht,0] = Amplitude0
638 DGauFitParam[1,ht,1] = Amplitude1
639 DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
640 DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
641 DGauFitParam[3,ht,0] = width0 * deltav
642 DGauFitParam[3,ht,1] = width1 * deltav
643 DGauFitParam[4,ht,0] = p0
644 DGauFitParam[4,ht,1] = p1
645
646 return DGauFitParam
647
648 def y_model1(self,x,state):
649 shift0, width0, amplitude0, power0, noise = state
650 model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
651 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
652 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
653 return model0 + model0u + model0d + noise
654
655 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
656 shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
657 model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
658 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
659 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
660
661 model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
662 model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
663 model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
664 return model0 + model0u + model0d + model1 + model1u + model1d + noise
665
666 def misfit1(self,state,y_data,x,num_intg): # This function measures how close the real data is to the model data; the smaller the misfit, the better the fit.
667
668 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
669
670 def misfit2(self,state,y_data,x,num_intg):
671 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
672
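# Sketch: the generalized-Gaussian line shape fitted by GaussianFit, for a
# single peak; p = 2 gives an ordinary Gaussian. Names and values are
# illustrative only.
def _generalized_gaussian_example():
    x = numpy.arange(64)
    shift, width, amp, p, noise = 32., 4., 10., 2., 1.
    model = amp * numpy.exp(-0.5 * numpy.abs((x - shift) / width) ** p) + noise
    return model  # model[32] == amp + noise == 11.0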
673 class Oblique_Gauss_Fit(Operation):
674 '''
675 Written by R. Flores
676 '''
677 def __init__(self):
678 Operation.__init__(self)
679
680 def Gauss_fit(self,spc,x,nGauss):
681
682
683 def gaussian(x, a, b, c, d):
684 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
685 return val
686
687 if nGauss == 'first':
688 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
689 spc_2_aux = numpy.flip(spc_1_aux)
690 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
691
692 len_dif = len(x)-len(spc_3_aux)
693
694 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
695
696 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
697
698 y = spc_new
699
700 elif nGauss == 'second':
701 y = spc
702
703
704 # estimate starting values from the data
705 a = y.max()
706 b = x[numpy.argmax(y)]
707 if nGauss == 'first':
708 c = 1.#b#b#numpy.std(spc)
709 elif nGauss == 'second':
710 c = b
711 else:
712 print("ERROR")
713
714 d = numpy.mean(y[-100:])
715
716 # define a least squares function to optimize
717 def minfunc(params):
718 return sum((y-gaussian(x,params[0],params[1],params[2],params[3]))**2)
719
720 # fit
721 popt = fmin(minfunc,[a,b,c,d],disp=False)
722 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
723
724
725 return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
726
727 def Gauss_fit_2(self,spc,x,nGauss):
728
729
730 def gaussian(x, a, b, c, d):
731 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
732 return val
733
734 if nGauss == 'first':
735 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
736 spc_2_aux = numpy.flip(spc_1_aux)
737 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
738
739 len_dif = len(x)-len(spc_3_aux)
740
741 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
742
743 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
744
745 y = spc_new
746
747 elif nGauss == 'second':
748 y = spc
749
750
751 # estimate starting values from the data
752 a = y.max()
753 b = x[numpy.argmax(y)]
754 if nGauss == 'first':
755 c = 1.#b#b#numpy.std(spc)
756 elif nGauss == 'second':
757 c = b
758 else:
759 print("ERROR")
760
761 d = numpy.mean(y[-100:])
762
763 # define a least squares function to optimize
764 popt,pcov = curve_fit(gaussian,x,y,p0=[a,b,c,d])
765 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
766
767
768 #return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
769 return gaussian(x, popt[0], popt[1], popt[2], popt[3]),popt[0], popt[1], popt[2], popt[3]
770
771 def Double_Gauss_fit(self,spc,x,A1,B1,C1,A2,B2,C2,D):
772
773 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
774 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
775 return val
776
777
778 y = spc
779
780 # estimate starting values from the data
781 a1 = A1
782 b1 = B1
783 c1 = C1#numpy.std(spc)
784
785 a2 = A2#y.max()
786 b2 = B2#x[numpy.argmax(y)]
787 c2 = C2#numpy.std(spc)
788 d = D
789
790 # define a least squares function to optimize
791 def minfunc(params):
792 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2)
793
794 # fit
795 popt = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],disp=False)
796
797 return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
798
799 def Double_Gauss_fit_2(self,spc,x,A1,B1,C1,A2,B2,C2,D):
800
801 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
802 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
803 return val
804
805
806 y = spc
807
808 # estimate starting values from the data
809 a1 = A1
810 b1 = B1
811 c1 = C1#numpy.std(spc)
812
813 a2 = A2#y.max()
814 b2 = B2#x[numpy.argmax(y)]
815 c2 = C2#numpy.std(spc)
816 d = D
817
818 # fit
819
820 popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
821
822 error = numpy.sqrt(numpy.diag(pcov))
823
824 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
825
826 def windowing_double(self,spc,x,A1,B1,C1,A2,B2,C2,D):
827 from scipy.optimize import curve_fit,fmin
828
829 def R_gaussian(x, a, b, c):
830 N = int(numpy.shape(x)[0])
831 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
832 return val
833
834 def T(x,N):
835 T = 1-abs(x)/N
836 return T
837
838 def R_T_spc_fun(x, a1, b1, c1, a2, b2, c2, d):
839
840 N = int(numpy.shape(x)[0])
841
842 x_max = x[-1]
843
844 x_pos = x[1600:]
845 x_neg = x[:1600]
846
847 R_T_neg_1 = R_gaussian(x, a1, b1, c1)[:1600]*T(x_neg,-x[0])
848 R_T_pos_1 = R_gaussian(x, a1, b1, c1)[1600:]*T(x_pos,x[-1])
849 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
850 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
851 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
852 max_val_1 = numpy.max(R_T_spc_1)
853 R_T_spc_1 = R_T_spc_1*a1/max_val_1
854
855 R_T_neg_2 = R_gaussian(x, a2, b2, c2)[:1600]*T(x_neg,-x[0])
856 R_T_pos_2 = R_gaussian(x, a2, b2, c2)[1600:]*T(x_pos,x[-1])
857 R_T_sum_2 = R_T_pos_2 + R_T_neg_2
858 R_T_spc_2 = numpy.fft.fft(R_T_sum_2).real
859 R_T_spc_2 = numpy.fft.fftshift(R_T_spc_2)
860 max_val_2 = numpy.max(R_T_spc_2)
861 R_T_spc_2 = R_T_spc_2*a2/max_val_2
862
863 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
864 R_T_d_neg = R_T_d[:1600]*T(x_neg,-x[0])
865 R_T_d_pos = R_T_d[1600:]*T(x_pos,x[-1])
866 R_T_d_sum = R_T_d_pos + R_T_d_neg
867 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
868 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
869
870 R_T_final = R_T_spc_1 + R_T_spc_2 + R_T_spc_3
871
872 return R_T_final
873
874 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
875
876 from scipy.stats import norm
877 mean,std=norm.fit(spc)
878
879 # estimate starting values from the data
880 a1 = A1
881 b1 = B1
882 c1 = C1#numpy.std(spc)
883
884 a2 = A2#y.max()
885 b2 = B2#x[numpy.argmax(y)]
886 c2 = C2#numpy.std(spc)
887 d = D
888
889 ippSeconds = 250*20*1.e-6/3
890
891 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
892
893 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
894
895 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
896 x_freq = numpy.fft.fftshift(x_freq)
897
898 # define a least squares function to optimize
899 def minfunc(params):
900 #print(params[2])
901 #print(numpy.shape(params[2]))
902 return sum((y-R_T_spc_fun(x_t,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/1)#y**2)
903
904 # fit
905
906 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],full_output=True)
907 #print("nIter", popt_full[2])
908 popt = popt_full[0]
909
910 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
911 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
912
913 def Double_Gauss_fit_weight(self,spc,x,A1,B1,C1,A2,B2,C2,D):
914 from scipy.optimize import curve_fit,fmin
915
916 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
917 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
918 return val
919
920 y = spc
921
922 from scipy.stats import norm
923 mean,std=norm.fit(spc)
924
925 # estimate starting values from the data
926 a1 = A1
927 b1 = B1
928 c1 = C1#numpy.std(spc)
929
930 a2 = A2#y.max()
931 b2 = B2#x[numpy.argmax(y)]
932 c2 = C2#numpy.std(spc)
933 d = D
934
935 y_clean = signal.medfilt(y)
936 # define a least squares function to optimize
937 def minfunc(params):
938 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/(y_clean**2/1))
939
940 # fit
941 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d], disp =False, full_output=True)
942 #print("nIter", popt_full[2])
943 popt = popt_full[0]
944 #popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
945
946 #return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
947 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
948
949 def DH_mode(self,spectra,VelRange):
950
951 from scipy.optimize import curve_fit
952
953 def double_gauss(x, a1,b1,c1, a2,b2,c2, d):
954 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
955 return val
956
957 spec = (spectra.copy()).flatten()
958 amp=spec.max()
959 params=numpy.array([amp,-400,30,amp/4,-200,150,1.0e7])
960 #try:
961 popt,pcov=curve_fit(double_gauss, VelRange, spec, p0=params,bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf]))
962
963 error = numpy.sqrt(numpy.diag(pcov))
964 #doppler_2=popt[4]
965 #err_2 = numpy.sqrt(pcov[4][4])
966
967 #except:
968 #pass
969 #doppler_2=numpy.NAN
970 #err_2 = numpy.NAN
971
972 #return doppler_2, err_2
973
974 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
975
976 def Tri_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d):
977
978 from scipy.optimize import least_squares
979
980 freq_max = numpy.max(numpy.abs(freq))
981 spc_max = numpy.max(spc)
982
983 def tri_gaussian(x, a1, b1, c1, a2, b2, c2, a3, b3, c3, d):
984 z1 = (x-b1)/c1
985 z2 = (x-b2)/c2
986 z3 = (x-b3)/c3
987 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + a3 * numpy.exp(-z3**2/2) + d
988 return val
989
990 from scipy.signal import medfilt
991 Nincoh = 20
992 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
993 c1 = abs(c1)
994 c2 = abs(c2)
995
996 # define a least squares function to optimize
997 def lsq_func(params):
998 return (spc-tri_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9]))/spcm
999
1000 # fit
1001 #bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1002 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,600,numpy.inf,numpy.inf])
1003 #bounds=([0,-180,0,0,-100,30,0,110,0,0],[numpy.inf,-110,20,numpy.inf,33,80,numpy.inf,150,16,numpy.inf])
1004 #bounds=([0,-540,0,0,-300,100,0,330,0,0],[numpy.inf,-330,60,numpy.inf,100,240,numpy.inf,450,80,numpy.inf])
1005
1006 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1007 #print(a1,b1,c1,a2,b2,c2,d)
1008 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,a2/4,-b1,c1,d],x_scale=params_scale,bounds=bounds)
1009
1010 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1011 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1012 A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
1013 Df = popt.x[9]
1014
1015 return A1f, B1f, C1f, A2f, B2f, C2f, Df
1016
1017 def Tri_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d): # NOTE: redefines the Tri_Marco above; this two-Gaussian variant shadows the three-Gaussian one
1018
1019 from scipy.optimize import least_squares
1020
1021 freq_max = numpy.max(numpy.abs(freq))
1022 spc_max = numpy.max(spc)
1023
1024 def duo_gaussian(x, a1, b1, c1, a2, b2, c2, d):
1025 z1 = (x-b1)/c1
1026 z2 = (x-b2)/c2
1027 #z3 = (x-b3)/c3
1028 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
1029 return val
1030
1031 from scipy.signal import medfilt
1032 Nincoh = 20
1033 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1034 c1 = abs(c1)
1035 c2 = abs(c2)
1036
1037 # define a least squares function to optimize
1038 def lsq_func(params):
1039 return (spc-duo_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm
1040
1041 # fit
1042 #bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1043 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1044 #bounds=([0,-180,0,0,-100,30,0,110,0,0],[numpy.inf,-110,20,numpy.inf,33,80,numpy.inf,150,16,numpy.inf])
1045 #bounds=([0,-540,0,0,-300,100,0,330,0,0],[numpy.inf,-330,60,numpy.inf,100,240,numpy.inf,450,80,numpy.inf])
1046
1047 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1048 #print(a1,b1,c1,a2,b2,c2,d)
1049 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,d],x_scale=params_scale,bounds=bounds)
1050
1051 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1052 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1053 #A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
1054 Df = popt.x[6]
1055
1056 return A1f, B1f, C1f, A2f, B2f, C2f, Df
1057
1058 def double_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, d):
1059 #from scipy import special
1060 z1 = (x-b1)/c1
1061 z2 = (x-b2)/c2
1062 h2 = 1-k2*z2
1063 h2[h2<0] = 0
1064 y2 = -1/k2*numpy.log(h2)
1065 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1066 return val
1067
1068 def gaussian(self, x, a, b, c, d):
1069 z = (x-b)/c
1070 val = a * numpy.exp(-z**2/2) + d
1071 return val
1072
1073 def double_gaussian(self, x, a1, b1, c1, a2, b2, c2, d):
1074 z1 = (x-b1)/c1
1075 z2 = (x-b2)/c2
1076 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
1077 return val
1078
1079 def double_gaussian_double_skew(self,x, a1, b1, c1, k1, a2, b2, c2, k2, d):
1080
1081 z1 = (x-b1)/c1
1082 h1 = 1-k1*z1
1083 h1[h1<0] = 0
1084 y1 = -1/k1*numpy.log(h1)
1085
1086 z2 = (x-b2)/c2
1087 h2 = 1-k2*z2
1088 h2[h2<0] = 0
1089 y2 = -1/k2*numpy.log(h2)
1090
1091 val = a1 * numpy.exp(-y1**2/2)/(1-k1*z1) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1092 return val
1093
1094 def gaussian_skew(self,x, a2, b2, c2, k2, d):
1095 #from scipy import special
1096 z2 = (x-b2)/c2
1097 h2 = 1-k2*z2
1098 h2[h2<0] = 0
1099 y2 = -1/k2*numpy.log(h2)
1100 val = a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1101 return val
1102
1103 def triple_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, a3, b3, c3, k3, d):
1104 #from scipy import special
1105 z1 = (x-b1)/c1
1106 z2 = (x-b2)/c2
1107 z3 = (x-b3)/c3
1108 h2 = 1-k2*z2
1109 h2[h2<0] = 0
1110 y2 = -1/k2*numpy.log(h2)
1111 h3 = 1-k3*z3
1112 h3[h3<0] = 0
1113 y3 = -1/k3*numpy.log(h3)
1114 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + a3 * numpy.exp(-y3**2/2)/(1-k3*z3) + d
1115 return val
1116
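    # Note: in the skewed forms above, y = -log(1 - k*z)/k tends to z and the
    # 1/(1 - k*z) factor tends to 1 as k -> 0, so each skewed term reduces to
    # the plain Gaussian a*exp(-z**2/2) + d; k only controls the asymmetry.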
1117 def Double_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1118
1119 from scipy.optimize import least_squares
1120
1121 freq_max = numpy.max(numpy.abs(freq))
1122 spc_max = numpy.max(spc)
1123
1124 from scipy.signal import medfilt
1125 Nincoh = 20
1126 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1127
1128 # define a least squares function to optimize
1129 def lsq_func(params):
1130 return (spc-self.double_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7]))/spcm
1131
1132 # fit
1133 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1134 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1135 #print(a1,b1,c1,a2,b2,c2,k2,d)
1136 bounds=([0,-numpy.inf,0,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1137 #print(bounds)
1138 #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1139 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max]
1140 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,1.0e7])
1141 #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1)
1142 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1143 # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1)
1144
1145 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1146 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1147 Df = popt.x[7]
1148
1149 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1150 doppler = freq[numpy.argmax(aux)]
1151
1152 #return A1f, B1f, C1f, A2f, B2f, C2f, K2f, Df, doppler
1153 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, Df, doppler
1154
1155 def Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh,hei):
1156
1157 from scipy.optimize import least_squares
1158
1159 freq_max = numpy.max(numpy.abs(freq))
1160 spc_max = numpy.max(spc)
1161
1162 #from scipy.signal import medfilt
1163 #Nincoh = 20
1164 #Nincoh = 80
1165 Nincoh = Nincoh
1166 #spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1167 spcm = spc/numpy.sqrt(Nincoh)
1168
1169 # define a least squares function to optimize
1170 def lsq_func(params):
1171 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1172
1173 # fit
1174 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1175 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1176 #print(a1,b1,c1,a2,b2,c2,k2,d)
1177 #bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1178 #bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-140,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1179 bounds=([0,-numpy.inf,0,-5,0,-400,0,0,0],[numpy.inf,-200,numpy.inf,5,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1180
1181 #print(bounds)
1182 #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1183 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1184 ####################x0_value = numpy.array([spc_max,-400,30,-.1,spc_max/4,-200,150,1,1.0e7])
1185
1186 dop1_x0 = freq[numpy.argmax(spc)]
1187 ####dop1_x0 = freq[numpy.argmax(spcm)]
1188 if dop1_x0 < 0:
1189 dop2_x0 = dop1_x0 + 100
1190 if dop1_x0 > 0:
1191 dop2_x0 = dop1_x0 - 100
1192
1193 ###########x0_value = numpy.array([spc_max,-200.5,30,-.1,spc_max/4,-100.5,150,1,1.0e7])
1194 x0_value = numpy.array([spc_max,dop1_x0,30,-.1,spc_max/4, dop2_x0,150,1,1.0e7])
1195 #x0_value = numpy.array([spc_max,-400.5,30,-.1,spc_max/4,-200.5,150,1,1.0e7])
1196 #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1)
1197 '''
1198 print("INSIDE 1")
1199 print("x0_value: ", x0_value)
1200 print("boundaries: ", bounds)
1201 import matplotlib.pyplot as plt
1202 plt.plot(freq,spc)
1203 plt.plot(freq,self.double_gaussian_double_skew(freq,x0_value[0],x0_value[1],x0_value[2],x0_value[3],x0_value[4],x0_value[5],x0_value[6],x0_value[7],x0_value[8]))
1204 plt.title(hei)
1205 plt.show()
1206 '''
1207 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1208 # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1)
1209 #print(popt)
1210 #########print("INSIDE 2")
1211 J = popt.jac
1212
1213 try:
1214 cov = numpy.linalg.inv(J.T.dot(J))
1215 error = numpy.sqrt(numpy.diagonal(cov))
1216 except:
1217 error = numpy.ones((9))*numpy.NAN
1218 #print("error_inside",error)
1219 #exit(1)
1220
1221 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1222 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1223 Df = popt.x[8]
1224 '''
1225 A1f_err = error.x[0]; B1f_err= error.x[1]; C1f_err = error.x[2]; K1f_err = error.x[3]
1226 A2f_err = error.x[4]; B2f_err = error.x[5]; C2f_err = error.x[6]; K2f_err = error.x[7]
1227 Df_err = error.x[8]
1228 '''
1229 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1230 doppler1 = freq[numpy.argmax(aux1)]
1231
1232 aux2 = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1233 doppler2 = freq[numpy.argmax(aux2)]
1234 #print("error",error)
1235 #exit(1)
1236
1237
1238 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler1, doppler2, error
1239
1240 def Double_Gauss_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1241
1242 from scipy.optimize import least_squares
1243
1244 freq_max = numpy.max(numpy.abs(freq))
1245 spc_max = numpy.max(spc)
1246
1247 from scipy.signal import medfilt
1248 Nincoh = 20
1249 Nincoh = 80
1250 Nincoh = Nincoh
1251 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1252
1253 # define a least squares function to optimize
1254 def lsq_func(params):
1255 return (spc-self.double_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm
1256
1257 # fit
1258 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1259 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1260 #print(a1,b1,c1,a2,b2,c2,k2,d)
1261
1262 dop1_x0 = freq[numpy.argmax(spcm)]
1263
1264 #####bounds=([0,-numpy.inf,0,0,-400,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1265 #####bounds=([0,-numpy.inf,0,0,dop1_x0-50,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1266 bounds=([0,-numpy.inf,0,0,dop1_x0-50,0,0],[numpy.inf,-300,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1267 #####bounds=([0,-numpy.inf,0,0,-500,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1268 #bounds=([0,-numpy.inf,0,-numpy.inf,0,-500,0,0,0],[numpy.inf,-240,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1269 #print(bounds)
1270 #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1271 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1272 #x0_value = numpy.array([spc_max,-400.5,30,spc_max/4,-200.5,150,1.0e7])
1273 x0_value = numpy.array([spc_max,-400.5,30,spc_max/4,dop1_x0,150,1.0e7])
1274 #x0_value = numpy.array([spc_max,-420.5,30,-.1,spc_max/4,-50,150,.1,numpy.mean(spc[-50:])])
1275 #print("before popt")
1276 #print(x0_value)
1277 #print("freq: ",freq)
1278 #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1)
1279 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1280 # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1)
1281 #print("after popt")
1282 J = popt.jac
1283
1284 try:
1285 cov = numpy.linalg.inv(J.T.dot(J))
1286 error = numpy.sqrt(numpy.diagonal(cov))
1287 except:
1288 error = numpy.ones((7))*numpy.NAN
1289
1290 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1291 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1292 Df = popt.x[6]
1293 #print("before return")
1294 return A1f, B1f, C1f, A2f, B2f, C2f, Df, error
1295
1296 def Double_Gauss_Double_Skew_fit_weight_bound_with_inputs(self, spc, freq, a1, b1, c1, a2, b2, c2, k2, d):
1297
1298 from scipy.optimize import least_squares
1299
1300 freq_max = numpy.max(numpy.abs(freq))
1301 spc_max = numpy.max(spc)
1302
1303 from scipy.signal import medfilt
1304 Nincoh = dataOut.nIncohInt # NOTE: dataOut is not defined in this scope; Nincoh would need to be passed in by the caller
1305 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1306
1307 # define a least squares function to optimize
1308 def lsq_func(params):
1309 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1310
1311
1312 bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1313
1314 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1315
1316 x0_value = numpy.array([a1,b1,c1,-.1,a2,b2,c2,k2,d])
1317
1318 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1319
1320 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1321 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1322 Df = popt.x[8]
1323
1324 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1325 doppler = freq[numpy.argmax(aux)]
1326
1327 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler
1328
1329 def Triple_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1330
1331 from scipy.optimize import least_squares
1332
1333 freq_max = numpy.max(numpy.abs(freq))
1334 spc_max = numpy.max(spc)
1335
1336 from scipy.signal import medfilt
1337 Nincoh = 20
1338 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1339
1340 # define a least squares function to optimize
1341 def lsq_func(params):
1342 return (spc-self.triple_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9],params[10],params[11]))/spcm
1343
1344 # fit
1345 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1346 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1347 #print(a1,b1,c1,a2,b2,c2,k2,d)
1348 bounds=([0,-numpy.inf,0,0,-400,0,0,0,0,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1349 #print(bounds)
1350 #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1351 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1352 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,spc_max/4,400,150,1,1.0e7])
1353 #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1)
1354 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1355 # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1)
1356
1357 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1358 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1359 A3f = popt.x[7]; B3f = popt.x[8]; C3f = popt.x[9]; K3f = popt.x[10]
1360 Df = popt.x[11]
1361
1362 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1363 doppler = freq[numpy.argmax(aux)]
1364
1365 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, A3f, B3f, C3f, K3f, Df, doppler
1366
1367 def CEEJ_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1368
1369 from scipy.optimize import least_squares
1370
1371 freq_max = numpy.max(numpy.abs(freq))
1372 spc_max = numpy.max(spc)
1373
1374 from scipy.signal import medfilt
1375 Nincoh = 20
1376 Nincoh = 80
1377 Nincoh = Nincoh
1378 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1379
1380 # define a least squares function to optimize
1381 def lsq_func(params):
1382 return (spc-self.gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4]))#/spcm
1383
1384
1385 bounds=([0,0,0,-numpy.inf,0],[numpy.inf,numpy.inf,numpy.inf,0,numpy.inf])
1386
1387 params_scale = [spc_max,freq_max,freq_max,1,spc_max]
1388
1389 x0_value = numpy.array([spc_max,freq[numpy.argmax(spc)],30,-.1,numpy.mean(spc[:50])])
1390
1391 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1392
1393 J = popt.jac
1394
1395 try:
1396 error = numpy.ones((9))*numpy.NAN
1397 cov = numpy.linalg.inv(J.T.dot(J))
1398 error[:4] = numpy.sqrt(numpy.diagonal(cov))[:4]
1399 error[-1] = numpy.sqrt(numpy.diagonal(cov))[-1]
1400 except:
1401 error = numpy.ones((9))*numpy.NAN
1402
1403 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1404 Df = popt.x[4]
1405
1406 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1407 doppler1 = freq[numpy.argmax(aux1)]
1408 #print("CEEJ ERROR:",error)
1409
1410 return A1f, B1f, C1f, K1f, numpy.NAN, numpy.NAN, numpy.NAN, numpy.NAN, Df, doppler1, numpy.NAN, error
1411
1412 def CEEJ_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1413
1414 from scipy.optimize import least_squares
1415
1416 freq_max = numpy.max(numpy.abs(freq))
1417 spc_max = numpy.max(spc)
1418
1419 from scipy.signal import medfilt
1420 Nincoh = 20
1421 Nincoh = 80
1422 Nincoh = Nincoh
1423 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1424
1425 # define a least squares function to optimize
1426 def lsq_func(params):
1427 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))#/spcm
1428
1429
1430 bounds=([0,0,0,0],[numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1431
1432 params_scale = [spc_max,freq_max,freq_max,spc_max]
1433
1434 x0_value = numpy.array([spc_max,freq[numpy.argmax(spcm)],30,numpy.mean(spc[:50])])
1435
1436 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1437
1438 J = popt.jac
1439
1440 try:
1441 error = numpy.ones((4))*numpy.NAN
1442 cov = numpy.linalg.inv(J.T.dot(J))
1443 error = numpy.sqrt(numpy.diagonal(cov))
1444 except:
1445 error = numpy.ones((4))*numpy.NAN
1446
1447 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1448 Df = popt.x[3]
1449
1450 return A1f, B1f, C1f, Df, error
1451
1452 def Simple_fit_bound(self,spc,freq,Nincoh):
1453
1454 freq_max = numpy.max(numpy.abs(freq))
1455 spc_max = numpy.max(spc)
1456
1457 #Nincoh = Nincoh # (Nincoh is not used by this fit)
1458
1459 def lsq_func(params):
1460 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))
1461
1462 bounds=([0,-50,0,0],[numpy.inf,+50,numpy.inf,numpy.inf])
1463
1464 params_scale = [spc_max,freq_max,freq_max,spc_max]
1465
1466 x0_value = numpy.array([spc_max,-20.5,5,1.0e7])
1467
1468 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1469
1470 J = popt.jac
1471
1472 try:
1473 cov = numpy.linalg.inv(J.T.dot(J))
1474 error = numpy.sqrt(numpy.diagonal(cov))
1475 except:
1476 error = numpy.ones((4))*numpy.NAN
1477
1478 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1479 Df = popt.x[3]
1480
1481 return A1f, B1f, C1f, Df, error
1482
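    # Illustrative sketch (comment only, not part of the processing chain): how a bounded
    # least-squares Gaussian fit like Simple_fit_bound above behaves on synthetic data.
    # It assumes a standard Gaussian model a*exp(-(x-b)**2/(2*c**2)) + d, which is what
    # self.gaussian is expected to implement elsewhere in this module.
    #
    # import numpy
    # from scipy.optimize import least_squares
    #
    # def _gauss(x, a, b, c, d):
    #     return a * numpy.exp(-(x - b)**2 / (2 * c**2)) + d
    #
    # x = numpy.linspace(-100, 100, 256)                 # velocity axis (m/s)
    # spc = _gauss(x, 1.0e8, -20.5, 5.0, 1.0e7)          # synthetic echo plus noise floor
    # spc = spc + 1.0e6 * numpy.random.randn(x.size)
    #
    # def _residual(p):
    #     return spc - _gauss(x, p[0], p[1], p[2], p[3])
    #
    # fit = least_squares(_residual,
    #                     x0=[spc.max(), -20.5, 5, 1.0e7],
    #                     x_scale=[spc.max(), 100, 100, spc.max()],
    #                     bounds=([0, -50, 0, 0], [numpy.inf, 50, numpy.inf, numpy.inf]),
    #                     verbose=0)
    # A, B, C, D = fit.x                                 # amplitude, Doppler, width, floor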
1483 def clean_outliers(self,param):
1484
1485 threshold = 700
1486
1487 param = numpy.where(param < -threshold, numpy.nan, param)
1488 param = numpy.where(param > +threshold, numpy.nan, param)
1489
1490 return param
1491
1492 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1493 from scipy.optimize import curve_fit,fmin
1494
1495 def R_gaussian(x, a, b, c):
1496 N = int(numpy.shape(x)[0])
1497 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1498 return val
1499
1500 def T(x,N):
1501 T = 1-abs(x)/N
1502 return T
1503
1504 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
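            # Model used by this fit: a Gaussian ACF (R_gaussian) and a DC term d are each
            # multiplied by a triangular (Bartlett) lag window T(x) = 1 - |x|/N, transformed
            # with an FFT (real part, fftshift-ed), and the Gaussian part is rescaled so that
            # its spectral peak equals the amplitude a. The sum of both spectra is returned
            # and compared against the measured spectrum in minfunc below.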
1505
1506 N = int(numpy.shape(x)[0])
1507
1508 x_max = x[-1]
1509
1510 x_pos = x[int(nFFTPoints/2):]
1511 x_neg = x[:int(nFFTPoints/2)]
1512
1513 R_T_neg_1 = R_gaussian(x, a, b, c)[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1514 R_T_pos_1 = R_gaussian(x, a, b, c)[int(nFFTPoints/2):]*T(x_pos,x[-1])
1515 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1516 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1517 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1518 max_val_1 = numpy.max(R_T_spc_1)
1519 R_T_spc_1 = R_T_spc_1*a/max_val_1
1520
1521 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1522 R_T_d_neg = R_T_d[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1523 R_T_d_pos = R_T_d[int(nFFTPoints/2):]*T(x_pos,x[-1])
1524 R_T_d_sum = R_T_d_pos + R_T_d_neg
1525 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1526 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1527
1528 R_T_final = R_T_spc_1 + R_T_spc_3
1529
1530 return R_T_final
1531
1532 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1533
1534 from scipy.stats import norm
1535 mean,std=norm.fit(spc)
1536
1537 # estimate starting values from the data
1538 a = A
1539 b = B
1540 c = C#numpy.std(spc)
1541 d = D
1542 '''
1543 ippSeconds = 250*20*1.e-6/3
1544
1545 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
1546
1547 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1548
1549 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1550 x_freq = numpy.fft.fftshift(x_freq)
1551 '''
1552 # define a least squares function to optimize
1553 def minfunc(params):
1554                return sum((y-R_T_spc_fun(x,params[0],params[1],params[2],params[3],nFFTPoints))**2)
1555
1556 # fit
1557
1558 popt_full = fmin(minfunc,[a,b,c,d],full_output=True)
1559 #print("nIter", popt_full[2])
1560 popt = popt_full[0]
1561
1562 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1563 return popt[0], popt[1], popt[2], popt[3]
1564
1565 def run(self, dataOut, mode = 0, Hmin1 = None, Hmax1 = None, Hmin2 = None, Hmax2 = None, Dop = 'Shift'):
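        # Summary of the fitting modes handled below (derived from the branches in this method):
        #   0: double Gaussian fit                 1: double fit, windowed
        #   2: double fit, weighted                3: simple fit (two-pass Gauss_fit_2)
        #   4: DH_mode over the fixed height ranges l1/l2
        #   5: triple fit, weighted (duo_Marco in l1, double weighted elsewhere)
        #   6: double Gaussian skew, weighted and bounded
        #   7: triple Gaussian skew, weighted and bounded
        #   8: double Gaussian skew, weighted and bounded, with inputs
        #   9: double Gaussian double skew (EEJ) or CEEJ skew, chosen per height
        #  10: single bounded Gaussian (150 km echoes)
        #  11: double Gaussian (EEJ) or single CEEJ Gaussian, chosen per height
        # For mode 9, Dop selects whether the reported Doppler is the fitted 'Shift' or the
        # position of the spectral maximum ('Max').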
1566
1567 pwcode = 1
1568
1569 if dataOut.flagDecodeData:
1570 pwcode = numpy.sum(dataOut.code[0]**2)
1571 #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
1572 normFactor = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * pwcode * dataOut.windowOfFilter
1573 factor = normFactor
1574 z = dataOut.data_spc / factor
1575 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
1576 dataOut.power = numpy.average(z, axis=1)
1577 dataOut.powerdB = 10 * numpy.log10(dataOut.power)
1578
1579 x = dataOut.getVelRange(0)
1580
1581 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1582 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1583 dataOut.dplr_2_u = numpy.ones((1,1,dataOut.nHeights))*numpy.NAN
1584
1585 if mode == 6:
1586 dataOut.Oblique_params = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1587 elif mode == 7:
1588 dataOut.Oblique_params = numpy.ones((1,13,dataOut.nHeights))*numpy.NAN
1589 elif mode == 8:
1590 dataOut.Oblique_params = numpy.ones((1,10,dataOut.nHeights))*numpy.NAN
1591 elif mode == 9:
1592 dataOut.Oblique_params = numpy.ones((1,11,dataOut.nHeights))*numpy.NAN
1593 dataOut.Oblique_param_errors = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1594 elif mode == 11:
1595 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1596 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1597 elif mode == 10: #150 km
1598 dataOut.Oblique_params = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1599 dataOut.Oblique_param_errors = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1600 dataOut.snr_log10 = numpy.ones((1,dataOut.nHeights))*numpy.NAN
1601
1602 dataOut.VelRange = x
1603
1604
1605
1606 #l1=range(22,36) #+62
1607 #l1=range(32,36)
1608 #l2=range(58,99) #+62
1609
1610 #if Hmin1 == None or Hmax1 == None or Hmin2 == None or Hmax2 == None:
1611
1612 minHei1 = 105.
1613 #maxHei1 = 122.5
1614 maxHei1 = 130.5
1615
1616 if mode == 10: #150 km
1617 minHei1 = 100
1618 maxHei1 = 100
1619
1620 inda1 = numpy.where(dataOut.heightList >= minHei1)
1621 indb1 = numpy.where(dataOut.heightList <= maxHei1)
1622
1623 minIndex1 = inda1[0][0]
1624 maxIndex1 = indb1[0][-1]
1625
1626 minHei2 = 150.
1627 #maxHei2 = 201.25
1628 maxHei2 = 225.3
1629
1630 if mode == 10: #150 km
1631 minHei2 = 110
1632 maxHei2 = 165
1633
1634 inda2 = numpy.where(dataOut.heightList >= minHei2)
1635 indb2 = numpy.where(dataOut.heightList <= maxHei2)
1636
1637 minIndex2 = inda2[0][0]
1638 maxIndex2 = indb2[0][-1]
1639
1640 l1=range(minIndex1,maxIndex1)
1641 l2=range(minIndex2,maxIndex2)
1642
1643 if mode == 4:
1644 '''
1645 for ind in range(dataOut.nHeights):
1646 if(dataOut.heightList[ind]>=168 and dataOut.heightList[ind]<188):
1647 try:
1648 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
1649 except:
1650 pass
1651 '''
1652 for ind in itertools.chain(l1, l2):
1653
1654 try:
1655 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
1656 dataOut.dplr_2_u[0,0,ind] = dataOut.Oblique_params[0,4,ind]/numpy.sin(numpy.arccos(102/dataOut.heightList[ind]))
1657 except:
1658 pass
1659
1660 else:
1661 #print("After: ", dataOut.data_snr[0])
1662 #######import matplotlib.pyplot as plt
1663 #######plt.plot(dataOut.data_snr[0],dataOut.heightList,marker='*',linestyle='--')
1664 #######plt.show()
1665 #print("l1: ", dataOut.heightList[l1])
1666 #print("l2: ", dataOut.heightList[l2])
1667 for hei in itertools.chain(l1, l2):
1668 #for hei in range(79,81):
1669 #if numpy.isnan(dataOut.data_snr[0,hei]) or numpy.isnan(numpy.log10(dataOut.data_snr[0,hei])):
1670 if numpy.isnan(dataOut.snl[0,hei]) or dataOut.snl[0,hei]<.0:
1671
1672 continue #Avoids the analysis when there is only noise
1673
1674 try:
1675 spc = dataOut.data_spc[0,:,hei]
1676
1677 if mode == 6: #Skew Weighted Bounded
1678 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei] = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1679 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,8,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1680
1681 elif mode == 7: #Triple Skew Weighted Bounded
1682 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_params[0,11,hei],dataOut.Oblique_params[0,12,hei] = self.Triple_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1683 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,12,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1684
1685 elif mode == 8: #Double Skewed Weighted Bounded with inputs
1686 a1, b1, c1, a2, b2, c2, k2, d, dopp = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1687 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei] = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x, a1, b1, c1, a2, b2, c2, k2, d)
1688 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,9,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1689
1690 elif mode == 9: #Double Skewed Weighted Bounded no inputs
1691 #if numpy.max(spc) <= 0:
1692 from scipy.signal import medfilt
1693 spcm = medfilt(spc,11)
1694 if x[numpy.argmax(spcm)] <= 0:
1695 #print("EEJ", dataOut.heightList[hei], hei)
1696 #if hei != 70:
1697 #continue
1698 #else:
1699 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt,dataOut.heightList[hei])
1700 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1701 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1702 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1703
1704 else:
1705 #print("CEEJ")
1706 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt)
1707 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1708 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1709 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1710 elif mode == 11: #Double Weighted Bounded no inputs
1711 #if numpy.max(spc) <= 0:
1712 from scipy.signal import medfilt
1713 spcm = medfilt(spc,11)
1714
1715 if x[numpy.argmax(spcm)] <= 0:
1716 #print("EEJ")
1717 #print("EEJ",dataOut.heightList[hei])
1718 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1719 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1720 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1721 else:
1722 #print("CEEJ",dataOut.heightList[hei])
1723 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1724
1725 elif mode == 10: #150km
1726 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Simple_fit_bound(spc,x,dataOut.nIncohInt)
1727 snr = (dataOut.power[0,hei]*factor - dataOut.Oblique_params[0,3,hei])/dataOut.Oblique_params[0,3,hei]
1728 dataOut.snr_log10[0,hei] = numpy.log10(snr)
1729
1730 else:
1731 spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first')
1732
1733 spc_diff = spc - spc_fit
1734 spc_diff[spc_diff < 0] = 0
1735
1736 spc_fit_diff, A2, B2, C2, D2 = self.Gauss_fit_2(spc_diff,x,'second')
1737
1738 D = (D1+D2)
1739
1740 if mode == 0: #Double Fit
1741 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,0,hei],dataOut.Oblique_param_errors[0,1,hei],dataOut.Oblique_param_errors[0,2,hei],dataOut.Oblique_param_errors[0,3,hei],dataOut.Oblique_param_errors[0,4,hei],dataOut.Oblique_param_errors[0,5,hei],dataOut.Oblique_param_errors[0,6,hei] = self.Double_Gauss_fit_2(spc,x,A1,B1,C1,A2,B2,C2,D)
1742 #spc_double_fit,dataOut.Oblique_params = self.Double_Gauss_fit(spc,x,A1,B1,C1,A2,B2,C2,D)
1743
1744 elif mode == 1: #Double Fit Windowed
1745 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.windowing_double(spc,dataOut.getFreqRange(0),A1,B1,C1,A2,B2,C2,D)
1746
1747 elif mode == 2: #Double Fit Weight
1748 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1749
1750 elif mode == 3: #Simple Fit
1751 dataOut.Oblique_params[0,0,hei] = A1
1752 dataOut.Oblique_params[0,1,hei] = B1
1753 dataOut.Oblique_params[0,2,hei] = C1
1754 dataOut.Oblique_params[0,3,hei] = A2
1755 dataOut.Oblique_params[0,4,hei] = B2
1756 dataOut.Oblique_params[0,5,hei] = C2
1757 dataOut.Oblique_params[0,6,hei] = D
1758
1759 elif mode == 5: #Triple Fit Weight
1760 if hei in l1:
1761 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.duo_Marco(spc,x,A1,B1,C1,A2,B2,C2,D)
1762 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1763 #print(dataOut.Oblique_params[0,0,hei])
1764 #print(dataOut.dplr_2_u[0,0,hei])
1765 else:
1766 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1767 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1768
1769
1770 except:
1771 ###dataOut.Oblique_params[0,:,hei] = dataOut.Oblique_params[0,:,hei]*numpy.NAN
1772 pass
1773
1774 #exit(1)
1775 dataOut.paramInterval = dataOut.nProfiles*dataOut.nCohInt*dataOut.ippSeconds
1776 dataOut.lat=-11.95
1777 dataOut.lon=-76.87
1778 '''
1779 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params<-700, numpy.nan, dop_t1)
1780 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params<+700, numpy.nan, dop_t1)
1781 The amplitudes should be excluded here
1782 '''
1783 if mode == 9: #Double Skew Gaussian
1784 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1785 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1786 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1787 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1788 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1789 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,6,:]
1790 if Dop == 'Shift':
1791 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1792 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1793 elif Dop == 'Max':
1794 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1795 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1796
1797 dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:] #Is this actually the error?
1798 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1799 dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,5,:] #Is this actually the error?
1800 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,6,:]
1801
1802 elif mode == 11: #Double Gaussian
1803 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:]
1804 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1805 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,4,:]
1806 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,5,:]
1807
1808 dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:]
1809 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1810 dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,4,:]
1811 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,5,:]
1812
1813 #print("Before: ", dataOut.Dop_EEJ_T2)
1814 dataOut.Spec_W_T1 = self.clean_outliers(dataOut.Spec_W_T1)
1815 dataOut.Spec_W_T2 = self.clean_outliers(dataOut.Spec_W_T2)
1816 dataOut.Dop_EEJ_T1 = self.clean_outliers(dataOut.Dop_EEJ_T1)
1817 dataOut.Dop_EEJ_T2 = self.clean_outliers(dataOut.Dop_EEJ_T2)
1818 #print("After: ", dataOut.Dop_EEJ_T2)
1819 dataOut.Err_Spec_W_T1 = self.clean_outliers(dataOut.Err_Spec_W_T1)
1820 dataOut.Err_Spec_W_T2 = self.clean_outliers(dataOut.Err_Spec_W_T2)
1821 dataOut.Err_Dop_EEJ_T1 = self.clean_outliers(dataOut.Err_Dop_EEJ_T1)
1822 dataOut.Err_Dop_EEJ_T2 = self.clean_outliers(dataOut.Err_Dop_EEJ_T2)
1823 #print("Before data_snr: ", dataOut.data_snr)
1824 #dataOut.data_snr = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.data_snr)
1825 dataOut.snl = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.snl)
1826
1827 #print("After data_snr: ", dataOut.data_snr)
1828 dataOut.mode = mode
1829 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.Dop_EEJ_T1)) #If all values are NaN, do not continue
1830 ###dataOut.flagNoData = False #Uncomment only for plotting; otherwise keep commented (for saving)
1831
1832 return dataOut
1833
1834 class Gaussian_Windowed(Operation):
1835 '''
1836 Written by R. Flores
1837 '''
1838 def __init__(self):
1839 Operation.__init__(self)
1840
1841 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1842 from scipy.optimize import curve_fit,fmin
1843
1844 def gaussian(x, a, b, c, d):
1845 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
1846 return val
1847
1848 def R_gaussian(x, a, b, c):
1849 N = int(numpy.shape(x)[0])
1850 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1851 return val
1852
1853 def T(x,N):
1854 T = 1-abs(x)/N
1855 return T
1856
1857 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
1858
1859 N = int(numpy.shape(x)[0])
1860
1861 x_max = x[-1]
1862
1863 x_pos = x[nFFTPoints:]
1864 x_neg = x[:nFFTPoints]
1865 #print([int(nFFTPoints/2))
1866 #print("x: ", x)
1867 #print("x_neg: ", x_neg)
1868 #print("x_pos: ", x_pos)
1869
1870
1871 R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0])
1872 R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1])
1873 #print(T(x_pos,x[-1]),x_pos,x[-1])
1874 #print(R_T_neg_1.shape,R_T_pos_1.shape)
1875 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1876 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1877 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1878 max_val_1 = numpy.max(R_T_spc_1)
1879 R_T_spc_1 = R_T_spc_1*a/max_val_1
1880
1881 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1882 R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0])
1883 R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1])
1884 R_T_d_sum = R_T_d_pos + R_T_d_neg
1885 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1886 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1887
1888 R_T_final = R_T_spc_1 + R_T_spc_3
1889
1890 return R_T_final
1891
1892 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1893
1894 from scipy.stats import norm
1895 mean,std=norm.fit(spc)
1896
1897 # estimate starting values from the data
1898 a = A
1899 b = B
1900 c = C#numpy.std(spc)
1901 d = D
1902 #'''
1903 #ippSeconds = 250*20*1.e-6/3
1904
1905 #x_t = ippSeconds * (numpy.arange(nFFTPoints) - nFFTPoints / 2.)
1906
1907 #x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1908 #print("x_t: ", x_t)
1909 #print("nFFTPoints: ", nFFTPoints)
1910 x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints))
1911 #print("x_vel: ", x_vel)
1912 #x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1913 #x_freq = numpy.fft.fftshift(x_freq)
1914 #'''
1915 # define a least squares function to optimize
1916 def minfunc(params):
1917 #print("y.shape: ", numpy.shape(y))
1918 return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
1919
1920 # fit
1921
1922 popt_full = fmin(minfunc,[a,b,c,d], disp=False)
1923 #print("nIter", popt_full[2])
1924 popt = popt_full#[0]
1925
1926 fun = gaussian(x, popt[0], popt[1], popt[2], popt[3])
1927
1928 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1929 return fun, popt[0], popt[1], popt[2], popt[3]
1930
1931 def run(self, dataOut):
1932
1933 from scipy.signal import medfilt
1934 import matplotlib.pyplot as plt
1935 dataOut.moments = numpy.ones((dataOut.nChannels,4,dataOut.nHeights))*numpy.NAN
1936 dataOut.VelRange = dataOut.getVelRange(0)
1937 for nChannel in range(dataOut.nChannels):
1938 for hei in range(dataOut.heightList.shape[0]):
1939 #print("ipp: ", dataOut.ippSeconds)
1940 spc = numpy.copy(dataOut.data_spc[nChannel,:,hei])
1941
1942 #print(VelRange)
1943 #print(dataOut.getFreqRange(64))
1944 spcm = medfilt(spc,11)
1945 spc_max = numpy.max(spcm)
1946 dop1_x0 = dataOut.VelRange[numpy.argmax(spcm)]
1947 D = numpy.min(spcm)
1948
1949 fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints)
1950 dataOut.moments[nChannel,0,hei] = A
1951 dataOut.moments[nChannel,1,hei] = B
1952 dataOut.moments[nChannel,2,hei] = C
1953 dataOut.moments[nChannel,3,hei] = D
1954 '''
1955 plt.figure()
1956 plt.plot(VelRange,spc,marker='*',linestyle='')
1957 plt.plot(VelRange,fun)
1958 plt.title(dataOut.heightList[hei])
1959 plt.show()
1960 '''
1961
1962 return dataOut
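    # Note: for every channel and height the loop above stores the four parameters of a
    # single windowed-Gaussian fit in dataOut.moments[channel, :, height] as
    # [A, B, C, D] = amplitude, Doppler (in m/s, from VelRange), spectral width and noise
    # floor; the median-filtered spectrum is only used to pick the starting values.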
1963
1964 class PrecipitationProc(Operation):
1965
1966 '''
1967 Operation that estimates the reflectivity factor (Z) and the rainfall rate (R)
1968
1969 Input:
1970 self.dataOut.data_pre : SelfSpectra
1971
1972 Output:
1973
1974 self.dataOut.data_output : Reflectivity factor, rainfall Rate
1975
1976
1977 Parameters affected:
1978 '''
1979
1980 def __init__(self):
1981 Operation.__init__(self)
1982 self.i=0
1983
1984 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
1985 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30):
1986
1987 # print ('Entering PrecepitationProc ... ')
1988
1989 if radar == "MIRA35C" :
1990
1991 self.spc = dataOut.data_pre[0].copy()
1992 self.Num_Hei = self.spc.shape[2]
1993 self.Num_Bin = self.spc.shape[1]
1994 self.Num_Chn = self.spc.shape[0]
1995 Ze = self.dBZeMODE2(dataOut)
1996
1997 else:
1998
1999 self.spc = dataOut.data_pre[0].copy()
2000
2001 #NOTE: the Tx pulse range must be removed
2002 self.spc[:,:,0:7]= numpy.NaN
2003
2004 self.Num_Hei = self.spc.shape[2]
2005 self.Num_Bin = self.spc.shape[1]
2006 self.Num_Chn = self.spc.shape[0]
2007
2008 VelRange = dataOut.spc_range[2]
2009
2010 ''' Obtain the radar constant '''
2011
2012 self.Pt = Pt
2013 self.Gt = Gt
2014 self.Gr = Gr
2015 self.Lambda = Lambda
2016 self.aL = aL
2017 self.tauW = tauW
2018 self.ThetaT = ThetaT
2019 self.ThetaR = ThetaR
2020 self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
2021 self.lt = 10**(1.67/10) # Tx cable loss 1.67 dB
2022 self.lr = 10**(5.73/10) # Rx cable loss 5.73 dB
2023
2024 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
2025 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
2026 RadarConstant = 10e-26 * Numerator / Denominator #
2027 ExpConstant = 10**(40/10) #Experimental constant
2028
2029 SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
2030 for i in range(self.Num_Chn):
2031 SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
2032 SignalPower[numpy.where(SignalPower < 0)] = 1e-20
2033
2034 SPCmean = numpy.mean(SignalPower, 0)
2035 Pr = SPCmean[:,:]/dataOut.normFactor
2036
2037 # Declaring auxiliary variables
2038 Range = dataOut.heightList*1000. #Range in m
2039 # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
2040 rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
2041 zMtrx = rMtrx+Altitude
2042 # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
2043 VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))
2044
2045 # height dependence to air density Foote and Du Toit (1969)
2046 delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
2047 VMtrx = VelMtrx / delv_z #Normalized velocity
2048 VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
2049 # Diameter is related to the fall speed of falling drops
2050 D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
2051 # Only valid for D>= 0.16 mm
2052 D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN
2053
2054 #Calculate Radar Reflectivity ETAn
2055 ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
2056 ETAd = ETAn * 6.18 * numpy.exp( -0.6 * D_Vz ) * delv_z
2057 # Radar Cross Section
2058 sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
2059 # Drop Size Distribution
2060 DSD = ETAn / sigmaD
2061 # Equivalent reflectivity
2062 Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
2063 Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
2064 # RainFall Rate
2065 RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr
2066
2067 # Censoring the data
2068 # Removing data with SNR below SNRth; the SNR should be considered per channel
2069 SNRth = 10**(SNRdBlimit/10) #-30dB
2070 novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition. Maybe OR condition better
2071 W = numpy.nanmean(dataOut.data_dop,0)
2072 W[novalid] = numpy.NaN
2073 Ze_org[novalid] = numpy.NaN
2074 RR[novalid] = numpy.NaN
2075
2076 dataOut.data_output = RR[8]
2077 dataOut.data_param = numpy.ones([3,self.Num_Hei])
2078 dataOut.channelList = [0,1,2]
2079
2080 dataOut.data_param[0]=10*numpy.log10(Ze_org)
2081 dataOut.data_param[1]=-W
2082 dataOut.data_param[2]=RR
2083
2084 # print ('Leaving PrecepitationProc ... ')
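        # Output layout: data_param rows are [dBZe, -W, RR], i.e. the equivalent reflectivity
        # in dBZ, the negated mean Doppler velocity (presumably so that fall speeds come out
        # positive), and the rainfall rate in mm/h; data_output keeps the rainfall rate at
        # range gate 8.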
2085 return dataOut
2086
2087 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
2088
2089 NPW = dataOut.NPW
2090 COFA = dataOut.COFA
2091
2092 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
2093 RadarConst = dataOut.RadarConst
2094 #frequency = 34.85*10**9
2095
2096 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
2097 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
2098
2099 ETA = numpy.sum(SNR,1)
2100
2101 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
2102
2103 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
2104
2105 for r in range(self.Num_Hei):
2106
2107 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
2108 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
2109
2110 return Ze
2111
2112 # def GetRadarConstant(self):
2113 #
2114 # """
2115 # Constants:
2116 #
2117 # Pt: Transmission Power dB 5kW 5000
2118 # Gt: Transmission Gain dB 24.7 dB 295.1209
2119 # Gr: Reception Gain dB 18.5 dB 70.7945
2120 # Lambda: Wavelength m 0.6741 m 0.6741
2121 # aL: Attenuation losses dB 4dB 2.5118
2122 # tauW: Width of transmission pulse s 4us 4e-6
2123 # ThetaT: Transmission antenna beam angle rad 0.1656317 rad 0.1656317
2124 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
2125 #
2126 # """
2127 #
2128 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
2129 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
2130 # RadarConstant = Numerator / Denominator
2131 #
2132 # return RadarConstant
2133
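# Illustrative sketch (comment only): the radar-constant expression used in run() above,
# written as a stand-alone helper with the default parameter values of run().
# SPEED_OF_LIGHT is assumed to be the module-level constant already used there.
#
# def radar_constant(Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
#                    tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087):
#     numerator = (4 * numpy.pi)**3 * aL**2 * 16 * numpy.log(2)
#     denominator = (Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW
#                    * numpy.pi * ThetaT * ThetaR)
#     return 10e-26 * numerator / denominator   # same 10e-26 scaling as in run()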
2134
2135 class FullSpectralAnalysis(Operation):
2136
2137 """
2138 Function that implements Full Spectral Analysis technique.
2139
2140 Input:
2141 self.dataOut.data_pre : SelfSpectra and CrossSpectra data
2142 self.dataOut.groupList : Pairlist of channels
2143 self.dataOut.ChanDist : Physical distance between receivers
2144
2145
2146 Output:
2147
2148 self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind
2149
2150
2151 Parameters affected: Winds, height range, SNR
2152
2153 """
2154 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,
2155 minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
2156
2157 spc = dataOut.data_pre[0].copy()
2158 cspc = dataOut.data_pre[1]
2159 nHeights = spc.shape[2]
2160
2161 # first_height = 0.75 #km (ref: data header 20170822)
2162 # resolution_height = 0.075 #km
2163 '''
2164 finding height range. check this when radar parameters are changed!
2165 '''
2166 if maxheight is not None:
2167 # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
2168 range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
2169 else:
2170 range_max = nHeights
2171 if minheight is not None:
2172 # range_min = int((minheight - first_height) / resolution_height) # theoretical
2173 range_min = int(13.26 * minheight - 5) # empirical, works better
2174 if range_min < 0:
2175 range_min = 0
2176 else:
2177 range_min = 0
2178
2179 pairsList = dataOut.groupList
2180 if dataOut.ChanDist is not None :
2181 ChanDist = dataOut.ChanDist
2182 else:
2183 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
2184
2185 # 4 variables: zonal, meridional, vertical, and average SNR
2186 data_param = numpy.zeros([4,nHeights]) * numpy.NaN
2187 velocityX = numpy.zeros([nHeights]) * numpy.NaN
2188 velocityY = numpy.zeros([nHeights]) * numpy.NaN
2189 velocityZ = numpy.zeros([nHeights]) * numpy.NaN
2190
2191 dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))
2192
2193 '''***********************************************WIND ESTIMATION**************************************'''
2194 for Height in range(nHeights):
2195
2196 if Height >= range_min and Height < range_max:
2197 # error_code will be useful in future analysis
2198 [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
2199 ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)
2200
2201 if abs(Vzon) < 100. and abs(Vmer) < 100.:
2202 velocityX[Height] = Vzon
2203 velocityY[Height] = -Vmer
2204 velocityZ[Height] = Vver
2205
2206 # Censoring data with SNR threshold
2207 dbSNR [dbSNR < SNRdBlimit] = numpy.NaN
2208
2209 data_param[0] = velocityX
2210 data_param[1] = velocityY
2211 data_param[2] = velocityZ
2212 data_param[3] = dbSNR
2213 dataOut.data_param = data_param
2214 return dataOut
2215
2216 def moving_average(self,x, N=2):
2217 """ convolution for smoothenig data. note that last N-1 values are convolution with zeroes """
2218 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
2219
2220 def gaus(self,xSamples,Amp,Mu,Sigma):
2221 return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)
2222
2223 def Moments(self, ySamples, xSamples):
2224 Power = numpy.nanmean(ySamples) # Power, 0th Moment
2225 yNorm = ySamples / numpy.nansum(ySamples)
2226 RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
2227 Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
2228 StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Standard deviation, spectral width
2229 return numpy.array([Power,RadVel,StdDev])
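    # Quick illustrative check (comment only): for a noise-free Gaussian spectrum the first
    # moment recovers the center and the second moment the width, e.g.
    #
    #   x = numpy.linspace(-10, 10, 201)
    #   y = numpy.exp(-0.5 * ((x - 2.0) / 1.5)**2)
    #   power, radvel, stddev = FullSpectralAnalysis().Moments(y, x)
    #   # radvel is close to 2.0 and stddev close to 1.5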
2230
2231 def StopWindEstimation(self, error_code):
2232 Vzon = numpy.NaN
2233 Vmer = numpy.NaN
2234 Vver = numpy.NaN
2235 return Vzon, Vmer, Vver, error_code
2236
2237 def AntiAliasing(self, interval, maxstep):
2238 """
2239 function to prevent errors from aliased values when computing phaseslope
2240 """
2241 antialiased = numpy.zeros(len(interval))
2242 copyinterval = interval.copy()
2243
2244 antialiased[0] = copyinterval[0]
2245
2246 for i in range(1,len(antialiased)):
2247 step = interval[i] - interval[i-1]
2248 if step > maxstep:
2249 copyinterval -= 2*numpy.pi
2250 antialiased[i] = copyinterval[i]
2251 elif step < maxstep*(-1):
2252 copyinterval += 2*numpy.pi
2253 antialiased[i] = copyinterval[i]
2254 else:
2255 antialiased[i] = copyinterval[i].copy()
2256
2257 return antialiased
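    # Illustrative example (comment only): a phase series that jumps by about 2*pi between
    # consecutive samples is brought back onto a continuous branch, e.g.
    #
    #   phase = numpy.array([3.0, 3.1, -3.1, -3.0])      # wrapped near +/- pi
    #   FullSpectralAnalysis().AntiAliasing(phase, 4.5)
    #   # -> approximately [3.0, 3.1, 3.18, 3.28], i.e. the 2*pi jump is removed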
2258
2259 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
2260 """
2261 Function that Calculates Zonal, Meridional and Vertical wind velocities.
2262 Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.
2263
2264 Input:
2265 spc, cspc : self spectra and cross spectra data. In Briggs notation something like S_i*(S_i)_conj, (S_j)_conj respectively.
2266 pairsList : Pairlist of channels
2267 ChanDist : array of xi_ij and eta_ij
2268 Height : height at which data is processed
2269 noise : noise in [channels] format for specific height
2270 AbbsisaRange : range of the frequencies or velocities
2271 dbSNR, SNRlimit : signal to noise ratio in db, lower limit
2272
2273 Output:
2274 Vzon, Vmer, Vver : wind velocities
2275 error_code : int that states where code is terminated
2276
2277 0 : no error detected
2278 1 : Gaussian of mean spc exceeds widthlimit
2279 2 : no Gaussian of mean spc found
2280 3 : SNR too low or velocity too high -> e.g. precipitation
2281 4 : at least one Gaussian of cspc exceeds widthlimit
2282 5 : zero out of three cspc Gaussian fits converged
2283 6 : phase slope fit could not be found
2284 7 : arrays used to fit phase have different length
2285 8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
2286
2287 """
2288
2289 error_code = 0
2290
2291 nChan = spc.shape[0]
2292 nProf = spc.shape[1]
2293 nPair = cspc.shape[0]
2294
2295 SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
2296 CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
2297 phase = numpy.zeros([nPair, nProf]) # phase between channels
2298 PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
2299 PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
2300 xFrec = AbbsisaRange[0][:-1] # frequency range
2301 xVel = AbbsisaRange[2][:-1] # velocity range
2302 xSamples = xFrec # the frequency range is taken
2303 delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x
2304
2305 # only consider velocities with in NegativeLimit and PositiveLimit
2306 if (NegativeLimit is None):
2307 NegativeLimit = numpy.min(xVel)
2308 if (PositiveLimit is None):
2309 PositiveLimit = numpy.max(xVel)
2310 xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
2311 xSamples_zoom = xSamples[xvalid]
2312
2313 '''Getting Eij and Nij'''
2314 Xi01, Xi02, Xi12 = ChanDist[:,0]
2315 Eta01, Eta02, Eta12 = ChanDist[:,1]
2316
2317 # spwd limit - updated by D. Scipión 30.03.2021
2318 widthlimit = 10
2319 '''************************* SPC is normalized ********************************'''
2320 spc_norm = spc.copy()
2321 # For each channel
2322 for i in range(nChan):
2323 spc_sub = spc_norm[i,:] - noise[i] # only the signal power
2324 SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)
2325
2326 '''********************** FITTING MEAN SPC GAUSSIAN **********************'''
2327
2328 """ the gaussian of the mean: first subtract noise, then normalize. this is legal because
2329 you only fit the curve and don't need the absolute value of height for calculation,
2330 only for estimation of width. for normalization of cross spectra, you need initial,
2331 unnormalized self-spectra with noise.
2332
2333 Technically, you don't even need to normalize the self-spectra, as you only need the
2334 width of the peak. However, it was left this way. Note that the normalization has a flaw:
2335 due to subtraction of the noise, some values are below zero. Raw "spc" values should be
2336 >= 0, as it is the modulus squared of the signals (complex * its conjugate)
2337 """
2338 # initial conditions
2339 popt = [1e-10,0,1e-10]
2340 # Spectra average
2341 SPCMean = numpy.average(SPC_Samples,0)
2342 # Moments in frequency
2343 SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)
2344
2345 # Gauss Fit SPC in frequency domain
2346 if dbSNR > SNRlimit: # only if SNR > SNRth
2347 try:
2348 popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
2349 if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
2350 return self.StopWindEstimation(error_code = 1)
2351 FitGauss = self.gaus(xSamples_zoom,*popt)
2352 except :#RuntimeError:
2353 return self.StopWindEstimation(error_code = 2)
2354 else:
2355 return self.StopWindEstimation(error_code = 3)
2356
2357 '''***************************** CSPC Normalization *************************
2358 The Spc spectra are used to normalize the crossspectra. Peaks from precipitation
2359 influence the norm which is not desired. First, a range is identified where the
2360 wind peak is estimated -> sum_wind is sum of those frequencies. Next, the area
2361 around it gets cut off and values replaced by mean determined by the boundary
2362 data -> sum_noise (spc is not normalized here, that's why the noise is important)
2363
2364 The sums are then added and multiplied by range/datapoints, because you need
2365 an integral and not a sum for normalization.
2366
2367 A norm is found according to Briggs 92.
2368 '''
2369 # for each pair
2370 for i in range(nPair):
2371 cspc_norm = cspc[i,:].copy()
2372 chan_index0 = pairsList[i][0]
2373 chan_index1 = pairsList[i][1]
2374 CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
2375 phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
2376
2377 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
2378 self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
2379 self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])
2380
2381 popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
2382 FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))
2383
2384 '''*******************************FIT GAUSS CSPC************************************'''
2385 try:
2386 popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
2387 if popt01[2] > widthlimit: # CONDITION
2388 return self.StopWindEstimation(error_code = 4)
2389 popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
2390 if popt02[2] > widthlimit: # CONDITION
2391 return self.StopWindEstimation(error_code = 4)
2392 popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
2393 if popt12[2] > widthlimit: # CONDITION
2394 return self.StopWindEstimation(error_code = 4)
2395
2396 FitGauss01 = self.gaus(xSamples_zoom, *popt01)
2397 FitGauss02 = self.gaus(xSamples_zoom, *popt02)
2398 FitGauss12 = self.gaus(xSamples_zoom, *popt12)
2399 except:
2400 return self.StopWindEstimation(error_code = 5)
2401
2402
2403 '''************* Getting Fij ***************'''
2404 # x-axis point of the gaussian where the center is located from GaussFit of spectra
2405 GaussCenter = popt[1]
2406 ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
2407 PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]
2408
2409 # Point where e^-1 is located in the gaussian
2410 PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
2411 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to "Peminus1" in "FitGauss"
2412 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
2413 Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])
2414
2415 '''********** Taking frequency ranges from mean SPCs **********'''
2416 GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
2417 Range = numpy.empty(2)
2418 Range[0] = GaussCenter - GauWidth
2419 Range[1] = GaussCenter + GauWidth
2420 # Point in x-axis where the bandwidth is located (min:max)
2421 ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
2422 ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
2423 PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
2424 PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
2425 Range = numpy.array([ PointRangeMin, PointRangeMax ])
2426 FrecRange = xSamples_zoom[ Range[0] : Range[1] ]
2427
2428 '''************************** Getting Phase Slope ***************************'''
2429 for i in range(nPair):
2430 if len(FrecRange) > 5:
2431 PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
2432 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
2433 if len(FrecRange) == len(PhaseRange):
2434 try:
2435 slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
2436 PhaseSlope[i] = slope
2437 PhaseInter[i] = intercept
2438 except:
2439 return self.StopWindEstimation(error_code = 6)
2440 else:
2441 return self.StopWindEstimation(error_code = 7)
2442 else:
2443 return self.StopWindEstimation(error_code = 8)
2444
2445 '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
2446
2447 '''Getting constant C'''
2448 cC=(Fij*numpy.pi)**2
2449
2450 '''****** Getting constants F and G ******'''
2451 MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
2452 # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
2453 # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
2454 MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
2455 MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
2456 # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
2457 MijResults = numpy.array([MijResult1, MijResult2])
2458 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
2459
2460 '''****** Getting constants A, B and H ******'''
2461 W01 = numpy.nanmax( FitGauss01 )
2462 W02 = numpy.nanmax( FitGauss02 )
2463 W12 = numpy.nanmax( FitGauss12 )
2464
2465 WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
2466 WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
2467 WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
2468 WijResults = numpy.array([WijResult01, WijResult02, WijResult12])
2469
2470 WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
2471 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
2472
2473 VxVy = numpy.array([[cA,cH],[cH,cB]])
2474 VxVyResults = numpy.array([-cF,-cG])
2475 (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
2476 Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
2477 error_code = 0
2478
2479 return Vzon, Vmer, Vver, error_code
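        # Worked toy check of the two linear solves above (comment only): with antenna
        # separations Xi02 = Eta12 = 1 and Xi12 = Eta02 = 0 the first system is the identity,
        # so (cF, cG) equals MijResults directly; and with cA = cB = 1, cH = 0 the wind solve
        # reduces to Vmer = -cF, Vzon = -cG.
        #
        #   numpy.linalg.solve(numpy.eye(2), numpy.array([2.0, 3.0]))    # -> [2., 3.]
        #   numpy.linalg.solve(numpy.eye(2), numpy.array([-2.0, -3.0]))  # -> [-2., -3.]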
2480
2481 class SpectralMoments(Operation):
2482
2483 '''
2484 Function SpectralMoments()
2485
2486 Calculates moments (power, mean, standard deviation) and SNR of the signal
2487
2488 Type of dataIn: Spectra
2489
2490 Configuration Parameters:
2491
2492 dirCosx : Cosine director in X axis
2493 dirCosy : Cosine director in Y axis
2494
2495 elevation :
2496 azimuth :
2497
2498 Input:
2499 channelList : simple channel list to select e.g. [2,3,7]
2500 self.dataOut.data_pre : Spectral data
2501 self.dataOut.abscissaList : List of frequencies
2502 self.dataOut.noise : Noise level per channel
2503
2504 Affected:
2505 self.dataOut.moments : Parameters per channel
2506 self.dataOut.data_snr : SNR per channel
2507
2508 '''
2509
2510 def run(self, dataOut):
2511
2512 data = dataOut.data_pre[0]
2513 absc = dataOut.abscissaList[:-1]
2514 noise = dataOut.noise
2515 nChannel = data.shape[0]
2516 data_param = numpy.zeros((nChannel, 4, data.shape[2]))
2517
2518 for ind in range(nChannel):
2519 data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind] )
2520
2521 dataOut.moments = data_param[:,1:,:]
2522 dataOut.data_snr = data_param[:,0]
2523 dataOut.data_pow = data_param[:,1]
2524 dataOut.data_dop = data_param[:,2]
2525 dataOut.data_width = data_param[:,3]
2526
2527 return dataOut
2528
2529 def __calculateMoments(self, oldspec, oldfreq, n0,
2530 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
2531
2532 if (nicoh is None): nicoh = 1
2533 if (graph is None): graph = 0
2534 if (smooth is None): smooth = 0
2535 elif (smooth < 3): smooth = 0
2536
2537 if (type1 is None): type1 = 0
2538 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
2539 if (snrth is None): snrth = -3
2540 if (dc is None): dc = 0
2541 if (aliasing is None): aliasing = 0
2542 if (oldfd is None): oldfd = 0
2543 if (wwauto is None): wwauto = 0
2544
2545 if (n0 < 1.e-20): n0 = 1.e-20
2546
2547 freq = oldfreq
2548 vec_power = numpy.zeros(oldspec.shape[1])
2549 vec_fd = numpy.zeros(oldspec.shape[1])
2550 vec_w = numpy.zeros(oldspec.shape[1])
2551 vec_snr = numpy.zeros(oldspec.shape[1])
2552
2553 # oldspec = numpy.ma.masked_invalid(oldspec)
2554
2555 for ind in range(oldspec.shape[1]):
2556
2557 spec = oldspec[:,ind]
2558 aux = spec*fwindow
2559 max_spec = aux.max()
2560 m = aux.tolist().index(max_spec)
2561
2562 # Smooth
2563 if (smooth == 0):
2564 spec2 = spec
2565 else:
2566 spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
2567
2568 # Moments Estimation
2569 bb = spec2[numpy.arange(m,spec2.size)]
2570 bb = (bb<n0).nonzero()
2571 bb = bb[0]
2572
2573 ss = spec2[numpy.arange(0,m + 1)]
2574 ss = (ss<n0).nonzero()
2575 ss = ss[0]
2576
2577 if (bb.size == 0):
2578 bb0 = spec.size - 1 - m
2579 else:
2580 bb0 = bb[0] - 1
2581 if (bb0 < 0):
2582 bb0 = 0
2583
2584 if (ss.size == 0):
2585 ss1 = 1
2586 else:
2587 ss1 = max(ss) + 1
2588
2589 if (ss1 > m):
2590 ss1 = m
2591
2592 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
2593
2594 signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipión added with correct definition
2595 total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipión added with correct definition
2596 power = ((spec2[valid] - n0) * fwindow[valid]).sum()
2597 fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
2598 w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
2599 snr = (spec2.mean()-n0)/n0
2600 if (snr < 1.e-20) :
2601 snr = 1.e-20
2602
2603 # vec_power[ind] = power #D. Scipión replaced with the line below
2604 vec_power[ind] = total_power
2605 vec_fd[ind] = fd
2606 vec_w[ind] = w
2607 vec_snr[ind] = snr
2608
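        # The rows returned below are [snr, power, fd, w]; note the SNR definition
        # snr = (mean(spec2) - n0) / n0, so e.g. a flat spectrum equal to 2*n0 gives snr = 1 (0 dB).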
2609 return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
2610
2611
2612
2613 class SALags(Operation):
2614 '''
2615 Function SALags()
2616
2617 Input:
2618 self.dataOut.data_pre
2619 self.dataOut.abscissaList
2620 self.dataOut.noise
2621 self.dataOut.normFactor
2622 self.dataOut.data_snr
2623 self.dataOut.groupList
2624 self.dataOut.nChannels
2625
2626 Affected:
2627 self.dataOut.data_param
2628
2629 '''
2630 def run(self, dataOut):
2631 data_acf = dataOut.data_pre[0]
2632 data_ccf = dataOut.data_pre[1]
2633 normFactor_acf = dataOut.normFactor[0]
2634 normFactor_ccf = dataOut.normFactor[1]
2635 pairs_acf = dataOut.groupList[0]
2636 pairs_ccf = dataOut.groupList[1]
2637
2638 nHeights = dataOut.nHeights
2639 absc = dataOut.abscissaList
2640 noise = dataOut.noise
2641 SNR = dataOut.data_snr
2642 nChannels = dataOut.nChannels
2643 # pairsList = dataOut.groupList
2644 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)
2645
2646 for l in range(len(pairs_acf)):
2647 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
2648
2649 for l in range(len(pairs_ccf)):
2650 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
2651
2652 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
2653 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
2654 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
2655 return
2656
2657 # def __getPairsAutoCorr(self, pairsList, nChannels):
2658 #
2659 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
2660 #
2661 # for l in range(len(pairsList)):
2662 # firstChannel = pairsList[l][0]
2663 # secondChannel = pairsList[l][1]
2664 #
2665 # #Obtaining autocorrelation pairs
2666 # if firstChannel == secondChannel:
2667 # pairsAutoCorr[firstChannel] = int(l)
2668 #
2669 # pairsAutoCorr = pairsAutoCorr.astype(int)
2670 #
2671 # pairsCrossCorr = range(len(pairsList))
2672 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
2673 #
2674 # return pairsAutoCorr, pairsCrossCorr
2675
2676 def __calculateTaus(self, data_acf, data_ccf, lagRange):
2677
2678 lag0 = data_acf.shape[1]//2 # integer index of the zero lag (float division would break the indexing below)
2679 #Autocorrelation function
2680 mean_acf = stats.nanmean(data_acf, axis = 0)
2681
2682 #Obtain the TauCross index
2683 ind_ccf = data_ccf.argmax(axis = 1)
2684 #Obtain the TauAuto index
2685 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
2686 ccf_lag0 = data_ccf[:,lag0,:]
2687
2688 for i in range(ccf_lag0.shape[0]):
2689 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
2690
2691 #Obtain TauCross and TauAuto
2692 tau_ccf = lagRange[ind_ccf]
2693 tau_acf = lagRange[ind_acf]
2694
2695 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
2696
2697 tau_ccf[Nan1,Nan2] = numpy.nan
2698 tau_acf[Nan1,Nan2] = numpy.nan
2699 tau = numpy.vstack((tau_ccf,tau_acf))
2700
2701 return tau
2702
2703 def __calculateLag1Phase(self, data, lagTRange):
2704 data1 = stats.nanmean(data, axis = 0)
2705 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
2706
2707 phase = numpy.angle(data1[lag1,:])
2708
2709 return phase
2710
2711 def fit_func( x, a0, a1, a2): #, a3, a4, a5):
2712 z = (x - a1) / a2
2713 y = a0 * numpy.exp(-z**2 / a2) #+ a3 + a4 * x + a5 * x**2
2714 return y
2715
2716
2717 class SpectralFitting(Operation):
2718 '''
2719 Function SpectralFitting()
2720
2721 Input:
2722 Output:
2723 Variables modified:
2724 '''
2725 def __calculateMoments(self,oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
2726
2727 if (nicoh is None): nicoh = 1
2728 if (graph is None): graph = 0
2729 if (smooth is None): smooth = 0
2730 elif (smooth < 3): smooth = 0
2731
2732 if (type1 is None): type1 = 0
2733 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
2734 if (snrth is None): snrth = -3
2735 if (dc is None): dc = 0
2736 if (aliasing is None): aliasing = 0
2737 if (oldfd is None): oldfd = 0
2738 if (wwauto is None): wwauto = 0
2739
2740 if (n0 < 1.e-20): n0 = 1.e-20
2741
2742 freq = oldfreq
2743 vec_power = numpy.zeros(oldspec.shape[1])
2744 vec_fd = numpy.zeros(oldspec.shape[1])
2745 vec_w = numpy.zeros(oldspec.shape[1])
2746 vec_snr = numpy.zeros(oldspec.shape[1])
2747
2748 oldspec = numpy.ma.masked_invalid(oldspec)
2749
2750 for ind in range(oldspec.shape[1]):
2751
2752 spec = oldspec[:,ind]
2753 aux = spec*fwindow
2754 max_spec = aux.max()
2755 m = list(aux).index(max_spec)
2756
2757 #Smooth
2758 if (smooth == 0): spec2 = spec
2759 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
2760
2761 # Moments calculation
2762 bb = spec2[list(range(m,spec2.size))]
2763 bb = (bb<n0).nonzero()
2764 bb = bb[0]
2765
2766 ss = spec2[list(range(0,m + 1))]
2767 ss = (ss<n0).nonzero()
2768 ss = ss[0]
2769
2770 if (bb.size == 0):
2771 bb0 = spec.size - 1 - m
2772 else:
2773 bb0 = bb[0] - 1
2774 if (bb0 < 0):
2775 bb0 = 0
2776
2777 if (ss.size == 0): ss1 = 1
2778 else: ss1 = max(ss) + 1
2779
2780 if (ss1 > m): ss1 = m
2781
2782 valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1
2783 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
2784 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
2785 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
2786 snr = (spec2.mean()-n0)/n0
2787
2788 if (snr < 1.e-20) :
2789 snr = 1.e-20
2790
2791 vec_power[ind] = power
2792 vec_fd[ind] = fd
2793 vec_w[ind] = w
2794 vec_snr[ind] = snr
2795
2796 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
2797 return moments
2798
2799 #def __DiffCoherent(self,snrth, spectra, cspectra, nProf, heights,nChan, nHei, nPairs, channels, noise, crosspairs):
2800 def __DiffCoherent(self, spectra, cspectra, dataOut, noise, snrth, coh_th, hei_th):
2801
2802 #import matplotlib.pyplot as plt
2803 nProf = dataOut.nProfiles
2804 heights = dataOut.heightList
2805 nHei = len(heights)
2806 channels = dataOut.channelList
2807 nChan = len(channels)
2808 crosspairs = dataOut.groupList
2809 nPairs = len(crosspairs)
2810 #Separate incoherent from coherent spectra (SNR > 20 dB)
2811 snr_th = 10**(snrth/10.0)
2812 my_incoh_spectra = numpy.zeros([nChan, nProf,nHei], dtype='float')
2813 my_incoh_cspectra = numpy.zeros([nPairs,nProf, nHei], dtype='complex')
2814 my_incoh_aver = numpy.zeros([nChan, nHei])
2815 my_coh_aver = numpy.zeros([nChan, nHei])
2816
2817 coh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
2818 coh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
2819 coh_aver = numpy.zeros([nChan, nHei])
2820
2821 incoh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
2822 incoh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
2823 incoh_aver = numpy.zeros([nChan, nHei])
2824 power = numpy.sum(spectra, axis=1)
2825
2826 if coh_th is None: coh_th = numpy.array([0.75,0.65,0.15]) # 0.65
2827 if hei_th is None: hei_th = numpy.array([60,300,650])
2828 for ic in range(nPairs):
2829 pair = crosspairs[ic]
2830 #if the SNR is greater than the SNR threshold, the data are taken as coherent
2831 s_n0 = power[pair[0],:]/noise[pair[0]]
2832 s_n1 = power[pair[1],:]/noise[pair[1]]
2833 valid1 =(s_n0>=snr_th).nonzero()
2834 valid2 = (s_n1>=snr_th).nonzero()
2835
2836 valid1 = numpy.array(valid1[0])
2837 valid2 = numpy.array(valid2[0])
2838 valid = valid1
2839 for iv in range(len(valid2)):
2840
2841 indv = numpy.array((valid1 == valid2[iv]).nonzero())
2842 if len(indv[0]) == 0 :
2843 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
2844 if len(valid)>0:
2845 my_coh_aver[pair[0],valid]=1
2846 my_coh_aver[pair[1],valid]=1
2847 # If the coherence is above the coherence threshold, the data are taken as coherent
2848
2849 coh = numpy.squeeze(numpy.nansum(cspectra[ic,:,:], axis=0)/numpy.sqrt(numpy.nansum(spectra[pair[0],:,:], axis=0)*numpy.nansum(spectra[pair[1],:,:], axis=0)))
2850
2851 for ih in range(len(hei_th)):
2852 hvalid = (heights>hei_th[ih]).nonzero()
2853 hvalid = hvalid[0]
2854 if len(hvalid)>0:
2855 valid = (numpy.absolute(coh[hvalid])>coh_th[ih]).nonzero()
2856 valid = valid[0]
2857
2858 if len(valid)>0:
2859 my_coh_aver[pair[0],hvalid[valid]] =1
2860 my_coh_aver[pair[1],hvalid[valid]] =1
2861
2862 coh_echoes = (my_coh_aver[pair[0],:] == 1).nonzero()
2863 incoh_echoes = (my_coh_aver[pair[0],:] != 1).nonzero()
2864 incoh_echoes = incoh_echoes[0]
2865 if len(incoh_echoes) > 0:
2866 my_incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
2867 my_incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
2868 my_incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
2869 my_incoh_aver[pair[0],incoh_echoes] = 1
2870 my_incoh_aver[pair[1],incoh_echoes] = 1
2871
2872
2873 for ic in range(nPairs):
2874 pair = crosspairs[ic]
2875
2876 valid1 =(my_coh_aver[pair[0],:]==1 ).nonzero()
2877 valid2 = (my_coh_aver[pair[1],:]==1).nonzero()
2878 valid1 = numpy.array(valid1[0])
2879 valid2 = numpy.array(valid2[0])
2880 valid = valid1
2881
2882 for iv in range(len(valid2)):
2883
2884 indv = numpy.array((valid1 == valid2[iv]).nonzero())
2885 if len(indv[0]) == 0 :
2886 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
2887 valid1 =(my_coh_aver[pair[0],:] !=1 ).nonzero()
2888 valid2 = (my_coh_aver[pair[1],:] !=1).nonzero()
2889 valid1 = numpy.array(valid1[0])
2890 valid2 = numpy.array(valid2[0])
2891 incoh_echoes = valid1
2892
2893 for iv in range(len(valid2)):
2894
2895 indv = numpy.array((valid1 == valid2[iv]).nonzero())
2896 if len(indv[0]) == 0 :
2897 incoh_echoes = numpy.concatenate(( incoh_echoes,valid2[iv]), axis=None)
2898
2899 if len(valid)>0:
2900 coh_spectra[pair[0],:,valid] = spectra[pair[0],:,valid]
2901 coh_spectra[pair[1],:,valid] = spectra[pair[1],:,valid]
2902 coh_cspectra[ic,:,valid] = cspectra[ic,:,valid]
2903 coh_aver[pair[0],valid]=1
2904 coh_aver[pair[1],valid]=1
2905 if len(incoh_echoes)>0:
2906 incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
2907 incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
2908 incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
2909 incoh_aver[pair[0],incoh_echoes]=1
2910 incoh_aver[pair[1],incoh_echoes]=1
2911
2912 return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver
2913
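# Note: illustrative summary of the separation rule used by __DiffCoherent above.
# A height is flagged coherent when the per-channel SNR exceeds snr_th, or when the
# magnitude of the normalized cross-spectral coherence exceeds the height-dependent
# threshold coh_th[ih]; all remaining heights are treated as incoherent. A minimal
# sketch for one pair, with spc0, spc1 (auto-spectra) and cspc (cross-spectrum) as
# assumed names:
#
#   coh = numpy.nansum(cspc, axis=0) / numpy.sqrt(
#             numpy.nansum(spc0, axis=0) * numpy.nansum(spc1, axis=0))
#   coherent_heights = (numpy.abs(coh) > coh_th_value).nonzero()[0]   # coh_th_value is illustrative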
2914 def __CleanCoherent(self,snrth, spectra, cspectra, coh_aver,dataOut, noise,clean_coh_echoes,index):
2915
2916 #import matplotlib.pyplot as plt
2917 nProf = dataOut.nProfiles
2918 heights = dataOut.heightList
2919 nHei = len(heights)
2920 channels = dataOut.channelList
2921 nChan = len(channels)
2922 crosspairs = dataOut.groupList
2923 nPairs = len(crosspairs)
2924
2925 absc = dataOut.abscissaList[:-1]
2926 data_param = numpy.zeros((nChan, 4, spectra.shape[2]))
2927
2928 clean_coh_spectra = spectra.copy()
2929 clean_coh_cspectra = cspectra.copy()
2930 clean_coh_aver = coh_aver.copy()
2931
2932 spwd_th=[10,6] #spwd_th[0] --> For satellites ; spwd_th[1] --> For special events like SUN.
2933 coh_th = 0.75
2934
2935 rtime0 = [6,18] # period without ESF
2936 rtime1 = [10.5,13.5] # period with high coherence and high (expected) spectral width: SUN.
2937
2938 time = index*5./60 # based on 5-minute processing blocks
2939 if clean_coh_echoes == 1 :
2940 for ind in range(nChan):
2941 data_param[ind,:,:] = self.__calculateMoments( spectra[ind,:,:] , absc , noise[ind] )
2942
2943 spwd = data_param[:,3]
2944
2945 # SPECB_JULIA,header=anal_header,jspectra=spectra,vel=velocities,hei=heights, num_aver=1, mode_fit=0,smoothing=smoothing,jvelr=velr,jspwd=spwd,jsnr=snr,jnoise=noise,jstdvnoise=stdvnoise
2946 # to obtain spwd
2947 for ic in range(nPairs):
2948 pair = crosspairs[ic]
2949 coh = numpy.squeeze(numpy.sum(cspectra[ic,:,:], axis=1)/numpy.sqrt(numpy.sum(spectra[pair[0],:,:], axis=1)*numpy.sum(spectra[pair[1],:,:], axis=1)))
2950 for ih in range(nHei) :
2951 # Considering heights higher than 200km in order to avoid removing phenomena like EEJ.
2952 if heights[ih] >= 200 and coh_aver[pair[0],ih] == 1 and coh_aver[pair[1],ih] == 1 :
2953 # Checking coherence
2954 if (numpy.abs(coh[ih]) <= coh_th) or (time >= rtime0[0] and time <= rtime0[1]) :
2955 # Checking spectral widths
2956 if (spwd[pair[0],ih] > spwd_th[0]) or (spwd[pair[1],ih] > spwd_th[0]) :
2957 # satellite
2958 clean_coh_spectra[pair,ih,:] = 0.0
2959 clean_coh_cspectra[ic,ih,:] = 0.0
2960 clean_coh_aver[pair,ih] = 0
2961 else :
2962 if ((spwd[pair[0],ih] < spwd_th[1]) or (spwd[pair[1],ih] < spwd_th[1])) :
2963 # Special event like the sun.
2964 clean_coh_spectra[pair,ih,:] = 0.0
2965 clean_coh_cspectra[ic,ih,:] = 0.0
2966 clean_coh_aver[pair,ih] = 0
2967
2968 return clean_coh_spectra, clean_coh_cspectra, clean_coh_aver
2969
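# Note: __CleanCoherent above zeroes coherent echoes above 200 km when the coherence
# is low (or the time falls inside rtime0) and the spectral width is either above
# spwd_th[0] (satellite-like) or below spwd_th[1] (sun-like events); spwd_th and
# coh_th are the empirical thresholds defined in the method above.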
2970 isConfig = False
2971 __dataReady = False
2972 bloques = None
2973 bloque0 = None
2974
2975 def __init__(self):
2976 Operation.__init__(self)
2977 self.i=0
2978 self.isConfig = False
2979
2980 def setup(self,nChan,nProf,nHei,nBlocks):
2981 self.__dataReady = False
2982 self.bloques = numpy.zeros([2, nProf, nHei,nBlocks], dtype= complex)
2983 self.bloque0 = numpy.zeros([nChan, nProf, nHei, nBlocks])
2984
2985 def CleanRayleigh(self,dataOut,spectra,cspectra,save_drifts):
2986
2987 rfunc = cspectra.copy()
2988 n_funct = len(rfunc[0,:,0,0])
2989 val_spc = spectra*0.0
2990 val_cspc = cspectra*0.0
2991 in_sat_spectra = spectra.copy()
2992 in_sat_cspectra = cspectra.copy()
2993
2994 min_hei = 200
2995 nProf = dataOut.nProfiles
2996 heights = dataOut.heightList
2997 nHei = len(heights)
2998 channels = dataOut.channelList
2999 nChan = len(channels)
3000 crosspairs = dataOut.groupList
3001 nPairs = len(crosspairs)
3002 hval=(heights >= min_hei).nonzero()
3003 ih=hval[0]
3004 for ih in range(hval[0][0],nHei):
3005 for ifreq in range(nProf):
3006 for ii in range(n_funct):
3007
3008 func2clean = 10*numpy.log10(numpy.absolute(rfunc[:,ii,ifreq,ih]))
3009
3010 val = (numpy.isfinite(func2clean)==True).nonzero()
3011 if len(val)>0:
3012 min_val = numpy.around(numpy.amin(func2clean)-2) #> (-40)
3013 if min_val <= -40 : min_val = -40
3014 max_val = numpy.around(numpy.amax(func2clean)+2) #< 200
3015 if max_val >= 200 : max_val = 200
3016
3017 step = 1
3018 #Getting bins and the histogram
3019 x_dist = min_val + numpy.arange(1 + ((max_val-(min_val))/step))*step
3020 y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step))
3021 mean = numpy.sum(x_dist * y_dist) / numpy.sum(y_dist)
3022 sigma = numpy.sqrt(numpy.sum(y_dist * (x_dist - mean)**2) / numpy.sum(y_dist))
3023 parg = [numpy.amax(y_dist),mean,sigma]
3024 try :
3025 gauss_fit, covariance = curve_fit(fit_func, x_dist, y_dist,p0=parg)
3026 mode = gauss_fit[1]
3027 stdv = gauss_fit[2]
3028 except:
3029 mode = mean
3030 stdv = sigma
3031
3032 #Removing echoes farther than factor_stdv*stdv from the mode
3033 factor_stdv = 2.5
3034 noval = (abs(func2clean - mode)>=(factor_stdv*stdv)).nonzero()
3035
3036 if len(noval[0]) > 0:
3037 novall = ((func2clean - mode) >= (factor_stdv*stdv)).nonzero()
3038 cross_pairs = crosspairs[ii]
3039 #Getting coherent echoes which are removed.
3040 if len(novall[0]) > 0:
3041 val_spc[novall[0],cross_pairs[0],ifreq,ih] = 1
3042 val_spc[novall[0],cross_pairs[1],ifreq,ih] = 1
3043 val_cspc[novall[0],ii,ifreq,ih] = 1
3044 #Removing coherent from ISR data
3045 spectra[noval,cross_pairs[0],ifreq,ih] = numpy.nan
3046 spectra[noval,cross_pairs[1],ifreq,ih] = numpy.nan
3047 cspectra[noval,ii,ifreq,ih] = numpy.nan
3048 #
3049 # not used; this only applies when save_drifts > 2
3050 ''' channels = dataOut.channelList
3051 cross_pairs = dataOut.groupList
3052 #print("OUT NOVALL 2")
3053
3054 vcross0 = (cross_pairs[0] == channels[ii]).nonzero()
3055 vcross1 = (cross_pairs[1] == channels[ii]).nonzero()
3056 vcross = numpy.concatenate((vcross0,vcross1),axis=None)
3057 #print('vcros =', vcross)
3058
3059 #Getting coherent echoes which are removed.
3060 if len(novall) > 0:
3061 #val_spc[novall,ii,ifreq,ih] = 1
3062 val_spc[ii,ifreq,ih,novall] = 1
3063 if len(vcross) > 0:
3064 val_cspc[vcross,ifreq,ih,novall] = 1
3065
3066 #Removing coherent from ISR data.
3067 self.bloque0[ii,ifreq,ih,noval] = numpy.nan
3068 if len(vcross) > 0:
3069 self.bloques[vcross,ifreq,ih,noval] = numpy.nan
3070 '''
3071 #Getting average of the spectra and cross-spectra from incoherent echoes.
3072
3073 out_spectra = numpy.zeros([nChan,nProf,nHei], dtype=float) #+numpy.nan
3074 out_cspectra = numpy.zeros([nPairs,nProf,nHei], dtype=complex) #+numpy.nan
3075 for ih in range(nHei):
3076 for ifreq in range(nProf):
3077 for ich in range(nChan):
3078 tmp = spectra[:,ich,ifreq,ih]
3079 valid = (numpy.isfinite(tmp[:])==True).nonzero()
3080 if len(valid[0]) >0 :
3081 out_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3082
3083 for icr in range(nPairs):
3084 tmp = numpy.squeeze(cspectra[:,icr,ifreq,ih])
3085 valid = (numpy.isfinite(tmp)==True).nonzero()
3086 if len(valid[0]) > 0:
3087 out_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3088 #Removing fake coherent echoes (at least 4 points around the point)
3089 val_spectra = numpy.sum(val_spc,0)
3090 val_cspectra = numpy.sum(val_cspc,0)
3091
3092 val_spectra = self.REM_ISOLATED_POINTS(val_spectra,4)
3093 val_cspectra = self.REM_ISOLATED_POINTS(val_cspectra,4)
3094
3095 for i in range(nChan):
3096 for j in range(nProf):
3097 for k in range(nHei):
3098 if numpy.isfinite(val_spectra[i,j,k]) and val_spectra[i,j,k] < 1 :
3099 val_spc[:,i,j,k] = 0.0
3100 for i in range(nPairs):
3101 for j in range(nProf):
3102 for k in range(nHei):
3103 if numpy.isfinite(val_cspectra[i,j,k]) and val_cspectra[i,j,k] < 1 :
3104 val_cspc[:,i,j,k] = 0.0
3105 # val_spc = numpy.reshape(val_spc, (len(spectra[:,0,0,0]),nProf*nHei*nChan))
3106 # if numpy.isfinite(val_spectra)==str(True):
3107 # noval = (val_spectra<1).nonzero()
3108 # if len(noval) > 0:
3109 # val_spc[:,noval] = 0.0
3110 # val_spc = numpy.reshape(val_spc, (149,nChan,nProf,nHei))
3111
3112 #val_cspc = numpy.reshape(val_spc, (149,nChan*nHei*nProf))
3113 #if numpy.isfinite(val_cspectra)==str(True):
3114 # noval = (val_cspectra<1).nonzero()
3115 # if len(noval) > 0:
3116 # val_cspc[:,noval] = 0.0
3117 # val_cspc = numpy.reshape(val_cspc, (149,nChan,nProf,nHei))
3118
3119 tmp_sat_spectra = spectra.copy()
3120 tmp_sat_spectra = tmp_sat_spectra*numpy.nan
3121 tmp_sat_cspectra = cspectra.copy()
3122 tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan
3123
3124 val = (val_spc > 0).nonzero()
3125 if len(val[0]) > 0:
3126 tmp_sat_spectra[val] = in_sat_spectra[val]
3127
3128 val = (val_cspc > 0).nonzero()
3129 if len(val[0]) > 0:
3130 tmp_sat_cspectra[val] = in_sat_cspectra[val]
3131
3132 #Getting average of the spectra and cross-spectra from incoherent echoes.
3133 sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
3134 sat_cspectra = numpy.zeros((nPairs,nProf,nHei), dtype=complex)
3135 for ih in range(nHei):
3136 for ifreq in range(nProf):
3137 for ich in range(nChan):
3138 tmp = numpy.squeeze(tmp_sat_spectra[:,ich,ifreq,ih])
3139 valid = (numpy.isfinite(tmp)).nonzero()
3140 if len(valid[0]) > 0:
3141 sat_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3142
3143 for icr in range(nPairs):
3144 tmp = numpy.squeeze(tmp_sat_cspectra[:,icr,ifreq,ih])
3145 valid = (numpy.isfinite(tmp)).nonzero()
3146 if len(valid[0]) > 0:
3147 sat_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3148
3149 return out_spectra, out_cspectra,sat_spectra,sat_cspectra
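# Note: a minimal sketch of the per-(pair, frequency, height) outlier test applied
# in CleanRayleigh above. The dB power over the buffered blocks is histogrammed, a
# Gaussian is fitted to the histogram, and samples farther than factor_stdv*stdv
# from the fitted mode are set to NaN before averaging. Names below (samples, bins,
# fit_func as a Gaussian) are illustrative assumptions:
#
#   from scipy.optimize import curve_fit
#   func2clean = 10*numpy.log10(numpy.abs(samples))
#   y, edges = numpy.histogram(func2clean, bins=bins)
#   x = 0.5*(edges[1:] + edges[:-1])
#   popt, _ = curve_fit(fit_func, x, y, p0=[y.max(), x.mean(), x.std()])
#   bad = numpy.abs(func2clean - popt[1]) >= factor_stdv*popt[2]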
3150 def REM_ISOLATED_POINTS(self,array,rth):
3151 if rth == None : rth = 4
3152 num_prof = len(array[0,:,0])
3153 num_hei = len(array[0,0,:])
3154 n2d = len(array[:,0,0])
3155
3156 for ii in range(n2d) :
3157 tmp = array[ii,:,:]
3158 tmp = numpy.reshape(tmp,num_prof*num_hei)
3159 indxs1 = (numpy.isfinite(tmp)==True).nonzero()
3160 indxs2 = (tmp > 0).nonzero()
3161 indxs1 = (indxs1[0])
3162 indxs2 = indxs2[0]
3163 indxs = None
3164
3165 for iv in range(len(indxs2)):
3166 indv = numpy.array((indxs1 == indxs2[iv]).nonzero())
3167 if len(indv[0]) > 0 :
3168 indxs = numpy.concatenate((indxs,indxs2[iv]), axis=None)
3169
3170 indxs = indxs[1:]
3171 if len(indxs) < 4 :
3172 array[ii,:,:] = 0.
3173 return array
3174
3175 xpos = numpy.mod(indxs ,num_hei)
3176 ypos = (indxs / num_hei)
3177 sx = numpy.argsort(xpos) # Sorting with respect to "x" (time)
3178 xpos = xpos[sx]
3179 ypos = ypos[sx]
3180
3181 # *********************************** Cleaning isolated points **********************************
3182 ic = 0
3183 while True :
3184 r = numpy.sqrt(list(numpy.power((xpos[ic]-xpos),2)+ numpy.power((ypos[ic]-ypos),2)))
3185
3186 no_coh1 = (numpy.isfinite(r)==True).nonzero()
3187 no_coh2 = (r <= rth).nonzero()
3188 no_coh1 = numpy.array(no_coh1[0])
3189 no_coh2 = numpy.array(no_coh2[0])
3190 no_coh = None
3191 for iv in range(len(no_coh2)):
3192 indv = numpy.array((no_coh1 == no_coh2[iv]).nonzero())
3193 if len(indv[0]) > 0 :
3194 no_coh = numpy.concatenate((no_coh,no_coh2[iv]), axis=None)
3195 no_coh = no_coh[1:]
3196 if len(no_coh) < 4 :
3197 xpos[ic] = numpy.nan
3198 ypos[ic] = numpy.nan
3199
3200 ic = ic + 1
3201 if (ic == len(indxs)) :
3202 break
3203 indxs = (numpy.isfinite(list(xpos))==True).nonzero()
3204 if len(indxs[0]) < 4 :
3205 array[ii,:,:] = 0.
3206 return array
3207
3208 xpos = xpos[indxs[0]]
3209 ypos = ypos[indxs[0]]
3210 for i in range(0,len(ypos)):
3211 ypos[i]=int(ypos[i])
3212 junk = tmp
3213 tmp = junk*0.0
3214
3215 tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))]
3216 array[ii,:,:] = numpy.reshape(tmp,(num_prof,num_hei))
3217
3218 return array
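# Note: REM_ISOLATED_POINTS above keeps a (profile, height) sample only when at
# least 4 finite, positive samples lie within a radius rth of it in the
# profile-height plane; otherwise the plane is zeroed. Illustrative test for one
# candidate point (x0, y0) against the remaining coordinates xs, ys:
#
#   r = numpy.sqrt((x0 - xs)**2 + (y0 - ys)**2)
#   keep = numpy.sum(r <= rth) >= 4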
3219 def moments(self,doppler,yarray,npoints):
3220 ytemp = yarray
3221 val = (ytemp > 0).nonzero()
3222 val = val[0]
3223 if len(val) == 0 : val = range(npoints-1)
3224
3225 ynew = 0.5*(ytemp[val[0]]+ytemp[val[len(val)-1]])
3226 ytemp = numpy.append(ytemp, ynew) # append the boundary value (slice assignment fails on numpy arrays)
3227
3228 index = 0
3229 index = numpy.argmax(ytemp)
3230 ytemp = numpy.roll(ytemp,int(npoints/2)-1-index)
3231 ytemp = ytemp[0:npoints-1]
3232
3233 fmom = numpy.sum(doppler*ytemp)/numpy.sum(ytemp)+(index-(npoints/2-1))*numpy.abs(doppler[1]-doppler[0])
3234 smom = numpy.sum(doppler*doppler*ytemp)/numpy.sum(ytemp)
3235 return [fmom,numpy.sqrt(smom)]
3236
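# Note: a hedged usage sketch of moments() above, which returns the first moment
# (Doppler shift) and the square root of the second moment (spectral width) of a
# noise-subtracted spectrum, as used later in run():
#
#   fd, width = self.moments(doppler, signalpn0 - n0, nProf)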
3237 def windowing_single_old(self,spc,x,A,B,C,D,nFFTPoints):
3238 '''
3239 Written by R. Flores
3240 '''
3241 from scipy.optimize import curve_fit,fmin
3242
3243 def gaussian(x, a, b, c, d):
3244 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
3245 return val
3246
3247 def R_gaussian(x, a, b, c):
3248 N = int(numpy.shape(x)[0])
3249 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
3250 return val
3251
3252 def T(x,N):
3253 T = 1-abs(x)/N
3254 return T
3255
3256 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
3257
3258 N = int(numpy.shape(x)[0])
3259
3260 x_max = x[-1]
3261
3262 x_pos = x[nFFTPoints:]
3263 x_neg = x[:nFFTPoints]
3264
3265 R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0])
3266 R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1])
3267 #print(T(x_pos,x[-1]),x_pos,x[-1])
3268 #print(R_T_neg_1.shape,R_T_pos_1.shape)
3269 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
3270 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
3271 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
3272 max_val_1 = numpy.max(R_T_spc_1)
3273 R_T_spc_1 = R_T_spc_1*a/max_val_1
3274 print("R_T_spc_1: ", R_T_spc_1)
3275
3276 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
3277 R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0])
3278 R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1])
3279 R_T_d_sum = R_T_d_pos + R_T_d_neg
3280 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
3281 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
3282
3283 R_T_final = R_T_spc_1# + R_T_spc_3
3284
3285 return R_T_final
3286
3287 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
3288
3289 from scipy.stats import norm
3290 mean,std=norm.fit(spc)
3291
3292 # estimate starting values from the data
3293 print("A: ", A)
3294 a = A-D
3295 b = B
3296 c = C#numpy.std(spc) #C
3297 d = D
3298 #'''
3299 #ippSeconds = 250*20*1.e-6/3
3300
3301 #x_t = ippSeconds * (numpy.arange(nFFTPoints) - nFFTPoints / 2.)
3302
3303 #x_t = numpy.linspace(x_t[0],x_t[-1],3200)
3304 #print("x_t: ", x_t)
3305 #print("nFFTPoints: ", nFFTPoints)
3306 x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints))
3307 #print("x_vel: ", x_vel)
3308 #x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
3309 #x_freq = numpy.fft.fftshift(x_freq)
3310 #'''
3311 # define a least squares function to optimize
3312 import matplotlib.pyplot as plt
3313 aui = R_T_spc_fun(x_vel,a,b,c,d,nFFTPoints)
3314 print("aux_max: ", numpy.nanmax(aui))
3315 #print(dataOut.heightList[hei])
3316 plt.figure()
3317 plt.plot(x,spc,marker='*',linestyle='--')
3318 plt.plot(x,gaussian(x, a, b, c, d),color='b',marker='^',linestyle='')
3319 plt.plot(x,aui,color='k')
3320 #plt.title(dataOut.heightList[hei])
3321 plt.show()
3322
3323 def minfunc(params):
3324 #print("y.shape: ", numpy.shape(y))
3325 return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
3326
3327 # fit
3328
3329 popt_full = fmin(minfunc,[a,b,c,d], disp=False)
3330 #print("nIter", popt_full[2])
3331 popt = popt_full#[0]
3332
3333 fun = gaussian(x, popt[0], popt[1], popt[2], popt[3])
3334 print("pop1[0]: ", popt[0])
3335 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
3336 return fun, popt[0], popt[1], popt[2], popt[3]
3337
3338 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
3339 '''
3340 Written by R. Flores
3341 '''
3342 from scipy.optimize import curve_fit,fmin
3343
3344 def gaussian(x, a, b, c, d):
3345 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
3346 return val
3347
3348 def R_gaussian(x, a, b, c):
3349 N = int(numpy.shape(x)[0])
3350
3351 val = (a*numpy.exp((-(1/2)*x*(x*c**2 + 2*1.j*b)))/numpy.sqrt(1/c**2))
3352
3353 return val
3354
3355 def T(x,N):
3356 T = 1-abs(x)/N
3357 return T
3358
3359 def R_T_spc_fun(x, a, id_dop, c, d, nFFTPoints):
3360
3361 N = int(numpy.shape(x)[0])
3362 b = 0
3363 x_max = x[-1]
3364
3365 x_pos = x[nFFTPoints:]
3366 x_neg = x[:nFFTPoints]
3367 R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0])
3368 R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1])
3369
3370 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
3371 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
3372 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
3373 max_val_1 = numpy.max(R_T_spc_1)
3374 R_T_spc_1 = R_T_spc_1*a/max_val_1
3375 #raise NotImplementedError
3376 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
3377 R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0])
3378 R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1])
3379 R_T_d_sum = R_T_d_pos + R_T_d_neg
3380 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
3381 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
3382
3383 R_T_final = R_T_spc_1 + R_T_spc_3
3384
3385 id_dop = int(id_dop)
3386
3387 R_T_final = numpy.roll(R_T_final,-id_dop)
3388
3389 return R_T_final
3390
3391 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
3392
3393 from scipy.stats import norm
3394 mean,std=norm.fit(spc)
3395
3396 # estimate starting values from the data
3397 a = A-D
3398 b = B
3399 c = C#numpy.std(spc) #C
3400 d = D
3401
3402 id_dop = numpy.argmax(spc)
3403 id_dop = int(spc.shape[0]/2 - id_dop)
3404
3405 x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints))
3406
3407 # define a least squares function to optimize
3408
3409 def minfunc(params):
3410 #print("y.shape: ", numpy.shape(y))
3411 return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
3412
3413 # fit
3414 popt_full = fmin(minfunc,[a,id_dop,c,d], disp=False)
3415 popt = popt_full#[0]
3416
3417 fun = gaussian(x, a, 0, popt[2], popt[3])
3418 fun = numpy.roll(fun,-int(popt[1]))
3419
3420 return fun, popt[0], popt[1], popt[2], popt[3]
3421
3422 def windowing_single_direct(self,spc_mod,x,A,B,C,D,nFFTPoints,timeInterval):
3423 '''
3424 Written by R. Flores
3425 '''
3426 from scipy.optimize import curve_fit,fmin
3427
3428 def gaussian(x, a, b, c, d):
3429 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
3430 return val
3431
3432 def R_gaussian(x, a, b, c, d):
3433 N = int(numpy.shape(x)[0])
3434 val = (a*numpy.exp(-2*c**2*x**2 + 2*x*1.j*b))*(numpy.sqrt(2*numpy.pi)*c)/((numpy.pi)) + d*signal.unit_impulse(N)*numpy.shape(x)[0]/2
3435
3436 return 2*val/numpy.shape(val)[0]
3437
3438 def T(x,N):
3439 T = 1-abs(x)/N
3440 return T
3441
3442 def R_T_spc_fun(x, a, b, c, d, nFFTPoints, timeInterval): #"x" should be time
3443
3444 #timeInterval = 2
3445 x_double = numpy.linspace(0,timeInterval,nFFTPoints)
3446 x_double_m = numpy.flip(x_double)
3447 x_double_aux = numpy.linspace(0,x_double[-2],nFFTPoints)
3448 x_double_t = numpy.concatenate((x_double_m,x_double_aux))
3449 x_double_t /= max(x_double_t)
3450
3451
3452 R_T_sum_1 = R_gaussian(x, a, b, c, d)
3453
3454 R_T_sum_1_flip = numpy.copy(numpy.flip(R_T_sum_1))
3455 R_T_sum_1_flip[-1] = R_T_sum_1_flip[0]
3456 R_T_sum_1_flip = numpy.roll(R_T_sum_1_flip,1)
3457
3458 R_T_sum_1_flip.imag *= -1
3459
3460 R_T_sum_1_total = numpy.concatenate((R_T_sum_1,R_T_sum_1_flip))
3461 R_T_sum_1_total *= x_double_t #times trian_fun
3462
3463 R_T_sum_1_total = R_T_sum_1_total[:nFFTPoints] + R_T_sum_1_total[nFFTPoints:]
3464
3465 R_T_spc_1 = numpy.fft.fft(R_T_sum_1_total).real
3466 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
3467
3468 freq = numpy.fft.fftfreq(nFFTPoints, d=timeInterval/nFFTPoints)
3469
3470 freq = numpy.fft.fftshift(freq)
3471
3472 freq *= 6/2 #lambda/2
3473
3474 return R_T_spc_1
3475
3476 y = spc_mod
3477
3478 #from scipy.stats import norm
3479
3480 # estimate starting values from the data
3481
3482 a = A-D
3483 b = B
3484 c = C
3485 d = D
3486
3487 # define a least squares function to optimize
3488 import matplotlib.pyplot as plt
3489 #ippSeconds = 2
3490 t_range = numpy.linspace(0,timeInterval,nFFTPoints)
3491 #aui = R_T_spc_fun(t_range,a,b,c,d,nFFTPoints,timeInterval)
3492
3493 def minfunc(params):
3494 return sum((y-R_T_spc_fun(t_range,params[0],params[1],params[2],params[3],nFFTPoints,timeInterval))**2/1)#y**2)
3495
3496 # fit
3497 popt_full = fmin(minfunc,[a,b,c,d], disp=False)
3498 popt = popt_full
3499
3500 fun = R_T_spc_fun(t_range,popt[0],popt[1],popt[2],popt[3],nFFTPoints,timeInterval)
3501
3502 return fun, popt[0], popt[1], popt[2], popt[3]
3503
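# Note: the windowing_single* methods above fit a Gaussian-plus-offset spectral
# model through its autocorrelation: the model ACF is multiplied by a triangular
# window (finite observation time), transformed back to a spectrum with an FFT and
# least-squares fitted (scipy fmin) against the measured spectrum. A usage sketch
# matching the call made later in run(), with assumed inputs:
#
#   fun, A, B, C, D = self.windowing_single_direct(spc_mod, dataOut.VelRange,
#                         spc_max, dop1_x0, abs(dop1_x0/5), D_in,
#                         dataOut.nFFTPoints, dataOut.timeInterval)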
3504 # **********************************************************************************************
3505 index = 0
3506 fint = 0
3507 buffer = 0
3508 buffer2 = 0
3509 buffer3 = 0
3510 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None, filec=None,coh_th=None, hei_th=None,taver=None,Gaussian_Windowed=0):
3511 nChannels = dataOut.nChannels
3512 nHeights= dataOut.heightList.size
3513 nProf = dataOut.nProfiles
3514 if numpy.any(taver): taver=int(taver)
3515 else : taver = 15
3516 tini=time.localtime(dataOut.utctime)
3517 if (tini.tm_min % taver) == 0 and (tini.tm_sec < 5 and self.fint==0):
3518
3519 self.index = 0
3520 jspc = self.buffer
3521 jcspc = self.buffer2
3522 jnoise = self.buffer3
3523 self.buffer = dataOut.data_spc
3524 self.buffer2 = dataOut.data_cspc
3525 self.buffer3 = dataOut.noise
3526 self.fint = 1
3527 if numpy.any(jspc) :
3528 jspc= numpy.reshape(jspc,(int(len(jspc)/nChannels),nChannels,nProf,nHeights))
3529 jcspc= numpy.reshape(jcspc,(int(len(jcspc)/int(nChannels/2)),int(nChannels/2),nProf,nHeights))
3530 jnoise= numpy.reshape(jnoise,(int(len(jnoise)/nChannels),nChannels))
3531 else:
3532 dataOut.flagNoData = True
3533 return dataOut
3534 else :
3535 if (tini.tm_min % taver) == 0 : self.fint = 1
3536 else : self.fint = 0
3537 self.index += 1
3538 if numpy.any(self.buffer):
3539 self.buffer = numpy.concatenate((self.buffer,dataOut.data_spc), axis=0)
3540 self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0)
3541 self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0)
3542 else:
3543 self.buffer = dataOut.data_spc
3544 self.buffer2 = dataOut.data_cspc
3545 self.buffer3 = dataOut.noise
3546 dataOut.flagNoData = True
3547 return dataOut
3548 if path != None:
3549 sys.path.append(path)
3550 try:
3551 self.library = importlib.import_module(file)
3552 except:
3553 pass
3554 if filec != None:
3555 self.weightf = importlib.import_module(filec)
3556 #self.weightf = importlib.import_module('weightfit')
3557
3558 #To be inserted as a parameter
3559 groupArray = numpy.array(groupList)
3560 #groupArray = numpy.array([[0,1],[2,3]])
3561 dataOut.groupList = groupArray
3562
3563 nGroups = groupArray.shape[0]
3564 nChannels = dataOut.nChannels
3565 nHeights = dataOut.heightList.size
3566
3567 #Parameters Array
3568 dataOut.data_param = None
3569 dataOut.data_paramC = None
3570
3571 #Set constants
3572 try:
3573 constants = self.library.setConstants(dataOut)
3574 dataOut.constants = constants
3575 except:
3576 pass
3577 M = dataOut.normFactor
3578 N = dataOut.nFFTPoints
3579 ippSeconds = dataOut.ippSeconds
3580 K = dataOut.nIncohInt
3581 pairsArray = numpy.array(dataOut.pairsList)
3582
3583 snrth= 20
3584 spectra = dataOut.data_spc
3585 cspectra = dataOut.data_cspc
3586 nProf = dataOut.nProfiles
3587 heights = dataOut.heightList
3588 nHei = len(heights)
3589 channels = dataOut.channelList
3590 nChan = len(channels)
3591 nIncohInt = dataOut.nIncohInt
3592 crosspairs = dataOut.groupList
3593 noise = dataOut.noise
3594 jnoise = jnoise/N
3595 noise = numpy.nansum(jnoise,axis=0)#/len(jnoise)
3596 power = numpy.sum(spectra, axis=1)
3597 nPairs = len(crosspairs)
3598 absc = dataOut.abscissaList[:-1]
3599
3600 if not self.isConfig:
3601 self.isConfig = True
3602
3603 index = tini.tm_hour*12+tini.tm_min/taver
3604 jspc = jspc/N/N
3605 jcspc = jcspc/N/N
3606 tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2)
3607 jspectra = tmp_spectra*len(jspc[:,0,0,0])
3608 jcspectra = tmp_cspectra*len(jspc[:,0,0,0])
3609 my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, snrth,coh_th, hei_th)
3610 clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index)
3611 dataOut.data_spc = incoh_spectra
3612 dataOut.data_cspc = incoh_cspectra
3613 #dataOut.data_spc = tmp_spectra
3614 #dataOut.data_cspc = tmp_cspectra
3615
3616 clean_num_aver = incoh_aver*len(jspc[:,0,0,0])
3617 coh_num_aver = clean_coh_aver*len(jspc[:,0,0,0])
3618 #clean_num_aver = (numpy.zeros([nChan, nHei])+1)*len(jspc[:,0,0,0])
3619 #coh_num_aver = numpy.zeros([nChan, nHei])*0*len(jspc[:,0,0,0])
3620 #List of possible combinations
3621 listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2)) # materialized so it can be iterated more than once below
3622 indCross = numpy.zeros(len(listComb), dtype = 'int')
3623 if Gaussian_Windowed == 1:
3624 #dataOut.data_spc = jspectra
3625 '''
3626 Written by R. Flores
3627 '''
3628 print("normFactor: ", dataOut.normFactor)
3629 data_spc_aux = numpy.copy(dataOut.data_spc)#[:,0,:]
3630 data_spc_aux[:,0,:] = (data_spc_aux[:,1,:]+data_spc_aux[:,-1,:])/2
3631 #'''
3632 from scipy.signal import medfilt
3633 import matplotlib.pyplot as plt
3634 dataOut.moments = numpy.ones((dataOut.nChannels,4,dataOut.nHeights))*numpy.NAN
3635 dataOut.VelRange = dataOut.getVelRange(0)
3636 for nChannel in range(dataOut.nChannels):
3637 for hei in range(dataOut.heightList.shape[0]):
3638 #print("ipp: ", dataOut.ippSeconds)
3639 #spc = numpy.copy(dataOut.data_spc[nChannel,:,hei])
3640 spc = data_spc_aux[nChannel,:,hei]
3641 if not spc.any(): # skip all-zero spectra
3642 print("CONTINUE")
3643 continue
3644 #print(VelRange)
3645 #print(dataOut.getFreqRange(64))
3646 #print("Hei: ", dataOut.heightList[hei])
3647
3648 spc_mod = numpy.copy(spc)
3649 spcm = medfilt(spc_mod,11)
3650 spc_max = numpy.max(spcm)
3651 dop1_x0 = dataOut.VelRange[numpy.argmax(spcm)]
3652 #D = numpy.min(spcm)
3653 D_in = (numpy.mean(spcm[:15])+numpy.mean(spcm[-15:]))/2.
3654 #print("spc_max: ", spc_max)
3655 #print("dataOut.ippSeconds: ", dataOut.ippSeconds, dataOut.timeInterval)
3656 ##fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints)
3657 #fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints)
3658 fun, A, B, C, D = self.windowing_single_direct(spc_mod,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0/5),D_in,dataOut.nFFTPoints,dataOut.timeInterval)
3659
3660 dataOut.moments[nChannel,0,hei] = A
3661 dataOut.moments[nChannel,1,hei] = B
3662 dataOut.moments[nChannel,2,hei] = C
3663 dataOut.moments[nChannel,3,hei] = D
3664 '''
3665 if nChannel == 0:
3666 print(dataOut.heightList[hei])
3667 plt.figure()
3668 plt.plot(dataOut.VelRange,spc,marker='*',linestyle='--')
3669 plt.plot(dataOut.VelRange,fun)
3670 plt.title(dataOut.heightList[hei])
3671 plt.show()
3672 '''
3673 #plt.show()
3674 #'''
3675 dataOut.data_spc = jspectra
3676 print("SUCCESS")
3677 return dataOut
3678
3679 elif Gaussian_Windowed == 2: #Only to clean spc
3680 dataOut.VelRange = dataOut.getVelRange(0)
3681 return dataOut
3682 else:
3683 if getSNR:
3684 listChannels = groupArray.reshape((groupArray.size))
3685 listChannels.sort()
3686 dataOut.data_SNR = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise[listChannels])
3687 if dataOut.data_paramC is None:
3688 dataOut.data_paramC = numpy.zeros((nGroups*4, nHeights,2))*numpy.nan
3689 for i in range(nGroups):
3690 coord = groupArray[i,:]
3691 #Input data array
3692 data = dataOut.data_spc[coord,:,:]/(M*N)
3693 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
3694
3695 #Cross Spectra data array for Covariance Matrixes
3696 ind = 0
3697 for pairs in listComb:
3698 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
3699 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
3700 ind += 1
3701 dataCross = dataOut.data_cspc[indCross,:,:]/(M*N)
3702 dataCross = dataCross**2
3703 nhei = nHeights
3704 poweri = numpy.sum(dataOut.data_spc[:,1:nProf-0,:],axis=1)/clean_num_aver[:,:]
3705 if i == 0 : my_noises = numpy.zeros(4,dtype=float)
3706 n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(nProf-1)
3707 n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(nProf-1)
3708 n0 = n0i
3709 n1= n1i
3710 my_noises[2*i+0] = n0
3711 my_noises[2*i+1] = n1
3712 snrth = -25.0 # -4
3713 snrth = 10**(snrth/10.0)
3714 jvelr = numpy.zeros(nHeights, dtype = 'float')
3715 #snr0 = numpy.zeros(nHeights, dtype = 'float')
3716 #snr1 = numpy.zeros(nHeights, dtype = 'float')
3717 hvalid = [0]
3718
3719 coh2 = abs(dataOut.data_cspc[i,1:nProf,:])**2/(dataOut.data_spc[0+i*2,1:nProf-0,:]*dataOut.data_spc[1+i*2,1:nProf-0,:])
3720
3721 for h in range(nHeights):
3722 smooth = clean_num_aver[i+1,h]
3723 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
3724 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
3725 signal0 = signalpn0-n0
3726 signal1 = signalpn1-n1
3727 snr0 = numpy.sum(signal0/n0)/(nProf-1)
3728 snr1 = numpy.sum(signal1/n1)/(nProf-1)
3729 #jmax0 = MAX(signal0,maxp0)
3730 #jmax1 = MAX(signal1,maxp1)
3731 gamma = coh2[:,h]
3732
3733 indxs = (numpy.isfinite(list(gamma))==True).nonzero()
3734
3735 if len(indxs) >0:
3736 if numpy.nanmean(gamma) > 0.07:
3737 maxp0 = numpy.argmax(signal0*gamma)
3738 maxp1 = numpy.argmax(signal1*gamma)
3739 #print('usa gamma',numpy.nanmean(gamma))
3740 else:
3741 maxp0 = numpy.argmax(signal0)
3742 maxp1 = numpy.argmax(signal1)
3743 jvelr[h] = (absc[maxp0]+absc[maxp1])/2.
3744 else: jvelr[h] = absc[0]
3745 if snr0 > 0.1 and snr1 > 0.1: hvalid = numpy.concatenate((hvalid,h), axis=None)
3746 #print(maxp0,absc[maxp0],snr0,jvelr[h])
3747
3748 if len(hvalid)> 1: fd0 = numpy.median(jvelr[hvalid[1:]])*-1
3749 else: fd0 = numpy.nan
3750 #print(fd0,hvalid)
3751 for h in range(nHeights):
3752 d = data[:,h]
3753 smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:]
3754 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
3755 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
3756 signal0 = signalpn0-n0
3757 signal1 = signalpn1-n1
3758 snr0 = numpy.sum(signal0/n0)/(nProf-1)
3759 snr1 = numpy.sum(signal1/n1)/(nProf-1)
3760
3761 if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 :
3762 #Covariance Matrix
3763 D = numpy.diag(d**2)
3764 ind = 0
3765 for pairs in listComb:
3766 #Coordinates in Covariance Matrix
3767 x = pairs[0]
3768 y = pairs[1]
3769 #Channel Index
3770 S12 = dataCross[ind,:,h]
3771 D12 = numpy.diag(S12)
3772 #Completing Covariance Matrix with Cross Spectras
3773 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
3774 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
3775 ind += 1
3776 diagD = numpy.zeros(256)
3777
3778 #Dinv=numpy.linalg.inv(D)
3779 #L=numpy.linalg.cholesky(Dinv)
3780 try:
3781 Dinv=numpy.linalg.inv(D)
3782 L=numpy.linalg.cholesky(Dinv)
3783 except:
3784 Dinv = D*numpy.nan
3785 L= D*numpy.nan
3786 LT=L.T
3787
3788 dp = numpy.dot(LT,d)
3789
3790 #Initial values
3791 data_spc = dataOut.data_spc[coord,:,h]
3792 w = data_spc/data_spc
3793 if filec != None:
3794 w = self.weightf.weightfit(w,tini.tm_year,tini.tm_yday,index,h,i)
3795
3796 if (h>6) and (error1[3]<25):
3797 p0 = dataOut.data_param[i,:,h-1]
3798 #print('usa anterior')
3799 else:
3800 p0 = numpy.array(self.library.initialValuesFunction(data_spc*w, constants))# sin el i(data_spc, constants, i)
3801
3802 if filec != None:
3803 p0 = self.weightf.Vrfit(p0,tini.tm_year,tini.tm_yday,index,h,i)
3804 p0[3] = fd0
3805 #if index == 175 and i==1 and h>=27 and h<=35: p0[3]=30
3806 #if h >= 6 and i==1 and h<= 10: print(p0)
3807 try:
3808 #Least Squares
3809 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
3810 #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
3811 #Chi square error
3812 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
3813 #Error with Jacobian
3814 error1 = self.library.errorFunction(minp,constants,LT)
3815 #if h >= 0 and h<= 10 and i ==0: print(p0,minp,error1)
3816 #if i>=0 and h>=0: print(index,h,minp[3])
3817 # print self.__residFunction(p0,dp,LT, constants)
3818 # print infodict['fvec']
3819 # print self.__residFunction(minp,dp,LT,constants)
3820
3821 except:
3822 minp = p0*numpy.nan
3823 error0 = numpy.nan
3824 error1 = p0*numpy.nan
3825 # s_sq = (self.__residFunction(minp,dp,LT,constants)).sum()/(len(dp)-len(p0))
3826 # covp = covp*s_sq
3827 # error = []
3828 # for ip in range(len(minp)):
3829 # try:
3830 # error.append(numpy.absolute(covp[ip][ip])**0.5)
3831 # except:
3832 # error.append( 0.00 )
3833 #if i==1 and h==11 and index == 139: print(p0, minp,data_spc)
3834 else :
3835 data_spc = dataOut.data_spc[coord,:,h]
3836 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))
3837 minp = p0*numpy.nan
3838 error0 = numpy.nan
3839 error1 = p0*numpy.nan
3840
3841 if dataOut.data_param is None:
3842 dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
3843 dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
3844
3845 dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
3846 dataOut.data_param[i,:,h] = minp
3847
3848 for ht in range(nHeights-1) :
3849 smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam]
3850 dataOut.data_paramC[4*i,ht,1] = smooth
3851 signalpn0 = (coh_spectra[i*2 ,1:(nProf-0),ht])/smooth #coh_spectra
3852 signalpn1 = (coh_spectra[i*2+1,1:(nProf-0),ht])/smooth
3853
3854 val0 = (signalpn0 > 0).nonzero()
3855 val0 = val0[0]
3856
3857 if len(val0) == 0 : val0_npoints = nProf
3858 else : val0_npoints = len(val0)
3859
3860 val1 = (signalpn1 > 0).nonzero()
3861 val1 = val1[0]
3862 if len(val1) == 0 : val1_npoints = nProf
3863 else : val1_npoints = len(val1)
3864
3865 dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0
3866 dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1
3867
3868 signal0 = (signalpn0-n0)
3869 vali = (signal0 < 0).nonzero()
3870 vali = vali[0]
3871 if len(vali) > 0 : signal0[vali] = 0
3872 signal1 = (signalpn1-n1)
3873 vali = (signal1 < 0).nonzero()
3874 vali = vali[0]
3875 if len(vali) > 0 : signal1[vali] = 0
3876 snr0 = numpy.sum(signal0/n0)/(nProf-1)
3877 snr1 = numpy.sum(signal1/n1)/(nProf-1)
3878 doppler = absc[1:]
3879 if snr0 >= snrth and snr1 >= snrth and smooth :
3880 signalpn0_n0 = signalpn0
3881 signalpn0_n0[val0] = signalpn0[val0] - n0
3882 mom0 = self.moments(doppler,signalpn0-n0,nProf)
3883
3884 signalpn1_n1 = signalpn1
3885 signalpn1_n1[val1] = signalpn1[val1] - n1
3886 mom1 = self.moments(doppler,signalpn1_n1,nProf)
3887 dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2.
3888 dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2.
3889
3890 dataOut.data_spc = jspectra
3891 if getSNR:
3892 listChannels = groupArray.reshape((groupArray.size))
3893 listChannels.sort()
3894
3895 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], my_noises[listChannels])
3896 return dataOut
3897
3898 def __residFunction(self, p, dp, LT, constants):
3899
3900 fm = self.library.modelFunction(p, constants)
3901 fmp=numpy.dot(LT,fm)
3902 return dp-fmp
3903
3904 def __getSNR(self, z, noise):
3905
3906 avg = numpy.average(z, axis=1)
3907 SNR = (avg.T-noise)/noise
3908 SNR = SNR.T
3909 return SNR
3910
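# Note: __getSNR above computes SNR = (mean power - noise) / noise per channel and
# height. Equivalent illustrative form, assuming z has shape (channels, profiles, heights):
#
#   avg = numpy.average(z, axis=1)                    # (channels, heights)
#   snr = (avg - noise[:, None]) / noise[:, None]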
3911 def __chisq(self, p, chindex, hindex):
3912 #similar to Resid but calculates CHI**2
3913 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
3914 dp=numpy.dot(LT,d)
3915 fmp=numpy.dot(LT,fm)
3916 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
3917 return chisq
3918
3919 class WindProfiler_V0(Operation):
3920
3921 __isConfig = False
3922
3923 __initime = None
3924 __lastdatatime = None
3925 __integrationtime = None
3926
3927 __buffer = None
3928
3929 __dataReady = False
3930
3931 __firstdata = None
3932
3933 n = None
3934
3935 def __init__(self):
3936 Operation.__init__(self)
3937
3938 def __calculateCosDir(self, elev, azim):
3939 zen = (90 - elev)*numpy.pi/180
3940 azim = azim*numpy.pi/180
3941 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
3942 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
3943
3944 signX = numpy.sign(numpy.cos(azim))
3945 signY = numpy.sign(numpy.sin(azim))
3946
3947 cosDirX = numpy.copysign(cosDirX, signX)
3948 cosDirY = numpy.copysign(cosDirY, signY)
3949 return cosDirX, cosDirY
3950
3951 def __calculateAngles(self, theta_x, theta_y, azimuth):
3952
3953 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
3954 zenith_arr = numpy.arccos(dir_cosw)
3955 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
3956
3957 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
3958 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
3959
3960 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
3961
3962 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
3963
3964 if horOnly:
3965 A = numpy.c_[dir_cosu,dir_cosv]
3966 else:
3967 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
3968 A = numpy.asmatrix(A)
3969 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
3970
3971 return A1
3972
3973 def __correctValues(self, heiRang, phi, velRadial, SNR):
3974 listPhi = phi.tolist()
3975 maxid = listPhi.index(max(listPhi))
3976 minid = listPhi.index(min(listPhi))
3977
3978 rango = list(range(len(phi)))
3979 # rango = numpy.delete(rango,maxid)
3980
3981 heiRang1 = heiRang*math.cos(phi[maxid])
3982 heiRangAux = heiRang*math.cos(phi[minid])
3983 indOut = (heiRang1 < heiRangAux[0]).nonzero()
3984 heiRang1 = numpy.delete(heiRang1,indOut)
3985
3986 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
3987 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
3988
3989 for i in rango:
3990 x = heiRang*math.cos(phi[i])
3991 y1 = velRadial[i,:]
3992 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
3993
3994 x1 = heiRang1
3995 y11 = f1(x1)
3996
3997 y2 = SNR[i,:]
3998 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
3999 y21 = f2(x1)
4000
4001 velRadial1[i,:] = y11
4002 SNR1[i,:] = y21
4003
4004 return heiRang1, velRadial1, SNR1
4005
4006 def __calculateVelUVW(self, A, velRadial):
4007
4008 # Matrix operation
4009 # velUVW = numpy.zeros((velRadial.shape[1],3))
4010 # for ind in range(velRadial.shape[1]):
4011 # velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
4012 # velUVW = velUVW.transpose()
4013 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
4014 velUVW[:,:] = numpy.dot(A,velRadial)
4015
4016
4017 return velUVW
4018
4019 # def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
4020
4021 def techniqueDBS(self, kwargs):
4022 """
4023 Function that implements the Doppler Beam Swinging (DBS) technique (an illustrative sketch follows this method).
4024
4025 Input: Radial velocities, direction cosines (x and y) of each beam, antenna azimuth,
4026 direction correction (if necessary), ranges and SNR
4027
4028 Output: Wind estimates (zonal, meridional and vertical)
4029
4030 Parameters affected: Winds, height range, SNR
4031 """
4032 velRadial0 = kwargs['velRadial']
4033 heiRang = kwargs['heightList']
4034 SNR0 = kwargs['SNR']
4035
4036 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
4037 theta_x = numpy.array(kwargs['dirCosx'])
4038 theta_y = numpy.array(kwargs['dirCosy'])
4039 else:
4040 elev = numpy.array(kwargs['elevation'])
4041 azim = numpy.array(kwargs['azimuth'])
4042 theta_x, theta_y = self.__calculateCosDir(elev, azim)
4043 azimuth = kwargs['correctAzimuth']
4044 if 'horizontalOnly' in kwargs:
4045 horizontalOnly = kwargs['horizontalOnly']
4046 else: horizontalOnly = False
4047 if 'correctFactor' in kwargs:
4048 correctFactor = kwargs['correctFactor']
4049 else: correctFactor = 1
4050 if 'channelList' in kwargs:
4051 channelList = kwargs['channelList']
4052 if len(channelList) == 2:
4053 horizontalOnly = True
4054 arrayChannel = numpy.array(channelList)
4055 param = param[arrayChannel,:,:]
4056 theta_x = theta_x[arrayChannel]
4057 theta_y = theta_y[arrayChannel]
4058
4059 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4060 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
4061 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
4062
4063 # Computing the velocity components with DBS
4064 winds = self.__calculateVelUVW(A,velRadial1)
4065
4066 return winds, heiRang1, SNR1
4067
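# Note: a minimal sketch of the DBS inversion used above. Each beam measures a
# radial velocity v_r = u*cos_u + v*cos_v + w*cos_w, so stacking the beam direction
# cosines into a matrix A gives velRadial = A.dot([u, v, w]) and the wind is the
# least-squares solution per height:
#
#   A = numpy.c_[dir_cosu, dir_cosv, dir_cosw]            # one row per beam
#   A1 = numpy.linalg.inv(A.T.dot(A)).dot(A.T)            # left pseudo-inverse
#   winds = A1.dot(velRadial1)                            # rows: zonal, meridional, vertical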
4068 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
4069
4070 nPairs = len(pairs_ccf)
4071 posx = numpy.asarray(posx)
4072 posy = numpy.asarray(posy)
4073
4074 # Inverse rotation to align with the azimuth
4075 if azimuth!= None:
4076 azimuth = azimuth*math.pi/180
4077 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
4078 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
4079 else:
4080 posx1 = posx
4081 posy1 = posy
4082
4083 # Distance calculation
4084 distx = numpy.zeros(nPairs)
4085 disty = numpy.zeros(nPairs)
4086 dist = numpy.zeros(nPairs)
4087 ang = numpy.zeros(nPairs)
4088
4089 for i in range(nPairs):
4090 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
4091 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
4092 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
4093 ang[i] = numpy.arctan2(disty[i],distx[i])
4094
4095 return distx, disty, dist, ang
4096 # Matrix calculation
4097 # nPairs = len(pairs)
4098 # ang1 = numpy.zeros((nPairs, 2, 1))
4099 # dist1 = numpy.zeros((nPairs, 2, 1))
4100 #
4101 # for j in range(nPairs):
4102 # dist1[j,0,0] = dist[pairs[j][0]]
4103 # dist1[j,1,0] = dist[pairs[j][1]]
4104 # ang1[j,0,0] = ang[pairs[j][0]]
4105 # ang1[j,1,0] = ang[pairs[j][1]]
4106 #
4107 # return distx,disty, dist1,ang1
4108
4109
4110 def __calculateVelVer(self, phase, lagTRange, _lambda):
4111
4112 Ts = lagTRange[1] - lagTRange[0]
4113 velW = -_lambda*phase/(4*math.pi*Ts)
4114
4115 return velW
4116
4117 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
4118 nPairs = tau1.shape[0]
4119 nHeights = tau1.shape[1]
4120 vel = numpy.zeros((nPairs,3,nHeights))
4121 dist1 = numpy.reshape(dist, (dist.size,1))
4122
4123 angCos = numpy.cos(ang)
4124 angSin = numpy.sin(ang)
4125
4126 vel0 = dist1*tau1/(2*tau2**2)
4127 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
4128 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
4129
4130 ind = numpy.where(numpy.isinf(vel))
4131 vel[ind] = numpy.nan
4132
4133 return vel
4134
4135 # def __getPairsAutoCorr(self, pairsList, nChannels):
4136 #
4137 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
4138 #
4139 # for l in range(len(pairsList)):
4140 # firstChannel = pairsList[l][0]
4141 # secondChannel = pairsList[l][1]
4142 #
4143 # #Obtaining autocorrelation pairs
4144 # if firstChannel == secondChannel:
4145 # pairsAutoCorr[firstChannel] = int(l)
4146 #
4147 # pairsAutoCorr = pairsAutoCorr.astype(int)
4148 #
4149 # pairsCrossCorr = range(len(pairsList))
4150 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
4151 #
4152 # return pairsAutoCorr, pairsCrossCorr
4153
4154 # def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
4155 def techniqueSA(self, kwargs):
4156
4157 """
4158 Function that implements the Spaced Antenna (SA) technique (an illustrative note follows this method).
4159
4160 Input: Antenna positions, azimuth, cross-correlation pairs (groupList), correlation
4161 lags (tau), wavelength (_lambda) and an optional correction factor
4162
4163 Output: Wind estimates (zonal, meridional and vertical)
4164
4165 Parameters affected: Winds
4166 """
4167 position_x = kwargs['positionX']
4168 position_y = kwargs['positionY']
4169 azimuth = kwargs['azimuth']
4170
4171 if 'correctFactor' in kwargs:
4172 correctFactor = kwargs['correctFactor']
4173 else:
4174 correctFactor = 1
4175
4176 groupList = kwargs['groupList']
4177 pairs_ccf = groupList[1]
4178 tau = kwargs['tau']
4179 _lambda = kwargs['_lambda']
4180
4181 #Cross Correlation pairs obtained
4182 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels)
4183 # pairsArray = numpy.array(pairsList)[pairsCrossCorr]
4184 # pairsSelArray = numpy.array(pairsSelected)
4185 # pairs = []
4186 #
4187 # #Wind estimation pairs obtained
4188 # for i in range(pairsSelArray.shape[0]/2):
4189 # ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
4190 # ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
4191 # pairs.append((ind1,ind2))
4192
4193 indtau = tau.shape[0]//2
4194 tau1 = tau[:indtau,:]
4195 tau2 = tau[indtau:-1,:]
4196 # tau1 = tau1[pairs,:]
4197 # tau2 = tau2[pairs,:]
4198 phase1 = tau[-1,:]
4199
4200 #---------------------------------------------------------------------
4201 # Direct method
4202 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
4203 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
4204 winds = stats.nanmean(winds, axis=0)
4205 #---------------------------------------------------------------------
4206 # General method
4207 # distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
4208 # #Computing the correlation-function coefficients
4209 # F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
4210 # #Computing the velocities
4211 # winds = self.calculateVelUV(F,G,A,B,H)
4212
4213 #---------------------------------------------------------------------
4214 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
4215 winds = correctFactor*winds
4216 return winds
4217
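# Note: in the direct SA method above, the horizontal wind along each baseline is
# taken as dist*tau1/(2*tau2**2) projected onto x and y with the baseline angle,
# and the vertical wind comes from the cross-correlation phase via
#
#   velW = -_lambda * phase / (4 * numpy.pi * Ts)   # Ts: lag sampling interval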
4218 def __checkTime(self, currentTime, paramInterval, outputInterval):
4219
4220 dataTime = currentTime + paramInterval
4221 deltaTime = dataTime - self.__initime
4222
4223 if deltaTime >= outputInterval or deltaTime < 0:
4224 self.__dataReady = True
4225 return
4226
4227 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
4228 '''
4229 Function that implements a wind estimation technique using detected meteors (an illustrative sketch follows this method).
4230
4231 Input: Detected meteors and the minimum number of meteors required per height bin
4232
4233 Output: Wind estimates (zonal and meridional)
4234
4235 Parameters affected: Winds
4236 '''
4237 #Settings
4238 nInt = (heightMax - heightMin)/2
4239 nInt = int(nInt)
4240 winds = numpy.zeros((2,nInt))*numpy.nan
4241
4242 #Filter errors
4243 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
4244 finalMeteor = arrayMeteor[error,:]
4245
4246 #Meteor Histogram
4247 finalHeights = finalMeteor[:,2]
4248 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
4249 nMeteorsPerI = hist[0]
4250 heightPerI = hist[1]
4251
4252 #Sort of meteors
4253 indSort = finalHeights.argsort()
4254 finalMeteor2 = finalMeteor[indSort,:]
4255
4256 # Calculating winds
4257 ind1 = 0
4258 ind2 = 0
4259
4260 for i in range(nInt):
4261 nMet = nMeteorsPerI[i]
4262 ind1 = ind2
4263 ind2 = ind1 + nMet
4264
4265 meteorAux = finalMeteor2[ind1:ind2,:]
4266
4267 if meteorAux.shape[0] >= meteorThresh:
4268 vel = meteorAux[:, 6]
4269 zen = meteorAux[:, 4]*numpy.pi/180
4270 azim = meteorAux[:, 3]*numpy.pi/180
4271
4272 n = numpy.cos(zen)
4273 # m = (1 - n**2)/(1 - numpy.tan(azim)**2)
4274 # l = m*numpy.tan(azim)
4275 l = numpy.sin(zen)*numpy.sin(azim)
4276 m = numpy.sin(zen)*numpy.cos(azim)
4277
4278 A = numpy.vstack((l, m)).transpose()
4279 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
4280 windsAux = numpy.dot(A1, vel)
4281
4282 winds[0,i] = windsAux[0]
4283 winds[1,i] = windsAux[1]
4284
4285 return winds, heightPerI[:-1]
4286
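# Note: a minimal sketch of the per-height-bin least squares used by
# techniqueMeteors above. Each meteor contributes a radial velocity and the
# direction cosines l = sin(zen)*sin(azim), m = sin(zen)*cos(azim):
#
#   A = numpy.vstack((l, m)).T
#   uv = numpy.linalg.inv(A.T.dot(A)).dot(A.T).dot(vel)   # [zonal, meridional]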
4287 def techniqueNSM_SA(self, **kwargs):
4288 metArray = kwargs['metArray']
4289 heightList = kwargs['heightList']
4290 timeList = kwargs['timeList']
4291
4292 rx_location = kwargs['rx_location']
4293 groupList = kwargs['groupList']
4294 azimuth = kwargs['azimuth']
4295 dfactor = kwargs['dfactor']
4296 k = kwargs['k']
4297
4298 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
4299 d = dist*dfactor
4300 #Phase calculation
4301 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
4302
4303 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
4304
4305 velEst = numpy.zeros((heightList.size,2))*numpy.nan
4306 azimuth1 = azimuth1*numpy.pi/180
4307
4308 for i in range(heightList.size):
4309 h = heightList[i]
4310 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
4311 metHeight = metArray1[indH,:]
4312 if metHeight.shape[0] >= 2:
4313 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
4314 iazim = metHeight[:,1].astype(int)
4315 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
4316 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
4317 A = numpy.asmatrix(A)
4318 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
4319 velHor = numpy.dot(A1,velAux)
4320
4321 velEst[i,:] = numpy.squeeze(velHor)
4322 return velEst
4323
4324 def __getPhaseSlope(self, metArray, heightList, timeList):
4325 meteorList = []
4326 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
4327 #Putting back together the meteor matrix
4328 utctime = metArray[:,0]
4329 uniqueTime = numpy.unique(utctime)
4330
4331 phaseDerThresh = 0.5
4332 ippSeconds = timeList[1] - timeList[0]
4333 sec = numpy.where(timeList>1)[0][0]
4334 nPairs = metArray.shape[1] - 6
4335 nHeights = len(heightList)
4336
4337 for t in uniqueTime:
4338 metArray1 = metArray[utctime==t,:]
4339 # phaseDerThresh = numpy.pi/4 # reduce phase threshold
4340 tmet = metArray1[:,1].astype(int)
4341 hmet = metArray1[:,2].astype(int)
4342
4343 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
4344 metPhase[:,:] = numpy.nan
4345 metPhase[:,hmet,tmet] = metArray1[:,6:].T
4346
4347 #Delete short trails
4348 metBool = ~numpy.isnan(metPhase[0,:,:])
4349 heightVect = numpy.sum(metBool, axis = 1)
4350 metBool[heightVect<sec,:] = False
4351 metPhase[:,heightVect<sec,:] = numpy.nan
4352
4353 #Derivative
4354 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
4355 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
4356 metPhase[phDerAux] = numpy.nan
4357
4358 #--------------------------METEOR DETECTION -----------------------------------------
4359 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
4360
4361 for p in numpy.arange(nPairs):
4362 phase = metPhase[p,:,:]
4363 phDer = metDer[p,:,:]
4364
4365 for h in indMet:
4366 height = heightList[h]
4367 phase1 = phase[h,:] #82
4368 phDer1 = phDer[h,:]
4369
4370 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
4371
4372 indValid = numpy.where(~numpy.isnan(phase1))[0]
4373 initMet = indValid[0]
4374 endMet = 0
4375
4376 for i in range(len(indValid)-1):
4377
4378 #Time difference
4379 inow = indValid[i]
4380 inext = indValid[i+1]
4381 idiff = inext - inow
4382 #Phase difference
4383 phDiff = numpy.abs(phase1[inext] - phase1[inow])
4384
4385 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
4386 sizeTrail = inow - initMet + 1
4387 if sizeTrail>3*sec: # keep only trails longer than 3*sec (discard short meteors)
4388 x = numpy.arange(initMet,inow+1)*ippSeconds
4389 y = phase1[initMet:inow+1]
4390 ynnan = ~numpy.isnan(y)
4391 x = x[ynnan]
4392 y = y[ynnan]
4393 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
4394 ylin = x*slope + intercept
4395 rsq = r_value**2
4396 if rsq > 0.5:
4397 vel = slope#*height*1000/(k*d)
4398 estAux = numpy.array([utctime,p,height, vel, rsq])
4399 meteorList.append(estAux)
4400 initMet = inext
4401 metArray2 = numpy.array(meteorList)
4402
4403 return metArray2
4404
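# Note: __getPhaseSlope above unwraps each meteor-trail phase, keeps trails longer
# than 3*sec samples and accepts a trail only when a linear fit of phase vs. time
# has r**2 > 0.5; the slope is converted to a radial velocity afterwards
# (v = slope*height*1000/(k*d)). Illustrative fit, assuming x in seconds and y in radians:
#
#   from scipy import stats
#   slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
#   if r_value**2 > 0.5:
#       vel = slope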
4405 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
4406
4407 azimuth1 = numpy.zeros(len(pairslist))
4408 dist = numpy.zeros(len(pairslist))
4409
4410 for i in range(len(rx_location)):
4411 ch0 = pairslist[i][0]
4412 ch1 = pairslist[i][1]
4413
4414 diffX = rx_location[ch0][0] - rx_location[ch1][0]
4415 diffY = rx_location[ch0][1] - rx_location[ch1][1]
4416 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
4417 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
4418
4419 azimuth1 -= azimuth0
4420 return azimuth1, dist
4421
4422 def techniqueNSM_DBS(self, **kwargs):
4423 metArray = kwargs['metArray']
4424 heightList = kwargs['heightList']
4425 timeList = kwargs['timeList']
4426 azimuth = kwargs['azimuth']
4427 theta_x = numpy.array(kwargs['theta_x'])
4428 theta_y = numpy.array(kwargs['theta_y'])
4429
4430 utctime = metArray[:,0]
4431 cmet = metArray[:,1].astype(int)
4432 hmet = metArray[:,3].astype(int)
4433 SNRmet = metArray[:,4]
4434 vmet = metArray[:,5]
4435 spcmet = metArray[:,6]
4436
4437 nChan = numpy.max(cmet) + 1
4438 nHeights = len(heightList)
4439
4440 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4441 hmet = heightList[hmet]
4442 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
4443
4444 velEst = numpy.zeros((heightList.size,2))*numpy.nan
4445
4446 for i in range(nHeights - 1):
4447 hmin = heightList[i]
4448 hmax = heightList[i + 1]
4449
4450 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
4451 indthisH = numpy.where(thisH)
4452
4453 if numpy.size(indthisH) > 3:
4454
4455 vel_aux = vmet[thisH]
4456 chan_aux = cmet[thisH]
4457 cosu_aux = dir_cosu[chan_aux]
4458 cosv_aux = dir_cosv[chan_aux]
4459 cosw_aux = dir_cosw[chan_aux]
4460
4461 nch = numpy.size(numpy.unique(chan_aux))
4462 if nch > 1:
4463 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
4464 velEst[i,:] = numpy.dot(A,vel_aux)
4465
4466 return velEst
4467
4468 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
4469
4470 param = dataOut.data_param
4471 #if dataOut.abscissaList != None:
4472 if numpy.any(dataOut.abscissaList):
4473 absc = dataOut.abscissaList[:-1]
4474 # noise = dataOut.noise
4475 heightList = dataOut.heightList
4476 SNR = dataOut.data_snr
4477
4478 if technique == 'DBS':
4479
4480 kwargs['velRadial'] = param[:,1,:] #Radial velocity
4481 kwargs['heightList'] = heightList
4482 kwargs['SNR'] = SNR
4483
4484 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
4485 dataOut.utctimeInit = dataOut.utctime
4486 dataOut.outputInterval = dataOut.paramInterval
4487
4488 elif technique == 'SA':
4489
4490 #Parameters
4491 # position_x = kwargs['positionX']
4492 # position_y = kwargs['positionY']
4493 # azimuth = kwargs['azimuth']
4494 #
4495 # if kwargs.has_key('crosspairsList'):
4496 # pairs = kwargs['crosspairsList']
4497 # else:
4498 # pairs = None
4499 #
4500 # if kwargs.has_key('correctFactor'):
4501 # correctFactor = kwargs['correctFactor']
4502 # else:
4503 # correctFactor = 1
4504
4505 # tau = dataOut.data_param
4506 # _lambda = dataOut.C/dataOut.frequency
4507 # pairsList = dataOut.groupList
4508 # nChannels = dataOut.nChannels
4509
4510 kwargs['groupList'] = dataOut.groupList
4511 kwargs['tau'] = dataOut.data_param
4512 kwargs['_lambda'] = dataOut.C/dataOut.frequency
4513 # dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor)
4514 dataOut.data_output = self.techniqueSA(kwargs)
4515 dataOut.utctimeInit = dataOut.utctime
4516 dataOut.outputInterval = dataOut.timeInterval
4517
4518 elif technique == 'Meteors':
4519 dataOut.flagNoData = True
4520 self.__dataReady = False
4521
4522 if 'nHours' in kwargs:
4523 nHours = kwargs['nHours']
4524 else:
4525 nHours = 1
4526
4527 if 'meteorsPerBin' in kwargs:
4528 meteorThresh = kwargs['meteorsPerBin']
4529 else:
4530 meteorThresh = 6
4531
4532 if 'hmin' in kwargs:
4533 hmin = kwargs['hmin']
4534 else: hmin = 70
4535 if 'hmax' in kwargs:
4536 hmax = kwargs['hmax']
4537 else: hmax = 110
4538
4539 dataOut.outputInterval = nHours*3600
4540
4541 if self.__isConfig == False:
4542 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
4543 #Get Initial LTC time
4544 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
4545 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
4546
4547 self.__isConfig = True
4548
4549 if self.__buffer is None:
4550 self.__buffer = dataOut.data_param
4551 self.__firstdata = copy.copy(dataOut)
4552
4553 else:
4554 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
4555
4556 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
4557
4558 if self.__dataReady:
4559 dataOut.utctimeInit = self.__initime
4560
4561 self.__initime += dataOut.outputInterval #to erase time offset
4562
4563 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
4564 dataOut.flagNoData = False
4565 self.__buffer = None
4566
4567 elif technique == 'Meteors1':
4568 dataOut.flagNoData = True
4569 self.__dataReady = False
4570
4571 if 'nMins' in kwargs:
4572 nMins = kwargs['nMins']
4573 else: nMins = 20
4574 if 'rx_location' in kwargs:
4575 rx_location = kwargs['rx_location']
4576 else: rx_location = [(0,1),(1,1),(1,0)]
4577 if 'azimuth' in kwargs:
4578 azimuth = kwargs['azimuth']
4579 else: azimuth = 51.06
4580 if 'dfactor' in kwargs:
4581 dfactor = kwargs['dfactor']
4582 if 'mode' in kwargs:
4583 mode = kwargs['mode']
4584 else: mode = 'SA'
4585 if 'theta_x' in kwargs:
4586 theta_x = kwargs['theta_x']
4587 if 'theta_y' in kwargs:
4588 theta_y = kwargs['theta_y']
4589
4590 #TODO: remove this later
4591 if dataOut.groupList is None:
4592 dataOut.groupList = [(0,1),(0,2),(1,2)]
4593 groupList = dataOut.groupList
4594 C = 3e8
4595 freq = 50e6
4596 lamb = C/freq
4597 k = 2*numpy.pi/lamb
4598
4599 timeList = dataOut.abscissaList
4600 heightList = dataOut.heightList
4601
4602 if self.__isConfig == False:
4603 dataOut.outputInterval = nMins*60
4604 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
4605 #Get Initial LTC time
4606 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
4607 minuteAux = initime.minute
4608 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
4609 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
4610
4611 self.__isConfig = True
4612
4613 if self.__buffer is None:
4614 self.__buffer = dataOut.data_param
4615 self.__firstdata = copy.copy(dataOut)
4616
4617 else:
4618 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
4619
4620 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
4621
4622 if self.__dataReady:
4623 dataOut.utctimeInit = self.__initime
4624 self.__initime += dataOut.outputInterval #to erase time offset
4625
4626 metArray = self.__buffer
4627 if mode == 'SA':
4628 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
4629 elif mode == 'DBS':
4630 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
4631 dataOut.data_output = dataOut.data_output.T
4632 dataOut.flagNoData = False
4633 self.__buffer = None
4634
4635 return
4636
4637 class WindProfiler(Operation):
4638
4639 __isConfig = False
4640
4641 __initime = None
4642 __lastdatatime = None
4643 __integrationtime = None
4644
4645 __buffer = None
4646
4647 __dataReady = False
4648
4649 __firstdata = None
4650
4651 n = None
4652
4653 def __init__(self):
4654 Operation.__init__(self)
4655
4656 def __calculateCosDir(self, elev, azim):
4657 zen = (90 - elev)*numpy.pi/180
4658 azim = azim*numpy.pi/180
4659 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
4660 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
4661
4662 signX = numpy.sign(numpy.cos(azim))
4663 signY = numpy.sign(numpy.sin(azim))
4664
4665 cosDirX = numpy.copysign(cosDirX, signX)
4666 cosDirY = numpy.copysign(cosDirY, signY)
4667 return cosDirX, cosDirY
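# Added note (hedged): __calculateCosDir converts a beam's (elevation, azimuth) into
# the x/y direction cosines used by the DBS geometry. With zen = 90 - elev,
#   cosDirX = sqrt((1-cos(zen)**2)/(1+tan(azim)**2)) = |sin(zen)*cos(azim)|
#   cosDirY = sqrt(1-cos(zen)**2-cosDirX**2)         = |sin(zen)*sin(azim)|
# and copysign with cos(azim)/sin(azim) restores the correct quadrant.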
4668
4669 def __calculateAngles(self, theta_x, theta_y, azimuth):
4670
4671 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
4672 zenith_arr = numpy.arccos(dir_cosw)
4673 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
4674
4675 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
4676 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
4677
4678 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
4679
4680 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
4681
4682 if horOnly:
4683 A = numpy.c_[dir_cosu,dir_cosv]
4684 else:
4685 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
4686 A = numpy.asmatrix(A)
4687 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
4688
4689 return A1
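# Added note (hedged): A1 = (A^T A)^-1 A^T is the least-squares inverse of the
# direction-cosine matrix, so for radial velocities vr = A . [u, v(, w)] the wind
# estimate is [u, v(, w)] = A1 . vr. A minimal numeric sketch (values illustrative only):
#   >>> import numpy
#   >>> A = numpy.c_[[0.5, -0.5, 0.0], [0.0, 0.0, 0.5]]    # columns: dir_cosu, dir_cosv
#   >>> A1 = numpy.linalg.inv(A.T @ A) @ A.T
#   >>> numpy.allclose(A1 @ (A @ numpy.array([10., 5.])), [10., 5.])
#   True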
4690
4691 def __correctValues(self, heiRang, phi, velRadial, SNR):
4692 listPhi = phi.tolist()
4693 maxid = listPhi.index(max(listPhi))
4694 minid = listPhi.index(min(listPhi))
4695
4696 rango = list(range(len(phi)))
4697 # rango = numpy.delete(rango,maxid)
4698
4699 heiRang1 = heiRang*math.cos(phi[maxid])
4700 heiRangAux = heiRang*math.cos(phi[minid])
4701 indOut = (heiRang1 < heiRangAux[0]).nonzero()
4702 heiRang1 = numpy.delete(heiRang1,indOut)
4703
4704 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
4705 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
4706
4707 for i in rango:
4708 x = heiRang*math.cos(phi[i])
4709 y1 = velRadial[i,:]
4710 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
4711
4712 x1 = heiRang1
4713 y11 = f1(x1)
4714
4715 y2 = SNR[i,:]
4716 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
4717 y21 = f2(x1)
4718
4719 velRadial1[i,:] = y11
4720 SNR1[i,:] = y21
4721
4722 return heiRang1, velRadial1, SNR1
4723
4724 def __calculateVelUVW(self, A, velRadial):
4725
4726 #Matrix operation
4727 # velUVW = numpy.zeros((velRadial.shape[1],3))
4728 # for ind in range(velRadial.shape[1]):
4729 # velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
4730 # velUVW = velUVW.transpose()
4731 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
4732 velUVW[:,:] = numpy.dot(A,velRadial)
4733
4734
4735 return velUVW
4736
4737 # def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
4738
4739 def techniqueDBS(self, kwargs):
4740 """
4741 Function that implements Doppler Beam Swinging (DBS) technique.
4742
4743 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
4744 Direction correction (if necessary), Ranges and SNR
4745
4746 Output: Winds estimation (Zonal, Meridional and Vertical)
4747
4748 Parameters affected: Winds, height range, SNR
4749 """
4750 velRadial0 = kwargs['velRadial']
4751 heiRang = kwargs['heightList']
4752 SNR0 = kwargs['SNR']
4753
4754 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
4755 theta_x = numpy.array(kwargs['dirCosx'])
4756 theta_y = numpy.array(kwargs['dirCosy'])
4757 else:
4758 elev = numpy.array(kwargs['elevation'])
4759 azim = numpy.array(kwargs['azimuth'])
4760 theta_x, theta_y = self.__calculateCosDir(elev, azim)
4761 azimuth = kwargs['correctAzimuth']
4762 if 'horizontalOnly' in kwargs:
4763 horizontalOnly = kwargs['horizontalOnly']
4764 else: horizontalOnly = False
4765 if 'correctFactor' in kwargs:
4766 correctFactor = kwargs['correctFactor']
4767 else: correctFactor = 1
4768 if 'channelList' in kwargs:
4769 channelList = kwargs['channelList']
4770 if len(channelList) == 2:
4771 horizontalOnly = True
4772 arrayChannel = numpy.array(channelList)
4773 velRadial0 = velRadial0[arrayChannel,:,:] #select the requested channels (param is not defined here)
4774 theta_x = theta_x[arrayChannel]
4775 theta_y = theta_y[arrayChannel]
4776
4777 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4778 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
4779 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
4780
4781 #Compute the velocity components with DBS
4782 winds = self.__calculateVelUVW(A,velRadial1)
4783
4784 return winds, heiRang1, SNR1
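# Added usage sketch (hedged; shapes and pointing values are hypothetical, they only
# illustrate the kwargs-dict interface that run() builds for this method):
#   >>> wp = WindProfiler()
#   >>> winds, heights, snr = wp.techniqueDBS({
#   ...     'velRadial': velRadial,        # (nBeams, nHeights) radial velocities
#   ...     'heightList': heightList,      # (nHeights,) ranges
#   ...     'SNR': SNR,                    # (nBeams, nHeights)
#   ...     'elevation': [45., 45., 90.],  # hypothetical per-beam pointing (degrees)
#   ...     'azimuth': [0., 90., 0.],
#   ...     'correctAzimuth': 0.})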
4785
4786 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
4787
4788 nPairs = len(pairs_ccf)
4789 posx = numpy.asarray(posx)
4790 posy = numpy.asarray(posy)
4791
4792 #Inverse rotation to align with the azimuth
4793 if azimuth is not None:
4794 azimuth = azimuth*math.pi/180
4795 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
4796 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
4797 else:
4798 posx1 = posx
4799 posy1 = posy
4800
4801 #Distance calculation
4802 distx = numpy.zeros(nPairs)
4803 disty = numpy.zeros(nPairs)
4804 dist = numpy.zeros(nPairs)
4805 ang = numpy.zeros(nPairs)
4806
4807 for i in range(nPairs):
4808 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
4809 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
4810 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
4811 ang[i] = numpy.arctan2(disty[i],distx[i])
4812
4813 return distx, disty, dist, ang
4814 #Matrix calculation
4815 # nPairs = len(pairs)
4816 # ang1 = numpy.zeros((nPairs, 2, 1))
4817 # dist1 = numpy.zeros((nPairs, 2, 1))
4818 #
4819 # for j in range(nPairs):
4820 # dist1[j,0,0] = dist[pairs[j][0]]
4821 # dist1[j,1,0] = dist[pairs[j][1]]
4822 # ang1[j,0,0] = ang[pairs[j][0]]
4823 # ang1[j,1,0] = ang[pairs[j][1]]
4824 #
4825 # return distx,disty, dist1,ang1
4826
4827
4828 def __calculateVelVer(self, phase, lagTRange, _lambda):
4829
4830 Ts = lagTRange[1] - lagTRange[0]
4831 velW = -_lambda*phase/(4*math.pi*Ts)
4832
4833 return velW
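# Added note: the vertical velocity follows from the Doppler relation applied to the
# lag-1 phase, velW = -lambda*phase/(4*pi*Ts), with Ts the lag sampling period
# taken from lagTRange.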
4834
4835 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
4836 nPairs = tau1.shape[0]
4837 nHeights = tau1.shape[1]
4838 vel = numpy.zeros((nPairs,3,nHeights))
4839 dist1 = numpy.reshape(dist, (dist.size,1))
4840
4841 angCos = numpy.cos(ang)
4842 angSin = numpy.sin(ang)
4843
4844 vel0 = dist1*tau1/(2*tau2**2)
4845 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
4846 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
4847
4848 ind = numpy.where(numpy.isinf(vel))
4849 vel[ind] = numpy.nan
4850
4851 return vel
4852
4853 # def __getPairsAutoCorr(self, pairsList, nChannels):
4854 #
4855 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
4856 #
4857 # for l in range(len(pairsList)):
4858 # firstChannel = pairsList[l][0]
4859 # secondChannel = pairsList[l][1]
4860 #
4861 # #Get autocorrelation pairs
4862 # if firstChannel == secondChannel:
4863 # pairsAutoCorr[firstChannel] = int(l)
4864 #
4865 # pairsAutoCorr = pairsAutoCorr.astype(int)
4866 #
4867 # pairsCrossCorr = range(len(pairsList))
4868 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
4869 #
4870 # return pairsAutoCorr, pairsCrossCorr
4871
4872 # def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
4873 def techniqueSA(self, kwargs):
4874
4875 """
4876 Function that implements Spaced Antenna (SA) technique.
4877
4878 Input: Antenna positions (positionX, positionY), antenna azimuth, cross-correlation
4879 pairs (groupList), correlation lag parameters (tau), wavelength and an optional correction factor
4880
4881 Output: Winds estimation (Zonal, Meridional and Vertical)
4882
4883 Parameters affected: Winds
4884 """
4885 position_x = kwargs['positionX']
4886 position_y = kwargs['positionY']
4887 azimuth = kwargs['azimuth']
4888
4889 if 'correctFactor' in kwargs:
4890 correctFactor = kwargs['correctFactor']
4891 else:
4892 correctFactor = 1
4893
4894 groupList = kwargs['groupList']
4895 pairs_ccf = groupList[1]
4896 tau = kwargs['tau']
4897 _lambda = kwargs['_lambda']
4898
4899 #Cross Correlation pairs obtained
4900 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels)
4901 # pairsArray = numpy.array(pairsList)[pairsCrossCorr]
4902 # pairsSelArray = numpy.array(pairsSelected)
4903 # pairs = []
4904 #
4905 # #Wind estimation pairs obtained
4906 # for i in range(pairsSelArray.shape[0]/2):
4907 # ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
4908 # ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
4909 # pairs.append((ind1,ind2))
4910
4911 indtau = tau.shape[0]//2 #integer division, used as a slice index
4912 tau1 = tau[:indtau,:]
4913 tau2 = tau[indtau:-1,:]
4914 # tau1 = tau1[pairs,:]
4915 # tau2 = tau2[pairs,:]
4916 phase1 = tau[-1,:]
4917
4918 #---------------------------------------------------------------------
4919 #Direct method
4920 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
4921 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
4922 winds = numpy.nanmean(winds, axis=0) #scipy.stats.nanmean was removed in recent SciPy versions
4923 #---------------------------------------------------------------------
4924 #General method
4925 # distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
4926 # #Compute the correlation-function coefficients
4927 # F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
4928 # #Compute the velocities
4929 # winds = self.calculateVelUV(F,G,A,B,H)
4930
4931 #---------------------------------------------------------------------
4932 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
4933 winds = correctFactor*winds
4934 return winds
4935
4936 def __checkTime(self, currentTime, paramInterval, outputInterval):
4937
4938 dataTime = currentTime + paramInterval
4939 deltaTime = dataTime - self.__initime
4940
4941 if deltaTime >= outputInterval or deltaTime < 0:
4942 self.__dataReady = True
4943 return
4944
4945 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
4946 '''
4947 Function that implements winds estimation technique with detected meteors.
4948
4949 Input: Detected meteors, minimum meteor count required for wind estimation
4950
4951 Output: Winds estimation (Zonal and Meridional)
4952
4953 Parameters affected: Winds
4954 '''
4955 #Settings
4956 nInt = (heightMax - heightMin)/2
4957 nInt = int(nInt)
4958 winds = numpy.zeros((2,nInt))*numpy.nan
4959
4960 #Filter errors
4961 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
4962 finalMeteor = arrayMeteor[error,:]
4963
4964 #Meteor Histogram
4965 finalHeights = finalMeteor[:,2]
4966 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
4967 nMeteorsPerI = hist[0]
4968 heightPerI = hist[1]
4969
4970 #Sort of meteors
4971 indSort = finalHeights.argsort()
4972 finalMeteor2 = finalMeteor[indSort,:]
4973
4974 # Calculating winds
4975 ind1 = 0
4976 ind2 = 0
4977
4978 for i in range(nInt):
4979 nMet = nMeteorsPerI[i]
4980 ind1 = ind2
4981 ind2 = ind1 + nMet
4982
4983 meteorAux = finalMeteor2[ind1:ind2,:]
4984
4985 if meteorAux.shape[0] >= meteorThresh:
4986 vel = meteorAux[:, 6]
4987 zen = meteorAux[:, 4]*numpy.pi/180
4988 azim = meteorAux[:, 3]*numpy.pi/180
4989
4990 n = numpy.cos(zen)
4991 # m = (1 - n**2)/(1 - numpy.tan(azim)**2)
4992 # l = m*numpy.tan(azim)
4993 l = numpy.sin(zen)*numpy.sin(azim)
4994 m = numpy.sin(zen)*numpy.cos(azim)
4995
4996 A = numpy.vstack((l, m)).transpose()
4997 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
4998 windsAux = numpy.dot(A1, vel)
4999
5000 winds[0,i] = windsAux[0]
5001 winds[1,i] = windsAux[1]
5002
5003 return winds, heightPerI[:-1]
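# Added note (hedged): each meteor contributes one radial-velocity equation
#   vel_radial = x0*l + x1*m, with l = sin(zen)*sin(azim) and m = sin(zen)*cos(azim),
# so per height bin the two wind components winds[0], winds[1] are the least-squares
# solution [x0, x1] = (A^T A)^-1 A^T vel, with A = [l | m] stacked over the meteors,
# computed only when at least meteorThresh meteors fall in the bin.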
5004
5005 def techniqueNSM_SA(self, **kwargs):
5006 metArray = kwargs['metArray']
5007 heightList = kwargs['heightList']
5008 timeList = kwargs['timeList']
5009
5010 rx_location = kwargs['rx_location']
5011 groupList = kwargs['groupList']
5012 azimuth = kwargs['azimuth']
5013 dfactor = kwargs['dfactor']
5014 k = kwargs['k']
5015
5016 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
5017 d = dist*dfactor
5018 #Phase calculation
5019 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
5020
5021 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
5022
5023 velEst = numpy.zeros((heightList.size,2))*numpy.nan
5024 azimuth1 = azimuth1*numpy.pi/180
5025
5026 for i in range(heightList.size):
5027 h = heightList[i]
5028 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
5029 metHeight = metArray1[indH,:]
5030 if metHeight.shape[0] >= 2:
5031 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
5032 iazim = metHeight[:,1].astype(int)
5033 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
5034 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
5035 A = numpy.asmatrix(A)
5036 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
5037 velHor = numpy.dot(A1,velAux)
5038
5039 velEst[i,:] = numpy.squeeze(velHor)
5040 return velEst
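# Added note (hedged): techniqueNSM_SA turns the per-pair phase slopes from
# __getPhaseSlope into radial velocities (the metArray1[:,-2] scaling above uses the
# height and the baseline length via k*d), and then per height solves
#   v_radial = v_x*cos(az1) + v_y*sin(az1)
# with the pseudo-inverse, where az1 is each pair's baseline azimuth from
# __calculateAzimuth1; at least two meteors per height are required.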
5041
5042 def __getPhaseSlope(self, metArray, heightList, timeList):
5043 meteorList = []
5044 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
5045 #Putting back together the meteor matrix
5046 utctime = metArray[:,0]
5047 uniqueTime = numpy.unique(utctime)
5048
5049 phaseDerThresh = 0.5
5050 ippSeconds = timeList[1] - timeList[0]
5051 sec = numpy.where(timeList>1)[0][0]
5052 nPairs = metArray.shape[1] - 6
5053 nHeights = len(heightList)
5054
5055 for t in uniqueTime:
5056 metArray1 = metArray[utctime==t,:]
5057 # phaseDerThresh = numpy.pi/4 #reduce phase threshold
5058 tmet = metArray1[:,1].astype(int)
5059 hmet = metArray1[:,2].astype(int)
5060
5061 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
5062 metPhase[:,:] = numpy.nan
5063 metPhase[:,hmet,tmet] = metArray1[:,6:].T
5064
5065 #Delete short trails
5066 metBool = ~numpy.isnan(metPhase[0,:,:])
5067 heightVect = numpy.sum(metBool, axis = 1)
5068 metBool[heightVect<sec,:] = False
5069 metPhase[:,heightVect<sec,:] = numpy.nan
5070
5071 #Derivative
5072 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
5073 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
5074 metPhase[phDerAux] = numpy.nan
5075
5076 #--------------------------METEOR DETECTION -----------------------------------------
5077 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
5078
5079 for p in numpy.arange(nPairs):
5080 phase = metPhase[p,:,:]
5081 phDer = metDer[p,:,:]
5082
5083 for h in indMet:
5084 height = heightList[h]
5085 phase1 = phase[h,:] #82
5086 phDer1 = phDer[h,:]
5087
5088 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
5089
5090 indValid = numpy.where(~numpy.isnan(phase1))[0]
5091 initMet = indValid[0]
5092 endMet = 0
5093
5094 for i in range(len(indValid)-1):
5095
5096 #Time difference
5097 inow = indValid[i]
5098 inext = indValid[i+1]
5099 idiff = inext - inow
5100 #Phase difference
5101 phDiff = numpy.abs(phase1[inext] - phase1[inow])
5102
5103 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
5104 sizeTrail = inow - initMet + 1
5105 if sizeTrail>3*sec: #Skip too-short meteor trails
5106 x = numpy.arange(initMet,inow+1)*ippSeconds
5107 y = phase1[initMet:inow+1]
5108 ynnan = ~numpy.isnan(y)
5109 x = x[ynnan]
5110 y = y[ynnan]
5111 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
5112 ylin = x*slope + intercept
5113 rsq = r_value**2
5114 if rsq > 0.5:
5115 vel = slope#*height*1000/(k*d)
5116 estAux = numpy.array([t, p, height, vel, rsq]) #use the trail's time t, not the full utctime array
5117 meteorList.append(estAux)
5118 initMet = inext
5119 metArray2 = numpy.array(meteorList)
5120
5121 return metArray2
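# Added note (hedged): __getPhaseSlope regrids the per-pair meteor phases onto a
# (pair, height, time) array, unwraps each trail, splits it wherever the time gap
# exceeds sec samples or the phase jump exceeds pi/4, and keeps trails longer than
# 3*sec samples whose linear phase fit has r^2 > 0.5; the fitted slope (rad/s) is
# what techniqueNSM_SA later converts to a radial velocity.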
5122
5123 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
5124
5125 azimuth1 = numpy.zeros(len(pairslist))
5126 dist = numpy.zeros(len(pairslist))
5127
5128 for i in range(len(rx_location)):
5129 ch0 = pairslist[i][0]
5130 ch1 = pairslist[i][1]
5131
5132 diffX = rx_location[ch0][0] - rx_location[ch1][0]
5133 diffY = rx_location[ch0][1] - rx_location[ch1][1]
5134 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
5135 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
5136
5137 azimuth1 -= azimuth0
5138 return azimuth1, dist
5139
5140 def techniqueNSM_DBS(self, **kwargs):
5141 metArray = kwargs['metArray']
5142 heightList = kwargs['heightList']
5143 timeList = kwargs['timeList']
5144 azimuth = kwargs['azimuth']
5145 theta_x = numpy.array(kwargs['theta_x'])
5146 theta_y = numpy.array(kwargs['theta_y'])
5147
5148 utctime = metArray[:,0]
5149 cmet = metArray[:,1].astype(int)
5150 hmet = metArray[:,3].astype(int)
5151 SNRmet = metArray[:,4]
5152 vmet = metArray[:,5]
5153 spcmet = metArray[:,6]
5154
5155 nChan = numpy.max(cmet) + 1
5156 nHeights = len(heightList)
5157
5158 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
5159 hmet = heightList[hmet]
5160 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
5161
5162 velEst = numpy.zeros((heightList.size,2))*numpy.nan
5163
5164 for i in range(nHeights - 1):
5165 hmin = heightList[i]
5166 hmax = heightList[i + 1]
5167
5168 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
5169 indthisH = numpy.where(thisH)
5170
5171 if numpy.size(indthisH) > 3:
5172
5173 vel_aux = vmet[thisH]
5174 chan_aux = cmet[thisH]
5175 cosu_aux = dir_cosu[chan_aux]
5176 cosv_aux = dir_cosv[chan_aux]
5177 cosw_aux = dir_cosw[chan_aux]
5178
5179 nch = numpy.size(numpy.unique(chan_aux))
5180 if nch > 1:
5181 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
5182 velEst[i,:] = numpy.dot(A,vel_aux)
5183
5184 return velEst
5185
5186 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
5187
5188 param = dataOut.moments
5189 #param = dataOut.data_param
5190 #if dataOut.abscissaList != None:
5191 if numpy.any(dataOut.abscissaList):
5192 absc = dataOut.abscissaList[:-1]
5193 # noise = dataOut.noise
5194 heightList = dataOut.heightList
5195 SNR = dataOut.data_snr
5196
5197 if technique == 'DBS':
5198
5199 kwargs['velRadial'] = param[:,1,:] #Radial velocity
5200 kwargs['heightList'] = heightList
5201 kwargs['SNR'] = SNR
5202
5203 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
5204 dataOut.utctimeInit = dataOut.utctime
5205 dataOut.outputInterval = dataOut.paramInterval
5206
5207 elif technique == 'SA':
5208
5209 #Parameters
5210 # position_x = kwargs['positionX']
5211 # position_y = kwargs['positionY']
5212 # azimuth = kwargs['azimuth']
5213 #
5214 # if kwargs.has_key('crosspairsList'):
5215 # pairs = kwargs['crosspairsList']
5216 # else:
5217 # pairs = None
5218 #
5219 # if kwargs.has_key('correctFactor'):
5220 # correctFactor = kwargs['correctFactor']
5221 # else:
5222 # correctFactor = 1
5223
5224 # tau = dataOut.data_param
5225 # _lambda = dataOut.C/dataOut.frequency
5226 # pairsList = dataOut.groupList
5227 # nChannels = dataOut.nChannels
5228
5229 kwargs['groupList'] = dataOut.groupList
5230 kwargs['tau'] = dataOut.data_param
5231 kwargs['_lambda'] = dataOut.C/dataOut.frequency
5232 # dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor)
5233 dataOut.data_output = self.techniqueSA(kwargs)
5234 dataOut.utctimeInit = dataOut.utctime
5235 dataOut.outputInterval = dataOut.timeInterval
5236
5237 elif technique == 'Meteors':
5238 dataOut.flagNoData = True
5239 self.__dataReady = False
5240
5241 if 'nHours' in kwargs:
5242 nHours = kwargs['nHours']
5243 else:
5244 nHours = 1
5245
5246 if 'meteorsPerBin' in kwargs:
5247 meteorThresh = kwargs['meteorsPerBin']
5248 else:
5249 meteorThresh = 6
5250
5251 if 'hmin' in kwargs:
5252 hmin = kwargs['hmin']
5253 else: hmin = 70
5254 if 'hmax' in kwargs:
5255 hmax = kwargs['hmax']
5256 else: hmax = 110
5257
5258 dataOut.outputInterval = nHours*3600
5259
5260 if self.__isConfig == False:
5261 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
5262 #Get Initial LTC time
5263 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5264 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5265
5266 self.__isConfig = True
5267
5268 if self.__buffer is None:
5269 self.__buffer = dataOut.data_param
5270 self.__firstdata = copy.copy(dataOut)
5271
5272 else:
5273 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5274
5275 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5276
5277 if self.__dataReady:
5278 dataOut.utctimeInit = self.__initime
5279
5280 self.__initime += dataOut.outputInterval #to erase time offset
5281
5282 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
5283 dataOut.flagNoData = False
5284 self.__buffer = None
5285
5286 elif technique == 'Meteors1':
5287 dataOut.flagNoData = True
5288 self.__dataReady = False
5289
5290 if 'nMins' in kwargs:
5291 nMins = kwargs['nMins']
5292 else: nMins = 20
5293 if 'rx_location' in kwargs:
5294 rx_location = kwargs['rx_location']
5295 else: rx_location = [(0,1),(1,1),(1,0)]
5296 if 'azimuth' in kwargs:
5297 azimuth = kwargs['azimuth']
5298 else: azimuth = 51.06
5299 if 'dfactor' in kwargs:
5300 dfactor = kwargs['dfactor']
5301 if 'mode' in kwargs:
5302 mode = kwargs['mode']
5303 else: mode = 'SA'
5304 if 'theta_x' in kwargs:
5305 theta_x = kwargs['theta_x']
5306 if 'theta_y' in kwargs:
5307 theta_y = kwargs['theta_y']
5308
5309 #TODO: remove this later
5310 if dataOut.groupList is None:
5311 dataOut.groupList = [(0,1),(0,2),(1,2)]
5312 groupList = dataOut.groupList
5313 C = 3e8
5314 freq = 50e6
5315 lamb = C/freq
5316 k = 2*numpy.pi/lamb
5317
5318 timeList = dataOut.abscissaList
5319 heightList = dataOut.heightList
5320
5321 if self.__isConfig == False:
5322 dataOut.outputInterval = nMins*60
5323 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
5324 #Get Initial LTC time
5325 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5326 minuteAux = initime.minute
5327 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
5328 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5329
5330 self.__isConfig = True
5331
5332 if self.__buffer is None:
5333 self.__buffer = dataOut.data_param
5334 self.__firstdata = copy.copy(dataOut)
5335
5336 else:
5337 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5338
5339 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5340
5341 if self.__dataReady:
5342 dataOut.utctimeInit = self.__initime
5343 self.__initime += dataOut.outputInterval #to erase time offset
5344
5345 metArray = self.__buffer
5346 if mode == 'SA':
5347 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
5348 elif mode == 'DBS':
5349 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
5350 dataOut.data_output = dataOut.data_output.T
5351 dataOut.flagNoData = False
5352 self.__buffer = None
5353 #print("ENDDD")
5354 return dataOut
5355
5356 class EWDriftsEstimation(Operation):
5357
5358 def __init__(self):
5359 Operation.__init__(self)
5360
5361 def __correctValues(self, heiRang, phi, velRadial, SNR):
5362 listPhi = phi.tolist()
5363 maxid = listPhi.index(max(listPhi))
5364 minid = listPhi.index(min(listPhi))
5365
5366 rango = list(range(len(phi)))
5367 # rango = numpy.delete(rango,maxid)
5368
5369 heiRang1 = heiRang*math.cos(phi[maxid])
5370 heiRangAux = heiRang*math.cos(phi[minid])
5371 indOut = (heiRang1 < heiRangAux[0]).nonzero()
5372 heiRang1 = numpy.delete(heiRang1,indOut)
5373
5374 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
5375 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
5376
5377 for i in rango:
5378 x = heiRang*math.cos(phi[i])
5379 y1 = velRadial[i,:]
5380 vali = numpy.isfinite(y1).nonzero()
5381 y1 = y1[vali]
5382 x = x[vali]
5383 f1 = interpolate.interp1d(x,y1,kind = 'cubic',bounds_error=False)
5384
5385 #heiRang1 = x*math.cos(phi[maxid])
5386 x1 = heiRang1
5387 y11 = f1(x1)
5388
5389 y2 = SNR[i,:]
5390 #print 'snr ', y2
5391 x = heiRang*math.cos(phi[i])
5392 vali = (y2 != -1).nonzero()
5393 y2 = y2[vali]
5394 x = x[vali]
5395 #print 'snr ',y2
5396 f2 = interpolate.interp1d(x,y2,kind = 'cubic',bounds_error=False)
5397 y21 = f2(x1)
5398
5399 velRadial1[i,:] = y11
5400 SNR1[i,:] = y21
5401
5402 return heiRang1, velRadial1, SNR1
5403
5404
5405
5406 def run(self, dataOut, zenith, zenithCorrection):
5407
5408 heiRang = dataOut.heightList
5409 velRadial = dataOut.data_param[:,3,:]
5410 velRadialm = dataOut.data_param[:,2:4,:]*-1
5411
5412 rbufc=dataOut.data_paramC[:,:,0]
5413 ebufc=dataOut.data_paramC[:,:,1]
5414 SNR = dataOut.data_snr
5415 velRerr = dataOut.data_error[:,4,:]
5416 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]],[velRadialm[1,:]],[velRadialm[1,:]]))
5417 dataOut.moments=moments
5418 # Coherent
5419 smooth_wC = ebufc[0,:]
5420 p_w0C = rbufc[0,:]
5421 p_w1C = rbufc[1,:]
5422 w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1)
5423 t_wC = rbufc[3,:]
5424 my_nbeams = 2
5425
5426 zenith = numpy.array(zenith)
5427 zenith -= zenithCorrection
5428 zenith *= numpy.pi/180
5429 if zenithCorrection != 0 :
5430 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
5431 else :
5432 heiRang1 = heiRang
5433 velRadial1 = velRadial
5434 SNR1 = SNR
5435
5436 alp = zenith[0]
5437 bet = zenith[1]
5438
5439 w_w = velRadial1[0,:]
5440 w_e = velRadial1[1,:]
5441 w_w_err = velRerr[0,:]
5442 w_e_err = velRerr[1,:]
5443
5444 val = (numpy.isfinite(w_w)==False).nonzero()
5445 val = val[0]
5446 bad = val
5447 if len(bad) > 0 :
5448 w_w[bad] = w_wC[bad]
5449 w_w_err[bad]= numpy.nan
5450 if my_nbeams == 2:
5451 smooth_eC=ebufc[4,:]
5452 p_e0C = rbufc[4,:]
5453 p_e1C = rbufc[5,:]
5454 w_eC = rbufc[6,:]*-1
5455 t_eC = rbufc[7,:]
5456 val = (numpy.isfinite(w_e)==False).nonzero()
5457 val = val[0]
5458 bad = val
5459 if len(bad) > 0 :
5460 w_e[bad] = w_eC[bad]
5461 w_e_err[bad]= numpy.nan
5462
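# Added note (hedged): each beam measures a projection of the vertical and zonal
# winds, w_w ~ w*cos(alp) + u*sin(alp) and w_e ~ w*cos(bet) + u*sin(bet), so the
# closed-form expressions below are the 2x2 inversion of that system; the error
# lines propagate the per-beam uncertainties through the same linear combination.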
5463 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
5464 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
5465
5466 w_err = numpy.sqrt((w_w_err*numpy.sin(bet))**2.+(w_e_err*numpy.sin(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
5467 u_err = numpy.sqrt((w_w_err*numpy.cos(bet))**2.+(w_e_err*numpy.cos(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
5468
5469 winds = numpy.vstack((w,u))
5470
5471 dataOut.heightList = heiRang1
5472 dataOut.data_output = winds
5473
5474 snr1 = 10*numpy.log10(SNR1[0])
5475 dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0]))
5476 dataOut.utctimeInit = dataOut.utctime
5477 dataOut.outputInterval = dataOut.timeInterval
5478
5479 hei_aver0 = 218
5480 jrange = 450 #900 for HA drifts
5481 deltah = 15.0 #dataOut.spacing(0)
5482 h0 = 0.0 #dataOut.first_height(0)
5483 heights = dataOut.heightList
5484 nhei = len(heights)
5485
5486 range1 = numpy.arange(nhei) * deltah + h0
5487
5488 #jhei = WHERE(range1 GE hei_aver0 , jcount)
5489 jhei = (range1 >= hei_aver0).nonzero()
5490 if len(jhei[0]) > 0 :
5491 h0_index = jhei[0][0] # Initial height for getting averages 218km
5492
5493 mynhei = 7
5494 nhei_avg = int(jrange/deltah)
5495 h_avgs = int(nhei_avg/mynhei)
5496 nhei_avg = h_avgs*(mynhei-1)+mynhei
5497
5498 navgs = numpy.zeros(mynhei,dtype='float')
5499 delta_h = numpy.zeros(mynhei,dtype='float')
5500 range_aver = numpy.zeros(mynhei,dtype='float')
5501 for ih in range( mynhei-1 ):
5502 range_aver[ih] = numpy.sum(range1[h0_index+h_avgs*ih:h0_index+h_avgs*(ih+1)-0])/h_avgs
5503 navgs[ih] = h_avgs
5504 delta_h[ih] = deltah*h_avgs
5505
5506 range_aver[mynhei-1] = numpy.sum(range1[h0_index:h0_index+6*h_avgs-0])/(6*h_avgs)
5507 navgs[mynhei-1] = 6*h_avgs
5508 delta_h[mynhei-1] = deltah*6*h_avgs
5509
5510 wA = w[h0_index:h0_index+nhei_avg-0]
5511 wA_err = w_err[h0_index:h0_index+nhei_avg-0]
5512
5513 for i in range(5) :
5514 vals = wA[i*h_avgs:(i+1)*h_avgs-0]
5515 errs = wA_err[i*h_avgs:(i+1)*h_avgs-0]
5516 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
5517 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5518 wA[6*h_avgs+i] = avg
5519 wA_err[6*h_avgs+i] = sigma
5520
5521
5522 vals = wA[0:6*h_avgs-0]
5523 errs=wA_err[0:6*h_avgs-0]
5524 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2)
5525 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5526 wA[nhei_avg-1] = avg
5527 wA_err[nhei_avg-1] = sigma
5528
5529 wA = wA[6*h_avgs:nhei_avg-0]
5530 wA_err=wA_err[6*h_avgs:nhei_avg-0]
5531 if my_nbeams == 2 :
5532
5533 uA = u[h0_index:h0_index+nhei_avg]
5534 uA_err=u_err[h0_index:h0_index+nhei_avg]
5535
5536 for i in range(5) :
5537 vals = uA[i*h_avgs:(i+1)*h_avgs-0]
5538 errs=uA_err[i*h_avgs:(i+1)*h_avgs-0]
5539 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
5540 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5541 uA[6*h_avgs+i] = avg
5542 uA_err[6*h_avgs+i]=sigma
5543
5544 vals = uA[0:6*h_avgs-0]
5545 errs = uA_err[0:6*h_avgs-0]
5546 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
5547 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5548 uA[nhei_avg-1] = avg
5549 uA_err[nhei_avg-1] = sigma
5550 uA = uA[6*h_avgs:nhei_avg-0]
5551 uA_err = uA_err[6*h_avgs:nhei_avg-0]
5552
5553 dataOut.drifts_avg = numpy.vstack((wA,uA))
5554
5555 tini=time.localtime(dataOut.utctime)
5556 datefile= str(tini[0]).zfill(4)+str(tini[1]).zfill(2)+str(tini[2]).zfill(2)
5557 nfile = '/home/pcondor/Database/ewdriftsschain2019/jro'+datefile+'drifts_sch3.txt'
5558
5559 f1 = open(nfile,'a')
5560
5561 datedriftavg=str(tini[0])+' '+str(tini[1])+' '+str(tini[2])+' '+str(tini[3])+' '+str(tini[4])
5562 driftavgstr=str(dataOut.drifts_avg)
5563
5564 numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i')
5565 numpy.savetxt(f1,dataOut.drifts_avg,fmt='%10.2f')
5566 f1.close()
5567
5568 return dataOut
5569
5570 #--------------- Non Specular Meteor ----------------
5571
5572 class NonSpecularMeteorDetection(Operation):
5573
5574 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
5575 data_acf = dataOut.data_pre[0]
5576 data_ccf = dataOut.data_pre[1]
5577 pairsList = dataOut.groupList[1]
5578
5579 lamb = dataOut.C/dataOut.frequency
5580 tSamp = dataOut.ippSeconds*dataOut.nCohInt
5581 paramInterval = dataOut.paramInterval
5582
5583 nChannels = data_acf.shape[0]
5584 nLags = data_acf.shape[1]
5585 nProfiles = data_acf.shape[2]
5586 nHeights = dataOut.nHeights
5587 nCohInt = dataOut.nCohInt
5588 sec = numpy.round(nProfiles/dataOut.paramInterval)
5589 heightList = dataOut.heightList
5590 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
5591 utctime = dataOut.utctime
5592
5593 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
5594
5595 #------------------------ SNR --------------------------------------
5596 power = data_acf[:,0,:,:].real
5597 noise = numpy.zeros(nChannels)
5598 SNR = numpy.zeros(power.shape)
5599 for i in range(nChannels):
5600 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
5601 SNR[i] = (power[i]-noise[i])/noise[i]
5602 SNRm = numpy.nanmean(SNR, axis = 0)
5603 SNRdB = 10*numpy.log10(SNR)
5604
5605 if mode == 'SA':
5606 dataOut.groupList = dataOut.groupList[1]
5607 nPairs = data_ccf.shape[0]
5608 #---------------------- Coherence and Phase --------------------------
5609 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
5610 # phase1 = numpy.copy(phase)
5611 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
5612
5613 for p in range(nPairs):
5614 ch0 = pairsList[p][0]
5615 ch1 = pairsList[p][1]
5616 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
5617 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
5618 # phase1[p,:,:] = numpy.angle(ccf) #median filter
5619 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
5620 # coh1[p,:,:] = numpy.abs(ccf) #median filter
5621 coh = numpy.nanmax(coh1, axis = 0)
5622 # struc = numpy.ones((5,1))
5623 # coh = ndimage.morphology.grey_dilation(coh, size=(10,1))
5624 #---------------------- Radial Velocity ----------------------------
5625 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
5626 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
5627
5628 if allData:
5629 boolMetFin = ~numpy.isnan(SNRm)
5630 # coh[:-1,:] = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
5631 else:
5632 #------------------------ Meteor mask ---------------------------------
5633 # #SNR mask
5634 # boolMet = (SNRdB>SNRthresh)#|(~numpy.isnan(SNRdB))
5635 #
5636 # #Erase small objects
5637 # boolMet1 = self.__erase_small(boolMet, 2*sec, 5)
5638 #
5639 # auxEEJ = numpy.sum(boolMet1,axis=0)
5640 # indOver = auxEEJ>nProfiles*0.8 #Use this later
5641 # indEEJ = numpy.where(indOver)[0]
5642 # indNEEJ = numpy.where(~indOver)[0]
5643 #
5644 # boolMetFin = boolMet1
5645 #
5646 # if indEEJ.size > 0:
5647 # boolMet1[:,indEEJ] = False #Erase heights with EEJ
5648 #
5649 # boolMet2 = coh > cohThresh
5650 # boolMet2 = self.__erase_small(boolMet2, 2*sec,5)
5651 #
5652 # #Final Meteor mask
5653 # boolMetFin = boolMet1|boolMet2
5654
5655 #Coherence mask
5656 boolMet1 = coh > 0.75
5657 struc = numpy.ones((30,1))
5658 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
5659
5660 #Derivative mask
5661 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
5662 boolMet2 = derPhase < 0.2
5663 # boolMet2 = ndimage.morphology.binary_opening(boolMet2)
5664 # boolMet2 = ndimage.morphology.binary_closing(boolMet2, structure = numpy.ones((10,1)))
5665 boolMet2 = ndimage.median_filter(boolMet2,size=5)
5666 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
5667 # #Final mask
5668 # boolMetFin = boolMet2
5669 boolMetFin = boolMet1&boolMet2
5670 # boolMetFin = ndimage.morphology.binary_dilation(boolMetFin)
5671 #Creating data_param
5672 coordMet = numpy.where(boolMetFin)
5673
5674 tmet = coordMet[0]
5675 hmet = coordMet[1]
5676
5677 data_param = numpy.zeros((tmet.size, 6 + nPairs))
5678 data_param[:,0] = utctime
5679 data_param[:,1] = tmet
5680 data_param[:,2] = hmet
5681 data_param[:,3] = SNRm[tmet,hmet]
5682 data_param[:,4] = velRad[tmet,hmet]
5683 data_param[:,5] = coh[tmet,hmet]
5684 data_param[:,6:] = phase[:,tmet,hmet].T
5685
5686 elif mode == 'DBS':
5687 dataOut.groupList = numpy.arange(nChannels)
5688
5689 #Radial Velocities
5690 phase = numpy.angle(data_acf[:,1,:,:])
5691 # phase = ndimage.median_filter(numpy.angle(data_acf[:,1,:,:]), size = (1,5,1))
5692 velRad = phase*lamb/(4*numpy.pi*tSamp)
5693
5694 #Spectral width
5695 # acf1 = ndimage.median_filter(numpy.abs(data_acf[:,1,:,:]), size = (1,5,1))
5696 # acf2 = ndimage.median_filter(numpy.abs(data_acf[:,2,:,:]), size = (1,5,1))
5697 acf1 = data_acf[:,1,:,:]
5698 acf2 = data_acf[:,2,:,:]
5699
5700 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
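# Added note (hedged): this is the ACF-ratio width estimator assuming a Gaussian
# correlation function,
#   sigma_v = lambda/(2*sqrt(6)*pi*Ts) * sqrt( ln(|R(Ts)|/|R(2*Ts)|) ),
# with acf1 and acf2 the lag-1 and lag-2 autocorrelations.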
5701 # velRad = ndimage.median_filter(velRad, size = (1,5,1))
5702 if allData:
5703 boolMetFin = ~numpy.isnan(SNRdB)
5704 else:
5705 #SNR
5706 boolMet1 = (SNRdB>SNRthresh) #SNR mask
5707 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
5708
5709 #Radial velocity
5710 boolMet2 = numpy.abs(velRad) < 20
5711 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
5712
5713 #Spectral Width
5714 boolMet3 = spcWidth < 30
5715 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
5716 # boolMetFin = self.__erase_small(boolMet1, 10,5)
5717 boolMetFin = boolMet1&boolMet2&boolMet3
5718
5719 #Creating data_param
5720 coordMet = numpy.where(boolMetFin)
5721
5722 cmet = coordMet[0]
5723 tmet = coordMet[1]
5724 hmet = coordMet[2]
5725
5726 data_param = numpy.zeros((tmet.size, 7))
5727 data_param[:,0] = utctime
5728 data_param[:,1] = cmet
5729 data_param[:,2] = tmet
5730 data_param[:,3] = hmet
5731 data_param[:,4] = SNR[cmet,tmet,hmet].T
5732 data_param[:,5] = velRad[cmet,tmet,hmet].T
5733 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
5734
5735 # self.dataOut.data_param = data_int
5736 if len(data_param) == 0:
5737 dataOut.flagNoData = True
5738 else:
5739 dataOut.data_param = data_param
5740
5741 def __erase_small(self, binArray, threshX, threshY):
5742 labarray, numfeat = ndimage.measurements.label(binArray)
5743 binArray1 = numpy.copy(binArray)
5744
5745 for i in range(1,numfeat + 1):
5746 auxBin = (labarray==i)
5747 auxSize = auxBin.sum()
5748
5749 x,y = numpy.where(auxBin)
5750 widthX = x.max() - x.min()
5751 widthY = y.max() - y.min()
5752
5753 #width X: 3 seg -> 12.5*3
5754 #width Y:
5755
5756 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
5757 binArray1[auxBin] = False
5758
5759 return binArray1
5760
5761 #--------------- Specular Meteor ----------------
5762
5763 class SMDetection(Operation):
5764 '''
5765 Function DetectMeteors()
5766 Detection scheme based on the paper:
5767 HOLDSWORTH ET AL. 2004
5768
5769 Input:
5770 self.dataOut.data_pre
5771
5772 centerReceiverIndex: From the channels, which is the center receiver
5773
5774 hei_ref: Height reference for the Beacon signal extraction
5775 tauindex:
5776 predefinedPhaseShifts: Predefined phase offsets for the voltage signals
5777
5778 cohDetection: Whether to use coherent detection or not
5779 cohDet_timeStep: Coherent Detection calculation time step
5780 cohDet_thresh: Coherent Detection phase threshold to correct phases
5781
5782 noise_timeStep: Noise calculation time step
5783 noise_multiple: Noise multiple to define signal threshold
5784
5785 multDet_timeLimit: Multiple Detection Removal time limit in seconds
5786 multDet_rangeLimit: Multiple Detection Removal range limit in km
5787
5788 phaseThresh: Maximum phase difference between receivers to be considered a meteor
5789 SNRThresh: Minimum SNR threshold of the meteor signal to be considered a meteor
5790
5791 hmin: Minimum Height of the meteor to use it in the further wind estimations
5792 hmax: Maximum Height of the meteor to use it in the further wind estimations
5793 azimuth: Azimuth angle correction
5794
5795 Affected:
5796 self.dataOut.data_param
5797
5798 Rejection Criteria (Errors):
5799 0: No error; analysis OK
5800 1: SNR < SNR threshold
5801 2: angle of arrival (AOA) ambiguously determined
5802 3: AOA estimate not feasible
5803 4: Large difference in AOAs obtained from different antenna baselines
5804 5: echo at start or end of time series
5805 6: echo less than 5 samples long; too short for analysis
5806 7: echo rise exceeds 0.3s
5807 8: echo decay time less than twice rise time
5808 9: large power level before echo
5809 10: large power level after echo
5810 11: poor fit to amplitude for estimation of decay time
5811 12: poor fit to CCF phase variation for estimation of radial drift velocity
5812 13: height unresolvable echo: not valid height within 70 to 110 km
5813 14: height ambiguous echo: more than one possible height within 70 to 110 km
5814 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
5815 16: oscillatory echo, indicating the event is most likely not an underdense echo
5816
5817 17: phase difference in meteor Reestimation
5818
5819 Data Storage:
5820 Meteors for Wind Estimation (8):
5821 Utc Time | Range Height
5822 Azimuth Zenith errorCosDir
5823 VelRad errorVelRad
5824 Phase0 Phase1 Phase2 Phase3
5825 TypeError
5826
5827 '''
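# Added processing outline (summary of the steps in run() below; section numbers
# refer to the Holdsworth et al. 2004 scheme cited in the comments):
#   1) optional hardware phase correction and DC removal of the voltages
#   2) coherent or non-coherent power estimation (3.4)
#   3) noise level estimation, candidate meteor detection and multiple-detection removal (3.5)
#   4) meteor re-estimation and decay-time estimation (3.7-3.10)
#   5) radial velocity, AOA and height estimation via SMOperations (3.11-3.13)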
5828
5829 def run(self, dataOut, hei_ref = None, tauindex = 0,
5830 phaseOffsets = None,
5831 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
5832 noise_timeStep = 4, noise_multiple = 4,
5833 multDet_timeLimit = 1, multDet_rangeLimit = 3,
5834 phaseThresh = 20, SNRThresh = 5,
5835 hmin = 50, hmax=150, azimuth = 0,
5836 channelPositions = None) :
5837
5838
5839 #Getting Pairslist
5840 if channelPositions is None:
5841 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
5842 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Star configuration
5843 meteorOps = SMOperations()
5844 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
5845 heiRang = dataOut.heightList
5846 #Get Beacon signal - No Beacon signal anymore
5847 # newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex])
5848 #
5849 # if hei_ref != None:
5850 # newheis = numpy.where(self.dataOut.heightList>hei_ref)
5851 #
5852
5853
5854 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
5855 # see if the user put in pre defined phase shifts
5856 voltsPShift = dataOut.data_pre.copy()
5857
5858 # if predefinedPhaseShifts != None:
5859 # hardwarePhaseShifts = numpy.array(predefinedPhaseShifts)*numpy.pi/180
5860 #
5861 # # elif beaconPhaseShifts:
5862 # # #get hardware phase shifts using beacon signal
5863 # # hardwarePhaseShifts = self.__getHardwarePhaseDiff(self.dataOut.data_pre, pairslist, newheis, 10)
5864 # # hardwarePhaseShifts = numpy.insert(hardwarePhaseShifts,centerReceiverIndex,0)
5865 #
5866 # else:
5867 # hardwarePhaseShifts = numpy.zeros(5)
5868 #
5869 # voltsPShift = numpy.zeros((self.dataOut.data_pre.shape[0],self.dataOut.data_pre.shape[1],self.dataOut.data_pre.shape[2]), dtype = 'complex')
5870 # for i in range(self.dataOut.data_pre.shape[0]):
5871 # voltsPShift[i,:,:] = self.__shiftPhase(self.dataOut.data_pre[i,:,:], hardwarePhaseShifts[i])
5872
5873 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
5874
5875 #Remove DC
5876 voltsDC = numpy.mean(voltsPShift,1)
5877 voltsDC = numpy.mean(voltsDC,1)
5878 for i in range(voltsDC.shape[0]):
5879 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
5880
5881 #Don't consider the last heights; they are used to calculate the hardware phase shift
5882 # voltsPShift = voltsPShift[:,:,:newheis[0][0]]
5883
5884 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
5885 #Coherent Detection
5886 if cohDetection:
5887 #use coherent detection to get the net power
5888 cohDet_thresh = cohDet_thresh*numpy.pi/180
5889 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
5890
5891 #Non-coherent detection!
5892 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
5893 #********** END OF COH/NON-COH POWER CALCULATION**********************
5894
5895 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
5896 #Get noise
5897 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
5898 # noise = self.getNoise1(powerNet, noise_timeStep, self.dataOut.timeInterval)
5899 #Get signal threshold
5900 signalThresh = noise_multiple*noise
5901 #Meteor echoes detection
5902 listMeteors = self.__findMeteors(powerNet, signalThresh)
5903 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CALCULATION **********
5904
5905 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
5906 #Parameters
5907 heiRange = dataOut.heightList
5908 rangeInterval = heiRange[1] - heiRange[0]
5909 rangeLimit = multDet_rangeLimit/rangeInterval
5910 timeLimit = multDet_timeLimit/dataOut.timeInterval
5911 #Multiple detection removals
5912 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
5913 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
5914
5915 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
5916 #Parameters
5917 phaseThresh = phaseThresh*numpy.pi/180
5918 thresh = [phaseThresh, noise_multiple, SNRThresh]
5919 #Meteor reestimation (Errors N 1, 6, 12, 17)
5920 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
5921 # listMeteors2, listMeteorsPower, listMeteorsVolts = self.meteorReestimation3(listMeteors2, listMeteorsPower, listMeteorsVolts, voltsPShift, pairslist, thresh, noise)
5922 #Estimation of decay times (Errors N 7, 8, 11)
5923 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
5924 #******************* END OF METEOR REESTIMATION *******************
5925
5926 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
5927 #Calculating Radial Velocity (Error N 15)
5928 radialStdThresh = 10
5929 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
5930
5931 if len(listMeteors4) > 0:
5932 #Setting New Array
5933 date = dataOut.utctime
5934 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
5935
5936 #Correcting phase offset
5937 if phaseOffsets is not None:
5938 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
5939 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
5940
5941 #Second Pairslist
5942 pairsList = []
5943 pairx = (0,1)
5944 pairy = (2,3)
5945 pairsList.append(pairx)
5946 pairsList.append(pairy)
5947
5948 jph = numpy.array([0,0,0,0])
5949 h = (hmin,hmax)
5950 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
5951
5952 # #Calculate AOA (Error N 3, 4)
5953 # #JONES ET AL. 1998
5954 # error = arrayParameters[:,-1]
5955 # AOAthresh = numpy.pi/8
5956 # phases = -arrayParameters[:,9:13]
5957 # arrayParameters[:,4:7], arrayParameters[:,-1] = meteorOps.getAOA(phases, pairsList, error, AOAthresh, azimuth)
5958 #
5959 # #Calculate Heights (Error N 13 and 14)
5960 # error = arrayParameters[:,-1]
5961 # Ranges = arrayParameters[:,2]
5962 # zenith = arrayParameters[:,5]
5963 # arrayParameters[:,3], arrayParameters[:,-1] = meteorOps.getHeights(Ranges, zenith, error, hmin, hmax)
5964 # error = arrayParameters[:,-1]
5965 #********************* END OF PARAMETERS CALCULATION **************************
5966
5967 #***************************+ PASS DATA TO NEXT STEP **********************
5968 # arrayFinal = arrayParameters.reshape((1,arrayParameters.shape[0],arrayParameters.shape[1]))
5969 dataOut.data_param = arrayParameters
5970
5971 if arrayParameters is None:
5972 dataOut.flagNoData = True
5973 else:
5974 dataOut.flagNoData = False
5975
5976 return
5977
5978 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
5979
5980 minIndex = min(newheis[0])
5981 maxIndex = max(newheis[0])
5982
5983 voltage = voltage0[:,:,minIndex:maxIndex+1]
5984 nLength = voltage.shape[1]//n #integer length, used as a slice bound
5985 nMin = 0
5986 nMax = 0
5987 phaseOffset = numpy.zeros((len(pairslist),n))
5988
5989 for i in range(n):
5990 nMax += nLength
5991 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
5992 phaseCCF = numpy.mean(phaseCCF, axis = 2)
5993 phaseOffset[:,i] = phaseCCF.transpose()
5994 nMin = nMax
5995 # phaseDiff, phaseArrival = self.estimatePhaseDifference(voltage, pairslist)
5996
5997 #Remove Outliers
5998 factor = 2
5999 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
6000 dw = numpy.std(wt,axis = 1)
6001 dw = dw.reshape((dw.size,1))
6002 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
6003 phaseOffset[ind] = numpy.nan
6004 phaseOffset = numpy.nanmean(phaseOffset, axis=1) #scipy.stats.nanmean was removed in recent SciPy versions
6005
6006 return phaseOffset
6007
6008 def __shiftPhase(self, data, phaseShift):
6009 #this will shift the phase of a complex number
6010 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
6011 return dataShifted
6012
6013 def __estimatePhaseDifference(self, array, pairslist):
6014 nChannel = array.shape[0]
6015 nHeights = array.shape[2]
6016 numPairs = len(pairslist)
6017 # phaseCCF = numpy.zeros((nChannel, 5, nHeights))
6018 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
6019
6020 #Correct phases
6021 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
6022 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
6023
6024 if indDer[0].shape[0] > 0:
6025 for i in range(indDer[0].shape[0]):
6026 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
6027 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
6028
6029 # for j in range(numSides):
6030 # phaseCCFAux = self.calculateCCF(arrayCenter, arraySides[j,:,:], [-2,1,0,1,2])
6031 # phaseCCF[j,:,:] = numpy.angle(phaseCCFAux)
6032 #
6033 #Linear
6034 phaseInt = numpy.zeros((numPairs,1))
6035 angAllCCF = phaseCCF[:,[0,1,3,4],0]
6036 for j in range(numPairs):
6037 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
6038 phaseInt[j] = fit[1]
6039 #Phase Differences
6040 phaseDiff = phaseInt - phaseCCF[:,2,:]
6041 phaseArrival = phaseInt.reshape(phaseInt.size)
6042
6043 #Dealias
6044 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
6045 # indAlias = numpy.where(phaseArrival > numpy.pi)
6046 # phaseArrival[indAlias] -= 2*numpy.pi
6047 # indAlias = numpy.where(phaseArrival < -numpy.pi)
6048 # phaseArrival[indAlias] += 2*numpy.pi
6049
6050 return phaseDiff, phaseArrival
6051
6052 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
6053 #this function will run the coherent detection used in Holdworth et al. 2004 and return the net power
6054 #find the phase shifts of each channel over 1 second intervals
6055 #only look at ranges below the beacon signal
6056 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
6057 numBlocks = int(volts.shape[1]/numProfPerBlock)
6058 numHeights = volts.shape[2]
6059 nChannel = volts.shape[0]
6060 voltsCohDet = volts.copy()
6061
6062 pairsarray = numpy.array(pairslist)
6063 indSides = pairsarray[:,1]
6064 # indSides = numpy.array(range(nChannel))
6065 # indSides = numpy.delete(indSides, indCenter)
6066 #
6067 # listCenter = numpy.array_split(volts[indCenter,:,:], numBlocks, 0)
6068 listBlocks = numpy.array_split(volts, numBlocks, 1)
6069
6070 startInd = 0
6071 endInd = 0
6072
6073 for i in range(numBlocks):
6074 startInd = endInd
6075 endInd = endInd + listBlocks[i].shape[1]
6076
6077 arrayBlock = listBlocks[i]
6078 # arrayBlockCenter = listCenter[i]
6079
6080 #Estimate the Phase Difference
6081 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
6082 #Phase Difference RMS
6083 arrayPhaseRMS = numpy.abs(phaseDiff)
6084 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
6085 indPhase = numpy.where(phaseRMSaux==4)
6086 #Shifting
6087 if indPhase[0].shape[0] > 0:
6088 for j in range(indSides.size):
6089 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
6090 voltsCohDet[:,startInd:endInd,:] = arrayBlock
6091
6092 return voltsCohDet
6093
6094 def __calculateCCF(self, volts, pairslist ,laglist):
6095
6096 nHeights = volts.shape[2]
6097 nPoints = volts.shape[1]
6098 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
6099
6100 for i in range(len(pairslist)):
6101 volts1 = volts[pairslist[i][0]]
6102 volts2 = volts[pairslist[i][1]]
6103
6104 for t in range(len(laglist)):
6105 idxT = laglist[t]
6106 if idxT >= 0:
6107 vStacked = numpy.vstack((volts2[idxT:,:],
6108 numpy.zeros((idxT, nHeights),dtype='complex')))
6109 else:
6110 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
6111 volts2[:(nPoints + idxT),:]))
6112 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
6113
6114 vStacked = None
6115 return voltsCCF
6116
6117 def __getNoise(self, power, timeSegment, timeInterval):
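# Two noise estimates per block of ~timeSegment seconds: 'noise' is the mean power per height
# (broadcast to every profile of the block) and 'noise1' is the scalar mean power over the
# whole block.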
6118 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
6119 numBlocks = int(power.shape[0]/numProfPerBlock)
6120 numHeights = power.shape[1]
6121
6122 listPower = numpy.array_split(power, numBlocks, 0)
6123 noise = numpy.zeros((power.shape[0], power.shape[1]))
6124 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
6125
6126 startInd = 0
6127 endInd = 0
6128
6129 for i in range(numBlocks): #loop over the time blocks
6130 startInd = endInd
6131 endInd = endInd + listPower[i].shape[0]
6132
6133 arrayBlock = listPower[i]
6134 noiseAux = numpy.mean(arrayBlock, 0)
6135 # noiseAux = numpy.median(noiseAux)
6136 # noiseAux = numpy.mean(arrayBlock)
6137 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
6138
6139 noiseAux1 = numpy.mean(arrayBlock)
6140 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
6141
6142 return noise, noise1
6143
6144 def __findMeteors(self, power, thresh):
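# For each height, runs of at least three consecutive profiles above the threshold are taken
# as meteor candidates; each candidate is stored as [height, start, peak, end, FLA], where
# FLA is a lag-one autocorrelation of the power within the event.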
6145 nProf = power.shape[0]
6146 nHeights = power.shape[1]
6147 listMeteors = []
6148
6149 for i in range(nHeights):
6150 powerAux = power[:,i]
6151 threshAux = thresh[:,i]
6152
6153 indUPthresh = numpy.where(powerAux > threshAux)[0]
6154 indDNthresh = numpy.where(powerAux <= threshAux)[0]
6155
6156 j = 0
6157
6158 while (j < indUPthresh.size - 2):
6159 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
6160 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
6161 indDNthresh = indDNthresh[indDNAux]
6162
6163 if (indDNthresh.size > 0):
6164 indEnd = indDNthresh[0] - 1
6165 indInit = indUPthresh[j]
6166
6167 meteor = powerAux[indInit:indEnd + 1]
6168 indPeak = meteor.argmax() + indInit
6169 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
6170
6171 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #CHEQUEAR!!!!!
6172 j = numpy.where(indUPthresh == indEnd)[0] + 1
6173 else: j+=1
6174 else: j+=1
6175
6176 return listMeteors
6177
6178 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
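# Greedy de-duplication: the candidate with the largest FLA is kept, and every detection
# within +/-rangeLimit height bins whose time span overlaps it by +/-timeLimit profiles
# (including itself) is removed before the next iteration.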
6179
6180 arrayMeteors = numpy.asarray(listMeteors)
6181 listMeteors1 = []
6182
6183 while arrayMeteors.shape[0] > 0:
6184 FLAs = arrayMeteors[:,4]
6185 maxFLA = FLAs.argmax()
6186 listMeteors1.append(arrayMeteors[maxFLA,:])
6187
6188 MeteorInitTime = arrayMeteors[maxFLA,1]
6189 MeteorEndTime = arrayMeteors[maxFLA,3]
6190 MeteorHeight = arrayMeteors[maxFLA,0]
6191
6192 #Check neighborhood
6193 maxHeightIndex = MeteorHeight + rangeLimit
6194 minHeightIndex = MeteorHeight - rangeLimit
6195 minTimeIndex = MeteorInitTime - timeLimit
6196 maxTimeIndex = MeteorEndTime + timeLimit
6197
6198 #Check Heights
6199 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
6200 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
6201 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
6202
6203 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
6204
6205 return listMeteors1
6206
6207 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
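# For each candidate, the pair phase differences are estimated from the event samples and
# used to phase-align the side channels at that height; start/peak/end (and the decay end)
# are re-estimated from the corrected net power, the phases are re-estimated from the
# post-peak segment, and rejection codes are set: 17 (phase RMS above thresh[0]),
# 1 (SNR below the dB threshold thresh[2]) or 6 (echo too short for analysis).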
6208 numHeights = volts.shape[2]
6209 nChannel = volts.shape[0]
6210
6211 thresholdPhase = thresh[0]
6212 thresholdNoise = thresh[1]
6213 thresholdDB = float(thresh[2])
6214
6215 thresholdDB1 = 10**(thresholdDB/10)
6216 pairsarray = numpy.array(pairslist)
6217 indSides = pairsarray[:,1]
6218
6219 pairslist1 = list(pairslist)
6220 pairslist1.append((0,1))
6221 pairslist1.append((3,4))
6222
6223 listMeteors1 = []
6224 listPowerSeries = []
6225 listVoltageSeries = []
6226 #volts has the raw data
6227
6228 if frequency == 30e6:
6229 timeLag = 45*10**-3
6230 else:
6231 timeLag = 15*10**-3
6232 lag = int(numpy.ceil(timeLag/timeInterval)) #integer so it can be used as a slice index below
6233
6234 for i in range(len(listMeteors)):
6235
6236 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
6237 meteorAux = numpy.zeros(16)
6238
6239 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
6240 mHeight = listMeteors[i][0]
6241 mStart = listMeteors[i][1]
6242 mPeak = listMeteors[i][2]
6243 mEnd = listMeteors[i][3]
6244
6245 #get the volt data between the start and end times of the meteor
6246 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
6247 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
6248
6249 #3.6. Phase Difference estimation
6250 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
6251
6252 #3.7. Phase difference removal & meteor start, peak and end times reestimated
6253 #meteorVolts0.- all Channels, all Profiles
6254 meteorVolts0 = volts[:,:,mHeight]
6255 meteorThresh = noise[:,mHeight]*thresholdNoise
6256 meteorNoise = noise[:,mHeight]
6257 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
6258 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
6259
6260 #Times reestimation
6261 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
6262 if mStart1.size > 0:
6263 mStart1 = mStart1[-1] + 1
6264
6265 else:
6266 mStart1 = mPeak
6267
6268 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
6269 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
6270 if mEndDecayTime1.size == 0:
6271 mEndDecayTime1 = powerNet0.size
6272 else:
6273 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
6274 # mPeak1 = meteorVolts0[mStart1:mEnd1 + 1].argmax()
6275
6276 #meteorVolts1.- all Channels, from start to end
6277 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
6278 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
6279 if meteorVolts2.shape[1] == 0:
6280 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
6281 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
6282 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
6283 ##################### END PARAMETERS REESTIMATION #########################
6284
6285 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
6286 # if mEnd1 - mStart1 > 4: #Error Number 6: echo less than 5 samples long; too short for analysis
6287 if meteorVolts2.shape[1] > 0:
6288 #Phase Difference re-estimation
6289 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
6290 # phaseDiff1, phaseDiffint = self.estimatePhaseDifference(meteorVolts2, pairslist)
6291 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
6292 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
6293 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
6294
6295 #Phase Difference RMS
6296 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
6297 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
6298 #Data from Meteor
6299 mPeak1 = powerNet1.argmax() + mStart1
6300 mPeakPower1 = powerNet1.max()
6301 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
6302 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
6303 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
6304 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
6305 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
6306 #Vectorize
6307 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
6308 meteorAux[7:11] = phaseDiffint[0:4]
6309
6310 #Rejection Criterions
6311 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
6312 meteorAux[-1] = 17
6313 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
6314 meteorAux[-1] = 1
6315
6316
6317 else:
6318 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
6319 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
6320 PowerSeries = 0
6321
6322 listMeteors1.append(meteorAux)
6323 listPowerSeries.append(PowerSeries)
6324 listVoltageSeries.append(meteorVolts1)
6325
6326 return listMeteors1, listPowerSeries, listVoltageSeries
6327
6328 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
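# An exponential a*exp(-x/tau) is fitted to the power decay starting one lag after the peak
# (45 ms at 30 MHz, 15 ms otherwise); rejection codes: 7 (rise time > 0.3 s), 8 (decay time
# less than twice the rise time) and 11 (poor or failed fit, error > threshError).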
6329
6330 threshError = 10
6331 #Depending if it is 30 or 50 MHz
6332 if frequency == 30e6:
6333 timeLag = 45*10**-3
6334 else:
6335 timeLag = 15*10**-3
6336 lag = int(numpy.ceil(timeLag/timeInterval)) #integer so it can be used as an array index below
6337
6338 listMeteors1 = []
6339
6340 for i in range(len(listMeteors)):
6341 meteorPower = listPower[i]
6342 meteorAux = listMeteors[i]
6343
6344 if meteorAux[-1] == 0:
6345
6346 try:
6347 indmax = meteorPower.argmax()
6348 indlag = indmax + lag
6349
6350 y = meteorPower[indlag:]
6351 x = numpy.arange(0, y.size)*timeLag
6352
6353 #first guess
6354 a = y[0]
6355 tau = timeLag
6356 #exponential fit
6357 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
6358 y1 = self.__exponential_function(x, *popt)
6359 #error estimation
6360 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
6361
6362 decayTime = popt[1]
6363 riseTime = indmax*timeInterval
6364 meteorAux[11:13] = [decayTime, error]
6365
6366 #Table items 7, 8 and 11
6367 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
6368 meteorAux[-1] = 7
6369 elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than twice rise time
6370 meteorAux[-1] = 8
6371 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
6372 meteorAux[-1] = 11
6373
6374
6375 except:
6376 meteorAux[-1] = 11
6377
6378
6379 listMeteors1.append(meteorAux)
6380
6381 return listMeteors1
6382
6383 #Exponential Function
6384
6385 def __exponential_function(self, x, a, tau):
6386 y = a*numpy.exp(-x/tau)
6387 return y
6388
6389 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
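# The phase of the pair CCFs is fitted linearly against lag time; the mean slope dphi/dt is
# converted to radial velocity as v = -slope*lambda/(4*pi) with lambda = c/freq (freq fixed
# to 30 MHz here), and its standard deviation gives the error. Rejection codes: 15
# (|v| > 200 m/s) and 12 (error above radialStdThresh).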
6390
6391 pairslist1 = list(pairslist)
6392 pairslist1.append((0,1))
6393 pairslist1.append((3,4))
6394 numPairs = len(pairslist1)
6395 #Time Lag
6396 timeLag = 45*10**-3
6397 c = 3e8
6398 lag = numpy.ceil(timeLag/timeInterval)
6399 freq = 30e6
6400
6401 listMeteors1 = []
6402
6403 for i in range(len(listMeteors)):
6404 meteorAux = listMeteors[i]
6405 if meteorAux[-1] == 0:
6406 mStart = listMeteors[i][1]
6407 mPeak = listMeteors[i][2]
6408 mLag = mPeak - mStart + lag
6409
6410 #get the volt data between the start and end times of the meteor
6411 meteorVolts = listVolts[i]
6412 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
6413
6414 #Get CCF
6415 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
6416
6417 #Method 2
6418 slopes = numpy.zeros(numPairs)
6419 time = numpy.array([-2,-1,1,2])*timeInterval
6420 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
6421
6422 #Correct phases
6423 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
6424 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
6425
6426 if indDer[0].shape[0] > 0:
6427 for i in range(indDer[0].shape[0]):
6428 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i]])
6429 angAllCCF[indDer[0][i],indDer[1][i]+1:] += signo*2*numpy.pi
6430
6431 # fit = scipy.stats.linregress(numpy.array([-2,-1,1,2])*timeInterval, numpy.array([phaseLagN2s[i],phaseLagN1s[i],phaseLag1s[i],phaseLag2s[i]]))
6432 for j in range(numPairs):
6433 fit = stats.linregress(time, angAllCCF[j,:])
6434 slopes[j] = fit[0]
6435
6436 #Remove Outlier
6437 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
6438 # slopes = numpy.delete(slopes,indOut)
6439 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
6440 # slopes = numpy.delete(slopes,indOut)
6441
6442 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
6443 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
6444 meteorAux[-2] = radialError
6445 meteorAux[-3] = radialVelocity
6446
6447 #Setting Error
6448 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
6449 if numpy.abs(radialVelocity) > 200:
6450 meteorAux[-1] = 15
6451 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
6452 elif radialError > radialStdThresh:
6453 meteorAux[-1] = 12
6454
6455 listMeteors1.append(meteorAux)
6456 return listMeteors1
6457
6458 def __setNewArrays(self, listMeteors, date, heiRang):
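# Builds the 13-column parameter array: 0 date, 1 range (taken from heiRang), 6-7 radial
# velocity and its error, 8-11 pair phases and 12 the error code; columns 2-5 (height and
# AOA) are filled later by SMOperations.getMeteorParams.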
6459
6460 #New arrays
6461 arrayMeteors = numpy.array(listMeteors)
6462 arrayParameters = numpy.zeros((len(listMeteors), 13))
6463
6464 #Date inclusion
6465 # date = re.findall(r'\((.*?)\)', date)
6466 # date = date[0].split(',')
6467 # date = map(int, date)
6468 #
6469 # if len(date)<6:
6470 # date.append(0)
6471 #
6472 # date = [date[0]*10000 + date[1]*100 + date[2], date[3]*10000 + date[4]*100 + date[5]]
6473 # arrayDate = numpy.tile(date, (len(listMeteors), 1))
6474 arrayDate = numpy.tile(date, (len(listMeteors)))
6475
6476 #Meteor array
6477 # arrayMeteors[:,0] = heiRang[arrayMeteors[:,0].astype(int)]
6478 # arrayMeteors = numpy.hstack((arrayDate, arrayMeteors))
6479
6480 #Parameters Array
6481 arrayParameters[:,0] = arrayDate #Date
6482 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
6483 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
6484 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
6485 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
6486
6487
6488 return arrayParameters
6489
6490 class CorrectSMPhases(Operation):
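"""Operation that adds external per-channel phase offsets (degrees) to the specular-meteor
phases in dataOut.data_param, wraps them, and recomputes the AOA and heights through
SMOperations.getMeteorParams.

Example (a minimal usage sketch following the pattern of the other operations in this file):

op = proc_unit.addOperation(name='CorrectSMPhases', optype='other')
"""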
6491
6492 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
6493
6494 arrayParameters = dataOut.data_param
6495 pairsList = []
6496 pairx = (0,1)
6497 pairy = (2,3)
6498 pairsList.append(pairx)
6499 pairsList.append(pairy)
6500 jph = numpy.zeros(4)
6501
6502 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
6503 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
6504 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
6505
6506 meteorOps = SMOperations()
6507 if channelPositions is None:
6508 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
6509 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella (star) configuration
6510
6511 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
6512 h = (hmin,hmax)
6513
6514 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
6515
6516 dataOut.data_param = arrayParameters
6517 return
6518
6519 class SMPhaseCalibration(Operation):
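"""Operation that accumulates specular-meteor parameters for nHours and estimates the
per-channel phase offsets (degrees): the gammas of the two baselines are obtained from a
histogram/Gaussian fit of the meteor phases, and a coarse-to-fine grid search keeps the
offsets that maximize the number of meteors with a valid AOA/height solution. The result is
placed in dataOut.data_output.

Example (a minimal usage sketch following the pattern of the other operations in this file):

op = proc_unit.addOperation(name='SMPhaseCalibration', optype='other')
"""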
6520
6521 __buffer = None
6522
6523 __initime = None
6524
6525 __dataReady = False
6526
6527 __isConfig = False
6528
6529 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
6530
6531 dataTime = currentTime + paramInterval
6532 deltaTime = dataTime - initTime
6533
6534 if deltaTime >= outputInterval or deltaTime < 0:
6535 return True
6536
6537 return False
6538
6539 def __getGammas(self, pairs, d, phases):
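# For each baseline pair, a candidate gamma is formed per meteor from the two phases weighted
# by the baseline-length ratio and wrapped to [-pi, pi); the distribution (replicated at
# +/-pi/2) is histogrammed in 64 bins over [-pi/2, pi/2) and a Gaussian is least-squares
# fitted around the peak, its centre giving gamma.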
6540 gammas = numpy.zeros(2)
6541
6542 for i in range(len(pairs)):
6543
6544 pairi = pairs[i]
6545
6546 phip3 = phases[:,pairi[0]]
6547 d3 = d[pairi[0]]
6548 phip2 = phases[:,pairi[1]]
6549 d2 = d[pairi[1]]
6550 #Calculating gamma
6551 # jdcos = alp1/(k*d1)
6552 # jgamma = numpy.angle(numpy.exp(1j*(d0*alp1/d1 - alp0)))
6553 jgamma = -phip2*d3/d2 - phip3
6554 jgamma = numpy.angle(numpy.exp(1j*jgamma))
6555 # jgamma[jgamma>numpy.pi] -= 2*numpy.pi
6556 # jgamma[jgamma<-numpy.pi] += 2*numpy.pi
6557
6558 #Revised distribution
6559 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
6560
6561 #Histogram
6562 nBins = 64
6563 rmin = -0.5*numpy.pi
6564 rmax = 0.5*numpy.pi
6565 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
6566
6567 meteorsY = phaseHisto[0]
6568 phasesX = phaseHisto[1][:-1]
6569 width = phasesX[1] - phasesX[0]
6570 phasesX += width/2
6571
6572 #Gaussian approximation
6573 bpeak = meteorsY.argmax()
6574 peak = meteorsY.max()
6575 jmin = bpeak - 5
6576 jmax = bpeak + 5 + 1
6577
6578 if jmin<0:
6579 jmin = 0
6580 jmax = 6
6581 elif jmax > meteorsY.size:
6582 jmin = meteorsY.size - 6
6583 jmax = meteorsY.size
6584
6585 x0 = numpy.array([peak,bpeak,50])
6586 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
6587
6588 #Gammas
6589 gammas[i] = coeff[0][1]
6590
6591 return gammas
6592
6593 def __residualFunction(self, coeffs, y, t):
6594
6595 return y - self.__gauss_function(t, coeffs)
6596
6597 def __gauss_function(self, t, coeffs):
6598
6599 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
6600
6601 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
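# Coarse-to-fine grid search: four passes with search ranges [10*pi, pi, pi/2, pi/4] around
# the current centre, each sampled on a 20x20 grid of candidate x/y angles; for each candidate
# the full offset vector jph is built from the gammas and baseline ratios, getMeteorParams is
# evaluated, and the candidate yielding the most error-free meteors becomes the new centre.
# The final offsets are wrapped and returned in degrees.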
6602 meteorOps = SMOperations()
6603 nchan = 4
6604 pairx = pairsList[0] #x is 0
6605 pairy = pairsList[1] #y is 1
6606 center_xangle = 0
6607 center_yangle = 0
6608 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
6609 ntimes = len(range_angle)
6610
6611 nstepsx = 20
6612 nstepsy = 20
6613
6614 for iz in range(ntimes):
6615 min_xangle = -range_angle[iz]/2 + center_xangle
6616 max_xangle = range_angle[iz]/2 + center_xangle
6617 min_yangle = -range_angle[iz]/2 + center_yangle
6618 max_yangle = range_angle[iz]/2 + center_yangle
6619
6620 inc_x = (max_xangle-min_xangle)/nstepsx
6621 inc_y = (max_yangle-min_yangle)/nstepsy
6622
6623 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
6624 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
6625 penalty = numpy.zeros((nstepsx,nstepsy))
6626 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
6627 jph = numpy.zeros(nchan)
6628
6629 # Iterations looking for the offset
6630 for iy in range(int(nstepsy)):
6631 for ix in range(int(nstepsx)):
6632 d3 = d[pairsList[1][0]]
6633 d2 = d[pairsList[1][1]]
6634 d5 = d[pairsList[0][0]]
6635 d4 = d[pairsList[0][1]]
6636
6637 alp2 = alpha_y[iy] #gamma 1
6638 alp4 = alpha_x[ix] #gamma 0
6639
6640 alp3 = -alp2*d3/d2 - gammas[1]
6641 alp5 = -alp4*d5/d4 - gammas[0]
6642 # jph[pairy[1]] = alpha_y[iy]
6643 # jph[pairy[0]] = -gammas[1] - alpha_y[iy]*d[pairy[1]]/d[pairy[0]]
6644
6645 # jph[pairx[1]] = alpha_x[ix]
6646 # jph[pairx[0]] = -gammas[0] - alpha_x[ix]*d[pairx[1]]/d[pairx[0]]
6647 jph[pairsList[0][1]] = alp4
6648 jph[pairsList[0][0]] = alp5
6649 jph[pairsList[1][0]] = alp3
6650 jph[pairsList[1][1]] = alp2
6651 jph_array[:,ix,iy] = jph
6652 # d = [2.0,2.5,2.5,2.0]
6653 #TODO: check that the meteors are read correctly
6654 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
6655 error = meteorsArray1[:,-1]
6656 ind1 = numpy.where(error==0)[0]
6657 penalty[ix,iy] = ind1.size
6658
6659 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
6660 phOffset = jph_array[:,i,j]
6661
6662 center_xangle = phOffset[pairx[1]]
6663 center_yangle = phOffset[pairy[1]]
6664
6665 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
6666 phOffset = phOffset*180/numpy.pi
6667 return phOffset
6668
6669
6670 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
6671
6672 dataOut.flagNoData = True
6673 self.__dataReady = False
6674 dataOut.outputInterval = nHours*3600
6675
6676 if self.__isConfig == False:
6677 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
6678 #Get Initial LTC time
6679 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
6680 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
6681
6682 self.__isConfig = True
6683
6684 if self.__buffer is None:
6685 self.__buffer = dataOut.data_param.copy()
6686
6687 else:
6688 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
6689
6690 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
6691
6692 if self.__dataReady:
6693 dataOut.utctimeInit = self.__initime
6694 self.__initime += dataOut.outputInterval #to erase time offset
6695
6696 freq = dataOut.frequency
6697 c = dataOut.C #m/s
6698 lamb = c/freq
6699 k = 2*numpy.pi/lamb
6700 azimuth = 0
6701 h = (hmin, hmax)
6702 # pairs = ((0,1),(2,3)) #Estrella
6703 # pairs = ((1,0),(2,3)) #T
6704
6705 if channelPositions is None:
6706 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
6707 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella (star) configuration
6708 meteorOps = SMOperations()
6709 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
6710
6711 #Checking correct order of pairs
6712 pairs = []
6713 if distances[1] > distances[0]:
6714 pairs.append((1,0))
6715 else:
6716 pairs.append((0,1))
6717
6718 if distances[3] > distances[2]:
6719 pairs.append((3,2))
6720 else:
6721 pairs.append((2,3))
6722 # distances1 = [-distances[0]*lamb, distances[1]*lamb, -distances[2]*lamb, distances[3]*lamb]
6723
6724 meteorsArray = self.__buffer
6725 error = meteorsArray[:,-1]
6726 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
6727 ind1 = numpy.where(boolError)[0]
6728 meteorsArray = meteorsArray[ind1,:]
6729 meteorsArray[:,-1] = 0
6730 phases = meteorsArray[:,8:12]
6731
6732 #Calculate Gammas
6733 gammas = self.__getGammas(pairs, distances, phases)
6734 # gammas = numpy.array([-21.70409463,45.76935864])*numpy.pi/180
6735 #Calculate Phases
6736 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
6737 phasesOff = phasesOff.reshape((1,phasesOff.size))
6738 dataOut.data_output = -phasesOff
6739 dataOut.flagNoData = False
6740 self.__buffer = None
6741
6742
6743 return
6744
6745 class SMOperations():
6746
6747 def __init__(self):
6748
6749 return
6750
6751 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
6752
6753 arrayParameters = arrayParameters0.copy()
6754 hmin = h[0]
6755 hmax = h[1]
6756
6757 #Calculate AOA (Error N 3, 4)
6758 #JONES ET AL. 1998
6759 AOAthresh = numpy.pi/8
6760 error = arrayParameters[:,-1]
6761 phases = -arrayParameters[:,8:12] + jph
6762 # phases = numpy.unwrap(phases)
6763 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
6764
6765 #Calculate Heights (Error N 13 and 14)
6766 error = arrayParameters[:,-1]
6767 Ranges = arrayParameters[:,1]
6768 zenith = arrayParameters[:,4]
6769 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
6770
6771 #----------------------- Get Final data ------------------------------------
6772 # error = arrayParameters[:,-1]
6773 # ind1 = numpy.where(error==0)[0]
6774 # arrayParameters = arrayParameters[ind1,:]
6775
6776 return arrayParameters
6777
6778 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
6779
6780 arrayAOA = numpy.zeros((phases.shape[0],3))
6781 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
6782
6783 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
6784 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
6785 arrayAOA[:,2] = cosDirError
6786
6787 azimuthAngle = arrayAOA[:,0]
6788 zenithAngle = arrayAOA[:,1]
6789
6790 #Setting Error
6791 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
6792 error[indError] = 0
6793 #Number 3: AOA not feasible
6794 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
6795 error[indInvalid] = 3
6796 #Number 4: Large difference in AOAs obtained from different antenna baselines
6797 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
6798 error[indInvalid] = 4
6799 return arrayAOA, error
6800
6801 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
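# Two-step estimate per axis: a coarse direction cosine from the summed pair phases divided
# by 2*pi*(d0 - d1), and a refined one from the phase difference with a +/-8 cycle ambiguity
# search over 2*pi*(d0 + d1); the refined candidate closest to the coarse estimate is kept.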
6802
6803 #Initializing some variables
6804 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
6805 ang_aux = ang_aux.reshape(1,ang_aux.size)
6806
6807 cosdir = numpy.zeros((arrayPhase.shape[0],2))
6808 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
6809
6810
6811 for i in range(2):
6812 ph0 = arrayPhase[:,pairsList[i][0]]
6813 ph1 = arrayPhase[:,pairsList[i][1]]
6814 d0 = distances[pairsList[i][0]]
6815 d1 = distances[pairsList[i][1]]
6816
6817 ph0_aux = ph0 + ph1
6818 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
6819 # ph0_aux[ph0_aux > numpy.pi] -= 2*numpy.pi
6820 # ph0_aux[ph0_aux < -numpy.pi] += 2*numpy.pi
6821 #First Estimation
6822 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
6823
6824 #Most-Accurate Second Estimation
6825 phi1_aux = ph0 - ph1
6826 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
6827 #Direction Cosine 1
6828 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
6829
6830 #Searching the correct Direction Cosine
6831 cosdir0_aux = cosdir0[:,i]
6832 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
6833 #Minimum Distance
6834 cosDiff = (cosdir1 - cosdir0_aux)**2
6835 indcos = cosDiff.argmin(axis = 1)
6836 #Saving Value obtained
6837 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
6838
6839 return cosdir0, cosdir
6840
6841 def __calculateAOA(self, cosdir, azimuth):
6842 cosdirX = cosdir[:,0]
6843 cosdirY = cosdir[:,1]
6844
6845 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
6846 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth#0 deg north, 90 deg east
6847 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
6848
6849 return angles
6850
6851 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
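# Range aliasing is resolved by testing Ri = R + n*Ramb (n = 0, 1, 2; Ramb = 375 km) against
# h = sqrt(Re^2 + Ri^2 + 2*Re*Ri*cos(zenith)) - Re; exactly one height inside
# [minHeight, maxHeight] is required, otherwise error 13 (none) or 14 (ambiguous) is set.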
6852
6853 Ramb = 375 #Ramb = c/(2*PRF)
6854 Re = 6371 #Earth Radius
6855 heights = numpy.zeros(Ranges.shape)
6856
6857 R_aux = numpy.array([0,1,2])*Ramb
6858 R_aux = R_aux.reshape(1,R_aux.size)
6859
6860 Ranges = Ranges.reshape(Ranges.size,1)
6861
6862 Ri = Ranges + R_aux
6863 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
6864
6865 #Check if there is a height between 70 and 110 km
6866 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
6867 ind_h = numpy.where(h_bool == 1)[0]
6868
6869 hCorr = hi[ind_h, :]
6870 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
6871
6872 hCorr = hi[ind_hCorr][:len(ind_h)]
6873 heights[ind_h] = hCorr
6874
6875 #Setting Error
6876 #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
6877 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
6878 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
6879 error[indError] = 0
6880 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
6881 error[indInvalid2] = 14
6882 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
6883 error[indInvalid1] = 13
6884
6885 return heights, error
6886
6887 def getPhasePairs(self, channelPositions):
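# From the five antenna positions, the two shortest baselines along each axis that share a
# common (centre) channel are identified; returns pairslist = [(cX, chan1X), (cX, chan2X),
# (cY, chan1Y), (cY, chan2Y)] and distances = [d1x, d2x, d1y, d2y] in the same units as
# channelPositions.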
6888 chanPos = numpy.array(channelPositions)
6889 listOper = list(itertools.combinations(list(range(5)),2))
6890
6891 distances = numpy.zeros(4)
6892 axisX = []
6893 axisY = []
6894 distX = numpy.zeros(3)
6895 distY = numpy.zeros(3)
6896 ix = 0
6897 iy = 0
6898
6899 pairX = numpy.zeros((2,2))
6900 pairY = numpy.zeros((2,2))
6901
6902 for i in range(len(listOper)):
6903 pairi = listOper[i]
6904
6905 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
6906
6907 if posDif[0] == 0:
6908 axisY.append(pairi)
6909 distY[iy] = posDif[1]
6910 iy += 1
6911 elif posDif[1] == 0:
6912 axisX.append(pairi)
6913 distX[ix] = posDif[0]
6914 ix += 1
6915
6916 for i in range(2):
6917 if i==0:
6918 dist0 = distX
6919 axis0 = axisX
6920 else:
6921 dist0 = distY
6922 axis0 = axisY
6923
6924 side = numpy.argsort(dist0)[:-1]
6925 axis0 = numpy.array(axis0)[side,:]
6926 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
6927 axis1 = numpy.unique(numpy.reshape(axis0,4))
6928 side = axis1[axis1 != chanC]
6929 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
6930 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
6931 if diff1<0:
6932 chan2 = side[0]
6933 d2 = numpy.abs(diff1)
6934 chan1 = side[1]
6935 d1 = numpy.abs(diff2)
6936 else:
6937 chan2 = side[1]
6938 d2 = numpy.abs(diff2)
6939 chan1 = side[0]
6940 d1 = numpy.abs(diff1)
6941
6942 if i==0:
6943 chanCX = chanC
6944 chan1X = chan1
6945 chan2X = chan2
6946 distances[0:2] = numpy.array([d1,d2])
6947 else:
6948 chanCY = chanC
6949 chan1Y = chan1
6950 chan2Y = chan2
6951 distances[2:4] = numpy.array([d1,d2])
6952 # axisXsides = numpy.reshape(axisX[ix,:],4)
6953 #
6954 # channelCentX = int(numpy.intersect1d(pairX[0,:], pairX[1,:])[0])
6955 # channelCentY = int(numpy.intersect1d(pairY[0,:], pairY[1,:])[0])
6956 #
6957 # ind25X = numpy.where(pairX[0,:] != channelCentX)[0][0]
6958 # ind20X = numpy.where(pairX[1,:] != channelCentX)[0][0]
6959 # channel25X = int(pairX[0,ind25X])
6960 # channel20X = int(pairX[1,ind20X])
6961 # ind25Y = numpy.where(pairY[0,:] != channelCentY)[0][0]
6962 # ind20Y = numpy.where(pairY[1,:] != channelCentY)[0][0]
6963 # channel25Y = int(pairY[0,ind25Y])
6964 # channel20Y = int(pairY[1,ind20Y])
6965
6966 # pairslist = [(channelCentX, channel25X),(channelCentX, channel20X),(channelCentY,channel25Y),(channelCentY, channel20Y)]
6967 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
6968
6969 return pairslist, distances
6970 # def __getAOA(self, phases, pairsList, error, AOAthresh, azimuth):
6971 #
6972 # arrayAOA = numpy.zeros((phases.shape[0],3))
6973 # cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList)
6974 #
6975 # arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
6976 # cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
6977 # arrayAOA[:,2] = cosDirError
6978 #
6979 # azimuthAngle = arrayAOA[:,0]
6980 # zenithAngle = arrayAOA[:,1]
6981 #
6982 # #Setting Error
6983 # #Number 3: AOA not fesible
6984 # indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
6985 # error[indInvalid] = 3
6986 # #Number 4: Large difference in AOAs obtained from different antenna baselines
6987 # indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
6988 # error[indInvalid] = 4
6989 # return arrayAOA, error
6990 #
6991 # def __getDirectionCosines(self, arrayPhase, pairsList):
6992 #
6993 # #Initializing some variables
6994 # ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
6995 # ang_aux = ang_aux.reshape(1,ang_aux.size)
6996 #
6997 # cosdir = numpy.zeros((arrayPhase.shape[0],2))
6998 # cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
6999 #
7000 #
7001 # for i in range(2):
7002 # #First Estimation
7003 # phi0_aux = arrayPhase[:,pairsList[i][0]] + arrayPhase[:,pairsList[i][1]]
7004 # #Dealias
7005 # indcsi = numpy.where(phi0_aux > numpy.pi)
7006 # phi0_aux[indcsi] -= 2*numpy.pi
7007 # indcsi = numpy.where(phi0_aux < -numpy.pi)
7008 # phi0_aux[indcsi] += 2*numpy.pi
7009 # #Direction Cosine 0
7010 # cosdir0[:,i] = -(phi0_aux)/(2*numpy.pi*0.5)
7011 #
7012 # #Most-Accurate Second Estimation
7013 # phi1_aux = arrayPhase[:,pairsList[i][0]] - arrayPhase[:,pairsList[i][1]]
7014 # phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
7015 # #Direction Cosine 1
7016 # cosdir1 = -(phi1_aux + ang_aux)/(2*numpy.pi*4.5)
7017 #
7018 # #Searching the correct Direction Cosine
7019 # cosdir0_aux = cosdir0[:,i]
7020 # cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
7021 # #Minimum Distance
7022 # cosDiff = (cosdir1 - cosdir0_aux)**2
7023 # indcos = cosDiff.argmin(axis = 1)
7024 # #Saving Value obtained
7025 # cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
7026 #
7027 # return cosdir0, cosdir
7028 #
7029 # def __calculateAOA(self, cosdir, azimuth):
7030 # cosdirX = cosdir[:,0]
7031 # cosdirY = cosdir[:,1]
7032 #
7033 # zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
7034 # azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
7035 # angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
7036 #
7037 # return angles
7038 #
7039 # def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
7040 #
7041 # Ramb = 375 #Ramb = c/(2*PRF)
7042 # Re = 6371 #Earth Radius
7043 # heights = numpy.zeros(Ranges.shape)
7044 #
7045 # R_aux = numpy.array([0,1,2])*Ramb
7046 # R_aux = R_aux.reshape(1,R_aux.size)
7047 #
7048 # Ranges = Ranges.reshape(Ranges.size,1)
7049 #
7050 # Ri = Ranges + R_aux
7051 # hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
7052 #
7053 # #Check if there is a height between 70 and 110 km
7054 # h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
7055 # ind_h = numpy.where(h_bool == 1)[0]
7056 #
7057 # hCorr = hi[ind_h, :]
7058 # ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
7059 #
7060 # hCorr = hi[ind_hCorr]
7061 # heights[ind_h] = hCorr
7062 #
7063 # #Setting Error
7064 # #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
7065 # #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
7066 #
7067 # indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
7068 # error[indInvalid2] = 14
7069 # indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
7070 # error[indInvalid1] = 13
7071 #
7072 # return heights, error
7073
7074 class IGRFModel(Operation):
7075 '''
7076 Written by R. Flores
7077 '''
7078 """Operation to calculate Geomagnetic parameters.
7079
7080 Parameters:
7081 -----------
7082 None
7083
7084 Example
7085 --------
7086
7087 op = proc_unit.addOperation(name='IGRFModel', optype='other')
7088
7089 """
7090
7091 def __init__(self, **kwargs):
7092
7093 Operation.__init__(self, **kwargs)
7094
7095 self.aux=1
7096
7097 def run(self,dataOut):
7098
7099 try:
7100 from schainpy.model.proc import mkfact_short_2020_2
7101 except:
7102 log.warning('You should install the "mkfact_short_2020_2" module to process the IGRF Model')
7103
7104 if self.aux==1:
7105
7106 #dataOut.TimeBlockSeconds_First_Time=time.mktime(time.strptime(dataOut.TimeBlockDate))
7107 #### we do not use dataOut.datatime.ctime() because it's the time of the second (next) block
7108 dataOut.TimeBlockSeconds_First_Time=dataOut.TimeBlockSeconds
7109 dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_First_Time)
7110 dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0
7111 dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0
7112
7113 self.aux=0
7114 dh = dataOut.heightList[1]-dataOut.heightList[0]
7115 #dataOut.h=numpy.arange(0.0,15.0*dataOut.MAXNRANGENDT,15.0,dtype='float32')
7116 dataOut.h=numpy.arange(0.0,dh*dataOut.MAXNRANGENDT,dh,dtype='float32')
7117 dataOut.bfm=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
7118 dataOut.bfm=numpy.array(dataOut.bfm,order='F')
7119 dataOut.thb=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
7120 dataOut.thb=numpy.array(dataOut.thb,order='F')
7121 dataOut.bki=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
7122 dataOut.bki=numpy.array(dataOut.bki,order='F')
7123 #print("bki: ", dataOut.bki)
7124 #print("**** mkfact WRAPPER ***** ",mkfact_short_2020.mkfact.__doc__ )
7125 #print("IDs: ", id(dataOut.bki))
7126 #print("bki shape: ", numpy.shape(dataOut.bki),numpy.shape(dataOut.h),dataOut.year)
7127
7128 mkfact_short_2020_2.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT)
7129
7130 #mkfact_short_2020.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT)
7131 #print("bki: ", dataOut.bki[:10])
7132 #print("thb: ", dataOut.thb[:10])
7133 #print("bfm: ", dataOut.bfm[:10])
7134 #print("IDs: ", id(dataOut.bki))
7135
7136 return dataOut
7137
7138 class MergeProc(ProcessingUnit):
7139
7140 def __init__(self):
7141 ProcessingUnit.__init__(self)
7142
7143 def run(self, attr_data, attr_data_2 = None, attr_data_3 = None, attr_data_4 = None, attr_data_5 = None, mode=0):
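# Merge modes implemented below: 0 concatenate attr_data from all inputs; 1 Hybrid DP/LP lag
# spectra; 2 HAE 2022 (sum attr_data and double nIncohInt); 4 Hybrid LP-SSheightProfiles;
# 5 concatenate attr_data and attr_data_2; 6 Hybrid Spectra-Voltage; 11 MST-ISR (ph2, dphi,
# sdp2 plus height-list rebuild).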
7144 #print("*****************************Merge***************")
7145
7146 self.dataOut = getattr(self, self.inputs[0])
7147 data_inputs = [getattr(self, attr) for attr in self.inputs]
7148 #print(data_inputs)
7149 #print("Run: ",self.dataOut.runNextUnit)
7150 #exit(1)
7151 #print("a:", [getattr(data, attr_data) for data in data_inputs][1])
7152 #exit(1)
7153 if mode==0:
7154 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
7155 setattr(self.dataOut, attr_data, data)
7156
7157 if mode==1: #Hybrid
7158 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7159 #setattr(self.dataOut, attr_data, data)
7160 setattr(self.dataOut, 'dataLag_spc', [getattr(data, attr_data) for data in data_inputs][0])
7161 setattr(self.dataOut, 'dataLag_spc_LP', [getattr(data, attr_data) for data in data_inputs][1])
7162 setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
7163 setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
7164 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
7165 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
7166 '''
7167 print(self.dataOut.dataLag_spc_LP.shape)
7168 print(self.dataOut.dataLag_cspc_LP.shape)
7169 exit(1)
7170 '''
7171
7172 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
7173 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
7174 '''
7175 print("Merge")
7176 print(numpy.shape(self.dataOut.dataLag_spc))
7177 print(numpy.shape(self.dataOut.dataLag_spc_LP))
7178 print(numpy.shape(self.dataOut.dataLag_cspc))
7179 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
7180 exit(1)
7181 '''
7182 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
7183 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
7184 #exit(1)
7185 #print(self.dataOut.NDP)
7186 #print(self.dataOut.nNoiseProfiles)
7187
7188 #self.dataOut.nIncohInt_LP = 128
7189 self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
7190 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt
7191 self.dataOut.NLAG = 16
7192 self.dataOut.NRANGE = 200
7193 self.dataOut.NSCAN = 128
7194 #print(numpy.shape(self.dataOut.data_spc))
7195
7196 #exit(1)
7197
7198 if mode==2: #HAE 2022
7199 data = numpy.sum([getattr(data, attr_data) for data in data_inputs],axis=0)
7200 setattr(self.dataOut, attr_data, data)
7201
7202 self.dataOut.nIncohInt *= 2
7203 #meta = self.dataOut.getFreqRange(1)/1000.
7204 self.dataOut.freqRange = self.dataOut.getFreqRange(1)/1000.
7205
7206 #exit(1)
7207
7208 if mode==4: #Hybrid LP-SSheightProfiles
7209 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7210 #setattr(self.dataOut, attr_data, data)
7211 setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[0], attr_data)) #DP
7212 setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[0], attr_data_2)) #DP
7213 setattr(self.dataOut, 'dataLag_spc_LP', getattr(data_inputs[1], attr_data_3)) #LP
7214 #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP
7215 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7216 setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7217 #print("Merge data_acf: ",self.dataOut.data_acf.shape)
7218
7219
7220 #self.dataOut.nIncohInt_LP = 128
7221 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
7222 self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP
7223 self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP
7224 self.dataOut.NSCAN = 128
7225 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN
7226 #print("sahpi",self.dataOut.nIncohInt_LP)
7227 #exit(1)
7228 self.dataOut.NLAG = 16
7229 self.dataOut.NLAG = self.dataOut.data_acf.shape[1]
7230 self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1]
7231
7232 #print(numpy.shape(self.dataOut.data_spc))
7233
7234 #exit(1)
7235 if mode==5:
7236 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
7237 setattr(self.dataOut, attr_data, data)
7238 data = numpy.concatenate([getattr(data, attr_data_2) for data in data_inputs])
7239 setattr(self.dataOut, attr_data_2, data)
7240
7241 if mode==6: #Hybrid Spectra-Voltage
7242 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7243 #setattr(self.dataOut, attr_data, data)
7244 setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[1], attr_data)) #DP
7245 setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[1], attr_data_2)) #DP
7246 setattr(self.dataOut, 'output_LP_integrated', getattr(data_inputs[0], attr_data_3)) #LP
7247 #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP
7248 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7249 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7250 #print("Merge data_acf: ",self.dataOut.data_acf.shape)
7251 #print(self.dataOut.NSCAN)
7252 self.dataOut.nIncohInt = int(self.dataOut.NAVG * self.dataOut.nint)
7253 #print(self.dataOut.dataLag_spc.shape)
7254 self.dataOut.nProfiles = self.dataOut.nProfiles_DP = self.dataOut.dataLag_spc.shape[1]
7255 '''
7256 #self.dataOut.nIncohInt_LP = 128
7257 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
7258 self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP
7259 self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP
7260 self.dataOut.NSCAN = 128
7261 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN
7262 #print("sahpi",self.dataOut.nIncohInt_LP)
7263 #exit(1)
7264 self.dataOut.NLAG = 16
7265 self.dataOut.NLAG = self.dataOut.data_acf.shape[1]
7266 self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1]
7267 '''
7268 #print(numpy.shape(self.dataOut.data_spc))
7269 #print("*************************GOOD*************************")
7270 #exit(1)
7271
7272 if mode==11: #MST ISR
7273 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7274 #setattr(self.dataOut, attr_data, data)
7275 #setattr(self.dataOut, 'ph2', [getattr(data, attr_data) for data in data_inputs][1])
7276 #setattr(self.dataOut, 'dphi', [getattr(data, attr_data_2) for data in data_inputs][1])
7277 #setattr(self.dataOut, 'sdp2', [getattr(data, attr_data_3) for data in data_inputs][1])
7278
7279 setattr(self.dataOut, 'ph2', getattr(data_inputs[1], attr_data)) #DP
7280 setattr(self.dataOut, 'dphi', getattr(data_inputs[1], attr_data_2)) #DP
7281 setattr(self.dataOut, 'sdp2', getattr(data_inputs[1], attr_data_3)) #DP
7282
7283 print("MST Density", numpy.shape(self.dataOut.ph2))
7284 print("cf MST: ", self.dataOut.cf)
7285 #exit(1)
7286 #print("MST Density", self.dataOut.ph2[116:283])
7287 print("MST Density", self.dataOut.ph2[80:120])
7288 print("MST dPhi", self.dataOut.dphi[80:120])
7289 self.dataOut.ph2 *= self.dataOut.cf#0.0008136899
7290 #print("MST Density", self.dataOut.ph2[116:283])
7291 self.dataOut.sdp2 *= 0#self.dataOut.cf#0.0008136899
7292 #print("MST Density", self.dataOut.ph2[116:283])
7293 print("MST Density", self.dataOut.ph2[80:120])
7294 self.dataOut.NSHTS = int(numpy.shape(self.dataOut.ph2)[0])
7295 dH = self.dataOut.heightList[1]-self.dataOut.heightList[0]
7296 dH /= self.dataOut.windowOfFilter
7297 self.dataOut.heightList = numpy.arange(0,self.dataOut.NSHTS)*dH + dH
7298 #print("heightList: ", self.dataOut.heightList)
7299 self.dataOut.NDP = self.dataOut.NSHTS
7300 #exit(1)
7301 #print(self.dataOut.heightList)
7302
7303 class MST_Den_Conv(Operation):
7304 '''
7305 Written by R. Flores
7306 '''
7307 """Operation to calculate Geomagnetic parameters.
7308
7309 Parameters:
7310 -----------
7311 None
7312
7313 Example
7314 --------
7315
7316 op = proc_unit.addOperation(name='MST_Den_Conv', optype='other')
7317
7318 """
7319
7320 def __init__(self, **kwargs):
7321
7322 Operation.__init__(self, **kwargs)
7323
7324 def run(self,dataOut):
7325
7326 dataOut.PowDen = numpy.zeros((1,dataOut.NDP))
7327 dataOut.PowDen[0] = numpy.copy(dataOut.ph2[:dataOut.NDP])
7328
7329 dataOut.FarDen = numpy.zeros((1,dataOut.NDP))
7330 dataOut.FarDen[0] = numpy.copy(dataOut.dphi[:dataOut.NDP])
7331 print("pow den shape", numpy.shape(dataOut.PowDen))
7332 print("far den shape", numpy.shape(dataOut.FarDen))
7333 return dataOut
@@ -0,0 +1,982
1 # Copyright (c) 2012-2020 Jicamarca Radio Observatory
2 # All rights reserved.
3 #
4 # Distributed under the terms of the BSD 3-clause license.
5 """Spectra processing Unit and operations
6
7 Here you will find the processing unit `SpectraProc` and several operations
8 to work with Spectra data type
9 """
10
11 import time
12 import itertools
13
14 import numpy
15
16 from schainpy.model.proc.jroproc_base import ProcessingUnit, MPDecorator, Operation
17 from schainpy.model.data.jrodata import Spectra
18 from schainpy.model.data.jrodata import hildebrand_sekhon
19 from schainpy.utils import log
20
21
22 class SpectraProc(ProcessingUnit):
23
24 def __init__(self):
25
26 ProcessingUnit.__init__(self)
27
28 self.buffer = None
29 self.firstdatatime = None
30 self.profIndex = 0
31 self.dataOut = Spectra()
32 self.id_min = None
33 self.id_max = None
34 self.setupReq = False #Add this to all processing units
35
36 def __updateSpecFromVoltage(self):
37
38 self.dataOut.timeZone = self.dataIn.timeZone
39 self.dataOut.dstFlag = self.dataIn.dstFlag
40 self.dataOut.errorCount = self.dataIn.errorCount
41 self.dataOut.useLocalTime = self.dataIn.useLocalTime
42 try:
43 self.dataOut.processingHeaderObj = self.dataIn.processingHeaderObj.copy()
44 except:
45 pass
46 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
47 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
48 self.dataOut.channelList = self.dataIn.channelList
49 self.dataOut.heightList = self.dataIn.heightList
50 self.dataOut.dtype = numpy.dtype([('real', '<f4'), ('imag', '<f4')])
51 self.dataOut.nProfiles = self.dataOut.nFFTPoints
52 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
53 self.dataOut.utctime = self.firstdatatime
54 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData
55 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData
56 self.dataOut.flagShiftFFT = False
57 self.dataOut.nCohInt = self.dataIn.nCohInt
58 self.dataOut.nIncohInt = 1
59 self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
60 self.dataOut.frequency = self.dataIn.frequency
61 self.dataOut.realtime = self.dataIn.realtime
62 self.dataOut.azimuth = self.dataIn.azimuth
63 self.dataOut.zenith = self.dataIn.zenith
64 self.dataOut.beam.codeList = self.dataIn.beam.codeList
65 self.dataOut.beam.azimuthList = self.dataIn.beam.azimuthList
66 self.dataOut.beam.zenithList = self.dataIn.beam.zenithList
67 self.dataOut.runNextUnit = self.dataIn.runNextUnit
68 try:
69 self.dataOut.step = self.dataIn.step
70 except:
71 pass
72
73 def __getFft(self):
74 """
75 Converts Voltage values to Spectra
76
77 Affected:
78 self.dataOut.data_spc
79 self.dataOut.data_cspc
80 self.dataOut.data_dc
81 self.dataOut.heightList
82 self.profIndex
83 self.buffer
84 self.dataOut.flagNoData
85 """
86 fft_volt = numpy.fft.fft(
87 self.buffer, n=self.dataOut.nFFTPoints, axis=1)
88 fft_volt = fft_volt.astype(numpy.dtype('complex'))
89 dc = fft_volt[:, 0, :]
90
91 # compute the self-spectra
92 fft_volt = numpy.fft.fftshift(fft_volt, axes=(1,))
93 spc = fft_volt * numpy.conjugate(fft_volt)
94 spc = spc.real
95
96 blocksize = 0
97 blocksize += dc.size
98 blocksize += spc.size
99
100 cspc = None
101 pairIndex = 0
102 if self.dataOut.pairsList != None:
103 # compute the cross-spectra
104 cspc = numpy.zeros(
105 (self.dataOut.nPairs, self.dataOut.nFFTPoints, self.dataOut.nHeights), dtype='complex')
106 for pair in self.dataOut.pairsList:
107 if pair[0] not in self.dataOut.channelList:
108 raise ValueError("Error getting CrossSpectra: pair 0 of %s is not in channelList = %s" % (
109 str(pair), str(self.dataOut.channelList)))
110 if pair[1] not in self.dataOut.channelList:
111 raise ValueError("Error getting CrossSpectra: pair 1 of %s is not in channelList = %s" % (
112 str(pair), str(self.dataOut.channelList)))
113
114 cspc[pairIndex, :, :] = fft_volt[pair[0], :, :] * \
115 numpy.conjugate(fft_volt[pair[1], :, :])
116 pairIndex += 1
117 blocksize += cspc.size
118
119 self.dataOut.data_spc = spc
120 self.dataOut.data_cspc = cspc
121 self.dataOut.data_dc = dc
122 self.dataOut.blockSize = blocksize
123 self.dataOut.flagShiftFFT = False
124
125 def run(self, nProfiles=None, nFFTPoints=None, pairsList=None, ippFactor=None, shift_fft=False, runNextUnit = 0):
126
127 self.dataIn.runNextUnit = runNextUnit
128 if self.dataIn.type == "Spectra":
129
130 self.dataOut.copy(self.dataIn)
131 if shift_fft:
132 #shift the spectra to the right along the frequency axis by the given number of positions
133 shift = int(self.dataOut.nFFTPoints/2)
134 self.dataOut.data_spc = numpy.roll(self.dataOut.data_spc, shift , axis=1)
135
136 if self.dataOut.data_cspc is not None:
137 #shift the cross-spectra to the right along the frequency axis by the given number of positions
138 self.dataOut.data_cspc = numpy.roll(self.dataOut.data_cspc, shift, axis=1)
139 if pairsList:
140 self.__selectPairs(pairsList)
141
142 elif self.dataIn.type == "Voltage":
143
144 self.dataOut.flagNoData = True
145
146 if nFFTPoints == None:
147 raise ValueError("This SpectraProc.run() need nFFTPoints input variable")
148
149 if nProfiles == None:
150 nProfiles = nFFTPoints
151 #print(self.dataOut.ipp)
152 #exit(1)
153 if ippFactor == None:
154 self.dataOut.ippFactor = 1
155 #if ippFactor is not None:
156 #self.dataOut.ippFactor = ippFactor
157 #print(ippFactor)
158 #print(self.dataOut.ippFactor)
159 #exit(1)
160
161 self.dataOut.nFFTPoints = nFFTPoints
162
163 if self.buffer is None:
164 self.buffer = numpy.zeros((self.dataIn.nChannels,
165 nProfiles,
166 self.dataIn.nHeights),
167 dtype='complex')
168
169 if self.dataIn.flagDataAsBlock:
170 nVoltProfiles = self.dataIn.data.shape[1]
171
172 if nVoltProfiles == nProfiles:
173 self.buffer = self.dataIn.data.copy()
174 self.profIndex = nVoltProfiles
175
176 elif nVoltProfiles < nProfiles:
177
178 if self.profIndex == 0:
179 self.id_min = 0
180 self.id_max = nVoltProfiles
181 #print(self.id_min)
182 #print(self.id_max)
183 #print(numpy.shape(self.buffer))
184 self.buffer[:, self.id_min:self.id_max,
185 :] = self.dataIn.data
186 self.profIndex += nVoltProfiles
187 self.id_min += nVoltProfiles
188 self.id_max += nVoltProfiles
189 else:
190 raise ValueError("The type object %s has %d profiles, it should just has %d profiles" % (
191 self.dataIn.type, self.dataIn.data.shape[1], nProfiles))
192 self.dataOut.flagNoData = True
193 else:
194 self.buffer[:, self.profIndex, :] = self.dataIn.data.copy()
195 self.profIndex += 1
196
197 if self.firstdatatime == None:
198 self.firstdatatime = self.dataIn.utctime
199
200 if self.profIndex == nProfiles:
201 self.__updateSpecFromVoltage()
202 if pairsList == None:
203 self.dataOut.pairsList = [pair for pair in itertools.combinations(self.dataOut.channelList, 2)]
204 else:
205 self.dataOut.pairsList = pairsList
206 self.__getFft()
207 self.dataOut.flagNoData = False
208 self.firstdatatime = None
209 self.profIndex = 0
210 else:
211 raise ValueError("The type of input object '%s' is not valid".format(
212 self.dataIn.type))
213
214
215 def __selectPairs(self, pairsList):
216
217 if not pairsList:
218 return
219
220 pairs = []
221 pairsIndex = []
222
223 for pair in pairsList:
224 if pair[0] not in self.dataOut.channelList or pair[1] not in self.dataOut.channelList:
225 continue
226 pairs.append(pair)
227 pairsIndex.append(self.dataOut.pairsList.index(pair)) #index within the original pairsList, used to slice data_cspc
228
229 self.dataOut.data_cspc = self.dataOut.data_cspc[pairsIndex]
230 self.dataOut.pairsList = pairs
231
232 return
233
234 def selectFFTs(self, minFFT, maxFFT ):
235 """
236 Selects a block of data based on a range of FFT-point values such that
237 minFFT <= FFT <= maxFFT
238 """
239
240 if (minFFT > maxFFT):
241 raise ValueError("Error selecting heights: Height range (%d,%d) is not valid" % (minFFT, maxFFT))
242
243 if (minFFT < self.dataOut.getFreqRange()[0]):
244 minFFT = self.dataOut.getFreqRange()[0]
245
246 if (maxFFT > self.dataOut.getFreqRange()[-1]):
247 maxFFT = self.dataOut.getFreqRange()[-1]
248
249 minIndex = 0
250 maxIndex = 0
251 FFTs = self.dataOut.getFreqRange()
252
253 inda = numpy.where(FFTs >= minFFT)
254 indb = numpy.where(FFTs <= maxFFT)
255
256 try:
257 minIndex = inda[0][0]
258 except:
259 minIndex = 0
260
261 try:
262 maxIndex = indb[0][-1]
263 except:
264 maxIndex = len(FFTs)
265
266 self.selectFFTsByIndex(minIndex, maxIndex)
267
268 return 1
269
270 def getBeaconSignal(self, tauindex=0, channelindex=0, hei_ref=None):
271 newheis = numpy.where(
272 self.dataOut.heightList > self.dataOut.radarControllerHeaderObj.Taus[tauindex])
273
274 if hei_ref != None:
275 newheis = numpy.where(self.dataOut.heightList > hei_ref)
276
277 minIndex = min(newheis[0])
278 maxIndex = max(newheis[0])
279 data_spc = self.dataOut.data_spc[:, :, minIndex:maxIndex + 1]
280 heightList = self.dataOut.heightList[minIndex:maxIndex + 1]
281
282 # determine the indices
283 nheis = int(self.dataOut.radarControllerHeaderObj.txB /
284 (self.dataOut.heightList[1] - self.dataOut.heightList[0]))
285 avg_dB = 10 * \
286 numpy.log10(numpy.sum(data_spc[channelindex, :, :], axis=0))
287 beacon_dB = numpy.sort(avg_dB)[-nheis:]
288 beacon_heiIndexList = []
289 for val in avg_dB.tolist():
290 if val >= beacon_dB[0]:
291 beacon_heiIndexList.append(avg_dB.tolist().index(val))
292
293 #data_spc = data_spc[:,:,beacon_heiIndexList]
294 data_cspc = None
295 if self.dataOut.data_cspc is not None:
296 data_cspc = self.dataOut.data_cspc[:, :, minIndex:maxIndex + 1]
297 #data_cspc = data_cspc[:,:,beacon_heiIndexList]
298
299 data_dc = None
300 if self.dataOut.data_dc is not None:
301 data_dc = self.dataOut.data_dc[:, minIndex:maxIndex + 1]
302 #data_dc = data_dc[:,beacon_heiIndexList]
303
304 self.dataOut.data_spc = data_spc
305 self.dataOut.data_cspc = data_cspc
306 self.dataOut.data_dc = data_dc
307 self.dataOut.heightList = heightList
308 self.dataOut.beacon_heiIndexList = beacon_heiIndexList
309
310 return 1
311
312 def selectFFTsByIndex(self, minIndex, maxIndex):
313 """
314
315 """
316
317 if (minIndex < 0) or (minIndex > maxIndex):
318 raise ValueError("Error selecting heights: Index range (%d,%d) is not valid" % (minIndex, maxIndex))
319
320 if (maxIndex >= self.dataOut.nProfiles):
321 maxIndex = self.dataOut.nProfiles-1
322
323 #Spectra
324 data_spc = self.dataOut.data_spc[:,minIndex:maxIndex+1,:]
325
326 data_cspc = None
327 if self.dataOut.data_cspc is not None:
328 data_cspc = self.dataOut.data_cspc[:,minIndex:maxIndex+1,:]
329
330 data_dc = None
331 if self.dataOut.data_dc is not None:
332 data_dc = self.dataOut.data_dc[minIndex:maxIndex+1,:]
333
334 self.dataOut.data_spc = data_spc
335 self.dataOut.data_cspc = data_cspc
336 self.dataOut.data_dc = data_dc
337
338 self.dataOut.ippSeconds = self.dataOut.ippSeconds*(self.dataOut.nFFTPoints / numpy.shape(data_cspc)[1])
339 self.dataOut.nFFTPoints = numpy.shape(data_cspc)[1]
340 self.dataOut.profilesPerBlock = numpy.shape(data_cspc)[1]
341
342 return 1
343
344 def getNoise(self, minHei=None, maxHei=None, minVel=None, maxVel=None):
345 # height range validation
346 print("NOISeeee")
347 if minHei == None:
348 minHei = self.dataOut.heightList[0]
349
350 if maxHei == None:
351 maxHei = self.dataOut.heightList[-1]
352
353 if (minHei < self.dataOut.heightList[0]) or (minHei > maxHei):
354 print('minHei: %.2f is out of the height range' % (minHei))
355 print('minHei is set to %.2f' % (self.dataOut.heightList[0]))
356 minHei = self.dataOut.heightList[0]
357
358 if (maxHei > self.dataOut.heightList[-1]) or (maxHei < minHei):
359 print('maxHei: %.2f is out of the height range' % (maxHei))
360 print('maxHei is set to %.2f' % (self.dataOut.heightList[-1]))
361 maxHei = self.dataOut.heightList[-1]
362
363 # velocity range validation
364 velrange = self.dataOut.getVelRange(1)
365
366 if minVel == None:
367 minVel = velrange[0]
368
369 if maxVel == None:
370 maxVel = velrange[-1]
371
372 if (minVel < velrange[0]) or (minVel > maxVel):
373 print('minVel: %.2f is out of the velocity range' % (minVel))
374 print('minVel is set to %.2f' % (velrange[0]))
375 minVel = velrange[0]
376
377 if (maxVel > velrange[-1]) or (maxVel < minVel):
378 print('maxVel: %.2f is out of the velocity range' % (maxVel))
379 print('maxVel is set to %.2f' % (velrange[-1]))
380 maxVel = velrange[-1]
381
382         # index selection for the height range
383 minIndex = 0
384 maxIndex = 0
385 heights = self.dataOut.heightList
386
387 inda = numpy.where(heights >= minHei)
388 indb = numpy.where(heights <= maxHei)
389
390 try:
391 minIndex = inda[0][0]
392 except:
393 minIndex = 0
394
395 try:
396 maxIndex = indb[0][-1]
397 except:
398 maxIndex = len(heights)
399
400 if (minIndex < 0) or (minIndex > maxIndex):
401 raise ValueError("some value in (%d,%d) is not valid" % (
402 minIndex, maxIndex))
403
404 if (maxIndex >= self.dataOut.nHeights):
405 maxIndex = self.dataOut.nHeights - 1
406
407         # index selection for the velocity range
408 indminvel = numpy.where(velrange >= minVel)
409 indmaxvel = numpy.where(velrange <= maxVel)
410 try:
411 minIndexVel = indminvel[0][0]
412 except:
413 minIndexVel = 0
414
415 try:
416 maxIndexVel = indmaxvel[0][-1]
417 except:
418 maxIndexVel = len(velrange)
419
420         # spectrum selection
421 data_spc = self.dataOut.data_spc[:,
422 minIndexVel:maxIndexVel + 1, minIndex:maxIndex + 1]
423         # noise estimation
424 noise = numpy.zeros(self.dataOut.nChannels)
425
426 for channel in range(self.dataOut.nChannels):
427 daux = data_spc[channel, :, :]
428 sortdata = numpy.sort(daux, axis=None)
429 noise[channel] = hildebrand_sekhon(sortdata, self.dataOut.nIncohInt)
430
431 self.dataOut.noise_estimation = noise.copy()
432
433 return 1
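A standalone sketch of the per-channel noise loop above; it reuses the hildebrand_sekhon helper that the voltage module below imports from schainpy.model.data.jrodata (array shapes are illustrative):

    import numpy
    from schainpy.model.data.jrodata import hildebrand_sekhon

    def estimate_noise(data_spc, nIncohInt):
        # data_spc: (nChannels, nFFTPoints, nHeights) power spectra
        noise = numpy.zeros(data_spc.shape[0])
        for channel in range(data_spc.shape[0]):
            sortdata = numpy.sort(data_spc[channel], axis=None)   # flatten and sort, as above
            noise[channel] = hildebrand_sekhon(sortdata, nIncohInt)
        return noise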
434
435 class GetSNR(Operation):
436     '''Operation to get SNR.
437
438     Written by R. Flores.
439
440     Parameters
441     ----------
442     None
443
444     Example
445     -------
446
447     op = proc_unit.addOperation(name='GetSNR', optype='other')
448
449     '''
450
451 def __init__(self, **kwargs):
452
453 Operation.__init__(self, **kwargs)
454
455
456 def run(self,dataOut):
457
458 #noise = dataOut.getNoise()
459         noise = dataOut.getNoise(ymin_index=-10)  # upper region where only noise is expected
460 #print("Noise: ", noise)
461 #print("Noise_dB: ", 10*numpy.log10(noise/dataOut.normFactor))
462 #print("Heights: ", dataOut.heightList)
463 #dataOut.data_snr = (dataOut.data_spc.sum(axis=1))/(noise[:,None]*dataOut.normFactor)
464 ################dataOut.data_snr = (dataOut.data_spc.sum(axis=1))/(noise[:,None]*dataOut.nFFTPoints) #Before 12Jan2023
465 #dataOut.data_snr = (dataOut.data_spc.sum(axis=1)-noise[:,None])/(noise[:,None])
466         dataOut.data_snr = (dataOut.data_spc.sum(axis=1)-noise[:,None]*dataOut.nFFTPoints)/(noise[:,None]*dataOut.nFFTPoints)  # (total power - noise floor) / noise floor
467 dataOut.snl = numpy.log10(dataOut.data_snr)
468 #print("snl: ", dataOut.snl)
469 #exit(1)
470 #print(dataOut.heightList[-11])
471 #print(numpy.shape(dataOut.heightList))
472 #print(dataOut.data_snr)
473 #print(dataOut.data_snr[0,-11])
474 #exit(1)
475 #dataOut.data_snr = numpy.where(10*numpy.log10(dataOut.data_snr)<.5, numpy.nan, dataOut.data_snr)
476 #dataOut.data_snr = numpy.where(10*numpy.log10(dataOut.data_snr)<.1, numpy.nan, dataOut.data_snr)
477 #dataOut.data_snr = numpy.where(10*numpy.log10(dataOut.data_snr)<.0, numpy.nan, dataOut.data_snr)
478 #dataOut.data_snr = numpy.where(dataOut.data_snr<.05, numpy.nan, dataOut.data_snr)
479 #dataOut.snl = numpy.where(dataOut.data_snr<.01, numpy.nan, dataOut.snl)
480 dataOut.snl = numpy.where(dataOut.snl<-1, numpy.nan, dataOut.snl)
481 '''
482 import matplotlib.pyplot as plt
483 #plt.plot(10*numpy.log10(dataOut.data_snr[0]),dataOut.heightList)
484 plt.plot(dataOut.data_snr[0],dataOut.heightList)#,marker='*')
485 plt.xlim(-1,10)
486 plt.axvline(1,color='k')
487 plt.axvline(.1,color='k',linestyle='--')
488 plt.grid()
489 plt.show()
490 '''
491 #dataOut.data_snr = 10*numpy.log10(dataOut.data_snr)
492 #dataOut.data_snr = numpy.expand_dims(dataOut.data_snr,axis=0)
493 #print(dataOut.data_snr.shape)
494 #exit(1)
495 #print("Before: ", dataOut.data_snr[0])
496
497
498 return dataOut
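The active line above computes SNR as (total power minus the accumulated noise floor) divided by the noise floor; a minimal numpy sketch of that arithmetic with assumed shapes:

    import numpy

    def snr_from_spectra(data_spc, noise):
        # data_spc: (nChannels, nFFTPoints, nHeights); noise: (nChannels,) per-bin noise level
        nFFTPoints = data_spc.shape[1]
        noise_power = noise[:, None] * nFFTPoints          # noise accumulated over the FFT bins
        signal_power = data_spc.sum(axis=1) - noise_power
        return signal_power / noise_power                  # linear SNR, shape (nChannels, nHeights)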
499
500 class removeDC(Operation):
501
502 def run(self, dataOut, mode=2):
503 self.dataOut = dataOut
504 jspectra = self.dataOut.data_spc
505 jcspectra = self.dataOut.data_cspc
506
507 num_chan = jspectra.shape[0]
508 num_hei = jspectra.shape[2]
509
510 if jcspectra is not None:
511 jcspectraExist = True
512 num_pairs = jcspectra.shape[0]
513 else:
514 jcspectraExist = False
515
516 freq_dc = int(jspectra.shape[1] / 2)
517 ind_vel = numpy.array([-2, -1, 1, 2]) + freq_dc
518 ind_vel = ind_vel.astype(int)
519
520 if ind_vel[0] < 0:
521             ind_vel[0] = ind_vel[0] + jspectra.shape[1]  # wrap the negative index by the number of profiles
522
523 if mode == 1:
524 jspectra[:, freq_dc, :] = (
525                 jspectra[:, ind_vel[1], :] + jspectra[:, ind_vel[2], :]) / 2  # CORRECTION
526
527 if jcspectraExist:
528 jcspectra[:, freq_dc, :] = (
529 jcspectra[:, ind_vel[1], :] + jcspectra[:, ind_vel[2], :]) / 2
530
531 if mode == 2:
532
533 vel = numpy.array([-2, -1, 1, 2])
534 xx = numpy.zeros([4, 4])
535
536 for fil in range(4):
537 xx[fil, :] = vel[fil]**numpy.asarray(list(range(4)))
538
539 xx_inv = numpy.linalg.inv(xx)
540 xx_aux = xx_inv[0, :]
541
542 for ich in range(num_chan):
543 yy = jspectra[ich, ind_vel, :]
544 jspectra[ich, freq_dc, :] = numpy.dot(xx_aux, yy)
545
546 junkid = jspectra[ich, freq_dc, :] <= 0
547 cjunkid = sum(junkid)
548
549 if cjunkid.any():
550 jspectra[ich, freq_dc, junkid.nonzero()] = (
551 jspectra[ich, ind_vel[1], junkid] + jspectra[ich, ind_vel[2], junkid]) / 2
552
553 if jcspectraExist:
554 for ip in range(num_pairs):
555 yy = jcspectra[ip, ind_vel, :]
556 jcspectra[ip, freq_dc, :] = numpy.dot(xx_aux, yy)
557
558 self.dataOut.data_spc = jspectra
559 self.dataOut.data_cspc = jcspectra
560
561 return self.dataOut
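For reference, mode=2 above fits a cubic through the four bins at Doppler offsets ±1 and ±2 and replaces the DC bin with that fit evaluated at zero; a single-channel sketch of the interpolation (names are illustrative):

    import numpy

    def interpolate_dc(spectrum_2d):
        # spectrum_2d: (nFFTPoints, nHeights) power spectrum of one channel
        freq_dc = spectrum_2d.shape[0] // 2
        offsets = numpy.array([-2, -1, 1, 2])
        # Vandermonde matrix of the offsets; the first row of its inverse evaluates the cubic at 0
        xx = numpy.array([offsets**k for k in range(4)]).T
        weights = numpy.linalg.inv(xx)[0, :]
        spectrum_2d[freq_dc, :] = weights @ spectrum_2d[freq_dc + offsets, :]
        return spectrum_2d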
562
563 class removeInterference(Operation):
564
565 def removeInterference2(self):
566
567 cspc = self.dataOut.data_cspc
568 spc = self.dataOut.data_spc
569 Heights = numpy.arange(cspc.shape[2])
570 realCspc = numpy.abs(cspc)
571
572 for i in range(cspc.shape[0]):
573 LinePower= numpy.sum(realCspc[i], axis=0)
574 Threshold = numpy.amax(LinePower)-numpy.sort(LinePower)[len(Heights)-int(len(Heights)*0.1)]
575 SelectedHeights = Heights[ numpy.where( LinePower < Threshold ) ]
576 InterferenceSum = numpy.sum( realCspc[i,:,SelectedHeights], axis=0 )
577 InterferenceThresholdMin = numpy.sort(InterferenceSum)[int(len(InterferenceSum)*0.98)]
578 InterferenceThresholdMax = numpy.sort(InterferenceSum)[int(len(InterferenceSum)*0.99)]
579
580
581 InterferenceRange = numpy.where( ([InterferenceSum > InterferenceThresholdMin]))# , InterferenceSum < InterferenceThresholdMax]) )
582 #InterferenceRange = numpy.where( ([InterferenceRange < InterferenceThresholdMax]))
583 if len(InterferenceRange)<int(cspc.shape[1]*0.3):
584 cspc[i,InterferenceRange,:] = numpy.NaN
585
586 self.dataOut.data_cspc = cspc
587
588 def removeInterference(self, interf = 2, hei_interf = None, nhei_interf = None, offhei_interf = None):
589
590 jspectra = self.dataOut.data_spc
591 jcspectra = self.dataOut.data_cspc
592 jnoise = self.dataOut.getNoise()
593 num_incoh = self.dataOut.nIncohInt
594
595 num_channel = jspectra.shape[0]
596 num_prof = jspectra.shape[1]
597 num_hei = jspectra.shape[2]
598
599 # hei_interf
600 if hei_interf is None:
601 count_hei = int(num_hei / 2)
602 hei_interf = numpy.asmatrix(list(range(count_hei))) + num_hei - count_hei
603 hei_interf = numpy.asarray(hei_interf)[0]
604 # nhei_interf
605 if (nhei_interf == None):
606 nhei_interf = 5
607 if (nhei_interf < 1):
608 nhei_interf = 1
609 if (nhei_interf > count_hei):
610 nhei_interf = count_hei
611 if (offhei_interf == None):
612 offhei_interf = 0
613
614 ind_hei = list(range(num_hei))
615 # mask_prof = numpy.asarray(range(num_prof - 2)) + 1
616 # mask_prof[range(num_prof/2 - 1,len(mask_prof))] += 1
617 mask_prof = numpy.asarray(list(range(num_prof)))
618 num_mask_prof = mask_prof.size
619 comp_mask_prof = [0, num_prof / 2]
620
621         # noise_exist: determines whether jnoise is defined and holds the noise information of each channel
622 if (jnoise.size < num_channel or numpy.isnan(jnoise).any()):
623             jnoise = numpy.nan * numpy.ones(num_channel)  # keep the array shape so jnoise[0] below stays valid
624 noise_exist = jnoise[0] < numpy.Inf
625
626         # Interference removal subroutine
627 for ich in range(num_channel):
628             # Sort the spectra by power (lowest to highest)
629 power = jspectra[ich, mask_prof, :]
630 power = power[:, hei_interf]
631 power = power.sum(axis=0)
632 psort = power.ravel().argsort()
633
634             # Estimate the mean interference in the power spectra using the lowest-power heights
635 junkspc_interf = jspectra[ich, :, hei_interf[psort[list(range(
636 offhei_interf, nhei_interf + offhei_interf))]]]
637
638 if noise_exist:
639 # tmp_noise = jnoise[ich] / num_prof
640 tmp_noise = jnoise[ich]
641 junkspc_interf = junkspc_interf - tmp_noise
642 #junkspc_interf[:,comp_mask_prof] = 0
643
644 jspc_interf = junkspc_interf.sum(axis=0) / nhei_interf
645 jspc_interf = jspc_interf.transpose()
646                 # Compute the mean interference spectrum
647 noiseid = numpy.where(
648 jspc_interf <= tmp_noise / numpy.sqrt(num_incoh))
649 noiseid = noiseid[0]
650 cnoiseid = noiseid.size
651 interfid = numpy.where(
652 jspc_interf > tmp_noise / numpy.sqrt(num_incoh))
653 interfid = interfid[0]
654 cinterfid = interfid.size
655
656 if (cnoiseid > 0):
657 jspc_interf[noiseid] = 0
658
659                 # Expand the set of profiles to clean
660 if (cinterfid > 0):
661 new_interfid = (
662 numpy.r_[interfid - 1, interfid, interfid + 1] + num_prof) % num_prof
663 new_interfid = numpy.asarray(new_interfid)
664 new_interfid = {x for x in new_interfid}
665 new_interfid = numpy.array(list(new_interfid))
666 new_cinterfid = new_interfid.size
667 else:
668 new_cinterfid = 0
669
670 for ip in range(new_cinterfid):
671 ind = junkspc_interf[:, new_interfid[ip]].ravel().argsort()
672 jspc_interf[new_interfid[ip]
673 ] = junkspc_interf[ind[nhei_interf // 2], new_interfid[ip]]
674
675 jspectra[ich, :, ind_hei] = jspectra[ich, :,
676                                                  ind_hei] - jspc_interf  # fix indices
677
678                 # Remove the interference at the profile with the strongest interference
679 ListAux = jspc_interf[mask_prof].tolist()
680 maxid = ListAux.index(max(ListAux))
681
682 if cinterfid > 0:
683 for ip in range(cinterfid * (interf == 2) - 1):
684 ind = (jspectra[ich, interfid[ip], :] < tmp_noise *
685 (1 + 1 / numpy.sqrt(num_incoh))).nonzero()
686 cind = len(ind)
687
688 if (cind > 0):
689 jspectra[ich, interfid[ip], ind] = tmp_noise * \
690 (1 + (numpy.random.uniform(cind) - 0.5) /
691 numpy.sqrt(num_incoh))
692
693 ind = numpy.array([-2, -1, 1, 2])
694 xx = numpy.zeros([4, 4])
695
696 for id1 in range(4):
697 xx[:, id1] = ind[id1]**numpy.asarray(list(range(4)))
698
699 xx_inv = numpy.linalg.inv(xx)
700 xx = xx_inv[:, 0]
701 ind = (ind + maxid + num_mask_prof) % num_mask_prof
702 yy = jspectra[ich, mask_prof[ind], :]
703 jspectra[ich, mask_prof[maxid], :] = numpy.dot(
704 yy.transpose(), xx)
705
706 indAux = (jspectra[ich, :, :] < tmp_noise *
707 (1 - 1 / numpy.sqrt(num_incoh))).nonzero()
708 jspectra[ich, indAux[0], indAux[1]] = tmp_noise * \
709 (1 - 1 / numpy.sqrt(num_incoh))
710
711         # Interference removal in the cross-spectra
712 if jcspectra is None:
713 return jspectra, jcspectra
714 num_pairs = int(jcspectra.size / (num_prof * num_hei))
715 jcspectra = jcspectra.reshape(num_pairs, num_prof, num_hei)
716
717 for ip in range(num_pairs):
718
719 #-------------------------------------------
720
721 cspower = numpy.abs(jcspectra[ip, mask_prof, :])
722 cspower = cspower[:, hei_interf]
723 cspower = cspower.sum(axis=0)
724
725 cspsort = cspower.ravel().argsort()
726 junkcspc_interf = jcspectra[ip, :, hei_interf[cspsort[list(range(
727 offhei_interf, nhei_interf + offhei_interf))]]]
728 junkcspc_interf = junkcspc_interf.transpose()
729 jcspc_interf = junkcspc_interf.sum(axis=1) / nhei_interf
730
731 ind = numpy.abs(jcspc_interf[mask_prof]).ravel().argsort()
732
733 median_real = int(numpy.median(numpy.real(
734 junkcspc_interf[mask_prof[ind[list(range(3 * num_prof // 4))]], :])))
735 median_imag = int(numpy.median(numpy.imag(
736 junkcspc_interf[mask_prof[ind[list(range(3 * num_prof // 4))]], :])))
737 comp_mask_prof = [int(e) for e in comp_mask_prof]
738             junkcspc_interf[comp_mask_prof, :] = complex(
739                 median_real, median_imag)
740
741 for iprof in range(num_prof):
742 ind = numpy.abs(junkcspc_interf[iprof, :]).ravel().argsort()
743 jcspc_interf[iprof] = junkcspc_interf[iprof, ind[nhei_interf // 2]]
744
745             # Remove the interference
746 jcspectra[ip, :, ind_hei] = jcspectra[ip,
747 :, ind_hei] - jcspc_interf
748
749 ListAux = numpy.abs(jcspc_interf[mask_prof]).tolist()
750 maxid = ListAux.index(max(ListAux))
751
752 ind = numpy.array([-2, -1, 1, 2])
753 xx = numpy.zeros([4, 4])
754
755 for id1 in range(4):
756 xx[:, id1] = ind[id1]**numpy.asarray(list(range(4)))
757
758 xx_inv = numpy.linalg.inv(xx)
759 xx = xx_inv[:, 0]
760
761 ind = (ind + maxid + num_mask_prof) % num_mask_prof
762 yy = jcspectra[ip, mask_prof[ind], :]
763 jcspectra[ip, mask_prof[maxid], :] = numpy.dot(yy.transpose(), xx)
764
765         # Save results
766 self.dataOut.data_spc = jspectra
767 self.dataOut.data_cspc = jcspectra
768
769 return 1
770
771 def run(self, dataOut, interf = 2,hei_interf = None, nhei_interf = None, offhei_interf = None, mode=1):
772
773 self.dataOut = dataOut
774
775 if mode == 1:
776             self.removeInterference(interf=interf, hei_interf=hei_interf, nhei_interf=nhei_interf, offhei_interf=offhei_interf)
777 elif mode == 2:
778 self.removeInterference2()
779
780 return self.dataOut
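A hedged usage sketch following the addOperation pattern from the GetSNR docstring above; the addParameter calls and format strings are assumed from schainpy controller conventions:

    # mode=1: per-profile interference removal, mode=2: cross-spectra thresholding
    op = proc_unit.addOperation(name='removeInterference', optype='other')
    op.addParameter(name='mode', value='1', format='int')
    op.addParameter(name='nhei_interf', value='5', format='int')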
781
782
783 class IncohInt(Operation):
784
785 __profIndex = 0
786 __withOverapping = False
787
788 __byTime = False
789 __initime = None
790 __lastdatatime = None
791 __integrationtime = None
792
793 __buffer_spc = None
794 __buffer_cspc = None
795 __buffer_dc = None
796
797 __dataReady = False
798
799 __timeInterval = None
800
801 n = None
802
803 def __init__(self):
804
805 Operation.__init__(self)
806
807 def setup(self, n=None, timeInterval=None, overlapping=False):
808 """
809 Set the parameters of the integration class.
810
811 Inputs:
812
813 n : Number of coherent integrations
814             timeInterval : Integration time in seconds; ignored when "n" is given
815 overlapping :
816
817 """
818
819 self.__initime = None
820 self.__lastdatatime = 0
821
822 self.__buffer_spc = 0
823 self.__buffer_cspc = 0
824 self.__buffer_dc = 0
825
826 self.__profIndex = 0
827 self.__dataReady = False
828 self.__byTime = False
829
830 if n is None and timeInterval is None:
831 raise ValueError("n or timeInterval should be specified ...")
832
833 if n is not None:
834 self.n = int(n)
835 else:
836
837 self.__integrationtime = int(timeInterval)
838 self.n = None
839 self.__byTime = True
840
841 def putData(self, data_spc, data_cspc, data_dc):
842 """
843         Add a profile to __buffer_spc and increase __profIndex by one
844
845 """
846
847 self.__buffer_spc += data_spc
848
849 if data_cspc is None:
850 self.__buffer_cspc = None
851 else:
852 self.__buffer_cspc += data_cspc
853
854 if data_dc is None:
855 self.__buffer_dc = None
856 else:
857 self.__buffer_dc += data_dc
858
859 self.__profIndex += 1
860
861 return
862
863 def pushData(self):
864 """
865 Return the sum of the last profiles and the profiles used in the sum.
866
867 Affected:
868
869 self.__profileIndex
870
871 """
872
873 data_spc = self.__buffer_spc
874 data_cspc = self.__buffer_cspc
875 data_dc = self.__buffer_dc
876 n = self.__profIndex
877
878 self.__buffer_spc = 0
879 self.__buffer_cspc = 0
880 self.__buffer_dc = 0
881 self.__profIndex = 0
882
883 return data_spc, data_cspc, data_dc, n
884
885 def byProfiles(self, *args):
886
887 self.__dataReady = False
888 avgdata_spc = None
889 avgdata_cspc = None
890 avgdata_dc = None
891
892 self.putData(*args)
893
894 if self.__profIndex == self.n:
895
896 avgdata_spc, avgdata_cspc, avgdata_dc, n = self.pushData()
897 self.n = n
898 self.__dataReady = True
899
900 return avgdata_spc, avgdata_cspc, avgdata_dc
901
902 def byTime(self, datatime, *args):
903
904 self.__dataReady = False
905 avgdata_spc = None
906 avgdata_cspc = None
907 avgdata_dc = None
908
909 self.putData(*args)
910
911 if (datatime - self.__initime) >= self.__integrationtime:
912 avgdata_spc, avgdata_cspc, avgdata_dc, n = self.pushData()
913 self.n = n
914 self.__dataReady = True
915
916 return avgdata_spc, avgdata_cspc, avgdata_dc
917
918 def integrate(self, datatime, *args):
919
920 if self.__profIndex == 0:
921 self.__initime = datatime
922
923 if self.__byTime:
924 avgdata_spc, avgdata_cspc, avgdata_dc = self.byTime(
925 datatime, *args)
926 else:
927 avgdata_spc, avgdata_cspc, avgdata_dc = self.byProfiles(*args)
928
929 if not self.__dataReady:
930 return None, None, None, None
931
932 return self.__initime, avgdata_spc, avgdata_cspc, avgdata_dc
933
934 def run(self, dataOut, n=None, timeInterval=None, overlapping=False):
935 if n == 1:
936 return dataOut
938 dataOut.flagNoData = True
939
940 if not self.isConfig:
941 self.setup(n, timeInterval, overlapping)
942 self.isConfig = True
943
944 avgdatatime, avgdata_spc, avgdata_cspc, avgdata_dc = self.integrate(dataOut.utctime,
945 dataOut.data_spc,
946 dataOut.data_cspc,
947 dataOut.data_dc)
948
949 if self.__dataReady:
950
951 dataOut.data_spc = avgdata_spc
954 dataOut.data_cspc = avgdata_cspc
955 dataOut.data_dc = avgdata_dc
956 dataOut.nIncohInt *= self.n
957 dataOut.utctime = avgdatatime
958 dataOut.flagNoData = False
959
960 return dataOut
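A minimal sketch of driving the integration above directly; dataOut is assumed to be the Spectra object produced by the preceding unit:

    op = IncohInt()
    # the framework calls run() once per incoming Spectra object; data is ready every n inputs
    dataOut = op.run(dataOut, n=30)
    if not dataOut.flagNoData:
        print(dataOut.nIncohInt, dataOut.utctime)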
961
962 class dopplerFlip(Operation):
963
964 def run(self, dataOut, chann = None):
965         # array 1: (num_chan, num_profiles, num_heights)
966 self.dataOut = dataOut
967         # JULIA oblique, index 2
968         # array 2: (num_profiles, num_heights)
969 jspectra = self.dataOut.data_spc[chann]
970 jspectra_tmp = numpy.zeros(jspectra.shape)
971 num_profiles = jspectra.shape[0]
972 freq_dc = int(num_profiles / 2)
973         # Flip using a for loop
974 for j in range(num_profiles):
975 jspectra_tmp[num_profiles-j-1]= jspectra[j]
976         # Keep the DC profile and the immediately previous one unchanged (undo their swap)
977 jspectra_tmp[freq_dc-1]= jspectra[freq_dc-1]
978 jspectra_tmp[freq_dc]= jspectra[freq_dc]
979         # the modified channel is written back into the channel array
980 self.dataOut.data_spc[chann] = jspectra_tmp
981
982 return self.dataOut
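The loop above reverses the Doppler axis of one channel and then restores the two bins around DC; an equivalent vectorized sketch (illustrative, not part of the class):

    import numpy

    def doppler_flip(spectrum_2d):
        # spectrum_2d: (num_profiles, num_heights) spectrum of a single channel
        freq_dc = spectrum_2d.shape[0] // 2
        flipped = spectrum_2d[::-1].copy()               # reverse the Doppler (profile) axis
        flipped[freq_dc - 1] = spectrum_2d[freq_dc - 1]  # keep the bin before DC unchanged
        flipped[freq_dc] = spectrum_2d[freq_dc]          # keep the DC bin unchanged
        return flipped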
This diff has been collapsed as it changes many lines (1644 lines changed).
@@ -0,0 +1,1644
1
2 import os
3 import sys
4 import numpy, math
5 from scipy import interpolate
6 from scipy.optimize import nnls
7 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
8 from schainpy.model.data.jrodata import Voltage,hildebrand_sekhon
9 from schainpy.utils import log
10 from time import time, mktime, strptime, gmtime, ctime
11 from scipy.optimize import least_squares
12 import datetime
13 import collections.abc
14
15 try:
16 from schainpy.model.proc import fitacf_guess
17 from schainpy.model.proc import fitacf_fit_short
18 from schainpy.model.proc import fitacf_acf2
19 from schainpy.model.proc import full_profile_profile
20 except:
21 log.warning('Missing Faraday fortran libs')
22
23 class VoltageProc(ProcessingUnit):
24
25 def __init__(self):
26
27 ProcessingUnit.__init__(self)
28
29 self.dataOut = Voltage()
30 self.flip = 1
31 self.setupReq = False
32
33 def run(self, runNextUnit = 0):
34
35 if self.dataIn.type == 'AMISR':
36 self.__updateObjFromAmisrInput()
37
38 if self.dataIn.type == 'Voltage':
39 self.dataOut.copy(self.dataIn)
40 self.dataOut.runNextUnit = runNextUnit
41
42 def __updateObjFromAmisrInput(self):
43
44 self.dataOut.timeZone = self.dataIn.timeZone
45 self.dataOut.dstFlag = self.dataIn.dstFlag
46 self.dataOut.errorCount = self.dataIn.errorCount
47 self.dataOut.useLocalTime = self.dataIn.useLocalTime
48
49 self.dataOut.flagNoData = self.dataIn.flagNoData
50 self.dataOut.data = self.dataIn.data
51 self.dataOut.utctime = self.dataIn.utctime
52 self.dataOut.channelList = self.dataIn.channelList
53 #self.dataOut.timeInterval = self.dataIn.timeInterval
54 self.dataOut.heightList = self.dataIn.heightList
55 self.dataOut.nProfiles = self.dataIn.nProfiles
56
57 self.dataOut.nCohInt = self.dataIn.nCohInt
58 self.dataOut.ippSeconds = self.dataIn.ippSeconds
59 self.dataOut.frequency = self.dataIn.frequency
60
61 self.dataOut.azimuth = self.dataIn.azimuth
62 self.dataOut.zenith = self.dataIn.zenith
63
64 self.dataOut.beam.codeList = self.dataIn.beam.codeList
65 self.dataOut.beam.azimuthList = self.dataIn.beam.azimuthList
66 self.dataOut.beam.zenithList = self.dataIn.beam.zenithList
67
68
69 class selectChannels(Operation):
70
71 def run(self, dataOut, channelList):
72
73 channelIndexList = []
74 self.dataOut = dataOut
75 for channel in channelList:
76 if channel not in self.dataOut.channelList:
77 raise ValueError("Channel %d is not in %s" %(channel, str(self.dataOut.channelList)))
78
79 index = self.dataOut.channelList.index(channel)
80 channelIndexList.append(index)
81 self.selectChannelsByIndex(channelIndexList)
82 return self.dataOut
83
84 def selectChannelsByIndex(self, channelIndexList):
85 """
86 Selecciona un bloque de datos en base a canales segun el channelIndexList
87
88 Input:
89 channelIndexList : lista sencilla de canales a seleccionar por ej. [2,3,7]
90
91 Affected:
92 self.dataOut.data
93 self.dataOut.channelIndexList
94 self.dataOut.nChannels
95 self.dataOut.m_ProcessingHeader.totalSpectra
96 self.dataOut.systemHeaderObj.numChannels
97 self.dataOut.m_ProcessingHeader.blockSize
98
99 Return:
100 None
101 """
102
103 for channelIndex in channelIndexList:
104 if channelIndex not in self.dataOut.channelIndexList:
105 raise ValueError("The value %d in channelIndexList is not valid" %channelIndex)
106
107 if self.dataOut.type == 'Voltage':
108 if self.dataOut.flagDataAsBlock:
109 """
110 Si la data es obtenida por bloques, dimension = [nChannels, nProfiles, nHeis]
111 """
112 data = self.dataOut.data[channelIndexList,:,:]
113 else:
114 data = self.dataOut.data[channelIndexList,:]
115
116 self.dataOut.data = data
117 # self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
118 self.dataOut.channelList = range(len(channelIndexList))
119
120 elif self.dataOut.type == 'Spectra':
121 data_spc = self.dataOut.data_spc[channelIndexList, :]
122 data_dc = self.dataOut.data_dc[channelIndexList, :]
123
124 self.dataOut.data_spc = data_spc
125 self.dataOut.data_dc = data_dc
126
127 # self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
128 self.dataOut.channelList = range(len(channelIndexList))
129 self.__selectPairsByChannel(channelIndexList)
130
131 return 1
132
133 def __selectPairsByChannel(self, channelList=None):
134
135 if channelList == None:
136 return
137
138 pairsIndexListSelected = []
139 for pairIndex in self.dataOut.pairsIndexList:
140 # First pair
141 if self.dataOut.pairsList[pairIndex][0] not in channelList:
142 continue
143 # Second pair
144 if self.dataOut.pairsList[pairIndex][1] not in channelList:
145 continue
146
147 pairsIndexListSelected.append(pairIndex)
148
149 if not pairsIndexListSelected:
150 self.dataOut.data_cspc = None
151 self.dataOut.pairsList = []
152 return
153
154 self.dataOut.data_cspc = self.dataOut.data_cspc[pairsIndexListSelected]
155 self.dataOut.pairsList = [self.dataOut.pairsList[i]
156 for i in pairsIndexListSelected]
157
158 return
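A hedged usage sketch of the channel selection above (controller-style; the addParameter call and the 'intlist' format are assumed from schainpy conventions):

    # keep physical channels 0 and 1; channelList is remapped to range(len(channelList))
    op = proc_unit.addOperation(name='selectChannels', optype='other')
    op.addParameter(name='channelList', value='0,1', format='intlist')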
159
160 class selectHeights(Operation):
161
162 def run(self, dataOut, minHei=None, maxHei=None, minIndex=None, maxIndex=None):
163 """
164         Select a block of data by a group of height values within the range
165         minHei <= height <= maxHei
166
167         Input:
168             minHei : minimum height to consider
169             maxHei : maximum height to consider
170
171         Affected:
172             Several values are changed indirectly through the selectHeightsByIndex method
173
174         Return:
175             1 if the method ran successfully, 0 otherwise
176 """
177
178 self.dataOut = dataOut
179
180 if minHei and maxHei:
181
182 if (minHei < self.dataOut.heightList[0]):
183 minHei = self.dataOut.heightList[0]
184
185 if (maxHei > self.dataOut.heightList[-1]):
186 maxHei = self.dataOut.heightList[-1]
187
188 minIndex = 0
189 maxIndex = 0
190 heights = self.dataOut.heightList
191
192 inda = numpy.where(heights >= minHei)
193 indb = numpy.where(heights <= maxHei)
194
195 try:
196 minIndex = inda[0][0]
197 except:
198 minIndex = 0
199
200 try:
201 maxIndex = indb[0][-1]
202 except:
203 maxIndex = len(heights)
204
205 self.selectHeightsByIndex(minIndex, maxIndex)
206
207 return self.dataOut
208
209 def selectHeightsByIndex(self, minIndex, maxIndex):
210 """
211         Select a block of data by a group of height indexes within the range
212         minIndex <= index <= maxIndex
213
214         Input:
215             minIndex : minimum height index to consider
216             maxIndex : maximum height index to consider
217
218         Affected:
219             self.dataOut.data
220             self.dataOut.heightList
221
222         Return:
223             1 if the method ran successfully, 0 otherwise
224 """
225
226 if self.dataOut.type == 'Voltage':
227 if (minIndex < 0) or (minIndex > maxIndex):
228 raise ValueError("Height index range (%d,%d) is not valid" % (minIndex, maxIndex))
229
230 if (maxIndex >= self.dataOut.nHeights):
231 maxIndex = self.dataOut.nHeights
232
233 #voltage
234 if self.dataOut.flagDataAsBlock:
235 """
236                 If the data is read as blocks, dimension = [nChannels, nProfiles, nHeis]
237 """
238 data = self.dataOut.data[:,:, minIndex:maxIndex]
239 else:
240 data = self.dataOut.data[:, minIndex:maxIndex]
241
242 # firstHeight = self.dataOut.heightList[minIndex]
243
244 self.dataOut.data = data
245 self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex]
246
247 if self.dataOut.nHeights <= 1:
248 raise ValueError("selectHeights: Too few heights. Current number of heights is %d" %(self.dataOut.nHeights))
249 elif self.dataOut.type == 'Spectra':
250 if (minIndex < 0) or (minIndex > maxIndex):
251 raise ValueError("Error selecting heights: Index range (%d,%d) is not valid" % (
252 minIndex, maxIndex))
253
254 if (maxIndex >= self.dataOut.nHeights):
255 maxIndex = self.dataOut.nHeights - 1
256
257 # Spectra
258 data_spc = self.dataOut.data_spc[:, :, minIndex:maxIndex + 1]
259
260 data_cspc = None
261 if self.dataOut.data_cspc is not None:
262 data_cspc = self.dataOut.data_cspc[:, :, minIndex:maxIndex + 1]
263
264 data_dc = None
265 if self.dataOut.data_dc is not None:
266 data_dc = self.dataOut.data_dc[:, minIndex:maxIndex + 1]
267
268 self.dataOut.data_spc = data_spc
269 self.dataOut.data_cspc = data_cspc
270 self.dataOut.data_dc = data_dc
271
272 self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex + 1]
273
274 return 1
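A direct-call sketch of the height selection above; dataOut is assumed to be a Voltage or Spectra object with a populated heightList (values in km):

    op = selectHeights()
    dataOut = op.run(dataOut, minHei=80.0, maxHei=120.0)
    print(dataOut.heightList[0], dataOut.heightList[-1])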
275
276
277 class filterByHeights(Operation):
278
279 def run(self, dataOut, window):
280
281 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
282
283 if window == None:
284 window = (dataOut.radarControllerHeaderObj.txA/dataOut.radarControllerHeaderObj.nBaud) / deltaHeight
285
286 newdelta = deltaHeight * window
287 r = dataOut.nHeights % window
288 newheights = (dataOut.nHeights-r)/window
289
290 if newheights <= 1:
291 raise ValueError("filterByHeights: Too few heights. Current number of heights is %d and window is %d" %(dataOut.nHeights, window))
292
293 if dataOut.flagDataAsBlock:
294 """
295             If the data is read as blocks, dimension = [nChannels, nProfiles, nHeis]
296 """
297 buffer = dataOut.data[:, :, 0:int(dataOut.nHeights-r)]
298 buffer = buffer.reshape(dataOut.nChannels, dataOut.nProfiles, int(dataOut.nHeights/window), window)
299 buffer = numpy.sum(buffer,3)
300
301 else:
302 buffer = dataOut.data[:,0:int(dataOut.nHeights-r)]
303 buffer = buffer.reshape(dataOut.nChannels,int(dataOut.nHeights/window),int(window))
304 buffer = numpy.sum(buffer,2)
305
306 dataOut.data = buffer
307 dataOut.heightList = dataOut.heightList[0] + numpy.arange( newheights )*newdelta
308 dataOut.windowOfFilter = window
309
310 return dataOut
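As a concrete example of the decimation above: 1000 heights at 0.15 km spacing with window=4 become 250 heights spaced 0.6 km apart, each the sum of 4 samples. A minimal single-profile sketch of the same reshape-and-sum:

    import numpy

    def filter_heights(profile, window):
        # profile: (nHeights,) complex samples of one channel/profile
        r = profile.size % window                       # drop the remainder samples
        trimmed = profile[:profile.size - r]
        return trimmed.reshape(-1, window).sum(axis=1)  # one output sample per window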
311
312
313 class setH0(Operation):
314
315 def run(self, dataOut, h0, deltaHeight = None):
316
317 if not deltaHeight:
318 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
319
320 nHeights = dataOut.nHeights
321
322 newHeiRange = h0 + numpy.arange(nHeights)*deltaHeight
323
324 dataOut.heightList = newHeiRange
325
326 return dataOut
327
328
329 class deFlip(Operation):
330 def __init__(self):
331
332 self.flip = 1
333
334 def run(self, dataOut, channelList = []):
335
336 data = dataOut.data.copy()
337
338         if channelList==1: #PATCH: a single-channel list produces an error
339 channelList=[1]
340
341 dataOut.FlipChannels=channelList
342 if dataOut.flagDataAsBlock:
343 flip = self.flip
344 profileList = list(range(dataOut.nProfiles))
345
346 if not channelList:
347 for thisProfile in profileList:
348 data[:,thisProfile,:] = data[:,thisProfile,:]*flip
349 flip *= -1.0
350 else:
351 for thisChannel in channelList:
352 if thisChannel not in dataOut.channelList:
353 continue
354
355 for thisProfile in profileList:
356 data[thisChannel,thisProfile,:] = data[thisChannel,thisProfile,:]*flip
357 flip *= -1.0
358
359 self.flip = flip
360
361 else:
362 if not channelList:
363 data[:,:] = data[:,:]*self.flip
364 else:
365 for thisChannel in channelList:
366 if thisChannel not in dataOut.channelList:
367 continue
368
369 data[thisChannel,:] = data[thisChannel,:]*self.flip
370
371 self.flip *= -1.
372
373 dataOut.data = data
374
375 return dataOut
376
377
378 class setAttribute(Operation):
379 '''
380 Set an arbitrary attribute(s) to dataOut
381 '''
382
383 def __init__(self):
384
385 Operation.__init__(self)
386 self._ready = False
387
388 def run(self, dataOut, **kwargs):
389
390 for key, value in kwargs.items():
391 setattr(dataOut, key, value)
392
393 return dataOut
394
395
396 @MPDecorator
397 class printAttribute(Operation):
398 '''
399 Print an arbitrary attribute of dataOut
400 '''
401
402 def __init__(self):
403
404 Operation.__init__(self)
405
406 def run(self, dataOut, attributes):
407
408 if isinstance(attributes, str):
409 attributes = [attributes]
410 for attr in attributes:
411 if hasattr(dataOut, attr):
412 log.log(getattr(dataOut, attr), attr)
413
414
415 class interpolateHeights(Operation):
416
417 def run(self, dataOut, topLim, botLim):
418         # 69 to 72 for JULIA
419         # 82-84 for meteors
420 if len(numpy.shape(dataOut.data))==2:
421 sampInterp = (dataOut.data[:,botLim-1] + dataOut.data[:,topLim+1])/2
422 sampInterp = numpy.transpose(numpy.tile(sampInterp,(topLim-botLim + 1,1)))
423 #dataOut.data[:,botLim:limSup+1] = sampInterp
424 dataOut.data[:,botLim:topLim+1] = sampInterp
425 else:
426 nHeights = dataOut.data.shape[2]
427 x = numpy.hstack((numpy.arange(botLim),numpy.arange(topLim+1,nHeights)))
428 y = dataOut.data[:,:,list(range(botLim))+list(range(topLim+1,nHeights))]
429 f = interpolate.interp1d(x, y, axis = 2)
430 xnew = numpy.arange(botLim,topLim+1)
431 ynew = f(xnew)
432 dataOut.data[:,:,botLim:topLim+1] = ynew
433
434 return dataOut
435
436
437 class CohInt(Operation):
438
439 isConfig = False
440 __profIndex = 0
441 __byTime = False
442 __initime = None
443 __lastdatatime = None
444 __integrationtime = None
445 __buffer = None
446 __bufferStride = []
447 __dataReady = False
448 __profIndexStride = 0
449 __dataToPutStride = False
450 n = None
451
452 def __init__(self, **kwargs):
453
454 Operation.__init__(self, **kwargs)
455
456 def setup(self, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False):
457 """
458 Set the parameters of the integration class.
459
460 Inputs:
461
462 n : Number of coherent integrations
463             timeInterval : Integration time in seconds; ignored when "n" is given
464 overlapping :
465 """
466
467 self.__initime = None
468 self.__lastdatatime = 0
469 self.__buffer = None
470 self.__dataReady = False
471 self.byblock = byblock
472 self.stride = stride
473
474 if n == None and timeInterval == None:
475 raise ValueError("n or timeInterval should be specified ...")
476
477 if n != None:
478 self.n = n
479 self.__byTime = False
480 else:
481 self.__integrationtime = timeInterval #* 60. #if (type(timeInterval)!=integer) -> change this line
482 self.n = 9999
483 self.__byTime = True
484
485 if overlapping:
486 self.__withOverlapping = True
487 self.__buffer = None
488 else:
489 self.__withOverlapping = False
490 self.__buffer = 0
491
492 self.__profIndex = 0
493
494 def putData(self, data):
495
496 """
497 Add a profile to the __buffer and increase in one the __profileIndex
498
499 """
500
501 if not self.__withOverlapping:
502 self.__buffer += data.copy()
503 self.__profIndex += 1
504 return
505
506 #Overlapping data
507 nChannels, nHeis = data.shape
508 data = numpy.reshape(data, (1, nChannels, nHeis))
509
510 #If the buffer is empty then it takes the data value
511 if self.__buffer is None:
512 self.__buffer = data
513 self.__profIndex += 1
514 return
515
516         #If the buffer length is lower than n then stacking the data value
517 if self.__profIndex < self.n:
518 self.__buffer = numpy.vstack((self.__buffer, data))
519 self.__profIndex += 1
520 return
521
522 #If the buffer length is equal to n then replacing the last buffer value with the data value
523 self.__buffer = numpy.roll(self.__buffer, -1, axis=0)
524 self.__buffer[self.n-1] = data
525 self.__profIndex = self.n
526 return
527
528
529 def pushData(self):
530 """
531 Return the sum of the last profiles and the profiles used in the sum.
532
533 Affected:
534
535 self.__profileIndex
536
537 """
538
539 if not self.__withOverlapping:
540 data = self.__buffer
541 n = self.__profIndex
542
543 self.__buffer = 0
544 self.__profIndex = 0
545
546 return data, n
547
548 #Integration with Overlapping
549 data = numpy.sum(self.__buffer, axis=0)
550 # print data
551 # raise
552 n = self.__profIndex
553
554 return data, n
555
556 def byProfiles(self, data):
557
558 self.__dataReady = False
559 avgdata = None
560 # n = None
561 # print data
562 # raise
563 self.putData(data)
564
565 if self.__profIndex == self.n:
566 avgdata, n = self.pushData()
567 self.__dataReady = True
568
569 return avgdata
570
571 def byTime(self, data, datatime):
572
573 self.__dataReady = False
574 avgdata = None
575 n = None
576
577 self.putData(data)
578
579 if (datatime - self.__initime) >= self.__integrationtime:
580 avgdata, n = self.pushData()
581 self.n = n
582 self.__dataReady = True
583
584 return avgdata
585
586 def integrateByStride(self, data, datatime):
587 # print data
588 if self.__profIndex == 0:
589 self.__buffer = [[data.copy(), datatime]]
590 else:
591 self.__buffer.append([data.copy(),datatime])
592 self.__profIndex += 1
593 self.__dataReady = False
594
595 if self.__profIndex == self.n * self.stride :
596 self.__dataToPutStride = True
597 self.__profIndexStride = 0
598 self.__profIndex = 0
599 self.__bufferStride = []
600 for i in range(self.stride):
601 current = self.__buffer[i::self.stride]
602 data = numpy.sum([t[0] for t in current], axis=0)
603 avgdatatime = numpy.average([t[1] for t in current])
604 # print data
605 self.__bufferStride.append((data, avgdatatime))
606
607 if self.__dataToPutStride:
608 self.__dataReady = True
609 self.__profIndexStride += 1
610 if self.__profIndexStride == self.stride:
611 self.__dataToPutStride = False
612 # print self.__bufferStride[self.__profIndexStride - 1]
613 # raise
614 return self.__bufferStride[self.__profIndexStride - 1]
615
616
617 return None, None
618
619 def integrate(self, data, datatime=None):
620
621 if self.__initime == None:
622 self.__initime = datatime
623
624 if self.__byTime:
625 avgdata = self.byTime(data, datatime)
626 else:
627 avgdata = self.byProfiles(data)
628
629
630 self.__lastdatatime = datatime
631
632 if avgdata is None:
633 return None, None
634
635 avgdatatime = self.__initime
636
637 deltatime = datatime - self.__lastdatatime
638
639 if not self.__withOverlapping:
640 self.__initime = datatime
641 else:
642 self.__initime += deltatime
643
644 return avgdata, avgdatatime
645
646 def integrateByBlock(self, dataOut):
647
648 times = int(dataOut.data.shape[1]/self.n)
649 avgdata = numpy.zeros((dataOut.nChannels, times, dataOut.nHeights), dtype=numpy.complex)
650
651 id_min = 0
652 id_max = self.n
653
654 for i in range(times):
655 junk = dataOut.data[:,id_min:id_max,:]
656 avgdata[:,i,:] = junk.sum(axis=1)
657 id_min += self.n
658 id_max += self.n
659
660 timeInterval = dataOut.ippSeconds*self.n
661 avgdatatime = (times - 1) * timeInterval + dataOut.utctime
662 self.__dataReady = True
663 return avgdata, avgdatatime
664
665 def run(self, dataOut, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False, **kwargs):
666
667 if not self.isConfig:
668 self.setup(n=n, stride=stride, timeInterval=timeInterval, overlapping=overlapping, byblock=byblock, **kwargs)
669 self.isConfig = True
670 if dataOut.flagDataAsBlock:
671 """
672             If the data is read as blocks, dimension = [nChannels, nProfiles, nHeis]
673 """
674 avgdata, avgdatatime = self.integrateByBlock(dataOut)
675 dataOut.nProfiles /= self.n
676 else:
677 if stride is None:
678 avgdata, avgdatatime = self.integrate(dataOut.data, dataOut.utctime)
679 else:
680 avgdata, avgdatatime = self.integrateByStride(dataOut.data, dataOut.utctime)
681
682
683 # dataOut.timeInterval *= n
684 dataOut.flagNoData = True
685
686 if self.__dataReady:
687 dataOut.data = avgdata
688 if not dataOut.flagCohInt:
689 dataOut.nCohInt *= self.n
690 dataOut.flagCohInt = True
691 dataOut.utctime = avgdatatime
692 # print avgdata, avgdatatime
693 # raise
694 # dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt
695 dataOut.flagNoData = False
696 return dataOut
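A hedged configuration sketch for the coherent integration above (controller-style; addParameter assumed from schainpy conventions; n and timeInterval are alternatives, as in setup):

    # sum 64 consecutive voltage profiles into one
    op = proc_unit.addOperation(name='CohInt', optype='other')
    op.addParameter(name='n', value='64', format='int')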
697
698 class Decoder(Operation):
699
700 isConfig = False
701 __profIndex = 0
702
703 code = None
704
705 nCode = None
706 nBaud = None
707
708 def __init__(self, **kwargs):
709
710 Operation.__init__(self, **kwargs)
711
712 self.times = None
713 self.osamp = None
714 # self.__setValues = False
715 self.isConfig = False
716 self.setupReq = False
717 def setup(self, code, osamp, dataOut):
718
719 self.__profIndex = 0
720
721 self.code = code
722
723 self.nCode = len(code)
724 self.nBaud = len(code[0])
725
726 if (osamp != None) and (osamp >1):
727 self.osamp = osamp
728 self.code = numpy.repeat(code, repeats=self.osamp, axis=1)
729 self.nBaud = self.nBaud*self.osamp
730
731 self.__nChannels = dataOut.nChannels
732 self.__nProfiles = dataOut.nProfiles
733 self.__nHeis = dataOut.nHeights
734
735 if self.__nHeis < self.nBaud:
736 raise ValueError('Number of heights (%d) should be greater than number of bauds (%d)' %(self.__nHeis, self.nBaud))
737
738 #Frequency
739 __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex)
740
741 __codeBuffer[:,0:self.nBaud] = self.code
742
743 self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1))
744
745 if dataOut.flagDataAsBlock:
746
747 self.ndatadec = self.__nHeis #- self.nBaud + 1
748
749 self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex)
750
751 else:
752
753 #Time
754 self.ndatadec = self.__nHeis #- self.nBaud + 1
755
756 self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex)
757
758 def __convolutionInFreq(self, data):
759
760 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
761
762 fft_data = numpy.fft.fft(data, axis=1)
763
764 conv = fft_data*fft_code
765
766 data = numpy.fft.ifft(conv,axis=1)
767
768 return data
769
770 def __convolutionInFreqOpt(self, data):
771
772 raise NotImplementedError
773
774 def __convolutionInTime(self, data):
775
776 code = self.code[self.__profIndex]
777 for i in range(self.__nChannels):
778 self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:]
779
780 return self.datadecTime
781
782 def __convolutionByBlockInTime(self, data):
783
784 repetitions = int(self.__nProfiles / self.nCode)
785 junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize))
786 junk = junk.flatten()
787 code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud))
788 profilesList = range(self.__nProfiles)
789
790 for i in range(self.__nChannels):
791 for j in profilesList:
792 self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:]
793 return self.datadecTime
794
795 def __convolutionByBlockInFreq(self, data):
796
797         raise NotImplementedError("Decoder by frequency for blocks not implemented")
798
799
800 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
801
802 fft_data = numpy.fft.fft(data, axis=2)
803
804 conv = fft_data*fft_code
805
806 data = numpy.fft.ifft(conv,axis=2)
807
808 return data
809
810
811 def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None):
812
813 if dataOut.flagDecodeData:
814             print("This data is already decoded, decoding again ...")
815
816 if not self.isConfig:
817
818 if code is None:
819 if dataOut.code is None:
820 raise ValueError("Code could not be read from %s instance. Enter a value in Code parameter" %dataOut.type)
821
822 code = dataOut.code
823 else:
824 code = numpy.array(code).reshape(nCode,nBaud)
825 self.setup(code, osamp, dataOut)
826
827 self.isConfig = True
828
829 if mode == 3:
830 sys.stderr.write("Decoder Warning: mode=%d is not valid, using mode=0\n" %mode)
831
832 if times != None:
833 sys.stderr.write("Decoder Warning: Argument 'times' in not used anymore\n")
834
835 if self.code is None:
836 print("Fail decoding: Code is not defined.")
837 return
838
839 self.__nProfiles = dataOut.nProfiles
840 datadec = None
841
842 if mode == 3:
843 mode = 0
844
845 if dataOut.flagDataAsBlock:
846 """
847 Decoding when data have been read as block,
848 """
849
850 if mode == 0:
851 datadec = self.__convolutionByBlockInTime(dataOut.data)
852 if mode == 1:
853 datadec = self.__convolutionByBlockInFreq(dataOut.data)
854 else:
855 """
856 Decoding when data have been read profile by profile
857 """
858 if mode == 0:
859 datadec = self.__convolutionInTime(dataOut.data)
860
861 if mode == 1:
862 datadec = self.__convolutionInFreq(dataOut.data)
863
864 if mode == 2:
865 datadec = self.__convolutionInFreqOpt(dataOut.data)
866
867 if datadec is None:
868 raise ValueError("Codification mode selected is not valid: mode=%d. Try selecting 0 or 1" %mode)
869
870 dataOut.code = self.code
871 dataOut.nCode = self.nCode
872 dataOut.nBaud = self.nBaud
873
874 dataOut.data = datadec
875 dataOut.heightList = dataOut.heightList[0:datadec.shape[-1]]
876
877         dataOut.flagDecodeData = True  # assume the data is now decoded
878
879 if self.__profIndex == self.nCode-1:
880 self.__profIndex = 0
881 return dataOut
882
883 self.__profIndex += 1
884
885 return dataOut
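The time-domain branch above correlates each profile with its pulse code and keeps only the fully formed lags; a standalone sketch of that step for a single profile (shapes illustrative):

    import numpy

    def decode_profile(profile, code):
        # profile: (nHeights,) complex samples; code: (nBaud,) chips of +/-1
        nBaud = len(code)
        # 'full' correlation, then discard the first nBaud-1 partial-overlap lags
        return numpy.correlate(profile, code, mode='full')[nBaud - 1:]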
886
887
888 class ProfileConcat(Operation):
889
890 isConfig = False
891 buffer = None
892
893 def __init__(self, **kwargs):
894
895 Operation.__init__(self, **kwargs)
896 self.profileIndex = 0
897
898 def reset(self):
899 self.buffer = numpy.zeros_like(self.buffer)
900 self.start_index = 0
901 self.times = 1
902
903 def setup(self, data, m, n=1):
904 self.buffer = numpy.zeros((data.shape[0],data.shape[1]*m),dtype=type(data[0,0]))
905 self.nHeights = data.shape[1]#.nHeights
906 self.start_index = 0
907 self.times = 1
908
909 def concat(self, data):
910
911 self.buffer[:,self.start_index:self.nHeights*self.times] = data.copy()
912 self.start_index = self.start_index + self.nHeights
913
914 def run(self, dataOut, m):
915 dataOut.flagNoData = True
916
917 if not self.isConfig:
918 self.setup(dataOut.data, m, 1)
919 self.isConfig = True
920
921 if dataOut.flagDataAsBlock:
922 raise ValueError("ProfileConcat can only be used when voltage have been read profile by profile, getBlock = False")
923
924 else:
925 self.concat(dataOut.data)
926 self.times += 1
927 if self.times > m:
928 dataOut.data = self.buffer
929 self.reset()
930 dataOut.flagNoData = False
931                 # more header and dataOut properties should be updated, e.g. the heights
932 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
933 xf = dataOut.heightList[0] + dataOut.nHeights * deltaHeight * m
934 dataOut.heightList = numpy.arange(dataOut.heightList[0], xf, deltaHeight)
935 dataOut.ippSeconds *= m
936 return dataOut
937
938 class ProfileSelector(Operation):
939
940 profileIndex = None
941     # Total number of profiles
942 nProfiles = None
943
944 def __init__(self, **kwargs):
945
946 Operation.__init__(self, **kwargs)
947 self.profileIndex = 0
948
949 def incProfileIndex(self):
950
951 self.profileIndex += 1
952
953 if self.profileIndex >= self.nProfiles:
954 self.profileIndex = 0
955
956 def isThisProfileInRange(self, profileIndex, minIndex, maxIndex):
957
958 if profileIndex < minIndex:
959 return False
960
961 if profileIndex > maxIndex:
962 return False
963
964 return True
965
966 def isThisProfileInList(self, profileIndex, profileList):
967
968 if profileIndex not in profileList:
969 return False
970
971 return True
972
973 def run(self, dataOut, profileList=None, profileRangeList=None, beam=None, byblock=False, rangeList = None, nProfiles=None):
974
975 """
976 ProfileSelector:
977
978 Inputs:
979 profileList : Index of profiles selected. Example: profileList = (0,1,2,7,8)
980
981 profileRangeList : Minimum and maximum profile indexes. Example: profileRangeList = (4, 30)
982
983 rangeList : List of profile ranges. Example: rangeList = ((4, 30), (32, 64), (128, 256))
984
985 """
986
987 if rangeList is not None:
988 if type(rangeList[0]) not in (tuple, list):
989 rangeList = [rangeList]
990
991 dataOut.flagNoData = True
992
993 if dataOut.flagDataAsBlock:
994 """
995 data dimension = [nChannels, nProfiles, nHeis]
996 """
997 if profileList != None:
998 dataOut.data = dataOut.data[:,profileList,:]
999
1000 if profileRangeList != None:
1001 minIndex = profileRangeList[0]
1002 maxIndex = profileRangeList[1]
1003 profileList = list(range(minIndex, maxIndex+1))
1004
1005 dataOut.data = dataOut.data[:,minIndex:maxIndex+1,:]
1006
1007 if rangeList != None:
1008
1009 profileList = []
1010
1011 for thisRange in rangeList:
1012 minIndex = thisRange[0]
1013 maxIndex = thisRange[1]
1014
1015 profileList.extend(list(range(minIndex, maxIndex+1)))
1016
1017 dataOut.data = dataOut.data[:,profileList,:]
1018
1019 dataOut.nProfiles = len(profileList)
1020 dataOut.profileIndex = dataOut.nProfiles - 1
1021 dataOut.flagNoData = False
1022
1023 return dataOut
1024
1025 """
1026 data dimension = [nChannels, nHeis]
1027 """
1028
1029 if profileList != None:
1030
1031 if self.isThisProfileInList(dataOut.profileIndex, profileList):
1032
1033 self.nProfiles = len(profileList)
1034 dataOut.nProfiles = self.nProfiles
1035 dataOut.profileIndex = self.profileIndex
1036 dataOut.flagNoData = False
1037
1038 self.incProfileIndex()
1039 return dataOut
1040
1041 if profileRangeList != None:
1042
1043 minIndex = profileRangeList[0]
1044 maxIndex = profileRangeList[1]
1045
1046 if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):
1047
1048 self.nProfiles = maxIndex - minIndex + 1
1049 dataOut.nProfiles = self.nProfiles
1050 dataOut.profileIndex = self.profileIndex
1051 dataOut.flagNoData = False
1052
1053 self.incProfileIndex()
1054 return dataOut
1055
1056 if rangeList != None:
1057
1058 nProfiles = 0
1059
1060 for thisRange in rangeList:
1061 minIndex = thisRange[0]
1062 maxIndex = thisRange[1]
1063
1064 nProfiles += maxIndex - minIndex + 1
1065
1066 for thisRange in rangeList:
1067
1068 minIndex = thisRange[0]
1069 maxIndex = thisRange[1]
1070
1071 if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):
1072
1073 self.nProfiles = nProfiles
1074 dataOut.nProfiles = self.nProfiles
1075 dataOut.profileIndex = self.profileIndex
1076 dataOut.flagNoData = False
1077
1078 self.incProfileIndex()
1079
1080 break
1081
1082 return dataOut
1083
1084
1085 if beam != None: #beam is only for AMISR data
1086 if self.isThisProfileInList(dataOut.profileIndex, dataOut.beamRangeDict[beam]):
1087 dataOut.flagNoData = False
1088 dataOut.profileIndex = self.profileIndex
1089
1090 self.incProfileIndex()
1091
1092 return dataOut
1093
1094 raise ValueError("ProfileSelector needs profileList, profileRangeList or rangeList parameter")
1095
1096 #return False
1097 return dataOut
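A hedged usage sketch of the profile selection above (controller-style; addParameter and the 'intlist' format assumed); only one of profileList, profileRangeList or rangeList should be given:

    # keep profiles 120..183 of each block
    op = proc_unit.addOperation(name='ProfileSelector', optype='other')
    op.addParameter(name='profileRangeList', value='120,183', format='intlist')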
1098
1099 class Reshaper(Operation):
1100
1101 def __init__(self, **kwargs):
1102
1103 Operation.__init__(self, **kwargs)
1104
1105 self.__buffer = None
1106 self.__nitems = 0
1107
1108 def __appendProfile(self, dataOut, nTxs):
1109
1110 if self.__buffer is None:
1111 shape = (dataOut.nChannels, int(dataOut.nHeights/nTxs) )
1112 self.__buffer = numpy.empty(shape, dtype = dataOut.data.dtype)
1113
1114 ini = dataOut.nHeights * self.__nitems
1115 end = ini + dataOut.nHeights
1116
1117 self.__buffer[:, ini:end] = dataOut.data
1118
1119 self.__nitems += 1
1120
1121 return int(self.__nitems*nTxs)
1122
1123 def __getBuffer(self):
1124
1125 if self.__nitems == int(1./self.__nTxs):
1126
1127 self.__nitems = 0
1128
1129 return self.__buffer.copy()
1130
1131 return None
1132
1133 def __checkInputs(self, dataOut, shape, nTxs):
1134
1135 if shape is None and nTxs is None:
1136             raise ValueError("Reshaper: shape or nTxs factor should be defined")
1137
1138 if nTxs:
1139 if nTxs < 0:
1140 raise ValueError("nTxs should be greater than 0")
1141
1142 if nTxs < 1 and dataOut.nProfiles % (1./nTxs) != 0:
1143                 raise ValueError("nProfiles= %d is not divisible by (1./nTxs) = %f" %(dataOut.nProfiles, (1./nTxs)))
1144
1145 shape = [dataOut.nChannels, dataOut.nProfiles*nTxs, dataOut.nHeights/nTxs]
1146
1147 return shape, nTxs
1148
1149 if len(shape) != 2 and len(shape) != 3:
1150 raise ValueError("shape dimension should be equal to 2 or 3. shape = (nProfiles, nHeis) or (nChannels, nProfiles, nHeis). Actually shape = (%d, %d, %d)" %(dataOut.nChannels, dataOut.nProfiles, dataOut.nHeights))
1151
1152 if len(shape) == 2:
1153 shape_tuple = [dataOut.nChannels]
1154 shape_tuple.extend(shape)
1155 else:
1156 shape_tuple = list(shape)
1157
1158 nTxs = 1.0*shape_tuple[1]/dataOut.nProfiles
1159
1160 return shape_tuple, nTxs
1161
1162 def run(self, dataOut, shape=None, nTxs=None):
1163
1164 shape_tuple, self.__nTxs = self.__checkInputs(dataOut, shape, nTxs)
1165
1166 dataOut.flagNoData = True
1167 profileIndex = None
1168
1169 if dataOut.flagDataAsBlock:
1170
1171 dataOut.data = numpy.reshape(dataOut.data, shape_tuple)
1172 dataOut.flagNoData = False
1173
1174 profileIndex = int(dataOut.nProfiles*self.__nTxs) - 1
1175
1176 else:
1177
1178 if self.__nTxs < 1:
1179
1180 self.__appendProfile(dataOut, self.__nTxs)
1181 new_data = self.__getBuffer()
1182
1183 if new_data is not None:
1184 dataOut.data = new_data
1185 dataOut.flagNoData = False
1186
1187 profileIndex = dataOut.profileIndex*nTxs
1188
1189 else:
1190 raise ValueError("nTxs should be greater than 0 and lower than 1, or use VoltageReader(..., getblock=True)")
1191
1192 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1193
1194 dataOut.heightList = numpy.arange(dataOut.nHeights/self.__nTxs) * deltaHeight + dataOut.heightList[0]
1195
1196 dataOut.nProfiles = int(dataOut.nProfiles*self.__nTxs)
1197
1198 dataOut.profileIndex = profileIndex
1199
1200 dataOut.ippSeconds /= self.__nTxs
1201
1202 return dataOut
1203
1204 class SplitProfiles(Operation):
1205
1206 def __init__(self, **kwargs):
1207
1208 Operation.__init__(self, **kwargs)
1209
1210 def run(self, dataOut, n):
1211
1212 dataOut.flagNoData = True
1213 profileIndex = None
1214
1215 if dataOut.flagDataAsBlock:
1216
1217 #nchannels, nprofiles, nsamples
1218 shape = dataOut.data.shape
1219
1220 if shape[2] % n != 0:
1221                 raise ValueError("Could not split the data, the number of samples %d has to be a multiple of n=%d" %(shape[2], n))
1222
1223 new_shape = shape[0], shape[1]*n, int(shape[2]/n)
1224
1225 dataOut.data = numpy.reshape(dataOut.data, new_shape)
1226 dataOut.flagNoData = False
1227
1228 profileIndex = int(dataOut.nProfiles/n) - 1
1229
1230 else:
1231
1232 raise ValueError("Could not split the data when is read Profile by Profile. Use VoltageReader(..., getblock=True)")
1233
1234 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1235
1236 dataOut.heightList = numpy.arange(dataOut.nHeights/n) * deltaHeight + dataOut.heightList[0]
1237
1238 dataOut.nProfiles = int(dataOut.nProfiles*n)
1239
1240 dataOut.profileIndex = profileIndex
1241
1242 dataOut.ippSeconds /= n
1243
1244 return dataOut
1245
1246 class CombineProfiles(Operation):
1247 def __init__(self, **kwargs):
1248
1249 Operation.__init__(self, **kwargs)
1250
1251 self.__remData = None
1252 self.__profileIndex = 0
1253
1254 def run(self, dataOut, n):
1255
1256 dataOut.flagNoData = True
1257 profileIndex = None
1258
1259 if dataOut.flagDataAsBlock:
1260
1261 #nchannels, nprofiles, nsamples
1262 shape = dataOut.data.shape
1263 new_shape = shape[0], shape[1]/n, shape[2]*n
1264
1265 if shape[1] % n != 0:
1266                 raise ValueError("Could not combine the data, the number of profiles %d has to be a multiple of n=%d" %(shape[1], n))
1267
1268 dataOut.data = numpy.reshape(dataOut.data, new_shape)
1269 dataOut.flagNoData = False
1270
1271 profileIndex = int(dataOut.nProfiles*n) - 1
1272
1273 else:
1274
1275 #nchannels, nsamples
1276 if self.__remData is None:
1277 newData = dataOut.data
1278 else:
1279 newData = numpy.concatenate((self.__remData, dataOut.data), axis=1)
1280
1281 self.__profileIndex += 1
1282
1283 if self.__profileIndex < n:
1284 self.__remData = newData
1285 #continue
1286 return
1287
1288 self.__profileIndex = 0
1289 self.__remData = None
1290
1291 dataOut.data = newData
1292 dataOut.flagNoData = False
1293
1294 profileIndex = dataOut.profileIndex/n
1295
1296
1297 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1298
1299 dataOut.heightList = numpy.arange(dataOut.nHeights*n) * deltaHeight + dataOut.heightList[0]
1300
1301 dataOut.nProfiles = int(dataOut.nProfiles/n)
1302
1303 dataOut.profileIndex = profileIndex
1304
1305 dataOut.ippSeconds *= n
1306
1307 return dataOut
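When data is read as a block, SplitProfiles and CombineProfiles above are plain reshapes in opposite directions; a minimal numpy sketch with assumed dimensions:

    import numpy

    data = numpy.zeros((2, 100, 1000), dtype=complex)  # (nChannels, nProfiles, nHeights)
    n = 4
    split = data.reshape(2, 100 * n, 1000 // n)        # SplitProfiles: n times more, shorter profiles
    combined = split.reshape(2, 100, 1000)             # CombineProfiles: back to the original block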
1308
1309 class PulsePairVoltage(Operation):
1310 '''
1311 Function PulsePair(Signal Power, Velocity)
1312 The real component of Lag[0] provides Intensity Information
1313     The real component of Lag[0] provides intensity (power) information
1314     The phase (imaginary component) of Lag[1] provides velocity information
1315
1316     Configuration Parameters:
1317     nPRF = number of profiles (PRFs) to integrate
1318     theta = azimuth angle boundaries in degrees
1319 Input:
1320 self.dataOut
1321 lag[N]
1322 Affected:
1323 self.dataOut.spc
1324 '''
1325 isConfig = False
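The lag relations described in the docstring, written out as a minimal single-channel pulse-pair sketch (simplified: coherent-integration scaling is ignored and the wavelength and IPP values are placeholders):

    import numpy, math

    def pulse_pair(voltages, ipp_sec, wavelength):
        # voltages: (nProfiles, nHeights) complex samples of one channel
        lag0 = numpy.mean(numpy.abs(voltages) ** 2, axis=0)                # power from Lag[0]
        lag1 = numpy.mean(voltages[:-1] * numpy.conj(voltages[1:]), axis=0)
        doppler_freq = -numpy.angle(lag1) / (2.0 * math.pi * ipp_sec)      # phase of Lag[1]
        velocity = (wavelength / 2.0) * doppler_freq
        return lag0, velocity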
1326 __profIndex = 0
1327 __initime = None
1328 __lastdatatime = None
1329 __buffer = None
1330 noise = None
1331 __dataReady = False
1332 n = None
1333 __nch = 0
1334 __nHeis = 0
1335 removeDC = False
1336 ipp = None
1337 lambda_ = 0
1338
1339 def __init__(self,**kwargs):
1340 Operation.__init__(self,**kwargs)
1341
1342 def setup(self, dataOut, n = None, removeDC=False):
1343 '''
1344         n = number of input PRF profiles
1345 '''
1346 self.__initime = None
1347 self.__lastdatatime = 0
1348 self.__dataReady = False
1349 self.__buffer = 0
1350 self.__profIndex = 0
1351 self.noise = None
1352 self.__nch = dataOut.nChannels
1353 self.__nHeis = dataOut.nHeights
1354 self.removeDC = removeDC
1355 self.lambda_ = 3.0e8/(9345.0e6)
1356 self.ippSec = dataOut.ippSeconds
1357 self.nCohInt = dataOut.nCohInt
1358 print("IPPseconds",dataOut.ippSeconds)
1359
1360         print("The value of n is:", n)
1361 if n == None:
1362 raise ValueError("n should be specified.")
1363
1364 if n != None:
1365 if n<2:
1366                 raise ValueError("n should be at least 2")
1367
1368 self.n = n
1369 self.__nProf = n
1370
1371 self.__buffer = numpy.zeros((dataOut.nChannels,
1372 n,
1373 dataOut.nHeights),
1374 dtype='complex')
1375
1376 def putData(self,data):
1377 '''
1378         Add a profile to the __buffer and increase __profIndex by one
1379 '''
1380 self.__buffer[:,self.__profIndex,:]= data
1381 self.__profIndex += 1
1382 return
1383
1384 def pushData(self,dataOut):
1385 '''
1386 Return the PULSEPAIR and the profiles used in the operation
1387 Affected : self.__profileIndex
1388 '''
1389 #----------------- Remove DC-----------------------------------
1390 if self.removeDC==True:
1391 mean = numpy.mean(self.__buffer,1)
1392 tmp = mean.reshape(self.__nch,1,self.__nHeis)
1393 dc= numpy.tile(tmp,[1,self.__nProf,1])
1394 self.__buffer = self.__buffer - dc
1395         #------------------ Power computation --------------------------
1396 pair0 = self.__buffer*numpy.conj(self.__buffer)
1397 pair0 = pair0.real
1398 lag_0 = numpy.sum(pair0,1)
1399         #------------------ Per-channel noise computation ---------------
1400 self.noise = numpy.zeros(self.__nch)
1401 for i in range(self.__nch):
1402 daux = numpy.sort(pair0[i,:,:],axis= None)
1403 self.noise[i]=hildebrand_sekhon( daux ,self.nCohInt)
1404
1405 self.noise = self.noise.reshape(self.__nch,1)
1406 self.noise = numpy.tile(self.noise,[1,self.__nHeis])
1407 noise_buffer = self.noise.reshape(self.__nch,1,self.__nHeis)
1408 noise_buffer = numpy.tile(noise_buffer,[1,self.__nProf,1])
1409         #------------------ Received power = P, signal power = S, noise = N --
1410 #------------------ P= S+N ,P=lag_0/N ---------------------------------
1411 #-------------------- Power --------------------------------------------------
1412 data_power = lag_0/(self.n*self.nCohInt)
1413 #------------------ Signal ---------------------------------------------------
1414 data_intensity = pair0 - noise_buffer
1415 data_intensity = numpy.sum(data_intensity,axis=1)*(self.n*self.nCohInt)#*self.nCohInt)
1416 #data_intensity = (lag_0-self.noise*self.n)*(self.n*self.nCohInt)
1417 for i in range(self.__nch):
1418 for j in range(self.__nHeis):
1419 if data_intensity[i][j] < 0:
1420 data_intensity[i][j] = numpy.min(numpy.absolute(data_intensity[i][j]))
1421
1422 #----------------- Doppler frequency and velocity computation --------
1423 pair1 = self.__buffer[:,:-1,:]*numpy.conjugate(self.__buffer[:,1:,:])
1424 lag_1 = numpy.sum(pair1,1)
1425 data_freq = (-1/(2.0*math.pi*self.ippSec*self.nCohInt))*numpy.angle(lag_1)
1426 data_velocity = (self.lambda_/2.0)*data_freq
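# The two lines above implement the classical pulse-pair Doppler estimator:
# f_d = -arg(R(T)) / (2*pi*T) with T = ippSec*nCohInt, and v = (lambda/2) * f_d,
# where R(T) is the lag-1 autocorrelation accumulated in lag_1.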
1427
1428 #---------------- Estimated mean signal power -----------
1429 lag_0 = lag_0/self.n
1430 S = lag_0-self.noise
1431
1432 #---------------- Mean Doppler frequency ---------------------
1433 lag_1 = lag_1/(self.n-1)
1434 R1 = numpy.abs(lag_1)
1435
1436 #---------------- SNR computation ----------------------------------
1437 data_snrPP = S/self.noise
1438 for i in range(self.__nch):
1439 for j in range(self.__nHeis):
1440 if data_snrPP[i][j] < 1.e-20:
1441 data_snrPP[i][j] = 1.e-20
1442
1443 #----------------- Spectral width computation ----------------------
1444 L = S/R1
1445 L = numpy.where(L<0,1,L)
1446 L = numpy.log(L)
1447 tmp = numpy.sqrt(numpy.absolute(L))
1448 data_specwidth = (self.lambda_/(2*math.sqrt(2)*math.pi*self.ippSec*self.nCohInt))*tmp*numpy.sign(L)
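# The spectral width above follows the Gaussian-spectrum pulse-pair estimator:
# sigma_v = (lambda / (2*sqrt(2)*pi*T)) * sqrt(|ln(S/|R(T)|)|) * sign(ln(S/|R(T)|)),
# with T = ippSec*nCohInt; negative ratios S/|R(T)| were clamped to 1 above, giving zero width.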
1449 n = self.__profIndex
1450
1451 self.__buffer = numpy.zeros((self.__nch, self.__nProf,self.__nHeis), dtype='complex')
1452 self.__profIndex = 0
1453 return data_power,data_intensity,data_velocity,data_snrPP,data_specwidth,n
1454
1455
1456 def pulsePairbyProfiles(self,dataOut):
1457
1458 self.__dataReady = False
1459 data_power = None
1460 data_intensity = None
1461 data_velocity = None
1462 data_specwidth = None
1463 data_snrPP = None
1464 self.putData(data=dataOut.data)
1465 if self.__profIndex == self.n:
1466 data_power,data_intensity, data_velocity,data_snrPP,data_specwidth, n = self.pushData(dataOut=dataOut)
1467 self.__dataReady = True
1468
1469 return data_power, data_intensity, data_velocity, data_snrPP, data_specwidth
1470
1471
1472 def pulsePairOp(self, dataOut, datatime= None):
1473
1474 if self.__initime is None:
1475 self.__initime = datatime
1476 data_power, data_intensity, data_velocity, data_snrPP, data_specwidth = self.pulsePairbyProfiles(dataOut)
1477 self.__lastdatatime = datatime
1478
1479 if data_power is None:
1480 return None, None, None,None,None,None
1481
1482 avgdatatime = self.__initime
1483 deltatime = datatime - self.__lastdatatime
1484 self.__initime = datatime
1485
1486 return data_power, data_intensity, data_velocity, data_snrPP, data_specwidth, avgdatatime
1487
1488 def run(self, dataOut,n = None,removeDC= False, overlapping= False,**kwargs):
1489
1490 if not self.isConfig:
1491 self.setup(dataOut = dataOut, n = n , removeDC=removeDC , **kwargs)
1492 self.isConfig = True
1493 data_power, data_intensity, data_velocity,data_snrPP,data_specwidth, avgdatatime = self.pulsePairOp(dataOut, dataOut.utctime)
1494 dataOut.flagNoData = True
1495
1496 if self.__dataReady:
1497 dataOut.nCohInt *= self.n
1498 dataOut.dataPP_POW = data_intensity # S
1499 dataOut.dataPP_POWER = data_power # P
1500 dataOut.dataPP_DOP = data_velocity
1501 dataOut.dataPP_SNR = data_snrPP
1502 dataOut.dataPP_WIDTH = data_specwidth
1503 dataOut.PRFbyAngle = self.n # number of PRFs per rotated angle, equivalent to one time interval.
1504 dataOut.utctime = avgdatatime
1505 dataOut.flagNoData = False
1506 return dataOut
1507
1508
1509
1510 # import collections
1511 # from scipy.stats import mode
1512 #
1513 # class Synchronize(Operation):
1514 #
1515 # isConfig = False
1516 # __profIndex = 0
1517 #
1518 # def __init__(self, **kwargs):
1519 #
1520 # Operation.__init__(self, **kwargs)
1521 # # self.isConfig = False
1522 # self.__powBuffer = None
1523 # self.__startIndex = 0
1524 # self.__pulseFound = False
1525 #
1526 # def __findTxPulse(self, dataOut, channel=0, pulse_with = None):
1527 #
1528 # #Read data
1529 #
1530 # powerdB = dataOut.getPower(channel = channel)
1531 # noisedB = dataOut.getNoise(channel = channel)[0]
1532 #
1533 # self.__powBuffer.extend(powerdB.flatten())
1534 #
1535 # dataArray = numpy.array(self.__powBuffer)
1536 #
1537 # filteredPower = numpy.correlate(dataArray, dataArray[0:self.__nSamples], "same")
1538 #
1539 # maxValue = numpy.nanmax(filteredPower)
1540 #
1541 # if maxValue < noisedB + 10:
1542 # #No transmission pulse was found
1543 # return None
1544 #
1545 # maxValuesIndex = numpy.where(filteredPower > maxValue - 0.1*abs(maxValue))[0]
1546 #
1547 # if len(maxValuesIndex) < 2:
1548 # #Only a single one-baud transmission pulse was found, waiting for the next TX
1549 # return None
1550 #
1551 # phasedMaxValuesIndex = maxValuesIndex - self.__nSamples
1552 #
1553 # #Select only values spaced nSamples apart
1554 # pulseIndex = numpy.intersect1d(maxValuesIndex, phasedMaxValuesIndex)
1555 #
1556 # if len(pulseIndex) < 2:
1557 # #Only one transmission pulse wider than 1 was found
1558 # return None
1559 #
1560 # spacing = pulseIndex[1:] - pulseIndex[:-1]
1561 #
1562 # #Remove signals separated by fewer than 10 units (samples)
1563 # #(IPPs shorter than 10 units should not exist)
1564 #
1565 # realIndex = numpy.where(spacing > 10 )[0]
1566 #
1567 # if len(realIndex) < 2:
1568 # #Only one transmission pulse wider than 1 was found
1569 # return None
1570 #
1571 # #Remove wide pulses (keep only the spacing between IPPs)
1572 # realPulseIndex = pulseIndex[realIndex]
1573 #
1574 # period = mode(realPulseIndex[1:] - realPulseIndex[:-1])[0][0]
1575 #
1576 # print "IPP = %d samples" %period
1577 #
1578 # self.__newNSamples = dataOut.nHeights #int(period)
1579 # self.__startIndex = int(realPulseIndex[0])
1580 #
1581 # return 1
1582 #
1583 #
1584 # def setup(self, nSamples, nChannels, buffer_size = 4):
1585 #
1586 # self.__powBuffer = collections.deque(numpy.zeros( buffer_size*nSamples,dtype=numpy.float),
1587 # maxlen = buffer_size*nSamples)
1588 #
1589 # bufferList = []
1590 #
1591 # for i in range(nChannels):
1592 # bufferByChannel = collections.deque(numpy.zeros( buffer_size*nSamples, dtype=numpy.complex) + numpy.NAN,
1593 # maxlen = buffer_size*nSamples)
1594 #
1595 # bufferList.append(bufferByChannel)
1596 #
1597 # self.__nSamples = nSamples
1598 # self.__nChannels = nChannels
1599 # self.__bufferList = bufferList
1600 #
1601 # def run(self, dataOut, channel = 0):
1602 #
1603 # if not self.isConfig:
1604 # nSamples = dataOut.nHeights
1605 # nChannels = dataOut.nChannels
1606 # self.setup(nSamples, nChannels)
1607 # self.isConfig = True
1608 #
1609 # #Append new data to internal buffer
1610 # for thisChannel in range(self.__nChannels):
1611 # bufferByChannel = self.__bufferList[thisChannel]
1612 # bufferByChannel.extend(dataOut.data[thisChannel])
1613 #
1614 # if self.__pulseFound:
1615 # self.__startIndex -= self.__nSamples
1616 #
1617 # #Finding Tx Pulse
1618 # if not self.__pulseFound:
1619 # indexFound = self.__findTxPulse(dataOut, channel)
1620 #
1621 # if indexFound == None:
1622 # dataOut.flagNoData = True
1623 # return
1624 #
1625 # self.__arrayBuffer = numpy.zeros((self.__nChannels, self.__newNSamples), dtype = numpy.complex)
1626 # self.__pulseFound = True
1627 # self.__startIndex = indexFound
1628 #
1629 # #If pulse was found ...
1630 # for thisChannel in range(self.__nChannels):
1631 # bufferByChannel = self.__bufferList[thisChannel]
1632 # #print self.__startIndex
1633 # x = numpy.array(bufferByChannel)
1634 # self.__arrayBuffer[thisChannel] = x[self.__startIndex:self.__startIndex+self.__newNSamples]
1635 #
1636 # deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1637 # dataOut.heightList = numpy.arange(self.__newNSamples)*deltaHeight
1638 # # dataOut.ippSeconds = (self.__newNSamples / deltaHeight)/1e6
1639 #
1640 # dataOut.data = self.__arrayBuffer
1641 #
1642 # self.__startIndex += self.__newNSamples
1643 #
1644 # return
\ No newline at end of file
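A minimal, hypothetical sketch of driving the PulsePairVoltage operation defined above outside a full signal chain. FakeVoltage is an invented stand-in that only provides the attributes the operation reads; the sketch assumes PulsePairVoltage, the hildebrand_sekhon helper it calls, and the module's numpy/math imports are available.

import numpy

class FakeVoltage(object):
    '''Stand-in for dataOut with only the attributes PulsePairVoltage reads.'''
    def __init__(self, nChannels=2, nHeights=100, ippSeconds=1e-3, nCohInt=1):
        self.nChannels = nChannels
        self.nHeights = nHeights
        self.ippSeconds = ippSeconds
        self.nCohInt = nCohInt
        self.utctime = 0.0
        self.flagNoData = True
        self.data = numpy.zeros((nChannels, nHeights), dtype='complex')

op = PulsePairVoltage()
dataOut = FakeVoltage()
for k in range(64):
    # one synthetic complex voltage profile per call
    dataOut.data = numpy.random.randn(2, 100) + 1j * numpy.random.randn(2, 100)
    dataOut.utctime = k * dataOut.ippSeconds
    dataOut = op.run(dataOut, n=64, removeDC=True)

# after the 64th profile flagNoData is False and dataPP_POW/POWER/DOP/SNR/WIDTH are filled
print(dataOut.flagNoData, getattr(dataOut, 'dataPP_DOP', None) is not None)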
@@ -0,0 +1,833
1 class SpectralFitting(Operation):
2 '''
3 Spectral fitting of averaged spectra and cross-spectra using an external model library.
4
5 Input: dataOut.data_spc, dataOut.data_cspc, dataOut.noise
6 Output: dataOut.data_param (fitted parameters), dataOut.data_paramC, dataOut.data_snr
7 Variables modified: dataOut.data_spc, dataOut.groupList, dataOut.constants
8 '''
9 isConfig = False
10 __dataReady = False
11 bloques = None
12 bloque0 = None
13 index = 0
14 fint = 0
15 buffer = 0
16 buffer2 = 0
17 buffer3 = 0
18
19 def __init__(self):
20 Operation.__init__(self)
21 self.i=0
22 self.isConfig = False
23
24
25 def setup(self,nChan,nProf,nHei,nBlocks):
26 self.__dataReady = False
27 self.bloques = numpy.zeros([2, nProf, nHei,nBlocks], dtype= complex)
28 self.bloque0 = numpy.zeros([nChan, nProf, nHei, nBlocks])
29
30 def __calculateMoments(self,oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
31
32 if (nicoh is None): nicoh = 1
33 if (graph is None): graph = 0
34 if (smooth is None): smooth = 0
35 elif (smooth < 3): smooth = 0
36
37 if (type1 is None): type1 = 0
38 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
39 if (snrth is None): snrth = -3
40 if (dc is None): dc = 0
41 if (aliasing is None): aliasing = 0
42 if (oldfd is None): oldfd = 0
43 if (wwauto is None): wwauto = 0
44
45 if (n0 < 1.e-20): n0 = 1.e-20
46
47 freq = oldfreq
48 vec_power = numpy.zeros(oldspec.shape[1])
49 vec_fd = numpy.zeros(oldspec.shape[1])
50 vec_w = numpy.zeros(oldspec.shape[1])
51 vec_snr = numpy.zeros(oldspec.shape[1])
52
53 oldspec = numpy.ma.masked_invalid(oldspec)
54
55 for ind in range(oldspec.shape[1]):
56
57 spec = oldspec[:,ind]
58 aux = spec*fwindow
59 max_spec = aux.max()
60 m = list(aux).index(max_spec)
61
62 #Smooth
63 if (smooth == 0): spec2 = spec
64 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
65
66 # Calculo de Momentos
67 bb = spec2[list(range(m,spec2.size))]
68 bb = (bb<n0).nonzero()
69 bb = bb[0]
70
71 ss = spec2[list(range(0,m + 1))]
72 ss = (ss<n0).nonzero()
73 ss = ss[0]
74
75 if (bb.size == 0):
76 bb0 = spec.size - 1 - m
77 else:
78 bb0 = bb[0] - 1
79 if (bb0 < 0):
80 bb0 = 0
81
82 if (ss.size == 0): ss1 = 1
83 else: ss1 = max(ss) + 1
84
85 if (ss1 > m): ss1 = m
86
87 valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1
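# Zeroth/first/second spectral moments over the valid window around the peak, with the noise
# level n0 removed and weighted by fwindow: power = sum((S-n0)*fw), fd = power-weighted mean
# Doppler frequency, w = RMS spectral width about fd, snr = (mean(S)-n0)/n0.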
88 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
89 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
90 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
91 snr = (spec2.mean()-n0)/n0
92
93 if (snr < 1.e-20) :
94 snr = 1.e-20
95
96 vec_power[ind] = power
97 vec_fd[ind] = fd
98 vec_w[ind] = w
99 vec_snr[ind] = snr
100
101 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
102 return moments
103
104 def __DiffCoherent(self, spectra, cspectra, dataOut, noise, snrth, coh_th, hei_th):
105
106 nProf = dataOut.nProfiles
107 heights = dataOut.heightList
108 nHei = len(heights)
109 channels = dataOut.channelList
110 nChan = len(channels)
111 crosspairs = dataOut.groupList
112 nPairs = len(crosspairs)
113 #Separate incoherent from coherent spectra, SNR > 20 dB
114 snr_th = 10**(snrth/10.0)
115 my_incoh_spectra = numpy.zeros([nChan, nProf,nHei], dtype='float')
116 my_incoh_cspectra = numpy.zeros([nPairs,nProf, nHei], dtype='complex')
117 my_incoh_aver = numpy.zeros([nChan, nHei])
118 my_coh_aver = numpy.zeros([nChan, nHei])
119
120 coh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
121 coh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
122 coh_aver = numpy.zeros([nChan, nHei])
123
124 incoh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
125 incoh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
126 incoh_aver = numpy.zeros([nChan, nHei])
127 power = numpy.sum(spectra, axis=1)
128
129 if coh_th == None : coh_th = numpy.array([0.75,0.65,0.15]) # 0.65
130 if hei_th == None : hei_th = numpy.array([60,300,650])
131 for ic in range(nPairs):
132 pair = crosspairs[ic]
133 #if the SNR is greater than the SNR threshold the data are taken as coherent
134 s_n0 = power[pair[0],:]/noise[pair[0]]
135 s_n1 = power[pair[1],:]/noise[pair[1]]
136 valid1 =(s_n0>=snr_th).nonzero()
137 valid2 = (s_n1>=snr_th).nonzero()
138 valid1 = numpy.array(valid1[0])
139 valid2 = numpy.array(valid2[0])
140 valid = valid1
141 for iv in range(len(valid2)):
142 indv = numpy.array((valid1 == valid2[iv]).nonzero())
143 if len(indv[0]) == 0 :
144 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
145 if len(valid)>0:
146 my_coh_aver[pair[0],valid]=1
147 my_coh_aver[pair[1],valid]=1
148 # if the coherence is greater than the coherence threshold the data are taken as coherent
149 coh = numpy.squeeze(numpy.nansum(cspectra[ic,:,:], axis=0)/numpy.sqrt(numpy.nansum(spectra[pair[0],:,:], axis=0)*numpy.nansum(spectra[pair[1],:,:], axis=0)))
150 for ih in range(len(hei_th)):
151 hvalid = (heights>hei_th[ih]).nonzero()
152 hvalid = hvalid[0]
153 if len(hvalid)>0:
154 valid = (numpy.absolute(coh[hvalid])>coh_th[ih]).nonzero()
155 valid = valid[0]
156 if len(valid)>0:
157 my_coh_aver[pair[0],hvalid[valid]] =1
158 my_coh_aver[pair[1],hvalid[valid]] =1
159
160 coh_echoes = (my_coh_aver[pair[0],:] == 1).nonzero()
161 incoh_echoes = (my_coh_aver[pair[0],:] != 1).nonzero()
162 incoh_echoes = incoh_echoes[0]
163 if len(incoh_echoes) > 0:
164 my_incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
165 my_incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
166 my_incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
167 my_incoh_aver[pair[0],incoh_echoes] = 1
168 my_incoh_aver[pair[1],incoh_echoes] = 1
169
170
171 for ic in range(nPairs):
172 pair = crosspairs[ic]
173
174 valid1 =(my_coh_aver[pair[0],:]==1 ).nonzero()
175 valid2 = (my_coh_aver[pair[1],:]==1).nonzero()
176 valid1 = numpy.array(valid1[0])
177 valid2 = numpy.array(valid2[0])
178 valid = valid1
179
180 for iv in range(len(valid2)):
181
182 indv = numpy.array((valid1 == valid2[iv]).nonzero())
183 if len(indv[0]) == 0 :
184 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
185 valid1 =(my_coh_aver[pair[0],:] !=1 ).nonzero()
186 valid2 = (my_coh_aver[pair[1],:] !=1).nonzero()
187 valid1 = numpy.array(valid1[0])
188 valid2 = numpy.array(valid2[0])
189 incoh_echoes = valid1
190 for iv in range(len(valid2)):
191
192 indv = numpy.array((valid1 == valid2[iv]).nonzero())
193 if len(indv[0]) == 0 :
194 incoh_echoes = numpy.concatenate(( incoh_echoes,valid2[iv]), axis=None)
195
196 if len(valid)>0:
197 coh_spectra[pair[0],:,valid] = spectra[pair[0],:,valid]
198 coh_spectra[pair[1],:,valid] = spectra[pair[1],:,valid]
199 coh_cspectra[ic,:,valid] = cspectra[ic,:,valid]
200 coh_aver[pair[0],valid]=1
201 coh_aver[pair[1],valid]=1
202 if len(incoh_echoes)>0:
203 incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
204 incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
205 incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
206 incoh_aver[pair[0],incoh_echoes]=1
207 incoh_aver[pair[1],incoh_echoes]=1
208 return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver
209
210
211 def __CleanCoherent(self,snrth, spectra, cspectra, coh_aver,dataOut, noise,clean_coh_echoes,index):
212
213 nProf = dataOut.nProfiles
214 heights = dataOut.heightList
215 nHei = len(heights)
216 channels = dataOut.channelList
217 nChan = len(channels)
218 crosspairs = dataOut.groupList
219 nPairs = len(crosspairs)
220
221 absc = dataOut.abscissaList[:-1]
222 data_param = numpy.zeros((nChan, 4, spectra.shape[2]))
223 clean_coh_spectra = spectra.copy()
224 clean_coh_cspectra = cspectra.copy()
225 clean_coh_aver = coh_aver.copy()
226
227 spwd_th=[10,6] #spwd_th[0] --> For satellites ; spwd_th[1] --> For special events like SUN.
228 coh_th = 0.75
229
230 rtime0 = [6,18] # period without ESF
231 rtime1 = [10.5,13.5] # period with high coherence and high (expected) spectral width: SUN
232
233 time = index*5./60 # based on 5-minute processing blocks
234 if clean_coh_echoes == 1 :
235 for ind in range(nChan):
236 data_param[ind,:,:] = self.__calculateMoments( spectra[ind,:,:] , absc , noise[ind] )
237 spwd = data_param[:,3]
238 # SPECB_JULIA,header=anal_header,jspectra=spectra,vel=velocities,hei=heights, num_aver=1, mode_fit=0,smoothing=smoothing,jvelr=velr,jspwd=spwd,jsnr=snr,jnoise=noise,jstdvnoise=stdvnoise
239 # to obtain spwd
240 for ic in range(nPairs):
241 pair = crosspairs[ic]
242 coh = numpy.squeeze(numpy.sum(cspectra[ic,:,:], axis=1)/numpy.sqrt(numpy.sum(spectra[pair[0],:,:], axis=1)*numpy.sum(spectra[pair[1],:,:], axis=1)))
243 for ih in range(nHei) :
244 # Considering heights higher than 200km in order to avoid removing phenomena like EEJ.
245 if heights[ih] >= 200 and coh_aver[pair[0],ih] == 1 and coh_aver[pair[1],ih] == 1 :
246 # Checking coherence
247 if (numpy.abs(coh[ih]) <= coh_th) or (time >= rtime0[0] and time <= rtime0[1]) :
248 # Checking spectral widths
249 if (spwd[pair[0],ih] > spwd_th[0]) or (spwd[pair[1],ih] > spwd_th[0]) :
250 # satellite
251 clean_coh_spectra[pair,ih,:] = 0.0
252 clean_coh_cspectra[ic,ih,:] = 0.0
253 clean_coh_aver[pair,ih] = 0
254 else :
255 if ((spwd[pair[0],ih] < spwd_th[1]) or (spwd[pair[1],ih] < spwd_th[1])) :
256 # Special event like the sun.
257 clean_coh_spectra[pair,ih,:] = 0.0
258 clean_coh_cspectra[ic,ih,:] = 0.0
259 clean_coh_aver[pair,ih] = 0
260
261 return clean_coh_spectra, clean_coh_cspectra, clean_coh_aver
262
263 def CleanRayleigh(self,dataOut,spectra,cspectra,save_drifts):
264
265 rfunc = cspectra.copy()
266 n_funct = len(rfunc[0,:,0,0])
267 val_spc = spectra*0.0
268 val_cspc = cspectra*0.0
269 in_sat_spectra = spectra.copy()
270 in_sat_cspectra = cspectra.copy()
271
272 min_hei = 200
273 nProf = dataOut.nProfiles
274 heights = dataOut.heightList
275 nHei = len(heights)
276 channels = dataOut.channelList
277 nChan = len(channels)
278 crosspairs = dataOut.groupList
279 nPairs = len(crosspairs)
280 hval=(heights >= min_hei).nonzero()
281 ih=hval[0]
282 for ih in range(hval[0][0],nHei):
283 for ifreq in range(nProf):
284 for ii in range(n_funct):
285
286 func2clean = 10*numpy.log10(numpy.absolute(rfunc[:,ii,ifreq,ih]))
287 val = (numpy.isfinite(func2clean)==True).nonzero()
288 if len(val)>0:
289 min_val = numpy.around(numpy.amin(func2clean)-2) #> (-40)
290 if min_val <= -40 : min_val = -40
291 max_val = numpy.around(numpy.amax(func2clean)+2) #< 200
292 if max_val >= 200 : max_val = 200
293 step = 1
294 #Getting bins and the histogram
295 x_dist = min_val + numpy.arange(1 + ((max_val-(min_val))/step))*step
296 y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step))
297 mean = numpy.sum(x_dist * y_dist) / numpy.sum(y_dist)
298 sigma = numpy.sqrt(numpy.sum(y_dist * (x_dist - mean)**2) / numpy.sum(y_dist))
299 parg = [numpy.amax(y_dist),mean,sigma]
300 try :
301 gauss_fit, covariance = curve_fit(fit_func, x_dist, y_dist,p0=parg)
302 mode = gauss_fit[1]
303 stdv = gauss_fit[2]
304 except:
305 mode = mean
306 stdv = sigma
307
308 #Removing echoes more than factor_stdv standard deviations away from the mode
309 factor_stdv = 2.5
310 noval = (abs(func2clean - mode)>=(factor_stdv*stdv)).nonzero()
311
312 if len(noval[0]) > 0:
313 novall = ((func2clean - mode) >= (factor_stdv*stdv)).nonzero()
314 cross_pairs = crosspairs[ii]
315 #Getting coherent echoes which are removed.
316 if len(novall[0]) > 0:
317 val_spc[novall[0],cross_pairs[0],ifreq,ih] = 1
318 val_spc[novall[0],cross_pairs[1],ifreq,ih] = 1
319 val_cspc[novall[0],ii,ifreq,ih] = 1
320 #Removing coherent from ISR data
321 spectra[noval,cross_pairs[0],ifreq,ih] = numpy.nan
322 spectra[noval,cross_pairs[1],ifreq,ih] = numpy.nan
323 cspectra[noval,ii,ifreq,ih] = numpy.nan
324
325 #Getting average of the spectra and cross-spectra from incoherent echoes.
326 out_spectra = numpy.zeros([nChan,nProf,nHei], dtype=float) #+numpy.nan
327 out_cspectra = numpy.zeros([nPairs,nProf,nHei], dtype=complex) #+numpy.nan
328 for ih in range(nHei):
329 for ifreq in range(nProf):
330 for ich in range(nChan):
331 tmp = spectra[:,ich,ifreq,ih]
332 valid = (numpy.isfinite(tmp[:])==True).nonzero()
333 if len(valid[0]) >0 :
334 out_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
335 for icr in range(nPairs):
336 tmp = numpy.squeeze(cspectra[:,icr,ifreq,ih])
337 valid = (numpy.isfinite(tmp)==True).nonzero()
338 if len(valid[0]) > 0:
339 out_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
340 #Removing fake coherent echoes (at least 4 points around the point)
341 val_spectra = numpy.sum(val_spc,0)
342 val_cspectra = numpy.sum(val_cspc,0)
343
344 val_spectra = self.REM_ISOLATED_POINTS(val_spectra,4)
345 val_cspectra = self.REM_ISOLATED_POINTS(val_cspectra,4)
346
347 for i in range(nChan):
348 for j in range(nProf):
349 for k in range(nHei):
350 if numpy.isfinite(val_spectra[i,j,k]) and val_spectra[i,j,k] < 1 :
351 val_spc[:,i,j,k] = 0.0
352 for i in range(nPairs):
353 for j in range(nProf):
354 for k in range(nHei):
355 if numpy.isfinite(val_cspectra[i,j,k]) and val_cspectra[i,j,k] < 1 :
356 val_cspc[:,i,j,k] = 0.0
357
358 tmp_sat_spectra = spectra.copy()
359 tmp_sat_spectra = tmp_sat_spectra*numpy.nan
360 tmp_sat_cspectra = cspectra.copy()
361 tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan
362 val = (val_spc > 0).nonzero()
363 if len(val[0]) > 0:
364 tmp_sat_spectra[val] = in_sat_spectra[val]
365
366 val = (val_cspc > 0).nonzero()
367 if len(val[0]) > 0:
368 tmp_sat_cspectra[val] = in_sat_cspectra[val]
369
370 #Getting average of the spectra and cross-spectra from incoherent echoes.
371 sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
372 sat_cspectra = numpy.zeros((nPairs,nProf,nHei), dtype=complex)
373 for ih in range(nHei):
374 for ifreq in range(nProf):
375 for ich in range(nChan):
376 tmp = numpy.squeeze(tmp_sat_spectra[:,ich,ifreq,ih])
377 valid = (numpy.isfinite(tmp)).nonzero()
378 if len(valid[0]) > 0:
379 sat_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
380
381 for icr in range(nPairs):
382 tmp = numpy.squeeze(tmp_sat_cspectra[:,icr,ifreq,ih])
383 valid = (numpy.isfinite(tmp)).nonzero()
384 if len(valid[0]) > 0:
385 sat_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
386 return out_spectra, out_cspectra,sat_spectra,sat_cspectra
387 def REM_ISOLATED_POINTS(self,array,rth):
388 if rth == None : rth = 4
389 num_prof = len(array[0,:,0])
390 num_hei = len(array[0,0,:])
391 n2d = len(array[:,0,0])
392
393 for ii in range(n2d) :
394 tmp = array[ii,:,:]
395 tmp = numpy.reshape(tmp,num_prof*num_hei)
396 indxs1 = (numpy.isfinite(tmp)==True).nonzero()
397 indxs2 = (tmp > 0).nonzero()
398 indxs1 = (indxs1[0])
399 indxs2 = indxs2[0]
400 indxs = None
401 for iv in range(len(indxs2)):
402 indv = numpy.array((indxs1 == indxs2[iv]).nonzero())
403 if len(indv[0]) > 0 :
404 indxs = numpy.concatenate((indxs,indxs2[iv]), axis=None)
405 indxs = indxs[1:]
406 if len(indxs) < 4 :
407 array[ii,:,:] = 0.
408 return
409
410 xpos = numpy.mod(indxs ,num_hei)
411 ypos = (indxs // num_hei)
412 sx = numpy.argsort(xpos) # Ordering respect to "x" (time)
413 xpos = xpos[sx]
414 ypos = ypos[sx]
415 # *********************************** Cleaning isolated points **********************************
416 ic = 0
417 while True :
418 r = numpy.sqrt(list(numpy.power((xpos[ic]-xpos),2)+ numpy.power((ypos[ic]-ypos),2)))
419 no_coh1 = (numpy.isfinite(r)==True).nonzero()
420 no_coh2 = (r <= rth).nonzero()
421 no_coh1 = numpy.array(no_coh1[0])
422 no_coh2 = numpy.array(no_coh2[0])
423 no_coh = None
424 for iv in range(len(no_coh2)):
425 indv = numpy.array((no_coh1 == no_coh2[iv]).nonzero())
426 if len(indv[0]) > 0 :
427 no_coh = numpy.concatenate((no_coh,no_coh2[iv]), axis=None)
428 no_coh = no_coh[1:]
429 if len(no_coh) < 4 :
430 xpos[ic] = numpy.nan
431 ypos[ic] = numpy.nan
432
433 ic = ic + 1
434 if (ic == len(indxs)) :
435 break
436 indxs = (numpy.isfinite(list(xpos))==True).nonzero()
437 if len(indxs[0]) < 4 :
438 array[ii,:,:] = 0.
439 return
440
441 xpos = xpos[indxs[0]]
442 ypos = ypos[indxs[0]]
443 for i in range(0,len(ypos)):
444 ypos[i]=int(ypos[i])
445 junk = tmp
446 tmp = junk*0.0
447
448 tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))]
449 array[ii,:,:] = numpy.reshape(tmp,(num_prof,num_hei))
450 return array
451
452 def moments(self,doppler,yarray,npoints):
453 ytemp = yarray
454 val = (ytemp > 0).nonzero()
455 val = val[0]
456 if len(val) == 0 : val = range(npoints-1)
457
458 ynew = 0.5*(ytemp[val[0]]+ytemp[val[len(val)-1]])
459 ytemp = numpy.concatenate((ytemp, [ynew])) # append edge average; slice assignment past the end fails on numpy arrays
460
461 index = 0
462 index = numpy.argmax(ytemp)
463 ytemp = numpy.roll(ytemp,int(npoints/2)-1-index)
464 ytemp = ytemp[0:npoints-1]
465
466 fmom = numpy.sum(doppler*ytemp)/numpy.sum(ytemp)+(index-(npoints/2-1))*numpy.abs(doppler[1]-doppler[0])
467 smom = numpy.sum(doppler*doppler*ytemp)/numpy.sum(ytemp)
468 return [fmom,numpy.sqrt(smom)]
469
470
471
472
473
474 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None, filec=None,coh_th=None, hei_th=None,taver=None,proc=None,nhei=None,nprofs=None,ipp=None,channelList=None):
475 if not numpy.any(proc):
476 nChannels = dataOut.nChannels
477 nHeights= dataOut.heightList.size
478 nProf = dataOut.nProfiles
479 if numpy.any(taver): taver=int(taver)
480 else : taver = 5
481 tini=time.localtime(dataOut.utctime)
482 if (tini.tm_min % taver) == 0 and (tini.tm_sec < 5 and self.fint==0):
483 self.index = 0
484 jspc = self.buffer
485 jcspc = self.buffer2
486 jnoise = self.buffer3
487 self.buffer = dataOut.data_spc
488 self.buffer2 = dataOut.data_cspc
489 self.buffer3 = dataOut.noise
490 self.fint = 1
491 if numpy.any(jspc) :
492 jspc= numpy.reshape(jspc,(int(len(jspc)/nChannels),nChannels,nProf,nHeights))
493 jcspc= numpy.reshape(jcspc,(int(len(jcspc)/int(nChannels/2)),int(nChannels/2),nProf,nHeights))
494 jnoise= numpy.reshape(jnoise,(int(len(jnoise)/nChannels),nChannels))
495 else:
496 dataOut.flagNoData = True
497 return dataOut
498 else :
499 if (tini.tm_min % taver) == 0 : self.fint = 1
500 else : self.fint = 0
501 self.index += 1
502 if numpy.any(self.buffer):
503 self.buffer = numpy.concatenate((self.buffer,dataOut.data_spc), axis=0)
504 self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0)
505 self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0)
506 else:
507 self.buffer = dataOut.data_spc
508 self.buffer2 = dataOut.data_cspc
509 self.buffer3 = dataOut.noise
510 dataOut.flagNoData = True
511 return dataOut
512 if path != None:
513 sys.path.append(path)
514 self.library = importlib.import_module(file)
515 if filec != None:
516 self.weightf = importlib.import_module(filec)
517
518 #To be inserted as a parameter
519 groupArray = numpy.array(groupList)
520 #groupArray = numpy.array([[0,1],[2,3]])
521 dataOut.groupList = groupArray
522 nGroups = groupArray.shape[0]
523 nChannels = dataOut.nChannels
524 nHeights = dataOut.heightList.size
525
526 #Parameters Array
527 dataOut.data_param = None
528 dataOut.data_paramC = None
529 dataOut.clean_num_aver = None
530 dataOut.coh_num_aver = None
531 dataOut.tmp_spectra_i = None
532 dataOut.tmp_cspectra_i = None
533 dataOut.tmp_spectra_c = None
534 dataOut.tmp_cspectra_c = None
535 dataOut.index = None
536
537 #Set constants
538 constants = self.library.setConstants(dataOut)
539 dataOut.constants = constants
540 M = dataOut.normFactor
541 N = dataOut.nFFTPoints
542 ippSeconds = dataOut.ippSeconds
543 K = dataOut.nIncohInt
544 pairsArray = numpy.array(dataOut.pairsList)
545 snrth= 20
546 spectra = dataOut.data_spc
547 cspectra = dataOut.data_cspc
548 nProf = dataOut.nProfiles
549 heights = dataOut.heightList
550 nHei = len(heights)
551 channels = dataOut.channelList
552 nChan = len(channels)
553 nIncohInt = dataOut.nIncohInt
554 crosspairs = dataOut.groupList
555 noise = dataOut.noise
556 jnoise = jnoise/N
557 noise = numpy.nansum(jnoise,axis=0)#/len(jnoise)
558 power = numpy.sum(spectra, axis=1)
559 nPairs = len(crosspairs)
560 absc = dataOut.abscissaList[:-1]
561
562 if not self.isConfig:
563 self.isConfig = True
564
565 index = tini.tm_hour*12+tini.tm_min/taver
566 dataOut.index= index
567 jspc = jspc/N/N
568 jcspc = jcspc/N/N
569 tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2)
570 jspectra = tmp_spectra*len(jspc[:,0,0,0])
571 jcspectra = tmp_cspectra*len(jspc[:,0,0,0])
572 my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, snrth,coh_th, hei_th)
573 clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index)
574 dataOut.data_spc = incoh_spectra
575 dataOut.data_cspc = incoh_cspectra
576 clean_num_aver = incoh_aver*len(jspc[:,0,0,0])
577 coh_num_aver = clean_coh_aver*len(jspc[:,0,0,0])
578 dataOut.clean_num_aver = clean_num_aver
579 dataOut.coh_num_aver = coh_num_aver
580 dataOut.tmp_spectra_i = incoh_spectra
581 dataOut.tmp_cspectra_i = incoh_cspectra
582 dataOut.tmp_spectra_c = clean_coh_spectra
583 dataOut.tmp_cspectra_c = clean_coh_cspectra
584 #List of possible combinations
585 listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2)
586 indCross = numpy.zeros(len(list(listComb)), dtype = 'int')
587 if getSNR:
588 listChannels = groupArray.reshape((groupArray.size))
589 listChannels.sort()
590 dataOut.data_SNR = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise[listChannels])
591 else:
592 clean_num_aver = dataOut.clean_num_aver
593 coh_num_aver = dataOut.coh_num_aver
594 dataOut.data_spc = dataOut.tmp_spectra_i
595 dataOut.data_cspc = dataOut.tmp_cspectra_i
596 clean_coh_spectra = dataOut.tmp_spectra_c
597 clean_coh_cspectra = dataOut.tmp_cspectra_c
598 jspectra = dataOut.data_spc+clean_coh_spectra
599 nHeights = len(dataOut.heightList) # nhei
600 nProf = int(dataOut.nProfiles)
601 dataOut.nProfiles = nProf
602 dataOut.data_param = None
603 dataOut.data_paramC = None
604 dataOut.code = numpy.array([[-1.,-1.,1.],[1.,1.,-1.]])
605 #M=600
606 #N=200
607 dataOut.flagDecodeData=True
608 M = int(dataOut.normFactor)
609 N = int(dataOut.nFFTPoints)
610 dataOut.nFFTPoints = N
611 dataOut.nIncohInt= int(dataOut.nIncohInt)
612 dataOut.nProfiles = int(dataOut.nProfiles)
613 dataOut.nCohInt = int(dataOut.nCohInt)
614 print('output',dataOut.nProfiles,dataOut.nHeights)
615 #dataOut.nFFTPoints=nprofs
616 #dataOut.normFactor = nprofs
617 dataOut.channelList = channelList
618 #dataOut.ippFactor=1
619 #ipp = ipp/150*1.e-3
620 vmax = (300000000/49920000.0/2) / (dataOut.ippSeconds)
621 #dataOut.ippSeconds=ipp
622 absc = vmax*( numpy.arange(nProf,dtype='float')-nProf/2.)/nProf
623 print('output 2',dataOut.ippSeconds,M,N)
624 print('Starting offline processing')
625 if path != None:
626 sys.path.append(path)
627 self.library = importlib.import_module(file)
628 constants = self.library.setConstants(dataOut)
629 constants['M'] = M
630 dataOut.constants = constants
631
632 groupArray = numpy.array(groupList)
633 dataOut.groupList = groupArray
634 nGroups = groupArray.shape[0]
635 #List of possible combinations
636 listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2)) # materialize so it can be iterated more than once below
637 indCross = numpy.zeros(len(listComb), dtype = 'int')
638 if dataOut.data_paramC is None:
639 dataOut.data_paramC = numpy.zeros((nGroups*4, nHeights,2))*numpy.nan
640 for i in range(nGroups):
641 coord = groupArray[i,:]
642 #Input data array
643 data = dataOut.data_spc[coord,:,:]/(M*N)
644 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
645
646 #Cross Spectra data array for Covariance Matrixes
647 ind = 0
648 for pairs in listComb:
649 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
650 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
651 ind += 1
652 dataCross = dataOut.data_cspc[indCross,:,:]/(M*N)
653 dataCross = dataCross**2
654 nhei = nHeights
655 poweri = numpy.sum(dataOut.data_spc[:,1:nProf-0,:],axis=1)/clean_num_aver[:,:]
656 if i == 0 : my_noises = numpy.zeros(4,dtype=float)
657 n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(nProf-1)
658 n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(nProf-1)
659 n0 = n0i
660 n1= n1i
661 my_noises[2*i+0] = n0
662 my_noises[2*i+1] = n1
663 snrth = -15.0 # -4 -16 -25
664 snrth = 10**(snrth/10.0)
665 jvelr = numpy.zeros(nHeights, dtype = 'float')
666 hvalid = [0]
667 coh2 = abs(dataOut.data_cspc[i,1:nProf,:])**2/(dataOut.data_spc[0+i*2,1:nProf-0,:]*dataOut.data_spc[1+i*2,1:nProf-0,:])
668 for h in range(nHeights):
669 smooth = clean_num_aver[i+1,h]
670 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
671 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
672 signal0 = signalpn0-n0
673 signal1 = signalpn1-n1
674 snr0 = numpy.sum(signal0/n0)/(nProf-1)
675 snr1 = numpy.sum(signal1/n1)/(nProf-1)
676 gamma = coh2[:,h]
677 indxs = (numpy.isfinite(list(gamma))==True).nonzero()
678 if len(indxs) >0:
679 if numpy.nanmean(gamma) > 0.07:
680 maxp0 = numpy.argmax(signal0*gamma)
681 maxp1 = numpy.argmax(signal1*gamma)
682 #print('usa gamma',numpy.nanmean(gamma))
683 else:
684 maxp0 = numpy.argmax(signal0)
685 maxp1 = numpy.argmax(signal1)
686 jvelr[h] = (absc[maxp0]+absc[maxp1])/2.
687 else: jvelr[h] = absc[0]
688 if snr0 > 0.1 and snr1 > 0.1: hvalid = numpy.concatenate((hvalid,h), axis=None)
689 #print(maxp0,absc[maxp0],snr0,jvelr[h])
690
691 if len(hvalid)> 1: fd0 = numpy.median(jvelr[hvalid[1:]])*-1
692 else: fd0 = numpy.nan
693 for h in range(nHeights):
694 d = data[:,h]
695 smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:]
696 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
697 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
698 signal0 = signalpn0-n0
699 signal1 = signalpn1-n1
700 snr0 = numpy.sum(signal0/n0)/(nProf-1)
701 snr1 = numpy.sum(signal1/n1)/(nProf-1)
702 if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 :
703 #Covariance Matrix
704 D = numpy.diag(d**2)
705 ind = 0
706 for pairs in listComb:
707 #Coordinates in Covariance Matrix
708 x = pairs[0]
709 y = pairs[1]
710 #Channel Index
711 S12 = dataCross[ind,:,h]
712 D12 = numpy.diag(S12)
713 #Completing Covariance Matrix with Cross Spectras
714 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
715 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
716 ind += 1
717 diagD = numpy.zeros(256)
718
719 try:
720 Dinv=numpy.linalg.inv(D)
721 L=numpy.linalg.cholesky(Dinv)
722 except:
723 Dinv = D*numpy.nan
724 L= D*numpy.nan
725 LT=L.T
726
727 dp = numpy.dot(LT,d)
728 #Initial values
729 data_spc = dataOut.data_spc[coord,:,h]
730 w = data_spc/data_spc
731 if filec != None:
732 w = self.weightf.weightfit(w,tini.tm_year,tini.tm_yday,index,h,i)
733 if (h>6)and(error1[3]<25):
734 p0 = dataOut.data_param[i,:,h-1]
735 else:
736 p0 = numpy.array(self.library.initialValuesFunction(data_spc*w, constants))# sin el i(data_spc, constants, i)
737 p0[3] = fd0
738 if filec != None:
739 p0 = self.weightf.Vrfit(p0,tini.tm_year,tini.tm_yday,index,h,i)
740 try:
741 #Least Squares
742 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
743 #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
744 #Chi square error
745 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
746 #Error with Jacobian
747 error1 = self.library.errorFunction(minp,constants,LT)
748
749 except:
750 minp = p0*numpy.nan
751 error0 = numpy.nan
752 error1 = p0*numpy.nan
753 else :
754 data_spc = dataOut.data_spc[coord,:,h]
755 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))
756 minp = p0*numpy.nan
757 error0 = numpy.nan
758 error1 = p0*numpy.nan
759 if dataOut.data_param is None:
760 dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
761 dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
762
763 dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
764 dataOut.data_param[i,:,h] = minp
765 for ht in range(nHeights-1) :
766 smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam]
767 dataOut.data_paramC[4*i,ht,1] = smooth
768 signalpn0 = (clean_coh_spectra[i*2 ,1:(nProf-0),ht])/smooth #coh_spectra
769 signalpn1 = (clean_coh_spectra[i*2+1,1:(nProf-0),ht])/smooth
770 val0 = (signalpn0 > 0).nonzero()
771 val0 = val0[0]
772 if len(val0) == 0 : val0_npoints = nProf
773 else : val0_npoints = len(val0)
774
775 val1 = (signalpn1 > 0).nonzero()
776 val1 = val1[0]
777 if len(val1) == 0 : val1_npoints = nProf
778 else : val1_npoints = len(val1)
779
780 dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0
781 dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1
782
783 signal0 = (signalpn0-n0)
784 vali = (signal0 < 0).nonzero()
785 vali = vali[0]
786 if len(vali) > 0 : signal0[vali] = 0
787 signal1 = (signalpn1-n1)
788 vali = (signal1 < 0).nonzero()
789 vali = vali[0]
790 if len(vali) > 0 : signal1[vali] = 0
791 snr0 = numpy.sum(signal0/n0)/(nProf-1)
792 snr1 = numpy.sum(signal1/n1)/(nProf-1)
793 doppler = absc[1:]
794 if snr0 >= snrth and snr1 >= snrth and smooth :
795 signalpn0_n0 = signalpn0
796 signalpn0_n0[val0] = signalpn0[val0] - n0
797 mom0 = self.moments(doppler,signalpn0-n0,nProf)
798 signalpn1_n1 = signalpn1
799 signalpn1_n1[val1] = signalpn1[val1] - n1
800 mom1 = self.moments(doppler,signalpn1_n1,nProf)
801 dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2.
802 dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2.
803
804 dataOut.data_spc = jspectra
805 dataOut.spc_noise = my_noises*nProf*M
806 if numpy.any(proc): dataOut.spc_noise = my_noises*nProf*M
807 if getSNR:
808 listChannels = groupArray.reshape((groupArray.size))
809 listChannels.sort()
810
811 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], my_noises[listChannels])
812 return dataOut
813
814 def __residFunction(self, p, dp, LT, constants):
815
816 fm = self.library.modelFunction(p, constants)
817 fmp=numpy.dot(LT,fm)
818 return dp-fmp
819
820 def __getSNR(self, z, noise):
821
822 avg = numpy.average(z, axis=1)
823 SNR = (avg.T-noise)/noise
824 SNR = SNR.T
825 return SNR
826
827 def __chisq(self, p, chindex, hindex):
828 #similar to Resid but calculates CHI**2
829 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
830 dp=numpy.dot(LT,d)
831 fmp=numpy.dot(LT,fm)
832 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
833 return chisq
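The fitting loop above whitens the measured spectrum with the Cholesky factor of the inverse covariance matrix before calling optimize.leastsq, so the generalised least-squares problem becomes an ordinary one. Below is a self-contained toy sketch of that step; the Gaussian toy_model, array sizes and noise values are invented for illustration and stand in for self.library.modelFunction and the real covariance built from the spectra and cross-spectra.

import numpy
from scipy import optimize

def toy_model(p, x):
    # stand-in for self.library.modelFunction(p, constants)
    return p[0] * numpy.exp(-0.5 * ((x - p[1]) / p[2]) ** 2)

x = numpy.linspace(-5.0, 5.0, 64)
D = numpy.diag(0.01 + 0.02 * numpy.random.rand(x.size))          # toy covariance matrix
d = toy_model([1.0, 0.5, 1.2], x) + \
    numpy.random.multivariate_normal(numpy.zeros(x.size), D)      # noisy "measured" data

L = numpy.linalg.cholesky(numpy.linalg.inv(D))                    # D^-1 = L L^T
LT = L.T
dp = numpy.dot(LT, d)                                             # whitened data, cov(dp) = I

def resid(p, dp, LT, x):
    # same shape as __residFunction: whitened data minus whitened model
    return dp - numpy.dot(LT, toy_model(p, x))

p0 = numpy.array([0.8, 0.0, 1.0])
minp, covp, infodict, mesg, ier = optimize.leastsq(resid, p0, args=(dp, LT, x), full_output=True)
chi2 = numpy.sum(infodict['fvec'] ** 2) / (2 * x.size)            # same chi-square bookkeeping as above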
@@ -945,12 +945,12 @@ class IncohInt(Operation):
945
946 class dopplerFlip(Operation):
947
-948 def run(self, dataOut):
+948 def run(self, dataOut, chann = None):
949 # array 1: (num_chan, num_profiles, num_heights)
950 self.dataOut = dataOut
951 # JULIA-oblique, index 2
952 # array 2: (num_profiles, num_heights)
-953 jspectra = self.dataOut.data_spc[2]
+953 jspectra = self.dataOut.data_spc[chann]
954 jspectra_tmp = numpy.zeros(jspectra.shape)
955 num_profiles = jspectra.shape[0]
956 freq_dc = int(num_profiles / 2)
@@ -961,6 +961,6 @@ class dopplerFlip(Operation):
961 jspectra_tmp[freq_dc-1]= jspectra[freq_dc-1]
962 jspectra_tmp[freq_dc]= jspectra[freq_dc]
963 # the modified channel is written back into the channel array
-964 self.dataOut.data_spc[2] = jspectra_tmp
+964 self.dataOut.data_spc[chann] = jspectra_tmp
965
-966 return self.dataOut
\ No newline at end of file
+966 return self.dataOut
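The only functional change in the hunk above is that the hard-coded channel index 2 becomes the chann argument. A hedged usage sketch, assuming dataOut already carries data_spc as in the spectra chain; note that chann has no default fallback to channel 2, so callers must pass it explicitly.

op = dopplerFlip()
dataOut = op.run(dataOut, chann=2)   # reproduces the previous hard-coded behaviour
dataOut = op.run(dataOut, chann=0)   # any other channel present in data_spc can now be flipped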