update jroproc_parameters, JULIADriftsEstimation changes, reference v3.0-devel IVAN
Alexander Valdez
r1700:20a5087b4329
@@ -1,6223 +1,6238
1 1 import numpy
2 2 import math
3 3 from scipy import optimize, interpolate, signal, stats, ndimage
4 4 from scipy.fftpack import fft
5 5 import scipy
6 6 import re
7 7 import datetime
8 8 import copy
9 9 import sys
10 10 import importlib
11 11 import itertools
12 12 from multiprocessing import Pool, TimeoutError
13 13 from multiprocessing.pool import ThreadPool
14 14 import time
15
15 16 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state parameters
16 17 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
17 18 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
18 19 from schainpy.model.data.jrodata import Spectra
19 20 from scipy import asarray as ar,exp
20 21 from scipy.optimize import fmin, curve_fit
21 22 from schainpy.utils import log
22 23 import warnings
23 24 from numpy import NaN
24 25 from scipy.optimize.optimize import OptimizeWarning
25 26 warnings.filterwarnings('ignore')
26 27
27 28
28 29 SPEED_OF_LIGHT = 299792458
29 30
30 31 '''solving pickling issue'''
31 32
32 33 def _pickle_method(method):
33 34 func_name = method.__func__.__name__
34 35 obj = method.__self__
35 36 cls = method.__self__.__class__
36 37 return _unpickle_method, (func_name, obj, cls)
37 38
38 39 def _unpickle_method(func_name, obj, cls):
39 40 for cls in cls.mro():
40 41 try:
41 42 func = cls.__dict__[func_name]
42 43 except KeyError:
43 44 pass
44 45 else:
45 46 break
46 47 return func.__get__(obj, cls)
47 48
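# --- Illustrative sketch (editorial, not part of the original commit) ---
# The two helpers above only take effect once they are registered with the
# standard pickling machinery.  A minimal registration, assuming only the
# standard library, is shown below; it lets multiprocessing.Pool (see
# GaussianFit.run) ship bound methods such as GaussianFit.FitGau to worker
# processes.  On Python 3 bound methods generally pickle without this, so it
# mainly matters on older interpreters.
import copyreg
import types

copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)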
48 49
49 50 class ParametersProc(ProcessingUnit):
50 51
51 52 METHODS = {}
52 53 nSeconds = None
53 54
54 55 def __init__(self):
55 56 ProcessingUnit.__init__(self)
56 57
57 58 self.buffer = None
58 59 self.firstdatatime = None
59 60 self.profIndex = 0
60 61 self.dataOut = Parameters()
61 62 self.setupReq = False #Add this to every processing unit
62 63
63 64 def __updateObjFromInput(self):
64 65
65 66 self.dataOut.inputUnit = self.dataIn.type
66 67
67 68 self.dataOut.timeZone = self.dataIn.timeZone
68 69 self.dataOut.dstFlag = self.dataIn.dstFlag
69 70 self.dataOut.errorCount = self.dataIn.errorCount
70 71 self.dataOut.useLocalTime = self.dataIn.useLocalTime
71 72
72 73 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
73 74 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
74 75 self.dataOut.channelList = self.dataIn.channelList
75 76 self.dataOut.heightList = self.dataIn.heightList
76 77 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
77 78 # self.dataOut.nBaud = self.dataIn.nBaud
78 79 # self.dataOut.nCode = self.dataIn.nCode
79 80 # self.dataOut.code = self.dataIn.code
80 81 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
81 82 self.dataOut.utctime = self.dataIn.utctime
82 83 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData #assume the data is already decoded
83 84 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData #assume the data has no flip applied
84 85 self.dataOut.nCohInt = self.dataIn.nCohInt
85 86 self.dataOut.timeInterval1 = self.dataIn.timeInterval
86 87 self.dataOut.heightList = self.dataIn.heightList
87 88 self.dataOut.frequency = self.dataIn.frequency
88 89 self.dataOut.runNextUnit = self.dataIn.runNextUnit
89 90
90 91 def run(self, runNextUnit=0):
91 92
92 93 self.dataIn.runNextUnit = runNextUnit
93 94 #---------------------- Voltage Data ---------------------------
94 95
95 96 if self.dataIn.type == "Voltage":
96 97
97 98 self.__updateObjFromInput()
98 99 self.dataOut.data_pre = self.dataIn.data.copy()
99 100 self.dataOut.flagNoData = False
100 101 self.dataOut.utctimeInit = self.dataIn.utctime
101 102 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
102 103 if hasattr(self.dataIn, 'dataPP_POW'):
103 104 self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
104 105
105 106 if hasattr(self.dataIn, 'dataPP_POWER'):
106 107 self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
107 108
108 109 if hasattr(self.dataIn, 'dataPP_DOP'):
109 110 self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
110 111
111 112 if hasattr(self.dataIn, 'dataPP_SNR'):
112 113 self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
113 114
114 115 if hasattr(self.dataIn, 'dataPP_WIDTH'):
115 116 self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
116 117 return
117 118
118 119 #---------------------- Spectra Data ---------------------------
119 120
120 121 if self.dataIn.type == "Spectra":
121 122
122 123 self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
123 124 self.dataOut.data_spc = self.dataIn.data_spc
124 125 self.dataOut.data_cspc = self.dataIn.data_cspc
125 126 self.dataOut.nProfiles = self.dataIn.nProfiles
126 127 self.dataOut.nIncohInt = self.dataIn.nIncohInt
127 128 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
128 129 self.dataOut.ippFactor = self.dataIn.ippFactor
129 130 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
130 131 self.dataOut.spc_noise = self.dataIn.getNoise()
131 132 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
132 133 # self.dataOut.normFactor = self.dataIn.normFactor
133 134 self.dataOut.pairsList = self.dataIn.pairsList
134 135 self.dataOut.groupList = self.dataIn.pairsList
135 136 self.dataOut.flagNoData = False
136 137
137 138 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
138 139 self.dataOut.ChanDist = self.dataIn.ChanDist
139 140 else: self.dataOut.ChanDist = None
140 141
141 142 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
142 143 # self.dataOut.VelRange = self.dataIn.VelRange
143 144 #else: self.dataOut.VelRange = None
144 145
145 146 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
146 147 self.dataOut.RadarConst = self.dataIn.RadarConst
147 148
148 149 if hasattr(self.dataIn, 'NPW'): #NPW
149 150 self.dataOut.NPW = self.dataIn.NPW
150 151
151 152 if hasattr(self.dataIn, 'COFA'): #COFA
152 153 self.dataOut.COFA = self.dataIn.COFA
153 154
154 155
155 156
156 157 #---------------------- Correlation Data ---------------------------
157 158
158 159 if self.dataIn.type == "Correlation":
159 160 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
160 161
161 162 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
162 163 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
163 164 self.dataOut.groupList = (acf_pairs, ccf_pairs)
164 165
165 166 self.dataOut.abscissaList = self.dataIn.lagRange
166 167 self.dataOut.noise = self.dataIn.noise
167 168 self.dataOut.data_snr = self.dataIn.SNR
168 169 self.dataOut.flagNoData = False
169 170 self.dataOut.nAvg = self.dataIn.nAvg
170 171
171 172 #---------------------- Parameters Data ---------------------------
172 173
173 174 if self.dataIn.type == "Parameters":
174 175 self.dataOut.copy(self.dataIn)
175 176 self.dataOut.flagNoData = False
176 177
177 178 return True
178 179
179 180 self.__updateObjFromInput()
180 181 self.dataOut.utctimeInit = self.dataIn.utctime
181 182 self.dataOut.paramInterval = self.dataIn.timeInterval
182 183
183 184 return
184 185
185 186
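# target() below is kept at module level so that multiprocessing.Pool can
# pickle the callable: it unpacks an (object, args) tuple and forwards the
# call to obj.FitGau (GaussianFit.run maps it over one tuple per channel).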
186 187 def target(tups):
187 188
188 189 obj, args = tups
189 190
190 191 return obj.FitGau(args)
191 192
192 193 class RemoveWideGC(Operation):
193 194 ''' This class removes the wide clutter and replaces it with simple interpolated points
194 195 This mainly applies to CLAIRE radar
195 196
196 197 ClutterWidth : Width to look for the clutter peak
197 198
198 199 Input:
199 200
200 201 self.dataOut.data_pre : SPC and CSPC
201 202 self.dataOut.spc_range : To select wind and rainfall velocities
202 203
203 204 Affected:
204 205
205 206 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
206 207
207 208 Written by D. Scipión 25.02.2021
208 209 '''
209 210 def __init__(self):
210 211 Operation.__init__(self)
211 212 self.i = 0
212 213 self.ich = 0
213 214 self.ir = 0
214 215
215 216 def run(self, dataOut, ClutterWidth=2.5):
216 217
217 218 self.spc = dataOut.data_pre[0].copy()
218 219 self.spc_out = dataOut.data_pre[0].copy()
219 220 self.Num_Chn = self.spc.shape[0]
220 221 self.Num_Hei = self.spc.shape[2]
221 222 VelRange = dataOut.spc_range[2][:-1]
222 223 dv = VelRange[1]-VelRange[0]
223 224
224 225 # Find the velocities that correspond to zero
225 226 gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))
226 227
227 228 # Removing novalid data from the spectra
228 229 for ich in range(self.Num_Chn) :
229 230 for ir in range(self.Num_Hei) :
230 231 # Estimate the noise at each range
231 232 HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)
232 233
233 234 # Removing the noise floor at each range
234 235 novalid = numpy.where(self.spc[ich,:,ir] < HSn)
235 236 self.spc[ich,novalid,ir] = HSn
236 237
237 238 junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
238 239 j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
239 240 j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
240 241 if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
241 242 continue
242 243 junk3 = numpy.squeeze(numpy.diff(j1index))
243 244 junk4 = numpy.squeeze(numpy.diff(j2index))
244 245
245 246 valleyindex = j2index[numpy.where(junk4>1)]
246 247 peakindex = j1index[numpy.where(junk3>1)]
247 248
248 249 isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
249 250 if numpy.size(isvalid) == 0 :
250 251 continue
251 252 if numpy.size(isvalid) >1 :
252 253 vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
253 254 isvalid = isvalid[vindex]
254 255
255 256 # clutter peak
256 257 gcpeak = peakindex[isvalid]
257 258 vl = numpy.where(valleyindex < gcpeak)
258 259 if numpy.size(vl) == 0:
259 260 continue
260 261 gcvl = valleyindex[vl[0][-1]]
261 262 vr = numpy.where(valleyindex > gcpeak)
262 263 if numpy.size(vr) == 0:
263 264 continue
264 265 gcvr = valleyindex[vr[0][0]]
265 266
266 267 # Removing the clutter
267 268 interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
268 269 gcindex = gc_values[gcvl+1:gcvr-1]
269 270 self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])
270 271
271 272 dataOut.data_pre[0] = self.spc_out
272 273
273 274 return dataOut
274 275
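# --- Illustrative sketch (editorial, not part of the original commit) ---
# Condensed, standalone version of the interpolation step performed in
# RemoveWideGC.run() above: once the clutter peak and its neighbouring
# valleys have been located inside the +/- ClutterWidth window, the
# contaminated bins are replaced by a linear interpolation between the two
# valley bins.  Names and usage are illustrative only.
import numpy

def interp_over_clutter(spc_1d, vel, left_valley, right_valley):
    """Replace the bins strictly between the two valley indices."""
    out = spc_1d.copy()
    gc_bins = numpy.arange(left_valley + 1, right_valley)   # contaminated bins
    anchors = numpy.array([left_valley, right_valley])      # clean end points
    out[gc_bins] = numpy.interp(vel[gc_bins], vel[anchors], spc_1d[anchors])
    return out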
275 276 class SpectralFilters(Operation):
276 277 ''' This class replaces the novalid (invalid) values with noise for each channel
277 278 This applies to CLAIRE RADAR
278 279
279 280 PositiveLimit : RightLimit of novalid data
280 281 NegativeLimit : LeftLimit of novalid data
281 282
282 283 Input:
283 284
284 285 self.dataOut.data_pre : SPC and CSPC
285 286 self.dataOut.spc_range : To select wind and rainfall velocities
286 287
287 288 Affected:
288 289
289 290 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
290 291
291 292 Written by D. Scipión 29.01.2021
292 293 '''
293 294 def __init__(self):
294 295 Operation.__init__(self)
295 296 self.i = 0
296 297
297 298 def run(self, dataOut, NegativeLimit=-1.5, PositiveLimit=1.5):
298 299
299 300 self.spc = dataOut.data_pre[0].copy()
300 301 self.Num_Chn = self.spc.shape[0]
301 302 VelRange = dataOut.spc_range[2]
302 303
303 304 # novalid corresponds to data within the NegativeLimit and PositiveLimit velocities
304 305 # (this computation restores what the docstring describes; the default limits above are editorial placeholders)
305 306 novalid = numpy.where(numpy.logical_and(VelRange >= NegativeLimit, VelRange <= PositiveLimit))[0]
306 307 # Removing novalid data from the spectra
307 308 for i in range(self.Num_Chn):
308 309 self.spc[i,novalid,:] = dataOut.noise[i]
309 310 dataOut.data_pre[0] = self.spc
310 311 return dataOut
311 312
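# --- Illustrative sketch (editorial, not part of the original commit) ---
# The `novalid` bins used in SpectralFilters.run() are simply the velocity
# bins that fall between NegativeLimit and PositiveLimit; the axis and limits
# below are illustrative values only.
import numpy

VelRange = numpy.linspace(-10., 10., 128)                    # synthetic velocity axis (m/s)
NegativeLimit, PositiveLimit = -1.5, 1.5                     # example limits
novalid = numpy.where((VelRange >= NegativeLimit) & (VelRange <= PositiveLimit))[0]
# run() then overwrites spc[channel, novalid, :] with dataOut.noise[channel].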
312 313
313 314 class GaussianFit(Operation):
314 315
315 316 '''
316 317 Function that fits one or two generalized Gaussians (gg) based
317 318 on the PSD shape across a "power band" identified from a cumsum of
318 319 the measured spectrum minus noise.
319 320
320 321 Input:
321 322 self.dataOut.data_pre : SelfSpectra
322 323
323 324 Output:
324 325 self.dataOut.SPCparam : SPC_ch1, SPC_ch2
325 326
326 327 '''
327 328 def __init__(self):
328 329 Operation.__init__(self)
329 330 self.i=0
330 331
331
332 # def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points
333 332 def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
334 333 """This routine will find a couple of generalized Gaussians to a power spectrum
335 334 methods: generalized, squared
336 335 input: spc
337 336 output:
338 337 noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1
339 338 """
340 339 print ('Entering ',method,' double Gaussian fit')
341 340 self.spc = dataOut.data_pre[0].copy()
342 341 self.Num_Hei = self.spc.shape[2]
343 342 self.Num_Bin = self.spc.shape[1]
344 343 self.Num_Chn = self.spc.shape[0]
345 344
346 345 start_time = time.time()
347 346
348 347 pool = Pool(processes=self.Num_Chn)
349 348 args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
350 349 objs = [self for __ in range(self.Num_Chn)]
351 350 attrs = list(zip(objs, args))
352 351 DGauFitParam = pool.map(target, attrs)
353 352 # Parameters:
354 353 # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
355 354 dataOut.DGauFitParams = numpy.asarray(DGauFitParam)
356 355
357 356 # Double Gaussian Curves
358 357 gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
359 358 gau0[:] = numpy.NaN
360 359 gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
361 360 gau1[:] = numpy.NaN
362 361 x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
363 362 for iCh in range(self.Num_Chn):
364 363 N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
365 364 N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
366 365 A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
367 366 A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
368 367 v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
369 368 v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
370 369 s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
371 370 s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
372 371 if method == 'generalized':
373 372 p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
374 373 p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
375 374 elif method == 'squared':
376 375 p0 = 2.
377 376 p1 = 2.
378 377 gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
379 378 gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
380 379 dataOut.GaussFit0 = gau0
381 380 dataOut.GaussFit1 = gau1
382 381
383 382 print('Leaving ',method ,' double Gaussian fit')
384 383 return dataOut
385 384
386 385 def FitGau(self, X):
387 386 # print('Entering FitGau')
388 387 # Assigning the variables
389 388 Vrange, ch, wnoise, num_intg, SNRlimit = X
390 389 # Noise Limits
391 390 noisebl = wnoise * 0.9
392 391 noisebh = wnoise * 1.1
393 392 # Radar Velocity
394 393 Va = max(Vrange)
395 394 deltav = Vrange[1] - Vrange[0]
396 395 x = numpy.arange(self.Num_Bin)
397 396
398 397 # print ('stop 0')
399 398
400 399 # 5 parameters, 2 Gaussians
401 400 DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
402 401 DGauFitParam[:] = numpy.NaN
403 402
404 403 # SPCparam = []
405 404 # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei])
406 405 # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei])
407 406 # SPC_ch1[:] = 0 #numpy.NaN
408 407 # SPC_ch2[:] = 0 #numpy.NaN
409 408 # print ('stop 1')
410 409 for ht in range(self.Num_Hei):
411 410 # print (ht)
412 411 # print ('stop 2')
413 412 # Spectra at each range
414 413 spc = numpy.asarray(self.spc)[ch,:,ht]
415 414 snr = ( spc.mean() - wnoise ) / wnoise
416 415 snrdB = 10.*numpy.log10(snr)
417 416
418 417 #print ('stop 3')
419 418 if snrdB < SNRlimit :
420 419 # snr = numpy.NaN
421 420 # SPC_ch1[:,ht] = 0#numpy.NaN
422 421 # SPC_ch1[:,ht] = 0#numpy.NaN
423 422 # SPCparam = (SPC_ch1,SPC_ch2)
424 423 # print ('SNR less than SNRth')
425 424 continue
426 425 # wnoise = hildebrand_sekhon(spc,num_intg)
427 426 # print ('stop 2.01')
428 427 #############################################
429 428 # normalizing spc and noise
430 429 # This part differs from gg1
431 430 # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
432 431 #spc = spc / spc_norm_max
433 432 # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021
434 433 #############################################
435 434
436 435 # print ('stop 2.1')
437 436 fatspectra=1.0
438 437 # noise per channel.... we might want to use the noise at each range
439 438
440 439 # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021
441 440 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
442 441 #if wnoise>1.1*pnoise: # to be tested later
443 442 # wnoise=pnoise
444 443 # noisebl = wnoise*0.9
445 444 # noisebh = wnoise*1.1
446 445 spc = spc - wnoise # signal
447 446
448 447 # print ('stop 2.2')
449 448 minx = numpy.argmin(spc)
450 449 #spcs=spc.copy()
451 450 spcs = numpy.roll(spc,-minx)
452 451 cum = numpy.cumsum(spcs)
453 452 # tot_noise = wnoise * self.Num_Bin #64;
454 453
455 454 # print ('stop 2.3')
456 455 # snr = sum(spcs) / tot_noise
457 456 # snrdB = 10.*numpy.log10(snr)
458 457 #print ('stop 3')
459 458 # if snrdB < SNRlimit :
460 459 # snr = numpy.NaN
461 460 # SPC_ch1[:,ht] = 0#numpy.NaN
462 461 # SPC_ch1[:,ht] = 0#numpy.NaN
463 462 # SPCparam = (SPC_ch1,SPC_ch2)
464 463 # print ('SNR less than SNRth')
465 464 # continue
466 465
467 466
468 467 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
469 468 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
470 469 # print ('stop 4')
471 470 cummax = max(cum)
472 471 epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
473 472 cumlo = cummax * epsi
474 473 cumhi = cummax * (1-epsi)
475 474 powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
476 475
477 476 # print ('stop 5')
478 477 if len(powerindex) < 1:# case for powerindex 0
479 478 # print ('powerindex < 1')
480 479 continue
481 480 powerlo = powerindex[0]
482 481 powerhi = powerindex[-1]
483 482 powerwidth = powerhi-powerlo
484 483 if powerwidth <= 1:
485 484 # print('powerwidth <= 1')
486 485 continue
487 486
488 487 # print ('stop 6')
489 488 firstpeak = powerlo + powerwidth/10.# first gaussian energy location
490 489 secondpeak = powerhi - powerwidth/10. #second gaussian energy location
491 490 midpeak = (firstpeak + secondpeak)/2.
492 491 firstamp = spcs[int(firstpeak)]
493 492 secondamp = spcs[int(secondpeak)]
494 493 midamp = spcs[int(midpeak)]
495 494
496 495 y_data = spc + wnoise
497 496
498 497 ''' single Gaussian '''
499 498 shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
500 499 width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
501 500 power0 = 2.
502 501 amplitude0 = midamp
503 502 state0 = [shift0,width0,amplitude0,power0,wnoise]
504 503 bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
505 504 lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)
506 505 # print ('stop 7.1')
507 506 # print (bnds)
508 507
509 508 chiSq1=lsq1[1]
510 509
511 510 # print ('stop 8')
512 511 if fatspectra<1.0 and powerwidth<4:
513 512 choice=0
514 513 Amplitude0=lsq1[0][2]
515 514 shift0=lsq1[0][0]
516 515 width0=lsq1[0][1]
517 516 p0=lsq1[0][3]
518 517 Amplitude1=0.
519 518 shift1=0.
520 519 width1=0.
521 520 p1=0.
522 521 noise=lsq1[0][4]
523 522 #return (numpy.array([shift0,width0,Amplitude0,p0]),
524 523 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
525 524 # print ('stop 9')
526 525 ''' two Gaussians '''
527 526 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
528 527 shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
529 528 shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
530 529 width0 = powerwidth/6.
531 530 width1 = width0
532 531 power0 = 2.
533 532 power1 = power0
534 533 amplitude0 = firstamp
535 534 amplitude1 = secondamp
536 535 state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
537 536 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
538 537 bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
539 538 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
540 539
541 540 # print ('stop 10')
542 541 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
543 542
544 543 # print ('stop 11')
545 544 chiSq2 = lsq2[1]
546 545
547 546 # print ('stop 12')
548 547
549 548 oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
550 549
551 550 # print ('stop 13')
552 551 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
553 552 if oneG:
554 553 choice = 0
555 554 else:
556 555 w1 = lsq2[0][1]; w2 = lsq2[0][5]
557 556 a1 = lsq2[0][2]; a2 = lsq2[0][6]
558 557 p1 = lsq2[0][3]; p2 = lsq2[0][7]
559 558 s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
560 559 s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
561 560 gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each generalized Gaussian with proper p scaling
562 561
563 562 if gp1>gp2:
564 563 if a1>0.7*a2:
565 564 choice = 1
566 565 else:
567 566 choice = 2
568 567 elif gp2>gp1:
569 568 if a2>0.7*a1:
570 569 choice = 2
571 570 else:
572 571 choice = 1
573 572 else:
574 573 choice = numpy.argmax([a1,a2])+1
575 574 #else:
576 575 #choice=argmin([std2a,std2b])+1
577 576
578 577 else: # with low SNR go to the most energetic peak
579 578 choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
580 579
581 580 # print ('stop 14')
582 581 shift0 = lsq2[0][0]
583 582 vel0 = Vrange[0] + shift0 * deltav
584 583 shift1 = lsq2[0][4]
585 584 # vel1=Vrange[0] + shift1 * deltav
586 585
587 586 # max_vel = 1.0
588 587 # Va = max(Vrange)
589 588 # deltav = Vrange[1]-Vrange[0]
590 589 # print ('stop 15')
591 590 #first peak will be 0, second peak will be 1
592 591 # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D.Scipión 19.03.2021
593 592 if vel0 > -Va and vel0 < Va : #first peak is in the correct range
594 593 shift0 = lsq2[0][0]
595 594 width0 = lsq2[0][1]
596 595 Amplitude0 = lsq2[0][2]
597 596 p0 = lsq2[0][3]
598 597
599 598 shift1 = lsq2[0][4]
600 599 width1 = lsq2[0][5]
601 600 Amplitude1 = lsq2[0][6]
602 601 p1 = lsq2[0][7]
603 602 noise = lsq2[0][8]
604 603 else:
605 604 shift1 = lsq2[0][0]
606 605 width1 = lsq2[0][1]
607 606 Amplitude1 = lsq2[0][2]
608 607 p1 = lsq2[0][3]
609 608
610 609 shift0 = lsq2[0][4]
611 610 width0 = lsq2[0][5]
612 611 Amplitude0 = lsq2[0][6]
613 612 p0 = lsq2[0][7]
614 613 noise = lsq2[0][8]
615 614
616 615 if Amplitude0<0.05: # in case the peak is noise
617 616 shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
618 617 if Amplitude1<0.05:
619 618 shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]
620 619
621 620 # print ('stop 16 ')
622 621 # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0)
623 622 # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1)
624 623 # SPCparam = (SPC_ch1,SPC_ch2)
625 624
626 625 DGauFitParam[0,ht,0] = noise
627 626 DGauFitParam[0,ht,1] = noise
628 627 DGauFitParam[1,ht,0] = Amplitude0
629 628 DGauFitParam[1,ht,1] = Amplitude1
630 629 DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
631 630 DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
632 631 DGauFitParam[3,ht,0] = width0 * deltav
633 632 DGauFitParam[3,ht,1] = width1 * deltav
634 633 DGauFitParam[4,ht,0] = p0
635 634 DGauFitParam[4,ht,1] = p1
636 635
637 636 return DGauFitParam
638 637
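# --- Illustrative sketch (editorial, not part of the original commit) ---
# Condensed version of the "power band" search used in FitGau above: the
# noise-subtracted spectrum is rotated so that its minimum sits at bin 0, and
# the cumulative sum is trimmed by a fraction epsi at both ends to keep only
# the bins that carry the bulk of the signal power.
import numpy

def power_band(spc, wnoise, epsi=0.08):
    """Return (powerlo, powerhi, minx), or None if the band is too narrow."""
    sig = spc - wnoise                        # signal above the noise floor
    minx = numpy.argmin(sig)
    spcs = numpy.roll(sig, -minx)             # rotate the minimum to bin 0
    cum = numpy.cumsum(spcs)
    idx = numpy.where((cum > cum.max() * epsi) & (cum < cum.max() * (1. - epsi)))[0]
    if idx.size < 2 or idx[-1] - idx[0] <= 1:
        return None                           # nothing usable to fit
    return idx[0], idx[-1], minx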
639 638 def y_model1(self,x,state):
640 639 shift0, width0, amplitude0, power0, noise = state
641 640 model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
642 641 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
643 642 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
644 643 return model0 + model0u + model0d + noise
645 644
646 645 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
647 646 shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
648 647 model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
649 648 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
650 649 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
651 650
652 651 model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
653 652 model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
654 653 model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
655 654 return model0 + model0u + model0d + model1 + model1u + model1d + noise
656 655
657 656 def misfit1(self,state,y_data,x,num_intg): # Compares the measured data with the model in log space; the smaller the value, the better the fit.
658 657
659 658 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
660 659
661 660 def misfit2(self,state,y_data,x,num_intg):
662 661 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
663 662
664 663 class Oblique_Gauss_Fit(Operation):
665 664 '''
666 665 Written by R. Flores
667 666 '''
668 667 def __init__(self):
669 668 Operation.__init__(self)
670 669
671 670 def Gauss_fit(self,spc,x,nGauss):
672 671
673 672
674 673 def gaussian(x, a, b, c, d):
675 674 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
676 675 return val
677 676
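# For the 'first' (lower) peak the spectrum is symmetrized before fitting:
# everything up to the maximum is mirrored around the peak, and the result is
# padded back to the original length with the first sample value, so that a
# plain Gaussian can be fitted to an otherwise one-sided feature.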
678 677 if nGauss == 'first':
679 678 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
680 679 spc_2_aux = numpy.flip(spc_1_aux)
681 680 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
682 681
683 682 len_dif = len(x)-len(spc_3_aux)
684 683
685 684 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
686 685
687 686 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
688 687
689 688 y = spc_new
690 689
691 690 elif nGauss == 'second':
692 691 y = spc
693 692
694 693
695 694 # estimate starting values from the data
696 695 a = y.max()
697 696 b = x[numpy.argmax(y)]
698 697 if nGauss == 'first':
699 698 c = 1.#b#b#numpy.std(spc)
700 699 elif nGauss == 'second':
701 700 c = b
702 701 else:
703 702 print("ERROR")
704 703
705 704 d = numpy.mean(y[-100:])
706 705
707 706 # define a least squares function to optimize
708 707 def minfunc(params):
709 708 return sum((y-gaussian(x,params[0],params[1],params[2],params[3]))**2)
710 709
711 710 # fit
712 711 popt = fmin(minfunc,[a,b,c,d],disp=False)
713 712 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
714 713
715 714
716 715 return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
717 716
717
718 718 def Gauss_fit_2(self,spc,x,nGauss):
719 719
720 720
721 721 def gaussian(x, a, b, c, d):
722 722 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
723 723 return val
724 724
725 725 if nGauss == 'first':
726 726 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
727 727 spc_2_aux = numpy.flip(spc_1_aux)
728 728 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
729 729
730 730 len_dif = len(x)-len(spc_3_aux)
731 731
732 732 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
733 733
734 734 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
735 735
736 736 y = spc_new
737 737
738 738 elif nGauss == 'second':
739 739 y = spc
740 740
741 741
742 742 # estimate starting values from the data
743 743 a = y.max()
744 744 b = x[numpy.argmax(y)]
745 745 if nGauss == 'first':
746 746 c = 1.#b#b#numpy.std(spc)
747 747 elif nGauss == 'second':
748 748 c = b
749 749 else:
750 750 print("ERROR")
751 751
752 752 d = numpy.mean(y[-100:])
753 753 popt,pcov = curve_fit(gaussian,x,y,p0=[a,b,c,d])
754 754 return gaussian(x, popt[0], popt[1], popt[2], popt[3]),popt[0], popt[1], popt[2], popt[3]
755 755
756 756 def Double_Gauss_fit(self,spc,x,A1,B1,C1,A2,B2,C2,D):
757 757
758 758 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
759 759 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
760 760 return val
761 761
762 762
763 763 y = spc
764 764
765 765 # estimate starting values from the data
766 766 a1 = A1
767 767 b1 = B1
768 768 c1 = C1#numpy.std(spc)
769 769
770 770 a2 = A2#y.max()
771 771 b2 = B2#x[numpy.argmax(y)]
772 772 c2 = C2#numpy.std(spc)
773 773 d = D
774 774
775 775 # define a least squares function to optimize
776 776 def minfunc(params):
777 777 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2)
778 778
779 779 # fit
780 780 popt = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],disp=False)
781 781
782 782 return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
783 783
784 784 def Double_Gauss_fit_2(self,spc,x,A1,B1,C1,A2,B2,C2,D):
785 785
786 786 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
787 787 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
788 788 return val
789 789
790 790
791 791 y = spc
792 792
793 793 # estimate starting values from the data
794 794 a1 = A1
795 795 b1 = B1
796 796 c1 = C1#numpy.std(spc)
797 797
798 798 a2 = A2#y.max()
799 799 b2 = B2#x[numpy.argmax(y)]
800 800 c2 = C2#numpy.std(spc)
801 801 d = D
802 802
803 803 # fit
804 804 popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
805 805 error = numpy.sqrt(numpy.diag(pcov))
806 806
807 807 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
808 808
809 809 def windowing_double(self,spc,x,A1,B1,C1,A2,B2,C2,D):
810 810 from scipy.optimize import curve_fit,fmin
811 811
812 812 def R_gaussian(x, a, b, c):
813 813 N = int(numpy.shape(x)[0])
814 814 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
815 815 return val
816 816
817 817 def T(x,N):
818 818 T = 1-abs(x)/N
819 819 return T
820 820
821 821 def R_T_spc_fun(x, a1, b1, c1, a2, b2, c2, d):
822 822
823 823 N = int(numpy.shape(x)[0])
824 824
825 825 x_max = x[-1]
826 826
827 827 x_pos = x[1600:]
828 828 x_neg = x[:1600]
829 829
830 830 R_T_neg_1 = R_gaussian(x, a1, b1, c1)[:1600]*T(x_neg,-x[0])
831 831 R_T_pos_1 = R_gaussian(x, a1, b1, c1)[1600:]*T(x_pos,x[-1])
832 832 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
833 833 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
834 834 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
835 835 max_val_1 = numpy.max(R_T_spc_1)
836 836 R_T_spc_1 = R_T_spc_1*a1/max_val_1
837 837
838 838 R_T_neg_2 = R_gaussian(x, a2, b2, c2)[:1600]*T(x_neg,-x[0])
839 839 R_T_pos_2 = R_gaussian(x, a2, b2, c2)[1600:]*T(x_pos,x[-1])
840 840 R_T_sum_2 = R_T_pos_2 + R_T_neg_2
841 841 R_T_spc_2 = numpy.fft.fft(R_T_sum_2).real
842 842 R_T_spc_2 = numpy.fft.fftshift(R_T_spc_2)
843 843 max_val_2 = numpy.max(R_T_spc_2)
844 844 R_T_spc_2 = R_T_spc_2*a2/max_val_2
845 845
846 846 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
847 847 R_T_d_neg = R_T_d[:1600]*T(x_neg,-x[0])
848 848 R_T_d_pos = R_T_d[1600:]*T(x_pos,x[-1])
849 849 R_T_d_sum = R_T_d_pos + R_T_d_neg
850 850 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
851 851 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
852 852
853 853 R_T_final = R_T_spc_1 + R_T_spc_2 + R_T_spc_3
854 854
855 855 return R_T_final
856 856
857 857 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
858 858
859 859 from scipy.stats import norm
860 860 mean,std=norm.fit(spc)
861 861
862 862 # estimate starting values from the data
863 863 a1 = A1
864 864 b1 = B1
865 865 c1 = C1#numpy.std(spc)
866 866
867 867 a2 = A2#y.max()
868 868 b2 = B2#x[numpy.argmax(y)]
869 869 c2 = C2#numpy.std(spc)
870 870 d = D
871 871
872 872 ippSeconds = 250*20*1.e-6/3
873 873
874 874 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
875 875
876 876 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
877 877
878 878 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
879 879 x_freq = numpy.fft.fftshift(x_freq)
880 880
881 881 # define a least squares function to optimize
882 882 def minfunc(params):
883 883 return sum((y-R_T_spc_fun(x_t,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/1)#y**2)
884 884
885 885 # fit
886 886 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],full_output=True)
887 887 popt = popt_full[0]
888 888
889 889 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
890 890
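# --- Illustrative sketch (editorial, not part of the original commit) ---
# Core idea behind windowing_double above (and windowing_single further down):
# the model spectrum is not written directly in the frequency domain; a
# Gaussian autocorrelation is built on the lag axis, multiplied by the
# triangular (Bartlett) window T(tau) = 1 - |tau|/tau_max that a finite data
# length implies, and transformed back with an FFT.  Scalings below follow
# R_gaussian above; the lag axis itself is left to the caller.
import numpy

def windowed_gaussian_spectrum(lags, a, b, c):
    """Spectrum of a Gaussian ACF (amplitude a, Doppler b, width c) seen
    through a triangular lag window."""
    acf = a * numpy.exp(-((lags * c * 4. * numpy.pi) ** 2) / 2.) \
            * numpy.exp(1.j * b * lags * 4. * numpy.pi)
    tri = 1. - numpy.abs(lags) / numpy.max(numpy.abs(lags))   # Bartlett window
    spc = numpy.fft.fftshift(numpy.fft.fft(acf * tri).real)
    return spc * a / numpy.max(spc)                           # peak renormalized to a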
891 891 def Double_Gauss_fit_weight(self,spc,x,A1,B1,C1,A2,B2,C2,D):
892 892 from scipy.optimize import curve_fit,fmin
893 893
894 894 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
895 895 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
896 896 return val
897 897
898 898 y = spc
899 899
900 900 from scipy.stats import norm
901 901 mean,std=norm.fit(spc)
902 902
903 903 # estimate starting values from the data
904 904 a1 = A1
905 905 b1 = B1
906 906 c1 = C1#numpy.std(spc)
907 907
908 908 a2 = A2#y.max()
909 909 b2 = B2#x[numpy.argmax(y)]
910 910 c2 = C2#numpy.std(spc)
911 911 d = D
912 912
913 913 y_clean = signal.medfilt(y)
914 914 # define a least squares function to optimize
915 915 def minfunc(params):
916 916 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/(y_clean**2/1))
917 917
918 918 # fit
919 919 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d], disp =False, full_output=True)
920 920 #print("nIter", popt_full[2])
921 921 popt = popt_full[0]
922 922 #popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
923 923
924 924 #return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
925 925 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
926 926
927 927 def DH_mode(self,spectra,VelRange):
928 928
929 929 from scipy.optimize import curve_fit
930 930
931 931 def double_gauss(x, a1,b1,c1, a2,b2,c2, d):
932 932 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
933 933 return val
934 934
935 935 spec = (spectra.copy()).flatten()
936 936 amp=spec.max()
937 937 params=numpy.array([amp,-400,30,amp/4,-200,150,1.0e7])
938 938 #try:
939 939 popt,pcov=curve_fit(double_gauss, VelRange, spec, p0=params,bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf]))
940 940
941 941 error = numpy.sqrt(numpy.diag(pcov))
942 942 #doppler_2=popt[4]
943 943 #err_2 = numpy.sqrt(pcov[4][4])
944 944
945 945 #except:
946 946 #pass
947 947 #doppler_2=numpy.NAN
948 948 #err_2 = numpy.NAN
949 949
950 950 #return doppler_2, err_2
951 951
952 952 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
953 953
954 954 def Tri_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d):
955 955
956 956 from scipy.optimize import least_squares
957 957
958 958 freq_max = numpy.max(numpy.abs(freq))
959 959 spc_max = numpy.max(spc)
960 960
961 961 def tri_gaussian(x, a1, b1, c1, a2, b2, c2, a3, b3, c3, d):
962 962 z1 = (x-b1)/c1
963 963 z2 = (x-b2)/c2
964 964 z3 = (x-b3)/c3
965 965 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + a3 * numpy.exp(-z3**2/2) + d
966 966 return val
967 967
968 968 from scipy.signal import medfilt
969 969 Nincoh = 20
970 970 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
971 971 c1 = abs(c1)
972 972 c2 = abs(c2)
973 973
974 974 # define a least squares function to optimize
975 975 def lsq_func(params):
976 976 return (spc-tri_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9]))/spcm
977 977
978 978 # fit
979 979 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,600,numpy.inf,numpy.inf])
980 980
981 981 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
982 982 #print(a1,b1,c1,a2,b2,c2,d)
983 983 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,a2/4,-b1,c1,d],x_scale=params_scale,bounds=bounds)
984 984
985 985 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
986 986 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
987 987 A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
988 988 Df = popt.x[9]
989 989
990 990 return A1f, B1f, C1f, A2f, B2f, C2f, Df
991 991
992 992 def Tri_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d): # NOTE: this second definition shadows the three-Gaussian Tri_Marco above and actually fits two Gaussians (duo_gaussian)
993 993
994 994 from scipy.optimize import least_squares
995 995
996 996 freq_max = numpy.max(numpy.abs(freq))
997 997 spc_max = numpy.max(spc)
998 998
999 999 def duo_gaussian(x, a1, b1, c1, a2, b2, c2, d):
1000 1000 z1 = (x-b1)/c1
1001 1001 z2 = (x-b2)/c2
1002 1002 #z3 = (x-b3)/c3
1003 1003 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
1004 1004 return val
1005 1005
1006 1006 from scipy.signal import medfilt
1007 1007 Nincoh = 20
1008 1008 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1009 1009 c1 = abs(c1)
1010 1010 c2 = abs(c2)
1011 1011
1012 1012 # define a least squares function to optimize
1013 1013 def lsq_func(params):
1014 1014 return (spc-duo_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm # duo_gaussian is the model defined above; tri_gaussian is not in scope here
1015 1015
1016 1016 # fit
1017 1017 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1018 1018
1019 1019 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1020 1020 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,d],x_scale=params_scale,bounds=bounds)
1021 1021
1022 1022 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1023 1023 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1024 1024 #A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
1025 1025 Df = popt.x[6] # only 7 parameters are fitted here (indices 0-6); index 9 does not exist
1026 1026
1027 1027 return A1f, B1f, C1f, A2f, B2f, C2f, Df
1028 1028
1029 1029 def double_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, d):
1030 1030 z1 = (x-b1)/c1
1031 1031 z2 = (x-b2)/c2
1032 1032 h2 = 1-k2*z2
1033 1033 h2[h2<0] = 0
1034 1034 y2 = -1/k2*numpy.log(h2)
1035 1035 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1036 1036 return val
1037 1037
1038 1038 def gaussian(self, x, a, b, c, d):
1039 1039 z = (x-b)/c
1040 1040 val = a * numpy.exp(-z**2/2) + d
1041 1041 return val
1042 1042
1043 1043 def double_gaussian(self, x, a1, b1, c1, a2, b2, c2, d):
1044 1044 z1 = (x-b1)/c1
1045 1045 z2 = (x-b2)/c2
1046 1046 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
1047 1047 return val
1048 1048
1049 1049 def double_gaussian_double_skew(self,x, a1, b1, c1, k1, a2, b2, c2, k2, d):
1050 1050
1051 1051 z1 = (x-b1)/c1
1052 1052 h1 = 1-k1*z1
1053 1053 h1[h1<0] = 0
1054 1054 y1 = -1/k1*numpy.log(h1)
1055 1055
1056 1056 z2 = (x-b2)/c2
1057 1057 h2 = 1-k2*z2
1058 1058 h2[h2<0] = 0
1059 1059 y2 = -1/k2*numpy.log(h2)
1060 1060
1061 1061 val = a1 * numpy.exp(-y1**2/2)/(1-k1*z1) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1062 1062 return val
1063 1063
1064 1064 def gaussian_skew(self,x, a2, b2, c2, k2, d):
1065 1065 z2 = (x-b2)/c2
1066 1066 h2 = 1-k2*z2
1067 1067 h2[h2<0] = 0
1068 1068 y2 = -1/k2*numpy.log(h2)
1069 1069 val = a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1070 1070 return val
1071 1071
1072 1072 def triple_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, a3, b3, c3, k3, d):
1073 1073 z1 = (x-b1)/c1
1074 1074 z2 = (x-b2)/c2
1075 1075 z3 = (x-b3)/c3
1076 1076 h2 = 1-k2*z2
1077 1077 h2[h2<0] = 0
1078 1078 y2 = -1/k2*numpy.log(h2)
1079 1079 h3 = 1-k3*z3
1080 1080 h3[h3<0] = 0
1081 1081 y3 = -1/k3*numpy.log(h3)
1082 1082 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + a3 * numpy.exp(-y3**2/2)/(1-k3*z3) + d
1083 1083 return val
1084 1084
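# --- Illustrative sketch (editorial, not part of the original commit) ---
# The skewed models above do not peak at the b parameter when k != 0, so the
# fitting routines below recover the Doppler shift as the frequency of the
# model's maximum (freq[numpy.argmax(...)]) instead of reading b directly.
# Axis and parameter values here are illustrative only.
import numpy

freq = numpy.linspace(-800., 800., 1600)          # synthetic frequency axis
a, b, c, k, d = 1.0, -200.0, 80.0, -0.3, 0.0      # example skew-Gaussian parameters
z = (freq - b) / c
h = 1.0 - k * z
valid = h > 0                                     # the model is defined only where h > 0
y = numpy.zeros_like(freq)
y[valid] = -1.0 / k * numpy.log(h[valid])
model = numpy.full_like(freq, d)
model[valid] = a * numpy.exp(-y[valid] ** 2 / 2.) / h[valid] + d
doppler = freq[numpy.argmax(model)]               # the mode, not simply b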
1085 1085 def Double_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1086 1086
1087 1087 from scipy.optimize import least_squares
1088 1088
1089 1089 freq_max = numpy.max(numpy.abs(freq))
1090 1090 spc_max = numpy.max(spc)
1091 1091
1092 1092 from scipy.signal import medfilt
1093 1093 Nincoh = 20
1094 1094 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
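# Dividing the residuals by spcm in lsq_func below approximates chi-square
# weighting: for an incoherently averaged spectrum the standard deviation of
# each bin scales roughly as (signal level)/sqrt(Nincoh), and the median
# filter gives a smooth estimate of that level.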
1095 1095
1096 1096 # define a least squares function to optimize
1097 1097 def lsq_func(params):
1098 1098 return (spc-self.double_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7]))/spcm
1099 1099
1100 1100 # fit
1101 1101 bounds=([0,-numpy.inf,0,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1102 1102
1103 1103 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max]
1104 1104 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,1.0e7])
1105 1105 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1106 1106 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1107 1107 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1108 1108 Df = popt.x[7]
1109 1109
1110 1110 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1111 1111 doppler = freq[numpy.argmax(aux)]
1112 1112
1113 1113 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, Df, doppler
1114 1114
1115 1115 def Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh,hei):
1116 1116
1117 1117 from scipy.optimize import least_squares
1118 1118
1119 1119 freq_max = numpy.max(numpy.abs(freq))
1120 1120 spc_max = numpy.max(spc)
1121 1121
1122 1122 #from scipy.signal import medfilt
1123 1123 #Nincoh = 20
1124 1124 #Nincoh = 80
1125 1125 Nincoh = Nincoh
1126 1126 #spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1127 1127 spcm = spc/numpy.sqrt(Nincoh)
1128 1128
1129 1129 # define a least squares function to optimize
1130 1130 def lsq_func(params):
1131 1131 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1132 1132
1133 1133 # fit
1134 1134 bounds=([0,-numpy.inf,0,-5,0,-400,0,0,0],[numpy.inf,-200,numpy.inf,5,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1135 1135
1136 1136 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1137 1137
1138 1138 dop1_x0 = freq[numpy.argmax(spc)]
1139 1139 if dop1_x0 < 0:
1140 1140 dop2_x0 = dop1_x0 + 100
1141 1141 if dop1_x0 > 0:
1142 1142 dop2_x0 = dop1_x0 - 100
1143 1143
1144 1144 x0_value = numpy.array([spc_max,dop1_x0,30,-.1,spc_max/4, dop2_x0,150,1,1.0e7])
1145 1145 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1146 1146 J = popt.jac
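# The error bars below follow the usual Gauss-Newton approximation,
# cov ~= inv(J^T J) evaluated at the solution; because the residuals in
# lsq_func are already scaled by spcm (the expected noise level), the square
# roots of the diagonal can be read as approximate 1-sigma parameter errors.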
1147 1147
1148 1148 try:
1149 1149 cov = numpy.linalg.inv(J.T.dot(J))
1150 1150 error = numpy.sqrt(numpy.diagonal(cov))
1151 1151 except:
1152 1152 error = numpy.ones((9))*numpy.NAN
1153 1153
1154 1154 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1155 1155 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1156 1156 Df = popt.x[8]
1157 1157 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1158 1158 doppler1 = freq[numpy.argmax(aux1)]
1159 1159
1160 1160 aux2 = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1161 1161 doppler2 = freq[numpy.argmax(aux2)]
1162 1162 #print("error",error)
1163 1163 #exit(1)
1164 1164
1165 1165
1166 1166 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler1, doppler2, error
1167 1167
1168 1168 def Double_Gauss_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1169 1169
1170 1170 from scipy.optimize import least_squares
1171 1171
1172 1172 freq_max = numpy.max(numpy.abs(freq))
1173 1173 spc_max = numpy.max(spc)
1174 1174
1175 1175 from scipy.signal import medfilt
1176 1176 #Nincoh = 20 # debug values commented out so the Nincoh argument is not silently overridden
1177 1177 #Nincoh = 80
1178 1178 Nincoh = Nincoh
1179 1179 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1180 1180
1181 1181 # define a least squares function to optimize
1182 1182 def lsq_func(params):
1183 1183 return (spc-self.double_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm
1184 1184
1185 1185 # fit
1186 1186 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1187 1187 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1188 1188 #print(a1,b1,c1,a2,b2,c2,k2,d)
1189 1189
1190 1190 dop1_x0 = freq[numpy.argmax(spcm)]
1191 1191
1192 1192 bounds=([0,-numpy.inf,0,0,dop1_x0-50,0,0],[numpy.inf,-300,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1193 1193 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1194 1194 x0_value = numpy.array([spc_max,-400.5,30,spc_max/4,dop1_x0,150,1.0e7])
1195 1195 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1196 1196 J = popt.jac
1197 1197
1198 1198 try:
1199 1199 cov = numpy.linalg.inv(J.T.dot(J))
1200 1200 error = numpy.sqrt(numpy.diagonal(cov))
1201 1201 except:
1202 1202 error = numpy.ones((7))*numpy.NAN
1203 1203
1204 1204 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1205 1205 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1206 1206 Df = popt.x[6]
1207 1207 return A1f, B1f, C1f, A2f, B2f, C2f, Df, error
1208 1208
1209 1209 def Double_Gauss_Double_Skew_fit_weight_bound_with_inputs(self, spc, freq, a1, b1, c1, a2, b2, c2, k2, d, Nincoh=20): # Nincoh (default assumed) avoids referencing dataOut, which is not available in this scope
1210 1210
1211 1211 from scipy.optimize import least_squares
1212 1212
1213 1213 freq_max = numpy.max(numpy.abs(freq))
1214 1214 spc_max = numpy.max(spc)
1215 1215
1216 1216 from scipy.signal import medfilt
1217 1217 # Nincoh comes from the argument list; the original line read Nincoh = dataOut.nIncohInt, but dataOut is not defined in this method
1218 1218 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1219 1219
1220 1220 # define a least squares function to optimize
1221 1221 def lsq_func(params):
1222 1222 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1223 1223
1224 1224
1225 1225 bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1226 1226
1227 1227 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1228 1228
1229 1229 x0_value = numpy.array([a1,b1,c1,-.1,a2,b2,c2,k2,d])
1230 1230
1231 1231 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1232 1232
1233 1233 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1234 1234 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1235 1235 Df = popt.x[8]
1236 1236
1237 1237 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1238 1238 doppler = freq[numpy.argmax(aux)] # freq is the frequency axis used throughout; x was undefined here
1239 1239
1240 1240 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler
1241 1241
1242 1242 def Triple_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1243 1243
1244 1244 from scipy.optimize import least_squares
1245 1245
1246 1246 freq_max = numpy.max(numpy.abs(freq))
1247 1247 spc_max = numpy.max(spc)
1248 1248
1249 1249 from scipy.signal import medfilt
1250 1250 Nincoh = 20
1251 1251 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1252 1252
1253 1253 # define a least squares function to optimize
1254 1254 def lsq_func(params):
1255 1255 return (spc-self.triple_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9],params[10],params[11]))/spcm
1256 1256
1257 1257 # fit
1258 1258 bounds=([0,-numpy.inf,0,0,-400,0,0,0,0,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1259 1259
1260 1260 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1261 1261 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,spc_max/4,400,150,1,1.0e7])
1262 1262 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1263 1263
1264 1264 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1265 1265 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1266 1266 A3f = popt.x[7]; B3f = popt.x[8]; C3f = popt.x[9]; K3f = popt.x[10]
1267 1267 Df = popt.x[11]
1268 1268
1269 1269 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1270 1270 doppler = freq[numpy.argmax(aux)]
1271 1271
1272 1272 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, A3f, B3f, C3f, K3f, Df, doppler
1273 1273
1274 1274 def CEEJ_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1275 1275
1276 1276 from scipy.optimize import least_squares
1277 1277
1278 1278 freq_max = numpy.max(numpy.abs(freq))
1279 1279 spc_max = numpy.max(spc)
1280 1280
1281 1281 from scipy.signal import medfilt
1282 1282 #Nincoh = 20 # debug values commented out so the Nincoh argument is not silently overridden
1283 1283 #Nincoh = 80
1284 1284 Nincoh = Nincoh
1285 1285 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1286 1286
1287 1287 # define a least squares function to optimize
1288 1288 def lsq_func(params):
1289 1289 return (spc-self.gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4]))#/spcm
1290 1290
1291 1291
1292 1292 bounds=([0,0,0,-numpy.inf,0],[numpy.inf,numpy.inf,numpy.inf,0,numpy.inf])
1293 1293
1294 1294 params_scale = [spc_max,freq_max,freq_max,1,spc_max]
1295 1295
1296 1296 x0_value = numpy.array([spc_max,freq[numpy.argmax(spc)],30,-.1,numpy.mean(spc[:50])])
1297 1297
1298 1298 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1299 1299
1300 1300 J = popt.jac
1301 1301
1302 1302 try:
1303 1303 error = numpy.ones((9))*numpy.NAN
1304 1304 cov = numpy.linalg.inv(J.T.dot(J))
1305 1305 error[:4] = numpy.sqrt(numpy.diagonal(cov))[:4]
1306 1306 error[-1] = numpy.sqrt(numpy.diagonal(cov))[-1]
1307 1307 except:
1308 1308 error = numpy.ones((9))*numpy.NAN
1309 1309
1310 1310 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1311 1311 Df = popt.x[4]
1312 1312
1313 1313 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1314 1314 doppler1 = freq[numpy.argmax(aux1)]
1315 1315 #print("CEEJ ERROR:",error)
1316 1316
1317 1317 return A1f, B1f, C1f, K1f, numpy.NAN, numpy.NAN, numpy.NAN, numpy.NAN, Df, doppler1, numpy.NAN, error
1318 1318
1319 1319 def CEEJ_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1320 1320
1321 1321 from scipy.optimize import least_squares
1322 1322
1323 1323 freq_max = numpy.max(numpy.abs(freq))
1324 1324 spc_max = numpy.max(spc)
1325 1325
1326 1326 from scipy.signal import medfilt
1327 1327 #Nincoh = 20 # debug values commented out so the Nincoh argument is not silently overridden
1328 1328 #Nincoh = 80
1329 1329 Nincoh = Nincoh
1330 1330 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1331 1331
1332 1332 # define a least squares function to optimize
1333 1333 def lsq_func(params):
1334 1334 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))#/spcm
1335 1335
1336 1336
1337 1337 bounds=([0,0,0,0],[numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1338 1338
1339 1339 params_scale = [spc_max,freq_max,freq_max,spc_max]
1340 1340
1341 1341 x0_value = numpy.array([spc_max,freq[numpy.argmax(spcm)],30,numpy.mean(spc[:50])])
1342 1342
1343 1343 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1344 1344
1345 1345 J = popt.jac
1346 1346
1347 1347 try:
1348 1348 error = numpy.ones((4))*numpy.NAN
1349 1349 cov = numpy.linalg.inv(J.T.dot(J))
1350 1350 error = numpy.sqrt(numpy.diagonal(cov))
1351 1351 except:
1352 1352 error = numpy.ones((4))*numpy.NAN
1353 1353
1354 1354 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1355 1355 Df = popt.x[3]
1356 1356
1357 1357 return A1f, B1f, C1f, Df, error
1358 1358
1359 1359 def Simple_fit_bound(self,spc,freq,Nincoh):
1360 1360
from scipy.optimize import least_squares # needed here; the other fit helpers import it locally
1361 1361 freq_max = numpy.max(numpy.abs(freq))
1362 1362 spc_max = numpy.max(spc)
1363 1363
1364 1364 Nincoh = Nincoh
1365 1365
1366 1366 def lsq_func(params):
1367 1367 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))
1368 1368
1369 1369 bounds=([0,-50,0,0],[numpy.inf,+50,numpy.inf,numpy.inf])
1370 1370
1371 1371 params_scale = [spc_max,freq_max,freq_max,spc_max]
1372 1372
1373 1373 x0_value = numpy.array([spc_max,-20.5,5,1.0e7])
1374 1374
1375 1375 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1376 1376
1377 1377 J = popt.jac
1378 1378
1379 1379 try:
1380 1380 cov = numpy.linalg.inv(J.T.dot(J))
1381 1381 error = numpy.sqrt(numpy.diagonal(cov))
1382 1382 except:
1383 1383 error = numpy.ones((4))*numpy.NAN
1384 1384
1385 1385 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1386 1386 Df = popt.x[3]
1387 1387
1388 1388 return A1f, B1f, C1f, Df, error
1389 1389
1390 1390 def clean_outliers(self,param):
1391 1391
1392 1392 threshold = 700
1393 1393
1394 1394 param = numpy.where(param < -threshold, numpy.nan, param)
1395 1395 param = numpy.where(param > +threshold, numpy.nan, param)
1396 1396
1397 1397 return param
1398 1398
1399 1399 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1400 1400 from scipy.optimize import curve_fit,fmin
1401 1401
1402 1402 def R_gaussian(x, a, b, c):
1403 1403 N = int(numpy.shape(x)[0])
1404 1404 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1405 1405 return val
1406 1406
1407 1407 def T(x,N):
1408 1408 T = 1-abs(x)/N
1409 1409 return T
1410 1410
1411 1411 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
1412 1412
1413 1413 N = int(numpy.shape(x)[0])
1414 1414
1415 1415 x_max = x[-1]
1416 1416
1417 1417 x_pos = x[int(nFFTPoints/2):]
1418 1418 x_neg = x[:int(nFFTPoints/2)]
1419 1419
1420 1420 R_T_neg_1 = R_gaussian(x, a, b, c)[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1421 1421 R_T_pos_1 = R_gaussian(x, a, b, c)[int(nFFTPoints/2):]*T(x_pos,x[-1])
1422 1422 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1423 1423 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1424 1424 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1425 1425 max_val_1 = numpy.max(R_T_spc_1)
1426 1426 R_T_spc_1 = R_T_spc_1*a/max_val_1
1427 1427
1428 1428 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1429 1429 R_T_d_neg = R_T_d[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1430 1430 R_T_d_pos = R_T_d[int(nFFTPoints/2):]*T(x_pos,x[-1])
1431 1431 R_T_d_sum = R_T_d_pos + R_T_d_neg
1432 1432 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1433 1433 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1434 1434
1435 1435 R_T_final = R_T_spc_1 + R_T_spc_3
1436 1436
1437 1437 return R_T_final
1438 1438
1439 1439 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1440 1440
1441 1441 from scipy.stats import norm
1442 1442 mean,std=norm.fit(spc)
1443 1443
1444 1444 # estimate starting values from the data
1445 1445 a = A
1446 1446 b = B
1447 1447 c = C#numpy.std(spc)
1448 1448 d = D
1449 1449 '''
1450 1450 ippSeconds = 250*20*1.e-6/3
1451 1451
1452 1452 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
1453 1453
1454 1454 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1455 1455
1456 1456 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1457 1457 x_freq = numpy.fft.fftshift(x_freq)
1458 1458 '''
1459 1459 # define a least squares function to optimize
1460 1460 def minfunc(params):
1461 1461 return sum((y-R_T_spc_fun(x,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2) # R_T_spc_fun takes (a, b, c, d, nFFTPoints); only four parameters are optimized here
1462 1462
1463 1463 # fit
1464 1464
1465 1465 popt_full = fmin(minfunc,[a,b,c,d],full_output=True)
1466 1466 #print("nIter", popt_full[2])
1467 1467 popt = popt_full[0]
1468 1468
1469 1469 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1470 1470 return popt[0], popt[1], popt[2], popt[3]
1471 1471
1472 1472 def run(self, dataOut, mode = 0, Hmin1 = None, Hmax1 = None, Hmin2 = None, Hmax2 = None, Dop = 'Shift'):
1473 1473
1474 1474 pwcode = 1
1475 1475
1476 1476 if dataOut.flagDecodeData:
1477 1477 pwcode = numpy.sum(dataOut.code[0]**2)
1478 1478 #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
1479 1479 normFactor = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * pwcode * dataOut.windowOfFilter
1480 1480 factor = normFactor
1481 1481 z = dataOut.data_spc / factor
1482 1482 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
1483 1483 dataOut.power = numpy.average(z, axis=1)
1484 1484 dataOut.powerdB = 10 * numpy.log10(dataOut.power)
1485 1485
1486 1486 x = dataOut.getVelRange(0)
1487 1487
1488 1488 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1489 1489 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1490 1490 dataOut.dplr_2_u = numpy.ones((1,1,dataOut.nHeights))*numpy.NAN
1491 1491
1492 1492 if mode == 6:
1493 1493 dataOut.Oblique_params = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1494 1494 elif mode == 7:
1495 1495 dataOut.Oblique_params = numpy.ones((1,13,dataOut.nHeights))*numpy.NAN
1496 1496 elif mode == 8:
1497 1497 dataOut.Oblique_params = numpy.ones((1,10,dataOut.nHeights))*numpy.NAN
1498 1498 elif mode == 9:
1499 1499 dataOut.Oblique_params = numpy.ones((1,11,dataOut.nHeights))*numpy.NAN
1500 1500 dataOut.Oblique_param_errors = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1501 1501 elif mode == 11:
1502 1502 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1503 1503 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1504 1504 elif mode == 10: #150 km
1505 1505 dataOut.Oblique_params = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1506 1506 dataOut.Oblique_param_errors = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1507 1507 dataOut.snr_log10 = numpy.ones((1,dataOut.nHeights))*numpy.NAN
1508 1508
1509 1509 dataOut.VelRange = x
1510 1510
1511 1511
1512 1512
1513 1513 #l1=range(22,36) #+62
1514 1514 #l1=range(32,36)
1515 1515 #l2=range(58,99) #+62
1516 1516
1517 1517 #if Hmin1 == None or Hmax1 == None or Hmin2 == None or Hmax2 == None:
1518 1518
1519 1519 minHei1 = 105.
1520 1520 maxHei1 = 122.5
1521 1521 maxHei1 = 130.5
1522 1522
1523 1523 if mode == 10: #150 km
1524 1524 minHei1 = 100
1525 1525 maxHei1 = 100
1526 1526
1527 1527 inda1 = numpy.where(dataOut.heightList >= minHei1)
1528 1528 indb1 = numpy.where(dataOut.heightList <= maxHei1)
1529 1529
1530 1530 minIndex1 = inda1[0][0]
1531 1531 maxIndex1 = indb1[0][-1]
1532 1532
1533 1533 minHei2 = 150.
1534 1534 maxHei2 = 201.25
1535 1535 maxHei2 = 225.3
1536 1536
1537 1537 if mode == 10: #150 km
1538 1538 minHei2 = 110
1539 1539 maxHei2 = 165
1540 1540
1541 1541 inda2 = numpy.where(dataOut.heightList >= minHei2)
1542 1542 indb2 = numpy.where(dataOut.heightList <= maxHei2)
1543 1543
1544 1544 minIndex2 = inda2[0][0]
1545 1545 maxIndex2 = indb2[0][-1]
1546 1546
1547 1547 l1=range(minIndex1,maxIndex1)
1548 1548 l2=range(minIndex2,maxIndex2)
1549 1549
1550 1550 if mode == 4:
1551 1551 '''
1552 1552 for ind in range(dataOut.nHeights):
1553 1553 if(dataOut.heightList[ind]>=168 and dataOut.heightList[ind]<188):
1554 1554 try:
1555 1555 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
1556 1556 except:
1557 1557 pass
1558 1558 '''
1559 1559 for ind in itertools.chain(l1, l2):
1560 1560
1561 1561 try:
1562 1562 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
1563 1563 dataOut.dplr_2_u[0,0,ind] = dataOut.Oblique_params[0,4,ind]/numpy.sin(numpy.arccos(102/dataOut.heightList[ind]))
1564 1564 except:
1565 1565 pass
1566 1566
1567 1567 else:
1568 1568 for hei in itertools.chain(l1, l2):
1569 1569 if numpy.isnan(dataOut.snl[0,hei]) or dataOut.snl[0,hei]<.0:
1570 1570
1571 1571 continue #Avoids the analysis when there is only noise
1572 1572
1573 1573 try:
1574 1574 spc = dataOut.data_spc[0,:,hei]
1575 1575
1576 1576 if mode == 6: #Skew Weighted Bounded
1577 1577 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei] = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1578 1578 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,8,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1579 1579
1580 1580 elif mode == 7: #Triple Skew Weighted Bounded
1581 1581 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_params[0,11,hei],dataOut.Oblique_params[0,12,hei] = self.Triple_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1582 1582 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,12,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1583 1583
1584 1584 elif mode == 8: #Double Skewed Weighted Bounded with inputs
1585 1585 a1, b1, c1, a2, b2, c2, k2, d, dopp = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1586 1586 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei] = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x, a1, b1, c1, a2, b2, c2, k2, d)
1587 1587 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,9,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1588 1588
1589 1589 elif mode == 9: #Double Skewed Weighted Bounded no inputs
1590 1590 #if numpy.max(spc) <= 0:
1591 1591 from scipy.signal import medfilt
1592 1592 spcm = medfilt(spc,11)
1593 1593 if x[numpy.argmax(spcm)] <= 0:
1594 1594 #print("EEJ", dataOut.heightList[hei], hei)
1595 1595 #if hei != 70:
1596 1596 #continue
1597 1597 #else:
1598 1598 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt,dataOut.heightList[hei])
1599 1599 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1600 1600 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1601 1601 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1602 1602
1603 1603 else:
1604 1604 #print("CEEJ")
1605 1605 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt)
1606 1606 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1607 1607 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1608 1608 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1609 1609 elif mode == 11: #Double Weighted Bounded no inputs
1610 1610 #if numpy.max(spc) <= 0:
1611 1611 from scipy.signal import medfilt
1612 1612 spcm = medfilt(spc,11)
1613 1613
1614 1614 if x[numpy.argmax(spcm)] <= 0:
1615 1615 #print("EEJ")
1616 1616 #print("EEJ",dataOut.heightList[hei])
1617 1617 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1618 1618 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1619 1619 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1620 1620 else:
1621 1621 #print("CEEJ",dataOut.heightList[hei])
1622 1622 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1623 1623
1624 1624 elif mode == 10: #150km
1625 1625 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Simple_fit_bound(spc,x,dataOut.nIncohInt)
1626 1626 snr = (dataOut.power[0,hei]*factor - dataOut.Oblique_params[0,3,hei])/dataOut.Oblique_params[0,3,hei]
1627 1627 dataOut.snr_log10[0,hei] = numpy.log10(snr)
1628 1628
1629 1629 else:
1630 1630 spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first')
1631 1631
1632 1632 spc_diff = spc - spc_fit
1633 1633 spc_diff[spc_diff < 0] = 0
1634 1634
1635 1635 spc_fit_diff, A2, B2, C2, D2 = self.Gauss_fit_2(spc_diff,x,'second')
1636 1636
1637 1637 D = (D1+D2)
1638 1638
1639 1639 if mode == 0: #Double Fit
1640 1640 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,0,hei],dataOut.Oblique_param_errors[0,1,hei],dataOut.Oblique_param_errors[0,2,hei],dataOut.Oblique_param_errors[0,3,hei],dataOut.Oblique_param_errors[0,4,hei],dataOut.Oblique_param_errors[0,5,hei],dataOut.Oblique_param_errors[0,6,hei] = self.Double_Gauss_fit_2(spc,x,A1,B1,C1,A2,B2,C2,D)
1641 1641 #spc_double_fit,dataOut.Oblique_params = self.Double_Gauss_fit(spc,x,A1,B1,C1,A2,B2,C2,D)
1642 1642
1643 1643 elif mode == 1: #Double Fit Windowed
1644 1644 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.windowing_double(spc,dataOut.getFreqRange(0),A1,B1,C1,A2,B2,C2,D)
1645 1645
1646 1646 elif mode == 2: #Double Fit Weight
1647 1647 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1648 1648
1649 1649 elif mode == 3: #Simple Fit
1650 1650 dataOut.Oblique_params[0,0,hei] = A1
1651 1651 dataOut.Oblique_params[0,1,hei] = B1
1652 1652 dataOut.Oblique_params[0,2,hei] = C1
1653 1653 dataOut.Oblique_params[0,3,hei] = A2
1654 1654 dataOut.Oblique_params[0,4,hei] = B2
1655 1655 dataOut.Oblique_params[0,5,hei] = C2
1656 1656 dataOut.Oblique_params[0,6,hei] = D
1657 1657
1658 1658 elif mode == 5: #Triple Fit Weight
1659 1659 if hei in l1:
1660 1660 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.duo_Marco(spc,x,A1,B1,C1,A2,B2,C2,D)
1661 1661 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1662 1662 #print(dataOut.Oblique_params[0,0,hei])
1663 1663 #print(dataOut.dplr_2_u[0,0,hei])
1664 1664 else:
1665 1665 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1666 1666 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1667 1667
1668 1668
1669 1669 except:
1670 1670 ###dataOut.Oblique_params[0,:,hei] = dataOut.Oblique_params[0,:,hei]*numpy.NAN
1671 1671 pass
1672 1672
1673 1673 #exit(1)
1674 1674 dataOut.paramInterval = dataOut.nProfiles*dataOut.nCohInt*dataOut.ippSeconds
1675 1675 dataOut.lat=-11.95
1676 1676 dataOut.lon=-76.87
1677 1677 '''
1678 1678 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params<-700, numpy.nan, dop_t1)
1679 1679 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params>+700, numpy.nan, dop_t1)
1680 1680 The amplitudes should be excluded here
1681 1681 '''
1682 1682 if mode == 9: #Double Skew Gaussian
1683 1683 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1684 1684 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1685 1685 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1686 1686 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1687 1687 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1688 1688 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,6,:]
1689 1689 if Dop == 'Shift':
1690 1690 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1691 1691 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1692 1692 elif Dop == 'Max':
1693 1693 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1694 1694 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1695 1695
1696 1696 dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:] #Is this actually the error?
1697 1697 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1698 1698 dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,5,:] #Is this actually the error?
1699 1699 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,6,:]
1700 1700
1701 1701 elif mode == 11: #Double Gaussian
1702 1702 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:]
1703 1703 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1704 1704 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,4,:]
1705 1705 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,5,:]
1706 1706
1707 1707 dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:]
1708 1708 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1709 1709 dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,4,:]
1710 1710 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,5,:]
1711 1711
1712 1712 #print("Before: ", dataOut.Dop_EEJ_T2)
1713 1713 dataOut.Spec_W_T1 = self.clean_outliers(dataOut.Spec_W_T1)
1714 1714 dataOut.Spec_W_T2 = self.clean_outliers(dataOut.Spec_W_T2)
1715 1715 dataOut.Dop_EEJ_T1 = self.clean_outliers(dataOut.Dop_EEJ_T1)
1716 1716 dataOut.Dop_EEJ_T2 = self.clean_outliers(dataOut.Dop_EEJ_T2)
1717 1717 #print("After: ", dataOut.Dop_EEJ_T2)
1718 1718 dataOut.Err_Spec_W_T1 = self.clean_outliers(dataOut.Err_Spec_W_T1)
1719 1719 dataOut.Err_Spec_W_T2 = self.clean_outliers(dataOut.Err_Spec_W_T2)
1720 1720 dataOut.Err_Dop_EEJ_T1 = self.clean_outliers(dataOut.Err_Dop_EEJ_T1)
1721 1721 dataOut.Err_Dop_EEJ_T2 = self.clean_outliers(dataOut.Err_Dop_EEJ_T2)
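# clean_outliers() (defined earlier in this class) replaces values whose
# magnitude exceeds its threshold with NaN, so the Doppler/width estimates and
# their errors are censored consistently before being returned.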
1722 1722 #print("Before data_snr: ", dataOut.data_snr)
1723 1723 #dataOut.data_snr = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.data_snr)
1724 1724 dataOut.snl = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.snl)
1725 1725
1726 1726 #print("After data_snr: ", dataOut.data_snr)
1727 1727 dataOut.mode = mode
1728 1728 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.Dop_EEJ_T1)) #If every value is NaN, processing does not continue
1729 1729 ###dataOut.flagNoData = False #Uncomment only for plotting; otherwise keep it commented (for saving)
1730 1730
1731 1731 return dataOut
1732 1732
1733 1733 class Gaussian_Windowed(Operation):
1734 1734 '''
1735 1735 Written by R. Flores
1736 1736 '''
1737 1737 def __init__(self):
1738 1738 Operation.__init__(self)
1739 1739
1740 1740 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1741 1741 from scipy.optimize import curve_fit,fmin
1742 1742
1743 1743 def gaussian(x, a, b, c, d):
1744 1744 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
1745 1745 return val
1746 1746
1747 1747 def R_gaussian(x, a, b, c):
1748 1748 N = int(numpy.shape(x)[0])
1749 1749 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1750 1750 return val
1751 1751
1752 1752 def T(x,N):
1753 1753 T = 1-abs(x)/N
1754 1754 return T
1755 1755
1756 1756 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
1757 1757
1758 1758 N = int(numpy.shape(x)[0])
1759 1759
1760 1760 x_max = x[-1]
1761 1761
1762 1762 x_pos = x[nFFTPoints:]
1763 1763 x_neg = x[:nFFTPoints]
1764 1764 #print([int(nFFTPoints/2))
1765 1765 #print("x: ", x)
1766 1766 #print("x_neg: ", x_neg)
1767 1767 #print("x_pos: ", x_pos)
1768 1768
1769 1769
1770 1770 R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0])
1771 1771 R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1])
1772 1772 #print(T(x_pos,x[-1]),x_pos,x[-1])
1773 1773 #print(R_T_neg_1.shape,R_T_pos_1.shape)
1774 1774 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1775 1775 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1776 1776 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1777 1777 max_val_1 = numpy.max(R_T_spc_1)
1778 1778 R_T_spc_1 = R_T_spc_1*a/max_val_1
1779 1779
1780 1780 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1781 1781 R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0])
1782 1782 R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1])
1783 1783 R_T_d_sum = R_T_d_pos + R_T_d_neg
1784 1784 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1785 1785 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1786 1786
1787 1787 R_T_final = R_T_spc_1 + R_T_spc_3
1788 1788
1789 1789 return R_T_final
1790 1790
1791 1791 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1792 1792
1793 1793 from scipy.stats import norm
1794 1794 mean,std=norm.fit(spc)
1795 1795
1796 1796 # estimate starting values from the data
1797 1797 a = A
1798 1798 b = B
1799 1799 c = C#numpy.std(spc)
1800 1800 d = D
1801 1801 #'''
1802 1802 #ippSeconds = 250*20*1.e-6/3
1803 1803
1804 1804 #x_t = ippSeconds * (numpy.arange(nFFTPoints) - nFFTPoints / 2.)
1805 1805
1806 1806 #x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1807 1807 #print("x_t: ", x_t)
1808 1808 #print("nFFTPoints: ", nFFTPoints)
1809 1809 x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints))
1810 1810 #print("x_vel: ", x_vel)
1811 1811 #x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1812 1812 #x_freq = numpy.fft.fftshift(x_freq)
1813 1813 #'''
1814 1814 # define a least squares function to optimize
1815 1815 def minfunc(params):
1816 1816 #print("y.shape: ", numpy.shape(y))
1817 1817 return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
1818 1818
1819 1819 # fit
1820 1820
1821 1821 popt_full = fmin(minfunc,[a,b,c,d], disp=False)
1822 1822 #print("nIter", popt_full[2])
1823 1823 popt = popt_full#[0]
1824 1824
1825 1825 fun = gaussian(x, popt[0], popt[1], popt[2], popt[3])
1826 1826
1827 1827 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1828 1828 return fun, popt[0], popt[1], popt[2], popt[3]
1829 1829
1830 1830 def run(self, dataOut):
1831 1831
1832 1832 from scipy.signal import medfilt
1833 1833 import matplotlib.pyplot as plt
1834 1834 dataOut.moments = numpy.ones((dataOut.nChannels,4,dataOut.nHeights))*numpy.NAN
1835 1835 dataOut.VelRange = dataOut.getVelRange(0)
1836 1836 for nChannel in range(dataOut.nChannels):
1837 1837 for hei in range(dataOut.heightList.shape[0]):
1838 1838 #print("ipp: ", dataOut.ippSeconds)
1839 1839 spc = numpy.copy(dataOut.data_spc[nChannel,:,hei])
1840 1840
1841 1841 #print(VelRange)
1842 1842 #print(dataOut.getFreqRange(64))
1843 1843 spcm = medfilt(spc,11)
1844 1844 spc_max = numpy.max(spcm)
1845 1845 dop1_x0 = dataOut.VelRange[numpy.argmax(spcm)]
1846 1846 D = numpy.min(spcm)
1847 1847
1848 1848 fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints)
1849 1849 dataOut.moments[nChannel,0,hei] = A
1850 1850 dataOut.moments[nChannel,1,hei] = B
1851 1851 dataOut.moments[nChannel,2,hei] = C
1852 1852 dataOut.moments[nChannel,3,hei] = D
1853 1853 '''
1854 1854 plt.figure()
1855 1855 plt.plot(VelRange,spc,marker='*',linestyle='')
1856 1856 plt.plot(VelRange,fun)
1857 1857 plt.title(dataOut.heightList[hei])
1858 1858 plt.show()
1859 1859 '''
1860 1860
1861 1861 return dataOut
1862 1862
1863 1863 class PrecipitationProc(Operation):
1864 1864
1865 1865 '''
1866 1866 Operation that estimates the reflectivity factor (Z) and the rainfall rate (R)
1867 1867
1868 1868 Input:
1869 1869 self.dataOut.data_pre : SelfSpectra
1870 1870
1871 1871 Output:
1872 1872
1873 1873 self.dataOut.data_output : Reflectivity factor, rainfall Rate
1874 1874
1875 1875
1876 1876 Parameters affected:
1877 1877 '''
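# Illustrative sketch (comments only, not executed): the retrieval chain used in
# run() for a single velocity bin, with assumed values for the fall speed and
# the altitude. The formulas mirror the ones implemented below.
#
#   import numpy
#   v = 5.0                                      # measured fall speed [m/s] (assumed)
#   z = 3350.0 + 2000.0                          # site altitude + range [m] (assumed)
#   delv_z = 1 + 3.68e-5*z + 1.71e-9*z**2        # air-density correction, Foote and Du Toit (1969)
#   D = -1.667*numpy.log(0.9369 - 0.097087*v/delv_z)   # drop diameter [mm], valid for D >= 0.16 mm
#   # ETA = RadarConstant*ExpConstant*Pr*r**2, DSD = ETA/(Km2*(D*1e-3)**6*numpy.pi**5/Lambda**4)
#   # and finally RR = 0.0006*numpy.pi*sum(D**3*DSD*v)  [mm/h], as computed in run().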
1878 1878
1879 1879 def __init__(self):
1880 1880 Operation.__init__(self)
1881 1881 self.i=0
1882 1882
1883 1883 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
1884 1884 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30,
1885 1885 channel=None):
1886 1886
1887 1887 # print ('Entering PrecipitationProc ... ')
1888 1888
1889 1889 if radar == "MIRA35C" :
1890 1890
1891 1891 self.spc = dataOut.data_pre[0].copy()
1892 1892 self.Num_Hei = self.spc.shape[2]
1893 1893 self.Num_Bin = self.spc.shape[1]
1894 1894 self.Num_Chn = self.spc.shape[0]
1895 1895 Ze = self.dBZeMODE2(dataOut)
1896 1896
1897 1897 else:
1898 1898
1899 1899 self.spc = dataOut.data_pre[0].copy()
1900 1900
1901 1901 #NOTE: the TX pulse range must be removed
1902 1902 self.spc[:,:,0:7]= numpy.NaN
1903 1903
1904 1904 self.Num_Hei = self.spc.shape[2]
1905 1905 self.Num_Bin = self.spc.shape[1]
1906 1906 self.Num_Chn = self.spc.shape[0]
1907 1907
1908 1908 VelRange = dataOut.spc_range[2]
1909 1909
1910 1910 ''' Obtain the radar constant '''
1911 1911
1912 1912 self.Pt = Pt
1913 1913 self.Gt = Gt
1914 1914 self.Gr = Gr
1915 1915 self.Lambda = Lambda
1916 1916 self.aL = aL
1917 1917 self.tauW = tauW
1918 1918 self.ThetaT = ThetaT
1919 1919 self.ThetaR = ThetaR
1920 1920 self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
1921 1921 self.lt = 10**(1.67/10) # Tx cable loss 1.67 dB
1922 1922 self.lr = 10**(5.73/10) # Rx cable loss 5.73 dB
1923 1923
1924 1924 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
1925 1925 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
1926 1926 RadarConstant = 10e-26 * Numerator / Denominator #
1927 1927 ExpConstant = 10**(40/10) #Experimental constant
1928 1928
1929 1929 SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
1930 1930 for i in range(self.Num_Chn):
1931 1931 SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
1932 1932 SignalPower[numpy.where(SignalPower < 0)] = 1e-20
1933 1933
1934 1934 if channel is None:
1935 1935 SPCmean = numpy.mean(SignalPower, 0)
1936 1936 else:
1937 1937 SPCmean = SignalPower[channel]
1938 1938 Pr = SPCmean[:,:]/dataOut.normFactor
1939 1939
1940 1940 # Declaring auxiliary variables
1941 1941 Range = dataOut.heightList*1000. #Range in m
1942 1942 # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
1943 1943 rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
1944 1944 zMtrx = rMtrx+Altitude
1945 1945 # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
1946 1946 VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))
1947 1947
1948 1948 # height dependence of air density, Foote and Du Toit (1969)
1949 1949 delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
1950 1950 VMtrx = VelMtrx / delv_z #Normalized velocity
1951 1951 VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
1952 1952 # Diameter is related to the fall speed of falling drops
1953 1953 D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
1954 1954 # Only valid for D>= 0.16 mm
1955 1955 D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN
1956 1956
1957 1957 #Calculate Radar Reflectivity ETAn
1958 1958 ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
1959 1959 ETAd = ETAn * 6.18 * exp( -0.6 * D_Vz ) * delv_z
1960 1960 # Radar Cross Section
1961 1961 sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
1962 1962 # Drop Size Distribution
1963 1963 DSD = ETAn / sigmaD
1964 1964 # Equivalent reflectivity
1965 1965 Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
1966 1966 Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
1967 1967 # RainFall Rate
1968 1968 RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr
1969 1969
1970 1970 # Censoring the data
1971 1971 # Removing data below the SNR threshold; the SNR should be considered per channel
1972 1972 SNRth = 10**(SNRdBlimit/10) #-30dB
1973 1973 novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition. Maybe OR condition better
1974 1974 W = numpy.nanmean(dataOut.data_dop,0)
1975 1975 W[novalid] = numpy.NaN
1976 1976 Ze_org[novalid] = numpy.NaN
1977 1977 RR[novalid] = numpy.NaN
1978 1978
1979 1979 dataOut.data_output = RR[8]
1980 1980 dataOut.data_param = numpy.ones([3,self.Num_Hei])
1981 1981 dataOut.channelList = [0,1,2]
1982 1982
1983 1983 dataOut.data_param[0]=10*numpy.log10(Ze_org)
1984 1984 dataOut.data_param[1]=-W
1985 1985 dataOut.data_param[2]=RR
1986 1986
1987 1987 # print ('Leaving PrecipitationProc ... ')
1988 1988 return dataOut
1989 1989
1990 1990 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
1991 1991
1992 1992 NPW = dataOut.NPW
1993 1993 COFA = dataOut.COFA
1994 1994
1995 1995 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
1996 1996 RadarConst = dataOut.RadarConst
1997 1997 #frequency = 34.85*10**9
1998 1998
1999 1999 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
2000 2000 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
2001 2001
2002 2002 ETA = numpy.sum(SNR,1)
2003 2003
2004 2004 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
2005 2005
2006 2006 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
2007 2007
2008 2008 for r in range(self.Num_Hei):
2009 2009
2010 2010 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
2011 2011 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
2012 2012
2013 2013 return Ze
2014 2014
2015 2015 # def GetRadarConstant(self):
2016 2016 #
2017 2017 # """
2018 2018 # Constants:
2019 2019 #
2020 2020 # Pt: Transmission Power dB 5kW 5000
2021 2021 # Gt: Transmission Gain dB 24.7 dB 295.1209
2022 2022 # Gr: Reception Gain dB 18.5 dB 70.7945
2023 2023 # Lambda: Wavelength m 0.6741 m 0.6741
2024 2024 # aL: Attenuation loses dB 4dB 2.5118
2025 2025 # tauW: Width of transmission pulse s 4us 4e-6
2026 2026 # ThetaT: Transmission antenna beam angle rad 0.1656317 rad 0.1656317
2027 2027 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
2028 2028 #
2029 2029 # """
2030 2030 #
2031 2031 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
2032 2032 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * TauW * numpy.pi * ThetaT * ThetaR)
2033 2033 # RadarConstant = Numerator / Denominator
2034 2034 #
2035 2035 # return RadarConstant
2036 2036
2037 2037
2038 2038 class FullSpectralAnalysis(Operation):
2039 2039
2040 2040 """
2041 2041 Function that implements the Full Spectral Analysis technique.
2042 2042
2043 2043 Input:
2044 2044 self.dataOut.data_pre : SelfSpectra and CrossSpectra data
2045 2045 self.dataOut.groupList : Pairlist of channels
2046 2046 self.dataOut.ChanDist : Physical distance between receivers
2047 2047
2048 2048
2049 2049 Output:
2050 2050
2051 2051 self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind
2052 2052
2053 2053
2054 2054 Parameters affected: Winds, height range, SNR
2055 2055
2056 2056 """
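# Configuration sketch (assumed example values): when dataOut.ChanDist is not
# provided, the pairwise receiver separations can be passed directly and are
# assembled below into
#
#   ChanDist = numpy.array([[Xi01, Eta01], [Xi02, Eta02], [Xi12, Eta12]])
#
# where Xi_ij and Eta_ij are the two orthogonal components of the separation
# between the receivers of each channel pair.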
2057 2057 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,
2058 2058 minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
2059 2059
2060 2060 spc = dataOut.data_pre[0].copy()
2061 2061 cspc = dataOut.data_pre[1]
2062 2062 nHeights = spc.shape[2]
2063 2063
2064 2064 # first_height = 0.75 #km (ref: data header 20170822)
2065 2065 # resolution_height = 0.075 #km
2066 2066 '''
2067 2067 finding height range. check this when radar parameters are changed!
2068 2068 '''
2069 2069 if maxheight is not None:
2070 2070 # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
2071 2071 range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
2072 2072 else:
2073 2073 range_max = nHeights
2074 2074 if minheight is not None:
2075 2075 # range_min = int((minheight - first_height) / resolution_height) # theoretical
2076 2076 range_min = int(13.26 * minheight - 5) # empirical, works better
2077 2077 if range_min < 0:
2078 2078 range_min = 0
2079 2079 else:
2080 2080 range_min = 0
2081 2081
2082 2082 pairsList = dataOut.groupList
2083 2083 if dataOut.ChanDist is not None :
2084 2084 ChanDist = dataOut.ChanDist
2085 2085 else:
2086 2086 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
2087 2087
2088 2088 # 4 variables: zonal, meridional, vertical, and average SNR
2089 2089 data_param = numpy.zeros([4,nHeights]) * numpy.NaN
2090 2090 velocityX = numpy.zeros([nHeights]) * numpy.NaN
2091 2091 velocityY = numpy.zeros([nHeights]) * numpy.NaN
2092 2092 velocityZ = numpy.zeros([nHeights]) * numpy.NaN
2093 2093
2094 2094 dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))
2095 2095
2096 2096 '''***********************************************WIND ESTIMATION**************************************'''
2097 2097 for Height in range(nHeights):
2098 2098
2099 2099 if Height >= range_min and Height < range_max:
2100 2100 # error_code will be useful in future analysis
2101 2101 [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
2102 2102 ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)
2103 2103
2104 2104 if abs(Vzon) < 100. and abs(Vmer) < 100.:
2105 2105 velocityX[Height] = Vzon
2106 2106 velocityY[Height] = -Vmer
2107 2107 velocityZ[Height] = Vver
2108 2108
2109 2109 # Censoring data with SNR threshold
2110 2110 dbSNR [dbSNR < SNRdBlimit] = numpy.NaN
2111 2111
2112 2112 data_param[0] = velocityX
2113 2113 data_param[1] = velocityY
2114 2114 data_param[2] = velocityZ
2115 2115 data_param[3] = dbSNR
2116 2116 dataOut.data_param = data_param
2117 2117 return dataOut
2118 2118
2119 2119 def moving_average(self,x, N=2):
2120 2120 """ Convolution for smoothing data. Note that the last N-1 values are a convolution with zeros. """
2121 2121 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
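# Example: moving_average(numpy.array([1., 2., 3., 4.]), N=2) returns
# array([1.5, 2.5, 3.5, 2. ]); the last N-1 samples taper because the
# convolution runs into the implicit zero padding.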
2122 2122
2123 2123 def gaus(self,xSamples,Amp,Mu,Sigma):
2124 2124 return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)
2125 2125
2126 2126 def Moments(self, ySamples, xSamples):
2127 2127 Power = numpy.nanmean(ySamples) # Power, 0th Moment
2128 2128 yNorm = ySamples / numpy.nansum(ySamples)
2129 2129 RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
2130 2130 Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
2131 2131 StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Standard deviation, spectral width
2132 2132 return numpy.array([Power,RadVel,StdDev])
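# Example (toy spectrum): for ySamples = [0, 1, 2, 1, 0] on xSamples =
# [-2, -1, 0, 1, 2], Moments() returns power 0.8 (mean of ySamples), radial
# velocity 0.0 (power-weighted mean of xSamples) and spectral width
# sqrt(0.5) ~ 0.71.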
2133 2133
2134 2134 def StopWindEstimation(self, error_code):
2135 2135 Vzon = numpy.NaN
2136 2136 Vmer = numpy.NaN
2137 2137 Vver = numpy.NaN
2138 2138 return Vzon, Vmer, Vver, error_code
2139 2139
2140 2140 def AntiAliasing(self, interval, maxstep):
2141 2141 """
2142 2142 function to prevent errors from aliased values when computing the phase slope
2143 2143 """
2144 2144 antialiased = numpy.zeros(len(interval))
2145 2145 copyinterval = interval.copy()
2146 2146
2147 2147 antialiased[0] = copyinterval[0]
2148 2148
2149 2149 for i in range(1,len(antialiased)):
2150 2150 step = interval[i] - interval[i-1]
2151 2151 if step > maxstep:
2152 2152 copyinterval -= 2*numpy.pi
2153 2153 antialiased[i] = copyinterval[i]
2154 2154 elif step < maxstep*(-1):
2155 2155 copyinterval += 2*numpy.pi
2156 2156 antialiased[i] = copyinterval[i]
2157 2157 else:
2158 2158 antialiased[i] = copyinterval[i].copy()
2159 2159
2160 2160 return antialiased
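# Example: AntiAliasing(numpy.array([0.1, 0.2, -6.0]), maxstep=4.5) detects the
# -6.2 rad jump between the last two samples and returns approximately
# [0.1, 0.2, 0.28], i.e. the wrapped value is shifted back by +2*pi.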
2161 2161
2162 2162 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
2163 2163 """
2164 2164 Function that Calculates Zonal, Meridional and Vertical wind velocities.
2165 2165 Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.
2166 2166
2167 2167 Input:
2168 2168 spc, cspc : self spectra and cross spectra data. In Briggs notation something like S_i*(S_i)_conj, (S_j)_conj respectively.
2169 2169 pairsList : Pairlist of channels
2170 2170 ChanDist : array of xi_ij and eta_ij
2171 2171 Height : height at which data is processed
2172 2172 noise : noise in [channels] format for specific height
2173 2173 AbbsisaRange : range of the frequencies or velocities
2174 2174 dbSNR, SNRlimit : signal-to-noise ratio in dB, lower limit
2175 2175
2176 2176 Output:
2177 2177 Vzon, Vmer, Vver : wind velocities
2178 2178 error_code : int that states where code is terminated
2179 2179
2180 2180 0 : no error detected
2181 2181 1 : Gaussian of mean spc exceeds widthlimit
2182 2182 2 : no Gaussian of mean spc found
2183 2183 3 : SNR too low or velocity too high -> e.g. precipitation
2184 2184 4 : at least one Gaussian of cspc exceeds widthlimit
2185 2185 5 : zero out of three cspc Gaussian fits converged
2186 2186 6 : phase slope fit could not be found
2187 2187 7 : arrays used to fit phase have different length
2188 2188 8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
2189 2189
2190 2190 """
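# Processing outline (summary of the steps implemented below): (1) subtract the
# noise and normalize the self-spectra, (2) fit a Gaussian to the channel-mean
# spectrum to locate the wind peak, (3) normalize the cross-spectra and fit a
# Gaussian to each channel pair, (4) fit the phase slope of each cross-spectrum
# over the frequency range of the peak, and (5) solve the Briggs and Vincent
# (1992) equations for the constants A-H and finally for Vzon, Vmer and Vver.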
2191 2191
2192 2192 error_code = 0
2193 2193
2194 2194 nChan = spc.shape[0]
2195 2195 nProf = spc.shape[1]
2196 2196 nPair = cspc.shape[0]
2197 2197
2198 2198 SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
2199 2199 CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
2200 2200 phase = numpy.zeros([nPair, nProf]) # phase between channels
2201 2201 PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
2202 2202 PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
2203 2203 xFrec = AbbsisaRange[0][:-1] # frequency range
2204 2204 xVel = AbbsisaRange[2][:-1] # velocity range
2205 2205 xSamples = xFrec # the frequency range is taken
2206 2206 delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x
2207 2207
2208 2208 # only consider velocities with in NegativeLimit and PositiveLimit
2209 2209 if (NegativeLimit is None):
2210 2210 NegativeLimit = numpy.min(xVel)
2211 2211 if (PositiveLimit is None):
2212 2212 PositiveLimit = numpy.max(xVel)
2213 2213 xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
2214 2214 xSamples_zoom = xSamples[xvalid]
2215 2215
2216 2216 '''Getting Eij and Nij'''
2217 2217 Xi01, Xi02, Xi12 = ChanDist[:,0]
2218 2218 Eta01, Eta02, Eta12 = ChanDist[:,1]
2219 2219
2220 2220 # spwd limit - updated by D. Scipión 30.03.2021
2221 2221 widthlimit = 10
2222 2222 '''************************* SPC is normalized ********************************'''
2223 2223 spc_norm = spc.copy()
2224 2224 # For each channel
2225 2225 for i in range(nChan):
2226 2226 spc_sub = spc_norm[i,:] - noise[i] # only the signal power
2227 2227 SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)
2228 2228
2229 2229 '''********************** FITTING MEAN SPC GAUSSIAN **********************'''
2230 2230
2231 2231 """ the gaussian of the mean: first subtract noise, then normalize. this is legal because
2232 2232 you only fit the curve and don't need the absolute value of height for calculation,
2233 2233 only for estimation of width. for normalization of cross spectra, you need initial,
2234 2234 unnormalized self-spectra With noise.
2235 2235
2236 2236 Technically, you don't even need to normalize the self-spectra, as you only need the
2237 2237 width of the peak. However, it was left this way. Note that the normalization has a flaw:
2238 2238 due to subtraction of the noise, some values are below zero. Raw "spc" values should be
2239 2239 >= 0, as it is the modulus squared of the signals (complex * its conjugate)
2240 2240 """
2241 2241 # initial conditions
2242 2242 popt = [1e-10,0,1e-10]
2243 2243 # Spectra average
2244 2244 SPCMean = numpy.average(SPC_Samples,0)
2245 2245 # Moments in frequency
2246 2246 SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)
2247 2247
2248 2248 # Gauss Fit SPC in frequency domain
2249 2249 if dbSNR > SNRlimit: # only if SNR > SNRth
2250 2250 try:
2251 2251 popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
2252 2252 if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
2253 2253 return self.StopWindEstimation(error_code = 1)
2254 2254 FitGauss = self.gaus(xSamples_zoom,*popt)
2255 2255 except :#RuntimeError:
2256 2256 return self.StopWindEstimation(error_code = 2)
2257 2257 else:
2258 2258 return self.StopWindEstimation(error_code = 3)
2259 2259
2260 2260 '''***************************** CSPC Normalization *************************
2261 2261 The Spc spectra are used to normalize the cross-spectra. Peaks from precipitation
2262 2262 influence the norm which is not desired. First, a range is identified where the
2263 2263 wind peak is estimated -> sum_wind is sum of those frequencies. Next, the area
2264 2264 around it gets cut off and values replaced by mean determined by the boundary
2265 2265 data -> sum_noise (spc is not normalized here, that's why the noise is important)
2266 2266
2267 2267 The sums are then added and multiplied by range/datapoints, because you need
2268 2268 an integral and not a sum for normalization.
2269 2269
2270 2270 A norm is found according to Briggs 92.
2271 2271 '''
2272 2272 # for each pair
2273 2273 for i in range(nPair):
2274 2274 cspc_norm = cspc[i,:].copy()
2275 2275 chan_index0 = pairsList[i][0]
2276 2276 chan_index1 = pairsList[i][1]
2277 2277 CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
2278 2278 phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
2279 2279
2280 2280 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
2281 2281 self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
2282 2282 self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])
2283 2283
2284 2284 popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
2285 2285 FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))
2286 2286
2287 2287 '''*******************************FIT GAUSS CSPC************************************'''
2288 2288 try:
2289 2289 popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
2290 2290 if popt01[2] > widthlimit: # CONDITION
2291 2291 return self.StopWindEstimation(error_code = 4)
2292 2292 popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
2293 2293 if popt02[2] > widthlimit: # CONDITION
2294 2294 return self.StopWindEstimation(error_code = 4)
2295 2295 popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
2296 2296 if popt12[2] > widthlimit: # CONDITION
2297 2297 return self.StopWindEstimation(error_code = 4)
2298 2298
2299 2299 FitGauss01 = self.gaus(xSamples_zoom, *popt01)
2300 2300 FitGauss02 = self.gaus(xSamples_zoom, *popt02)
2301 2301 FitGauss12 = self.gaus(xSamples_zoom, *popt12)
2302 2302 except:
2303 2303 return self.StopWindEstimation(error_code = 5)
2304 2304
2305 2305
2306 2306 '''************* Getting Fij ***************'''
2307 2307 # x-axis point of the gaussian where the center is located from GaussFit of spectra
2308 2308 GaussCenter = popt[1]
2309 2309 ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
2310 2310 PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]
2311 2311
2312 2312 # Point where e^-1 is located in the gaussian
2313 2313 PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
2314 2314 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to"Peminus1" in "FitGauss"
2315 2315 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
2316 2316 Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])
2317 2317
2318 2318 '''********** Taking frequency ranges from mean SPCs **********'''
2319 2319 GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
2320 2320 Range = numpy.empty(2)
2321 2321 Range[0] = GaussCenter - GauWidth
2322 2322 Range[1] = GaussCenter + GauWidth
2323 2323 # Point in x-axis where the bandwidth is located (min:max)
2324 2324 ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
2325 2325 ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
2326 2326 PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
2327 2327 PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
2328 2328 Range = numpy.array([ PointRangeMin, PointRangeMax ])
2329 2329 FrecRange = xSamples_zoom[ Range[0] : Range[1] ]
2330 2330
2331 2331 '''************************** Getting Phase Slope ***************************'''
2332 2332 for i in range(nPair):
2333 2333 if len(FrecRange) > 5:
2334 2334 PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
2335 2335 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
2336 2336 if len(FrecRange) == len(PhaseRange):
2337 2337 try:
2338 2338 slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
2339 2339 PhaseSlope[i] = slope
2340 2340 PhaseInter[i] = intercept
2341 2341 except:
2342 2342 return self.StopWindEstimation(error_code = 6)
2343 2343 else:
2344 2344 return self.StopWindEstimation(error_code = 7)
2345 2345 else:
2346 2346 return self.StopWindEstimation(error_code = 8)
2347 2347
2348 2348 '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
2349 2349
2350 2350 '''Getting constant C'''
2351 2351 cC=(Fij*numpy.pi)**2
2352 2352
2353 2353 '''****** Getting constants F and G ******'''
2354 2354 MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
2355 2355 # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
2356 2356 # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
2357 2357 MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
2358 2358 MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
2359 2359 # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
2360 2360 MijResults = numpy.array([MijResult1, MijResult2])
2361 2361 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
2362 2362
2363 2363 '''****** Getting constants A, B and H ******'''
2364 2364 W01 = numpy.nanmax( FitGauss01 )
2365 2365 W02 = numpy.nanmax( FitGauss02 )
2366 2366 W12 = numpy.nanmax( FitGauss12 )
2367 2367
2368 2368 WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
2369 2369 WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
2370 2370 WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
2371 2371 WijResults = numpy.array([WijResult01, WijResult02, WijResult12])
2372 2372
2373 2373 WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
2374 2374 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
2375 2375
2376 2376 VxVy = numpy.array([[cA,cH],[cH,cB]])
2377 2377 VxVyResults = numpy.array([-cF,-cG])
2378 2378 (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
2379 2379 Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
2380 2380 error_code = 0
2381 2381
2382 2382 return Vzon, Vmer, Vver, error_code
2383 2383
2384 2384 class SpectralMoments(Operation):
2385 2385
2386 2386 '''
2387 2387 Function SpectralMoments()
2388 2388
2389 2389 Calculates moments (power, mean, standard deviation) and SNR of the signal
2390 2390
2391 2391 Type of dataIn: Spectra
2392 2392
2393 2393 Configuration Parameters:
2394 2394
2395 2395 dirCosx : Cosine director in X axis
2396 2396 dirCosy : Cosine director in Y axis
2397 2397
2398 2398 elevation :
2399 2399 azimuth :
2400 2400
2401 2401 Input:
2402 2402 channelList : simple channel list to select e.g. [2,3,7]
2403 2403 self.dataOut.data_pre : Spectral data
2404 2404 self.dataOut.abscissaList : List of frequencies
2405 2405 self.dataOut.noise : Noise level per channel
2406 2406
2407 2407 Affected:
2408 2408 self.dataOut.moments : Parameters per channel
2409 2409 self.dataOut.data_snr : SNR per channel
2410 2410
2411 2411 '''
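# Output layout (as read back in run() below): with proc_type=0 each channel row
# of data_param holds [snr, power, doppler, width] and dataOut.spcpar stacks
# (doppler, width, snr, power); with proc_type=1 the windowed Gaussian-fit
# branch is used and run() takes snr from column 0, width from column 1,
# doppler from column 2 and power from column 6.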
2412 2412
2413 2413 def run(self, dataOut, proc_type=0):
2414 2414
2415 2415 absc = dataOut.abscissaList[:-1]
2416 2416 nChannel = dataOut.data_pre[0].shape[0]
2417 2417 nHei = dataOut.data_pre[0].shape[2]
2418 2418 data_param = numpy.zeros((nChannel, 4 + proc_type*3, nHei))
2419 2419
2420 2420 if proc_type == 1:
2421 2421 fwindow = numpy.zeros(absc.size) + 1
2422 2422 b=64
2423 2423 #b=16
2424 2424 fwindow[0:absc.size//2 - b] = 0
2425 2425 fwindow[absc.size//2 + b:] = 0
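# fwindow keeps only the 2*b spectral bins around the centre of the frequency
# axis; bins outside that band are excluded from the moment and fit computation.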
2426 2426 type1 = 1 # moments calculation & gaussian fitting
2427 2427 nProfiles = dataOut.nProfiles
2428 2428 nCohInt = dataOut.nCohInt
2429 2429 nIncohInt = dataOut.nIncohInt
2430 2430 M = numpy.power(numpy.array(1/(nProfiles * nCohInt) ,dtype='float32'),2)
2431 2431 N = numpy.array(M / nIncohInt,dtype='float32')
2432 2432 data = dataOut.data_pre[0] * N
2433 2433 #noise = dataOut.noise * N
2434 2434 noise = numpy.zeros(nChannel)
2435 2435 for ind in range(nChannel):
2436 2436 noise[ind] = self.__NoiseByChannel(nProfiles, nIncohInt, data[ind,:,:])
2437 2437 smooth=3
2438 2438 else:
2439 2439 data = dataOut.data_pre[0]
2440 2440 noise = dataOut.noise
2441 2441 fwindow = None
2442 2442 type1 = 0
2443 2443 nIncohInt = None
2444 2444 smooth=None
2445 2445
2446 2446 for ind in range(nChannel):
2447 2447 data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind], nicoh=nIncohInt, smooth=smooth, type1=type1, fwindow=fwindow, id_ch=ind)
2448 2448
2449 2449 if proc_type == 1:
2450 2450 dataOut.moments = data_param[:,1:,:]
2451 2451 dataOut.data_dop = data_param[:,2]
2452 2452 dataOut.data_width = data_param[:,1]
2453 2453 dataOut.data_snr = data_param[:,0]
2454 2454 dataOut.data_pow = data_param[:,6] # to compare with type0 proccessing
2455 2455 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, data_param[:,3], data_param[:,4],data_param[:,5]),axis=2)
2456 2456 else:
2457 2457 dataOut.moments = data_param[:,1:,:]
2458 2458 dataOut.data_snr = data_param[:,0]
2459 2459 dataOut.data_pow = data_param[:,1]
2460 2460 dataOut.data_dop = data_param[:,2]
2461 2461 dataOut.data_width = data_param[:,3]
2462 2462 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, dataOut.data_pow),axis=2)
2463 2463
2464 2464 return dataOut
2465 2465
2466 2466 def __calculateMoments(self, oldspec, oldfreq, n0,
2467 2467 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None,id_ch=0):
2468 2468
2469 2469 def __GAUSSWINFIT1(A, flagPDER=0):
2470 2470 nonlocal truex, xvalid
2471 2471 nparams = 4
2472 2472 M=truex.size
2473 2473 mm=numpy.arange(M,dtype='f4')
2474 2474 delta = numpy.zeros(M,dtype='f4')
2475 2475 delta[0] = 1.0
2476 2476 Ts = numpy.array([1.0/(2*truex[0])],dtype='f4')[0]
2477 2477 jj = -1j
2478 2478 #if self.winauto is None: self.winauto = (1.0 - mm/M)
2479 2479 winauto = (1.0 - mm/M)
2480 2480 winauto = winauto/winauto.max() # Normalized to 1
2481 2481 #ON_ERROR,2 # IDL sentence: Return to caller if an error occurs
2482 2482 A[0] = numpy.abs(A[0])
2483 2483 A[2] = numpy.abs(A[2])
2484 2484 A[3] = numpy.abs(A[3])
2485 2485 pi=numpy.array([numpy.pi],dtype='f4')[0]
2486 2486 if A[2] != 0:
2487 2487 Z = numpy.exp(-2*numpy.power((pi*A[2]*mm*Ts),2,dtype='f4')+jj*2*pi*A[1]*mm*Ts, dtype='c8') # Get Z
2488 2488 else:
2489 2489 Z = mm*0.0
2490 2490 A[0] = 0.0
2491 2491 junkF = numpy.roll(2*fft(winauto*(A[0]*Z+A[3]*delta)).real - \
2492 2492 winauto[0]*(A[0]+A[3]), M//2) # *M scale for fft not needed in python
2493 2493 F = junkF[xvalid]
2494 2494 if flagPDER == 0: #NEED PARTIAL?
2495 2495 return F
2496 2496 PDER = numpy.zeros((M,nparams)) #YES, MAKE ARRAY.
2497 2497 PDER[:,0] = numpy.roll(2*(fft(winauto*Z)*M) - winauto[0], M//2)
2498 2498 PDER[:,1] = numpy.roll(2*(fft(winauto*jj*2*numpy.pi*mm*Ts*A[0]*Z)*M), M//2)
2499 2499 PDER[:,2] = numpy.roll(2*(fft(winauto*(-4*numpy.power(numpy.pi*mm*Ts,2)*A[2]*A[0]*Z))*M), M//2)
2500 2500 PDER[:,3] = numpy.roll(2*(fft(winauto*delta)*M) - winauto[0], M//2)
2501 2501 PDER = PDER[xvalid,:]
2502 2502 return F, PDER
2503 2503
2504 2504 def __curvefit_koki(y, a, Weights, FlagNoDerivative=1,
2505 2505 itmax=20, tol=None):
2506 2506 #ON_ERROR,2 IDL SENTENCE: RETURN TO THE CALLER IF ERROR
2507 2507 if tol == None:
2508 2508 tol = numpy.array([1.e-3],dtype='f4')[0]
2509 2509 typ=a.dtype
2510 2510 double = 1 if typ == numpy.float64 else 0
2511 2511 if typ != numpy.float32:
2512 2512 a=a.astype(numpy.float32) #Make params floating
2513 2513 # if we will be estimating partial derivates then compute machine precision
2514 2514 if FlagNoDerivative == 1:
2515 2515 res=numpy.MachAr(float_conv=numpy.float32)
2516 2516 eps=numpy.sqrt(res.eps)
2517 2517
2518 2518 nterms = a.size # Number of parameters
2519 2519 nfree=numpy.array([numpy.size(y) - nterms],dtype='f4')[0] # Degrees of freedom
2520 2520 if nfree <= 0: print('Curvefit - not enough data points.')
2521 2521 flambda= numpy.array([0.001],dtype='f4')[0] # Initial lambda
2522 2522 #diag=numpy.arange(nterms)*(nterms+1) # Subscripta of diagonal elements
2523 2523 # Use diag method in python
2524 2524 converge=1
2525 2525
2526 2526 #Define the partial derivative array
2527 2527 PDER = numpy.zeros((nterms,numpy.size(y)),dtype='f8') if double == 1 else numpy.zeros((nterms,numpy.size(y)),dtype='f4')
2528 2528
2529 2529 for Niter in range(itmax): #Iteration loop
2530 2530
2531 2531 if FlagNoDerivative == 1:
2532 2532 #Evaluate function and estimate partial derivatives
2533 2533 yfit = __GAUSSWINFIT1(a)
2534 2534 for term in range(nterms):
2535 2535 p=a.copy() # Copy current parameters
2536 2536 #Increment size for forward difference derivative
2537 2537 inc = eps * abs(p[term])
2538 2538 if inc == 0: inc = eps
2539 2539 p[term] = p[term] + inc
2540 2540 yfit1 = __GAUSSWINFIT1(p)
2541 2541 PDER[term,:] = (yfit1-yfit)/inc
2542 2542 else:
2543 2543 #The user's procedure will return partial derivatives
2544 2544 yfit,PDER=__GAUSSWINFIT1(a, flagPDER=1)
2545 2545
2546 2546 beta = numpy.dot(PDER,(y-yfit)*Weights)
2547 2547 alpha = numpy.dot(PDER * numpy.tile(Weights,(nterms,1)), numpy.transpose(PDER))
2548 2548 # save current values of return parameters
2549 2549 sigma1 = numpy.sqrt( 1.0 / numpy.diag(alpha) ) # Current sigma.
2550 2550 sigma = sigma1
2551 2551
2552 2552 chisq1 = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # Current chi squared.
2553 2553 chisq = chisq1
2554 2554 yfit1 = yfit
2555 2555 elev7=numpy.array([1.0e7],dtype='f4')[0]
2556 2556 compara =numpy.sum(abs(y))/elev7/nfree
2557 2557 done_early = chisq1 < compara
2558 2558
2559 2559 if done_early:
2560 2560 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2561 2561 if done_early: Niter -= 1
2562 2562 #save_tp(chisq,Niter,yfit)
2563 2563 return yfit, a, converge, sigma, chisq # return result
2564 2564 #c = numpy.dot(c, c) # this operator implemented at the next lines
2565 2565 c_tmp = numpy.sqrt(numpy.diag(alpha))
2566 2566 siz=len(c_tmp)
2567 2567 c=numpy.dot(c_tmp.reshape(siz,1),c_tmp.reshape(1,siz))
2568 2568 lambdaCount = 0
2569 2569 while True:
2570 2570 lambdaCount += 1
2571 2571 # Normalize alpha to have unit diagonal.
2572 2572 array = alpha / c
2573 2573 # Augment the diagonal.
2574 2574 one=numpy.array([1.],dtype='f4')[0]
2575 2575 numpy.fill_diagonal(array,numpy.diag(array)*(one+flambda))
2576 2576 # Invert modified curvature matrix to find new parameters.
2577 2577 try:
2578 2578 array = (1.0/array) if array.size == 1 else numpy.linalg.inv(array)
2579 2579 except Exception as e:
2580 2580 print(e)
2581 2581 array[:]=numpy.NaN
2582 2582
2583 2583 b = a + numpy.dot(numpy.transpose(beta),array/c) # New params
2584 2584 yfit = __GAUSSWINFIT1(b) # Evaluate function
2585 2585 chisq = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # New chisq
2586 2586 sigma = numpy.sqrt(numpy.diag(array)/numpy.diag(alpha)) # New sigma
2587 2587 if (numpy.isfinite(chisq) == 0) or \
2588 2588 (lambdaCount > 30 and chisq >= chisq1):
2589 2589 # Reject changes made this iteration, use old values.
2590 2590 yfit = yfit1
2591 2591 sigma = sigma1
2592 2592 chisq = chisq1
2593 2593 converge = 0
2594 2594 #print('Failed to converge.')
2595 2595 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2596 2596 if done_early: Niter -= 1
2597 2597 #save_tp(chisq,Niter,yfit)
2598 2598 return yfit, a, converge, sigma, chisq, chi2 # return result
2599 2599 ten=numpy.array([10.0],dtype='f4')[0]
2600 2600 flambda *= ten # Assume fit got worse
2601 2601 if chisq <= chisq1:
2602 2602 break
2603 2603 hundred=numpy.array([100.0],dtype='f4')[0]
2604 2604 flambda /= hundred
2605 2605
2606 2606 a=b # Save new parameter estimate.
2607 2607 if ((chisq1-chisq)/chisq1) <= tol: # Finished?
2608 2608 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2609 2609 if done_early: Niter -= 1
2610 2610 #save_tp(chisq,Niter,yfit)
2611 2611 return yfit, a, converge, sigma, chisq, chi2 # return result
2612 2612 converge = 0
2613 2613 chi2 = chisq
2614 2614 #print('Failed to converge.')
2615 2615 #save_tp(chisq,Niter,yfit)
2616 2616 return yfit, a, converge, sigma, chisq, chi2
2617 2617
2618 2618 if (nicoh is None): nicoh = 1
2619 2619 if (graph is None): graph = 0
2620 2620 if (smooth is None): smooth = 0
2621 2621 elif (smooth < 3): smooth = 0
2622 2622
2623 2623 if (type1 is None): type1 = 0
2624 2624 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
2625 2625 if (snrth is None): snrth = -20.0
2626 2626 if (dc is None): dc = 0
2627 2627 if (aliasing is None): aliasing = 0
2628 2628 if (oldfd is None): oldfd = 0
2629 2629 if (wwauto is None): wwauto = 0
2630 2630
2631 2631 if (n0 < 1.e-20): n0 = 1.e-20
2632 2632
2633 2633 xvalid = numpy.where(fwindow == 1)[0]
2634 2634 freq = oldfreq
2635 2635 truex = oldfreq
2636 2636 vec_power = numpy.zeros(oldspec.shape[1])
2637 2637 vec_fd = numpy.zeros(oldspec.shape[1])
2638 2638 vec_w = numpy.zeros(oldspec.shape[1])
2639 2639 vec_snr = numpy.zeros(oldspec.shape[1])
2640 2640 vec_n1 = numpy.empty(oldspec.shape[1])
2641 2641 vec_fp = numpy.empty(oldspec.shape[1])
2642 2642 vec_sigma_fd = numpy.empty(oldspec.shape[1])
2643 2643
2644 2644 for ind in range(oldspec.shape[1]):
2645 2645
2646 2646 spec = oldspec[:,ind]
2647 2647 if (smooth == 0):
2648 2648 spec2 = spec
2649 2649 else:
2650 2650 spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
2651 2651
2652 2652 aux = spec2*fwindow
2653 2653 max_spec = aux.max()
2654 2654 m = aux.tolist().index(max_spec)
2655 2655
2656 2656 if m > 2 and m < oldfreq.size - 3:
2657 2657 newindex = m + numpy.array([-2,-1,0,1,2])
2658 2658 newfreq = numpy.arange(20)/20.0*(numpy.max(freq[newindex])-numpy.min(freq[newindex]))+numpy.min(freq[newindex])
2659 2659 #peakspec = SPLINE(,)
2660 2660 tck = interpolate.splrep(freq[newindex], spec2[newindex])
2661 2661 peakspec = interpolate.splev(newfreq, tck)
2662 2662 # max_spec = MAX(peakspec,)
2663 2663 max_spec = numpy.max(peakspec)
2664 2664 mnew = numpy.argmax(peakspec)
2665 2665 #fp = newfreq(mnew)
2666 2666 fp = newfreq[mnew]
2667 2667 else:
2668 2668 fp = freq[m]
2669 2669
2670 2670 if type1==0:
2671 2671
2672 2672 # Moments Estimation
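# The noise-subtracted moments below follow the standard definitions, each term
# weighted by fwindow:
#   power = sum(S - n0)                                   (zeroth moment)
#   fd    = sum((S - n0) * f) / power                     (first moment, Doppler shift)
#   w     = sqrt(sum((S - n0) * (f - fd)**2) / power)     (second moment, spectral width)
# with S the (optionally smoothed) spectrum, n0 the noise level and f the frequency axis.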
2673 2673 bb = spec2[numpy.arange(m,spec2.size)]
2674 2674 bb = (bb<n0).nonzero()
2675 2675 bb = bb[0]
2676 2676
2677 2677 ss = spec2[numpy.arange(0,m + 1)]
2678 2678 ss = (ss<n0).nonzero()
2679 2679 ss = ss[0]
2680 2680
2681 2681 if (bb.size == 0):
2682 2682 bb0 = spec.size - 1 - m
2683 2683 else:
2684 2684 bb0 = bb[0] - 1
2685 2685 if (bb0 < 0):
2686 2686 bb0 = 0
2687 2687
2688 2688 if (ss.size == 0):
2689 2689 ss1 = 1
2690 2690 else:
2691 2691 ss1 = max(ss) + 1
2692 2692
2693 2693 if (ss1 > m):
2694 2694 ss1 = m
2695 2695
2696 2696 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
2697 2697
2698 2698 signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipión added with correct definition
2699 2699 total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipión added with correct definition
2700 2700 power = ((spec2[valid] - n0) * fwindow[valid]).sum()
2701 2701 fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
2702 2702 w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
2703 2703 snr = (spec2.mean()-n0)/n0
2704 2704 if (snr < 1.e-20): snr = 1.e-20
2705 2705
2706 2706 vec_power[ind] = total_power
2707 2707 vec_fd[ind] = fd
2708 2708 vec_w[ind] = w
2709 2709 vec_snr[ind] = snr
2710 2710 else:
2711 2711 # Noise by heights
2712 2712 n1, stdv = self.__get_noise2(spec, nicoh)
2713 2713 # Moments Estimation
2714 2714 bb = spec2[numpy.arange(m,spec2.size)]
2715 2715 bb = (bb<n1).nonzero()
2716 2716 bb = bb[0]
2717 2717
2718 2718 ss = spec2[numpy.arange(0,m + 1)]
2719 2719 ss = (ss<n1).nonzero()
2720 2720 ss = ss[0]
2721 2721
2722 2722 if (bb.size == 0):
2723 2723 bb0 = spec.size - 1 - m
2724 2724 else:
2725 2725 bb0 = bb[0] - 1
2726 2726 if (bb0 < 0):
2727 2727 bb0 = 0
2728 2728
2729 2729 if (ss.size == 0):
2730 2730 ss1 = 1
2731 2731 else:
2732 2732 ss1 = max(ss) + 1
2733 2733
2734 2734 if (ss1 > m):
2735 2735 ss1 = m
2736 2736
2737 2737 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
2738 2738 power = ((spec[valid] - n1)*fwindow[valid]).sum()
2739 2739 fd = ((spec[valid]- n1)*freq[valid]*fwindow[valid]).sum()/power
2740 2740 try:
2741 2741 w = numpy.sqrt(((spec[valid] - n1)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
2742 2742 except:
2743 2743 w = float("NaN")
2744 2744 snr = power/(n0*fwindow.sum())
2745 2745 if snr < 1.e-20: snr = 1.e-20
2746 2746
2747 2747 # Here starts the Gaussian adjustment
2748 2748
2749 2749 if snr > numpy.power(10,0.1*snrth):
2750 2750
2751 2751 a = numpy.zeros(4,dtype='f4')
2752 2752 a[0] = snr * n0
2753 2753 a[1] = fd
2754 2754 a[2] = w
2755 2755 a[3] = n0
2756 2756
2757 2757 np = spec.size
2758 2758 aold = a.copy()
2759 2759 spec2 = spec.copy()
2760 2760 oldxvalid = xvalid.copy()
2761 2761
2762 2762 for i in range(2):
2763 2763
2764 2764 ww = 1.0/(numpy.power(spec2,2)/nicoh)
2765 2765 ww[np//2] = 0.0
2766 2766
2767 2767 a = aold.copy()
2768 2768 xvalid = oldxvalid.copy()
2769 2769 #self.show_var(xvalid)
2770 2770
2771 2771 gaussfn = __curvefit_koki(spec[xvalid], a, ww[xvalid])
2772 2772 a = gaussfn[1]
2773 2773 converge = gaussfn[2]
2774 2774
2775 2775 xvalid = numpy.arange(np)
2776 2776 spec2 = __GAUSSWINFIT1(a)
2777 2777
2778 2778 xvalid = oldxvalid.copy()
2779 2779 power = a[0] * np
2780 2780 fd = a[1]
2781 2781 sigma_fd = gaussfn[3][1]
2782 2782 snr = max(power/ (max(a[3],n0) * len(oldxvalid)) * converge, 1e-20)
2783 2783 w = numpy.abs(a[2])
2784 2784 n1 = max(a[3], n0)
2785 2785
2786 2786 #gauss_adj=[fd,w,snr,n1,fp,sigma_fd]
2787 2787 else:
2788 2788 sigma_fd=numpy.nan # to avoid UnboundLocalError: local variable 'sigma_fd' referenced before assignment
2789 2789
2790 2790 vec_fd[ind] = fd
2791 2791 vec_w[ind] = w
2792 2792 vec_snr[ind] = snr
2793 2793 vec_n1[ind] = n1
2794 2794 vec_fp[ind] = fp
2795 2795 vec_sigma_fd[ind] = sigma_fd
2796 2796 vec_power[ind] = power # to compare with type 0 processing
2797 2797
2798 2798 if type1==1:
2799 2799 return numpy.vstack((vec_snr, vec_w, vec_fd, vec_n1, vec_fp, vec_sigma_fd, vec_power)) # snr and fd exchanged to compare doppler of both types
2800 2800 else:
2801 2801 return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
2802 2802
2803 2803 def __get_noise2(self,POWER, fft_avg, TALK=0):
2804 2804 '''
2805 2805 Routine to compute the noise per height (n1). Similar to the IDL implementation.
2806 2806 '''
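# As implemented below: the spectral points are sorted in ascending order and
# accumulated one by one; while the running statistics satisfy
# SUMSQ*J <= (1 + 1/fft_avg)*SUM**2 (a Hildebrand-Sekhon-like white-noise test)
# the running mean is kept as the noise estimate RNOISE. The returned stdv is the
# standard deviation of all accumulated points.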
2807 2807 SPECT_PTS = len(POWER)
2808 2808 fft_avg = fft_avg*1.0
2809 2809 NOMIT = 0
2810 2810 NN = SPECT_PTS - NOMIT
2811 2811 N = NN//2
2812 2812 ARR = numpy.concatenate((POWER[0:N+1],POWER[N+NOMIT+1:SPECT_PTS]))
2813 2813 ARR = numpy.sort(ARR)
2814 2814 NUMS_MIN = (SPECT_PTS+7)//8
2815 2815 RTEST = (1.0+1.0/fft_avg)
2816 2816 SUM = 0.0
2817 2817 SUMSQ = 0.0
2818 2818 J = 0
2819 2819 for I in range(NN):
2820 2820 J = J + 1
2821 2821 SUM = SUM + ARR[I]
2822 2822 SUMSQ = SUMSQ + ARR[I]*ARR[I]
2823 2823 AVE = SUM*1.0/J
2824 2824 if J > NUMS_MIN:
2825 2825 if (SUMSQ*J <= RTEST*SUM*SUM): RNOISE = AVE
2826 2826 else:
2827 2827 if J == NUMS_MIN: RNOISE = AVE
2828 2828 if TALK == 1: print('Noise Power (2):%4.4f' %RNOISE)
2829 2829 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
2830 2830 return RNOISE, stdv
2831 2831
2832 2832 def __get_noise1(self, power, fft_avg, TALK=0):
2833 2833 '''
2834 2834 Routine to compute the noise per height (n0). Similar to the IDL implementation.
2835 2835 '''
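# Same white-noise test as __get_noise2, but applied to the flattened, sorted power
# array and stopped early: samples are added until the criterion fails, the last
# sample is then discarded and the running mean is returned as the noise level.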
2836 2836 num_pts = numpy.size(power)
2837 2837 #print('num_pts',num_pts)
2838 2838 #print('power',power.shape)
2839 2839 #print(power[256:267,0:2])
2840 2840 fft_avg = fft_avg*1.0
2841 2841
2842 2842 ind = numpy.argsort(power, axis=None, kind='stable')
2843 2843 #ind = numpy.argsort(numpy.reshape(power,-1))
2844 2844 #print(ind.shape)
2845 2845 #print(ind[0:11])
2846 2846 #print(numpy.reshape(power,-1)[ind[0:11]])
2847 2847 ARR = numpy.reshape(power,-1)[ind]
2848 2848 #print('ARR',len(ARR))
2849 2849 #print('ARR',ARR.shape)
2850 2850 NUMS_MIN = num_pts//10
2851 2851 RTEST = (1.0+1.0/fft_avg)
2852 2852 SUM = 0.0
2853 2853 SUMSQ = 0.0
2854 2854 J = 0
2855 2855 cont = 1
2856 2856 while cont == 1 and J < num_pts:
2857 2857
2858 2858 SUM = SUM + ARR[J]
2859 2859 SUMSQ = SUMSQ + ARR[J]*ARR[J]
2860 2860 J = J + 1
2861 2861
2862 2862 if J > NUMS_MIN:
2863 2863 if (SUMSQ*J <= RTEST*SUM*SUM):
2864 2864 LNOISE = SUM*1.0/J
2865 2865 else:
2866 2866 J = J - 1
2867 2867 SUM = SUM - ARR[J]
2868 2868 SUMSQ = SUMSQ - ARR[J]*ARR[J]
2869 2869 cont = 0
2870 2870 else:
2871 2871 if J == NUMS_MIN: LNOISE = SUM*1.0/J
2872 2872 if TALK == 1: print('Noise Power (1):%8.8f' %LNOISE)
2873 2873 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
2874 2874 return LNOISE, stdv
2875 2875
2876 2876 def __NoiseByChannel(self, num_prof, num_incoh, spectra,talk=0):
2877 2877
2878 2878 val_frq = numpy.arange(num_prof-2)+1
2879 2879 val_frq[(num_prof-2)//2:] = val_frq[(num_prof-2)//2:] + 1
2880 2880 junkspc = numpy.sum(spectra[val_frq,:], axis=1)
2881 2881 junkid = numpy.argsort(junkspc)
2882 2882 noisezone = val_frq[junkid[0:num_prof//2]]
2883 2883 specnoise = spectra[noisezone,:]
2884 2884 noise, stdvnoise = self.__get_noise1(specnoise,num_incoh)
2885 2885
2886 2886 if talk:
2887 2887 print('noise =', noise)
2888 2888 return noise
2889 2889 #------------------ Get SA Parameters --------------------------
2890 2890
2891 2891 def GetSAParameters(self):
2892 2892 #SA in frequency
2893 2893 pairslist = self.dataOut.groupList
2894 2894 num_pairs = len(pairslist)
2895 2895
2896 2896 vel = self.dataOut.abscissaList
2897 2897 spectra = self.dataOut.data_pre
2898 2898 cspectra = self.dataIn.data_cspc
2899 2899 delta_v = vel[1] - vel[0]
2900 2900
2901 2901 #Calculating the power spectrum
2902 2902 spc_pow = numpy.sum(spectra, 3)*delta_v
2903 2903 #Normalizing Spectra
2904 2904 norm_spectra = spectra/spc_pow
2905 2905 #Calculating the norm_spectra at peak
2906 2906 max_spectra = numpy.max(norm_spectra, 3)
2907 2907
2908 2908 #Normalizing Cross Spectra
2909 2909 norm_cspectra = numpy.zeros(cspectra.shape)
2910 2910
2911 2911 for i in range(num_pairs):
2912 2912 norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])
2913 2913
2914 2914 max_cspectra = numpy.max(norm_cspectra,2)
2915 2915 max_cspectra_index = numpy.argmax(norm_cspectra, 2)
2916 2916
2917 2917 for i in range(num_pairs):
2918 2918 cspc_par[i,:,:] = __calculateMoments(norm_cspectra)
2919 2919 #------------------- Get Lags ----------------------------------
2920 2920
2921 2921 class JULIADriftsEstimation(Operation):
2922 2922
2923 2923 def __init__(self):
2924 2924 Operation.__init__(self)
2925 2925
2926 2926 def newtotal(self, data):
2927 2927 return numpy.nansum(data)
2928 2928
2929 2929 def data_filter(self, parm, snrth=-20, swth=20, wErrth=500):
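# Per-profile quality control: an SNR histogram (1-dB bins) locates the noise mode;
# the threshold is noise_mean + 3 dB, or the SNR median when the noise estimate is
# already above median + 3 dB. Heights below that threshold, below snrth, or with
# non-finite SNR are masked by setting every parameter to NaN.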
2930 2930
2931 2931 Sz0 = parm.shape # Sz0: h,p
2932 2932 drift = parm[:,0]
2933 2933 sw = 2*parm[:,1]
2934 2934 snr = 10*numpy.log10(parm[:,2])
2935 2935 Sz = drift.shape # Sz: h
2936 2936 mask = numpy.ones((Sz[0]))
2937 2937 th=0
2938 2938 valid=numpy.where(numpy.isfinite(snr))
2939 2939 cvalid = len(valid[0])
2940 2940 if cvalid >= 1:
2941 2941 # Compute the average noise-level SNR for the i-th height group
2942 2942 nbins = int(numpy.max(snr)-numpy.min(snr))+1 # bin size = 1, similar to IDL
2943 2943 h = numpy.histogram(snr,bins=nbins)
2944 2944 hist = h[0]
2945 2945 values = numpy.round_(h[1])
2946 2946 moda = values[numpy.where(hist == numpy.max(hist))]
2947 2947 indNoise = numpy.where(numpy.abs(snr - numpy.min(moda)) < 3)[0]
2948 2948
2949 2949 noise = snr[indNoise]
2950 2950 noise_mean = numpy.sum(noise)/len(noise)
2951 2951 # Compute the SNR median
2952 2952 med = numpy.median(snr)
2953 2953 # Set the SNR threshold
2954 2954 if noise_mean > med + 3:
2955 2955 th = med
2956 2956 else:
2957 2957 th = noise_mean + 3
2958 2958 # Build the mask
2959 2959 novalid = numpy.where(snr <= th)[0]
2960 2960 mask[novalid] = numpy.nan
2961 2961 # Remove data that does not exceed the threshold: PARAMETER
2962 2962 novalid = numpy.where(snr <= snrth)
2963 2963 cnovalid = len(novalid[0])
2964 2964 if cnovalid > 0:
2965 2965 mask[novalid] = numpy.nan
2966 2966 novalid = numpy.where(numpy.isnan(snr))
2967 2967 cnovalid = len(novalid[0])
2968 2968 if cnovalid > 0:
2969 2969 mask[novalid] = numpy.nan
2970 2970 new_parm = numpy.zeros((Sz0[0],Sz0[1]))
2971 2971 for h in range(Sz0[0]):
2972 2972 for p in range(Sz0[1]):
2973 2973 if numpy.isnan(mask[h]):
2974 2974 new_parm[h,p]=numpy.nan
2975 2975 else:
2976 2976 new_parm[h,p]=parm[h,p]
2977 2977
2978 2978 return new_parm, th
2979 2979
2980 2980 def run(self, dataOut, zenith, zenithCorrection,heights=None, statistics=0, otype=0):
2981 2981
2982 dataOut.lat=-11.95
2983 dataOut.lon=-76.87
2982 2984 nCh=dataOut.spcpar.shape[0]
2983 2985 nHei=dataOut.spcpar.shape[1]
2984 2986 nParam=dataOut.spcpar.shape[2]
2985 # Only the heights of interest
2986 hei=dataOut.heightList
2987 hvalid=numpy.where([hei >= heights[0]][0] & [hei <= heights[1]][0])[0]
2988 nhvalid=len(hvalid)
2989 parm = numpy.zeros((nCh,nhvalid,nParam))
2990 parm = dataOut.spcpar[:,hvalid,:]
2987 # Height selection
2988
2989 if not heights:
2990 parm = numpy.zeros((nCh,nHei,nParam))
2991 parm[:] = dataOut.spcpar[:]
2992 else:
2993 hei=dataOut.heightList
2994 hvalid=numpy.where((hei >= heights[0]) & (hei <= heights[1]))[0]
2995 nhvalid=len(hvalid)
2996 dataOut.heightList = hei[hvalid]
2997 parm = numpy.zeros((nCh,nhvalid,nParam))
2998 parm[:] = dataOut.spcpar[:,hvalid,:]
2999
3000
2991 3001 # First filtering: SNR threshold
2992 3002 for i in range(nCh):
2993 dataOut.spcpar[i,hvalid,:] = self.data_filter(parm[i,:,:])[0]
3003 parm[i,:,:] = self.data_filter(parm[i,:,:])[0]
3004
2994 3005 zenith = numpy.array(zenith)
2995 3006 zenith -= zenithCorrection
2996 3007 zenith *= numpy.pi/180
2997 3008 alpha = zenith[0]
2998 3009 beta = zenith[1]
2999
3000 dopplerCH0 = dataOut.spcpar[0,:,0]
3001 dopplerCH1 = dataOut.spcpar[1,:,0]
3002 swCH0 = dataOut.spcpar[0,:,1]
3003 swCH1 = dataOut.spcpar[1,:,1]
3004 snrCH0 = 10*numpy.log10(dataOut.spcpar[0,:,2])
3005 snrCH1 = 10*numpy.log10(dataOut.spcpar[1,:,2])
3006 noiseCH0 = dataOut.spcpar[0,:,3]
3007 noiseCH1 = dataOut.spcpar[1,:,3]
3008 wErrCH0 = dataOut.spcpar[0,:,5]
3009 wErrCH1 = dataOut.spcpar[1,:,5]
3010 dopplerCH0 = parm[0,:,0]
3011 dopplerCH1 = parm[1,:,0]
3012 swCH0 = parm[0,:,1]
3013 swCH1 = parm[1,:,1]
3014 snrCH0 = 10*numpy.log10(parm[0,:,2])
3015 snrCH1 = 10*numpy.log10(parm[1,:,2])
3016 noiseCH0 = parm[0,:,3]
3017 noiseCH1 = parm[1,:,3]
3018 wErrCH0 = parm[0,:,5]
3019 wErrCH1 = parm[1,:,5]
3010 3020
3011 3021 # Vertical and zonal calculation according to geometry
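# With corrected zenith angles alpha and beta, the two oblique-beam Dopplers are
# projected onto vertical (drift) and zonal components by solving the corresponding
# 2x2 system; sinB_A = sin(beta)*cos(alpha) - sin(alpha)*cos(beta) = sin(beta - alpha)
# is its determinant.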
3012 3022 sinB_A = numpy.sin(beta)*numpy.cos(alpha) - numpy.sin(alpha)* numpy.cos(beta)
3013 3023 drift = -(dopplerCH0 * numpy.sin(beta) - dopplerCH1 * numpy.sin(alpha))/ sinB_A
3014 3024 zonal = (dopplerCH0 * numpy.cos(beta) - dopplerCH1 * numpy.cos(alpha))/ sinB_A
3015 3025 snr = (snrCH0 + snrCH1)/2
3016 3026 noise = (noiseCH0 + noiseCH1)/2
3017 3027 sw = (swCH0 + swCH1)/2
3018 3028 w_w_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.sin(beta)/numpy.abs(sinB_A),2) + numpy.power(wErrCH1 * numpy.sin(alpha)/numpy.abs(sinB_A),2))
3019 3029 w_e_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.cos(beta)/numpy.abs(-1*sinB_A),2) + numpy.power(wErrCH1 * numpy.cos(alpha)/numpy.abs(-1*sinB_A),2))
3020 3030
3021 3031 # for statistics150km
3022 3032 if statistics:
3023 print('Implemented offline.')
3033 print('Implemented offline.')
3034
3024 3035 if otype == 0:
3025 3036 winds = numpy.vstack((snr, drift, zonal, noise, sw, w_w_err, w_e_err)) # to process statistics drifts
3026 3037 elif otype == 3:
3027 3038 winds = numpy.vstack((snr, drift, zonal)) # to generic plot: 3 RTI's
3028 3039 elif otype == 4:
3029 3040 winds = numpy.vstack((snrCH0, drift, snrCH1, zonal)) # to generic plot: 4 RTI's
3030 3041
3031 3042 snr1 = numpy.vstack((snrCH0, snrCH1))
3032 3043 dataOut.data_output = winds
3033 3044 dataOut.data_snr = snr1
3034 3045
3035 3046 dataOut.utctimeInit = dataOut.utctime
3036 3047 dataOut.outputInterval = dataOut.timeInterval
3048 try:
3049 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.data_output[0])) # NAN vectors are not written MADRIGAL CASE
3050 except:
3051 print("Check there is no Data")
3037 3052
3038 3053 return dataOut
3039 3054
3040 3055 class SALags(Operation):
3041 3056 '''
3042 3057 Function GetMoments()
3043 3058
3044 3059 Input:
3045 3060 self.dataOut.data_pre
3046 3061 self.dataOut.abscissaList
3047 3062 self.dataOut.noise
3048 3063 self.dataOut.normFactor
3049 3064 self.dataOut.data_snr
3050 3065 self.dataOut.groupList
3051 3066 self.dataOut.nChannels
3052 3067
3053 3068 Affected:
3054 3069 self.dataOut.data_param
3055 3070
3056 3071 '''
3057 3072 def run(self, dataOut):
3058 3073 data_acf = dataOut.data_pre[0]
3059 3074 data_ccf = dataOut.data_pre[1]
3060 3075 normFactor_acf = dataOut.normFactor[0]
3061 3076 normFactor_ccf = dataOut.normFactor[1]
3062 3077 pairs_acf = dataOut.groupList[0]
3063 3078 pairs_ccf = dataOut.groupList[1]
3064 3079
3065 3080 nHeights = dataOut.nHeights
3066 3081 absc = dataOut.abscissaList
3067 3082 noise = dataOut.noise
3068 3083 SNR = dataOut.data_snr
3069 3084 nChannels = dataOut.nChannels
3070 3085 for l in range(len(pairs_acf)):
3071 3086 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
3072 3087
3073 3088 for l in range(len(pairs_ccf)):
3074 3089 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
3075 3090
3076 3091 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
3077 3092 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
3078 3093 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
3079 3094 return
3080 3095
3081 3096 def __calculateTaus(self, data_acf, data_ccf, lagRange):
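# Spaced-antenna lag estimation: tau_ccf is the lag of each cross-correlation maximum
# and tau_acf the lag where the mean autocorrelation matches the zero-lag value of
# that cross-correlation; pairs whose CCF peaks at the first lag are flagged as NaN.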
3082 3097
3083 3098 lag0 = data_acf.shape[1]//2 # integer division so it can be used as an index
3084 3099 #Autocorrelation function
3085 3100 mean_acf = stats.nanmean(data_acf, axis = 0)
3086 3101
3087 3102 #Get the TauCross index
3088 3103 ind_ccf = data_ccf.argmax(axis = 1)
3089 3104 #Get the TauAuto index
3090 3105 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
3091 3106 ccf_lag0 = data_ccf[:,lag0,:]
3092 3107
3093 3108 for i in range(ccf_lag0.shape[0]):
3094 3109 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
3095 3110
3096 3111 #Get TauCross and TauAuto
3097 3112 tau_ccf = lagRange[ind_ccf]
3098 3113 tau_acf = lagRange[ind_acf]
3099 3114
3100 3115 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
3101 3116
3102 3117 tau_ccf[Nan1,Nan2] = numpy.nan
3103 3118 tau_acf[Nan1,Nan2] = numpy.nan
3104 3119 tau = numpy.vstack((tau_ccf,tau_acf))
3105 3120
3106 3121 return tau
3107 3122
3108 3123 def __calculateLag1Phase(self, data, lagTRange):
3109 3124 data1 = stats.nanmean(data, axis = 0)
3110 3125 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
3111 3126
3112 3127 phase = numpy.angle(data1[lag1,:])
3113 3128
3114 3129 return phase
3115 3130
3116 3131 def fit_func( x, a0, a1, a2): #, a3, a4, a5):
3117 3132 z = (x - a1) / a2
3118 3133 y = a0 * numpy.exp(-z**2 / a2) #+ a3 + a4 * x + a5 * x**2
3119 3134 return y
3120 3135
3121 3136
3122 3137 class SpectralFitting(Operation):
3123 3138 '''
3124 3139 Function SpectralFitting()
3125 3140
3126 3141 Input:
3127 3142 Output:
3128 3143 Variables modified:
3129 3144 '''
3130 3145 isConfig = False
3131 3146 __dataReady = False
3132 3147 bloques = None
3133 3148 bloque0 = None
3134 3149 index = 0
3135 3150 fint = 0
3136 3151 buffer = 0
3137 3152 buffer2 = 0
3138 3153 buffer3 = 0
3139 3154
3140 3155 def __init__(self):
3141 3156 Operation.__init__(self)
3142 3157 self.i=0
3143 3158 self.isConfig = False
3144 3159
3145 3160
3146 3161 def setup(self,nChan,nProf,nHei,nBlocks):
3147 3162 self.__dataReady = False
3148 3163 self.bloques = numpy.zeros([2, nProf, nHei,nBlocks], dtype= complex)
3149 3164 self.bloque0 = numpy.zeros([nChan, nProf, nHei, nBlocks])
3150 3165
3151 3166 def __calculateMoments(self,oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
3152 3167
3153 3168 if (nicoh is None): nicoh = 1
3154 3169 if (graph is None): graph = 0
3155 3170 if (smooth is None): smooth = 0
3156 3171 elif (smooth < 3): smooth = 0
3157 3172
3158 3173 if (type1 is None): type1 = 0
3159 3174 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
3160 3175 if (snrth is None): snrth = -3
3161 3176 if (dc is None): dc = 0
3162 3177 if (aliasing is None): aliasing = 0
3163 3178 if (oldfd is None): oldfd = 0
3164 3179 if (wwauto is None): wwauto = 0
3165 3180
3166 3181 if (n0 < 1.e-20): n0 = 1.e-20
3167 3182
3168 3183 freq = oldfreq
3169 3184 vec_power = numpy.zeros(oldspec.shape[1])
3170 3185 vec_fd = numpy.zeros(oldspec.shape[1])
3171 3186 vec_w = numpy.zeros(oldspec.shape[1])
3172 3187 vec_snr = numpy.zeros(oldspec.shape[1])
3173 3188
3174 3189 oldspec = numpy.ma.masked_invalid(oldspec)
3175 3190
3176 3191 for ind in range(oldspec.shape[1]):
3177 3192
3178 3193 spec = oldspec[:,ind]
3179 3194 aux = spec*fwindow
3180 3195 max_spec = aux.max()
3181 3196 m = list(aux).index(max_spec)
3182 3197
3183 3198 #Smooth
3184 3199 if (smooth == 0): spec2 = spec
3185 3200 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
3186 3201
3187 3202 # Moments computation
3188 3203 bb = spec2[list(range(m,spec2.size))]
3189 3204 bb = (bb<n0).nonzero()
3190 3205 bb = bb[0]
3191 3206
3192 3207 ss = spec2[list(range(0,m + 1))]
3193 3208 ss = (ss<n0).nonzero()
3194 3209 ss = ss[0]
3195 3210
3196 3211 if (bb.size == 0):
3197 3212 bb0 = spec.size - 1 - m
3198 3213 else:
3199 3214 bb0 = bb[0] - 1
3200 3215 if (bb0 < 0):
3201 3216 bb0 = 0
3202 3217
3203 3218 if (ss.size == 0): ss1 = 1
3204 3219 else: ss1 = max(ss) + 1
3205 3220
3206 3221 if (ss1 > m): ss1 = m
3207 3222
3208 3223 valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1
3209 3224 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
3210 3225 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
3211 3226 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
3212 3227 snr = (spec2.mean()-n0)/n0
3213 3228
3214 3229 if (snr < 1.e-20) :
3215 3230 snr = 1.e-20
3216 3231
3217 3232 vec_power[ind] = power
3218 3233 vec_fd[ind] = fd
3219 3234 vec_w[ind] = w
3220 3235 vec_snr[ind] = snr
3221 3236
3222 3237 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
3223 3238 return moments
3224 3239
3225 3240 def __DiffCoherent(self, spectra, cspectra, dataOut, noise, snrth, coh_th, hei_th):
3226 3241
3227 3242 nProf = dataOut.nProfiles
3228 3243 heights = dataOut.heightList
3229 3244 nHei = len(heights)
3230 3245 channels = dataOut.channelList
3231 3246 nChan = len(channels)
3232 3247 crosspairs = dataOut.groupList
3233 3248 nPairs = len(crosspairs)
3234 3249 #Separate incoherent from coherent spectra, SNR > 20 dB
3235 3250 snr_th = 10**(snrth/10.0)
3236 3251 my_incoh_spectra = numpy.zeros([nChan, nProf,nHei], dtype='float')
3237 3252 my_incoh_cspectra = numpy.zeros([nPairs,nProf, nHei], dtype='complex')
3238 3253 my_incoh_aver = numpy.zeros([nChan, nHei])
3239 3254 my_coh_aver = numpy.zeros([nChan, nHei])
3240 3255
3241 3256 coh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
3242 3257 coh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
3243 3258 coh_aver = numpy.zeros([nChan, nHei])
3244 3259
3245 3260 incoh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
3246 3261 incoh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
3247 3262 incoh_aver = numpy.zeros([nChan, nHei])
3248 3263 power = numpy.sum(spectra, axis=1)
3249 3264
3250 3265 if coh_th == None : coh_th = numpy.array([0.75,0.65,0.15]) # 0.65
3251 3266 if hei_th == None : hei_th = numpy.array([60,300,650])
3252 3267 for ic in range(nPairs):
3253 3268 pair = crosspairs[ic]
3254 3269 #If the SNR is greater than the SNR threshold, the data are taken as coherent
3255 3270 s_n0 = power[pair[0],:]/noise[pair[0]]
3256 3271 s_n1 = power[pair[1],:]/noise[pair[1]]
3257 3272 valid1 =(s_n0>=snr_th).nonzero()
3258 3273 valid2 = (s_n1>=snr_th).nonzero()
3259 3274 valid1 = numpy.array(valid1[0])
3260 3275 valid2 = numpy.array(valid2[0])
3261 3276 valid = valid1
3262 3277 for iv in range(len(valid2)):
3263 3278 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3264 3279 if len(indv[0]) == 0 :
3265 3280 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
3266 3281 if len(valid)>0:
3267 3282 my_coh_aver[pair[0],valid]=1
3268 3283 my_coh_aver[pair[1],valid]=1
3269 3284 # If the coherence is greater than the coherence threshold, the data are taken as coherent
3270 3285 coh = numpy.squeeze(numpy.nansum(cspectra[ic,:,:], axis=0)/numpy.sqrt(numpy.nansum(spectra[pair[0],:,:], axis=0)*numpy.nansum(spectra[pair[1],:,:], axis=0)))
3271 3286 for ih in range(len(hei_th)):
3272 3287 hvalid = (heights>hei_th[ih]).nonzero()
3273 3288 hvalid = hvalid[0]
3274 3289 if len(hvalid)>0:
3275 3290 valid = (numpy.absolute(coh[hvalid])>coh_th[ih]).nonzero()
3276 3291 valid = valid[0]
3277 3292 if len(valid)>0:
3278 3293 my_coh_aver[pair[0],hvalid[valid]] =1
3279 3294 my_coh_aver[pair[1],hvalid[valid]] =1
3280 3295
3281 3296 coh_echoes = (my_coh_aver[pair[0],:] == 1).nonzero()
3282 3297 incoh_echoes = (my_coh_aver[pair[0],:] != 1).nonzero()
3283 3298 incoh_echoes = incoh_echoes[0]
3284 3299 if len(incoh_echoes) > 0:
3285 3300 my_incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
3286 3301 my_incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
3287 3302 my_incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
3288 3303 my_incoh_aver[pair[0],incoh_echoes] = 1
3289 3304 my_incoh_aver[pair[1],incoh_echoes] = 1
3290 3305
3291 3306
3292 3307 for ic in range(nPairs):
3293 3308 pair = crosspairs[ic]
3294 3309
3295 3310 valid1 =(my_coh_aver[pair[0],:]==1 ).nonzero()
3296 3311 valid2 = (my_coh_aver[pair[1],:]==1).nonzero()
3297 3312 valid1 = numpy.array(valid1[0])
3298 3313 valid2 = numpy.array(valid2[0])
3299 3314 valid = valid1
3300 3315
3301 3316 for iv in range(len(valid2)):
3302 3317
3303 3318 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3304 3319 if len(indv[0]) == 0 :
3305 3320 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
3306 3321 valid1 =(my_coh_aver[pair[0],:] !=1 ).nonzero()
3307 3322 valid2 = (my_coh_aver[pair[1],:] !=1).nonzero()
3308 3323 valid1 = numpy.array(valid1[0])
3309 3324 valid2 = numpy.array(valid2[0])
3310 3325 incoh_echoes = valid1
3311 3326 for iv in range(len(valid2)):
3312 3327
3313 3328 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3314 3329 if len(indv[0]) == 0 :
3315 3330 incoh_echoes = numpy.concatenate(( incoh_echoes,valid2[iv]), axis=None)
3316 3331
3317 3332 if len(valid)>0:
3318 3333 coh_spectra[pair[0],:,valid] = spectra[pair[0],:,valid]
3319 3334 coh_spectra[pair[1],:,valid] = spectra[pair[1],:,valid]
3320 3335 coh_cspectra[ic,:,valid] = cspectra[ic,:,valid]
3321 3336 coh_aver[pair[0],valid]=1
3322 3337 coh_aver[pair[1],valid]=1
3323 3338 if len(incoh_echoes)>0:
3324 3339 incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
3325 3340 incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
3326 3341 incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
3327 3342 incoh_aver[pair[0],incoh_echoes]=1
3328 3343 incoh_aver[pair[1],incoh_echoes]=1
3329 3344 return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver
3330 3345
3331 3346
3332 3347 def __CleanCoherent(self,snrth, spectra, cspectra, coh_aver,dataOut, noise,clean_coh_echoes,index):
3333 3348
3334 3349 nProf = dataOut.nProfiles
3335 3350 heights = dataOut.heightList
3336 3351 nHei = len(heights)
3337 3352 channels = dataOut.channelList
3338 3353 nChan = len(channels)
3339 3354 crosspairs = dataOut.groupList
3340 3355 nPairs = len(crosspairs)
3341 3356
3342 3357 absc = dataOut.abscissaList[:-1]
3343 3358 data_param = numpy.zeros((nChan, 4, spectra.shape[2]))
3344 3359 clean_coh_spectra = spectra.copy()
3345 3360 clean_coh_cspectra = cspectra.copy()
3346 3361 clean_coh_aver = coh_aver.copy()
3347 3362
3348 3363 spwd_th=[10,6] #spwd_th[0] --> For satellites ; spwd_th[1] --> For special events like SUN.
3349 3364 coh_th = 0.75
3350 3365
3351 3366 rtime0 = [6,18] # period without ESF
3352 3367 rtime1 = [10.5,13.5] # period with high coherence and high (expected) spectral width: SUN.
3353 3368
3354 3369 time = index*5./60 # based on 5-minute processing intervals
3355 3370 if clean_coh_echoes == 1 :
3356 3371 for ind in range(nChan):
3357 3372 data_param[ind,:,:] = self.__calculateMoments( spectra[ind,:,:] , absc , noise[ind] )
3358 3373 spwd = data_param[:,3]
3359 3374 # SPECB_JULIA,header=anal_header,jspectra=spectra,vel=velocities,hei=heights, num_aver=1, mode_fit=0,smoothing=smoothing,jvelr=velr,jspwd=spwd,jsnr=snr,jnoise=noise,jstdvnoise=stdvnoise
3360 3375 # to obtain spwd
3361 3376 for ic in range(nPairs):
3362 3377 pair = crosspairs[ic]
3363 3378 coh = numpy.squeeze(numpy.sum(cspectra[ic,:,:], axis=1)/numpy.sqrt(numpy.sum(spectra[pair[0],:,:], axis=1)*numpy.sum(spectra[pair[1],:,:], axis=1)))
3364 3379 for ih in range(nHei) :
3365 3380 # Considering heights higher than 200km in order to avoid removing phenomena like EEJ.
3366 3381 if heights[ih] >= 200 and coh_aver[pair[0],ih] == 1 and coh_aver[pair[1],ih] == 1 :
3367 3382 # Checking coherence
3368 3383 if (numpy.abs(coh[ih]) <= coh_th) or (time >= rtime0[0] and time <= rtime0[1]) :
3369 3384 # Checking spectral widths
3370 3385 if (spwd[pair[0],ih] > spwd_th[0]) or (spwd[pair[1],ih] > spwd_th[0]) :
3371 3386 # satellite
3372 3387 clean_coh_spectra[pair,ih,:] = 0.0
3373 3388 clean_coh_cspectra[ic,ih,:] = 0.0
3374 3389 clean_coh_aver[pair,ih] = 0
3375 3390 else :
3376 3391 if ((spwd[pair[0],ih] < spwd_th[1]) or (spwd[pair[1],ih] < spwd_th[1])) :
3377 3392 # Special event such as the sun.
3378 3393 clean_coh_spectra[pair,ih,:] = 0.0
3379 3394 clean_coh_cspectra[ic,ih,:] = 0.0
3380 3395 clean_coh_aver[pair,ih] = 0
3381 3396
3382 3397 return clean_coh_spectra, clean_coh_cspectra, clean_coh_aver
3383 3398
3384 3399 def CleanRayleigh(self,dataOut,spectra,cspectra,save_drifts):
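# Outlier rejection across the buffered blocks: for every pair/frequency/height the
# distribution of 10*log10(|cross-spectrum|) is histogrammed and fitted with a
# Gaussian (fit_func); samples farther than factor_stdv standard deviations from the
# fitted mode are treated as coherent (e.g. satellite) echoes, removed from the
# incoherent averages and accumulated separately in sat_spectra / sat_cspectra.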
3385 3400
3386 3401 rfunc = cspectra.copy()
3387 3402 n_funct = len(rfunc[0,:,0,0])
3388 3403 val_spc = spectra*0.0
3389 3404 val_cspc = cspectra*0.0
3390 3405 in_sat_spectra = spectra.copy()
3391 3406 in_sat_cspectra = cspectra.copy()
3392 3407
3393 3408 min_hei = 200
3394 3409 nProf = dataOut.nProfiles
3395 3410 heights = dataOut.heightList
3396 3411 nHei = len(heights)
3397 3412 channels = dataOut.channelList
3398 3413 nChan = len(channels)
3399 3414 crosspairs = dataOut.groupList
3400 3415 nPairs = len(crosspairs)
3401 3416 hval=(heights >= min_hei).nonzero()
3402 3417 ih=hval[0]
3403 3418 for ih in range(hval[0][0],nHei):
3404 3419 for ifreq in range(nProf):
3405 3420 for ii in range(n_funct):
3406 3421
3407 3422 func2clean = 10*numpy.log10(numpy.absolute(rfunc[:,ii,ifreq,ih]))
3408 3423 val = (numpy.isfinite(func2clean)==True).nonzero()
3409 3424 if len(val)>0:
3410 3425 min_val = numpy.around(numpy.amin(func2clean)-2) #> (-40)
3411 3426 if min_val <= -40 : min_val = -40
3412 3427 max_val = numpy.around(numpy.amax(func2clean)+2) #< 200
3413 3428 if max_val >= 200 : max_val = 200
3414 3429 step = 1
3415 3430 #Getting bins and the histogram
3416 3431 x_dist = min_val + numpy.arange(1 + ((max_val-(min_val))/step))*step
3417 3432 y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step))
3418 3433 mean = numpy.sum(x_dist * y_dist) / numpy.sum(y_dist)
3419 3434 sigma = numpy.sqrt(numpy.sum(y_dist * (x_dist - mean)**2) / numpy.sum(y_dist))
3420 3435 parg = [numpy.amax(y_dist),mean,sigma]
3421 3436 try :
3422 3437 gauss_fit, covariance = curve_fit(fit_func, x_dist, y_dist,p0=parg)
3423 3438 mode = gauss_fit[1]
3424 3439 stdv = gauss_fit[2]
3425 3440 except:
3426 3441 mode = mean
3427 3442 stdv = sigma
3428 3443
3429 3444 #Removing echoes farther than factor_stdv*stdv from the mode
3430 3445 factor_stdv = 2.5
3431 3446 noval = (abs(func2clean - mode)>=(factor_stdv*stdv)).nonzero()
3432 3447
3433 3448 if len(noval[0]) > 0:
3434 3449 novall = ((func2clean - mode) >= (factor_stdv*stdv)).nonzero()
3435 3450 cross_pairs = crosspairs[ii]
3436 3451 #Getting coherent echoes which are removed.
3437 3452 if len(novall[0]) > 0:
3438 3453 val_spc[novall[0],cross_pairs[0],ifreq,ih] = 1
3439 3454 val_spc[novall[0],cross_pairs[1],ifreq,ih] = 1
3440 3455 val_cspc[novall[0],ii,ifreq,ih] = 1
3441 3456 #Removing coherent from ISR data
3442 3457 spectra[noval,cross_pairs[0],ifreq,ih] = numpy.nan
3443 3458 spectra[noval,cross_pairs[1],ifreq,ih] = numpy.nan
3444 3459 cspectra[noval,ii,ifreq,ih] = numpy.nan
3445 3460
3446 3461 #Getting average of the spectra and cross-spectra from incoherent echoes.
3447 3462 out_spectra = numpy.zeros([nChan,nProf,nHei], dtype=float) #+numpy.nan
3448 3463 out_cspectra = numpy.zeros([nPairs,nProf,nHei], dtype=complex) #+numpy.nan
3449 3464 for ih in range(nHei):
3450 3465 for ifreq in range(nProf):
3451 3466 for ich in range(nChan):
3452 3467 tmp = spectra[:,ich,ifreq,ih]
3453 3468 valid = (numpy.isfinite(tmp[:])==True).nonzero()
3454 3469 if len(valid[0]) >0 :
3455 3470 out_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3456 3471 for icr in range(nPairs):
3457 3472 tmp = numpy.squeeze(cspectra[:,icr,ifreq,ih])
3458 3473 valid = (numpy.isfinite(tmp)==True).nonzero()
3459 3474 if len(valid[0]) > 0:
3460 3475 out_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3461 3476 #Removing fake coherent echoes (at least 4 points around the point)
3462 3477 val_spectra = numpy.sum(val_spc,0)
3463 3478 val_cspectra = numpy.sum(val_cspc,0)
3464 3479
3465 3480 val_spectra = self.REM_ISOLATED_POINTS(val_spectra,4)
3466 3481 val_cspectra = self.REM_ISOLATED_POINTS(val_cspectra,4)
3467 3482
3468 3483 for i in range(nChan):
3469 3484 for j in range(nProf):
3470 3485 for k in range(nHei):
3471 3486 if numpy.isfinite(val_spectra[i,j,k]) and val_spectra[i,j,k] < 1 :
3472 3487 val_spc[:,i,j,k] = 0.0
3473 3488 for i in range(nPairs):
3474 3489 for j in range(nProf):
3475 3490 for k in range(nHei):
3476 3491 if numpy.isfinite(val_cspectra[i,j,k]) and val_cspectra[i,j,k] < 1 :
3477 3492 val_cspc[:,i,j,k] = 0.0
3478 3493
3479 3494 tmp_sat_spectra = spectra.copy()
3480 3495 tmp_sat_spectra = tmp_sat_spectra*numpy.nan
3481 3496 tmp_sat_cspectra = cspectra.copy()
3482 3497 tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan
3483 3498 val = (val_spc > 0).nonzero()
3484 3499 if len(val[0]) > 0:
3485 3500 tmp_sat_spectra[val] = in_sat_spectra[val]
3486 3501
3487 3502 val = (val_cspc > 0).nonzero()
3488 3503 if len(val[0]) > 0:
3489 3504 tmp_sat_cspectra[val] = in_sat_cspectra[val]
3490 3505
3491 3506 #Getting average of the spectra and cross-spectra from incoherent echoes.
3492 3507 sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
3493 3508 sat_cspectra = numpy.zeros((nPairs,nProf,nHei), dtype=complex)
3494 3509 for ih in range(nHei):
3495 3510 for ifreq in range(nProf):
3496 3511 for ich in range(nChan):
3497 3512 tmp = numpy.squeeze(tmp_sat_spectra[:,ich,ifreq,ih])
3498 3513 valid = (numpy.isfinite(tmp)).nonzero()
3499 3514 if len(valid[0]) > 0:
3500 3515 sat_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3501 3516
3502 3517 for icr in range(nPairs):
3503 3518 tmp = numpy.squeeze(tmp_sat_cspectra[:,icr,ifreq,ih])
3504 3519 valid = (numpy.isfinite(tmp)).nonzero()
3505 3520 if len(valid[0]) > 0:
3506 3521 sat_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3507 3522 return out_spectra, out_cspectra,sat_spectra,sat_cspectra
3508 3523 def REM_ISOLATED_POINTS(self,array,rth):
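# Removes isolated detections: for each 2-D (profile x height) slice, any point with
# fewer than 4 valid points (itself included) within a radius of rth samples is
# discarded; slices left with fewer than 4 valid points are zeroed entirely.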
3509 3524 if rth == None : rth = 4
3510 3525 num_prof = len(array[0,:,0])
3511 3526 num_hei = len(array[0,0,:])
3512 3527 n2d = len(array[:,0,0])
3513 3528
3514 3529 for ii in range(n2d) :
3515 3530 tmp = array[ii,:,:]
3516 3531 tmp = numpy.reshape(tmp,num_prof*num_hei)
3517 3532 indxs1 = (numpy.isfinite(tmp)==True).nonzero()
3518 3533 indxs2 = (tmp > 0).nonzero()
3519 3534 indxs1 = (indxs1[0])
3520 3535 indxs2 = indxs2[0]
3521 3536 indxs = None
3522 3537 for iv in range(len(indxs2)):
3523 3538 indv = numpy.array((indxs1 == indxs2[iv]).nonzero())
3524 3539 if len(indv[0]) > 0 :
3525 3540 indxs = numpy.concatenate((indxs,indxs2[iv]), axis=None)
3526 3541 indxs = indxs[1:]
3527 3542 if len(indxs) < 4 :
3528 3543 array[ii,:,:] = 0.
3529 3544 return array
3530 3545
3531 3546 xpos = numpy.mod(indxs ,num_hei)
3532 3547 ypos = (indxs / num_hei)
3533 3548 sx = numpy.argsort(xpos) # Ordering respect to "x" (time)
3534 3549 xpos = xpos[sx]
3535 3550 ypos = ypos[sx]
3536 3551 # *********************************** Cleaning isolated points **********************************
3537 3552 ic = 0
3538 3553 while True :
3539 3554 r = numpy.sqrt(list(numpy.power((xpos[ic]-xpos),2)+ numpy.power((ypos[ic]-ypos),2)))
3540 3555 no_coh1 = (numpy.isfinite(r)==True).nonzero()
3541 3556 no_coh2 = (r <= rth).nonzero()
3542 3557 no_coh1 = numpy.array(no_coh1[0])
3543 3558 no_coh2 = numpy.array(no_coh2[0])
3544 3559 no_coh = None
3545 3560 for iv in range(len(no_coh2)):
3546 3561 indv = numpy.array((no_coh1 == no_coh2[iv]).nonzero())
3547 3562 if len(indv[0]) > 0 :
3548 3563 no_coh = numpy.concatenate((no_coh,no_coh2[iv]), axis=None)
3549 3564 no_coh = no_coh[1:]
3550 3565 if len(no_coh) < 4 :
3551 3566 xpos[ic] = numpy.nan
3552 3567 ypos[ic] = numpy.nan
3553 3568
3554 3569 ic = ic + 1
3555 3570 if (ic == len(indxs)) :
3556 3571 break
3557 3572 indxs = (numpy.isfinite(list(xpos))==True).nonzero()
3558 3573 if len(indxs[0]) < 4 :
3559 3574 array[ii,:,:] = 0.
3560 3575 return array
3561 3576
3562 3577 xpos = xpos[indxs[0]]
3563 3578 ypos = ypos[indxs[0]]
3564 3579 for i in range(0,len(ypos)):
3565 3580 ypos[i]=int(ypos[i])
3566 3581 junk = tmp
3567 3582 tmp = junk*0.0
3568 3583
3569 3584 tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))]
3570 3585 array[ii,:,:] = numpy.reshape(tmp,(num_prof,num_hei))
3571 3586 return array
3572 3587
3573 3588 def moments(self,doppler,yarray,npoints):
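# First and second moments of a noise-subtracted Doppler spectrum: the spectrum is
# rolled so its peak sits at the centre, fmom is the power-weighted mean Doppler
# (corrected for the applied shift) and the returned width is the square root of the
# power-weighted mean of doppler**2.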
3574 3589 ytemp = yarray
3575 3590 val = (ytemp > 0).nonzero()
3576 3591 val = val[0]
3577 3592 if len(val) == 0 : val = range(npoints-1)
3578 3593
3579 3594 ynew = 0.5*(ytemp[val[0]]+ytemp[val[len(val)-1]])
3580 3595 ytemp[len(ytemp):] = [ynew]
3581 3596
3582 3597 index = 0
3583 3598 index = numpy.argmax(ytemp)
3584 3599 ytemp = numpy.roll(ytemp,int(npoints/2)-1-index)
3585 3600 ytemp = ytemp[0:npoints-1]
3586 3601
3587 3602 fmom = numpy.sum(doppler*ytemp)/numpy.sum(ytemp)+(index-(npoints/2-1))*numpy.abs(doppler[1]-doppler[0])
3588 3603 smom = numpy.sum(doppler*doppler*ytemp)/numpy.sum(ytemp)
3589 3604 return [fmom,numpy.sqrt(smom)]
3590 3605
3591 3606
3592 3607
3593 3608
3594 3609
3595 3610 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None, filec=None,coh_th=None, hei_th=None,taver=None,proc=None,nhei=None,nprofs=None,ipp=None,channelList=None):
3596 3611 if not numpy.any(proc):
3597 3612 nChannels = dataOut.nChannels
3598 3613 nHeights= dataOut.heightList.size
3599 3614 nProf = dataOut.nProfiles
3600 3615 if numpy.any(taver): taver=int(taver)
3601 3616 else : taver = 5
3602 3617 tini=time.localtime(dataOut.utctime)
3603 3618 if (tini.tm_min % taver) == 0 and (tini.tm_sec < 5 and self.fint==0):
3604 3619 self.index = 0
3605 3620 jspc = self.buffer
3606 3621 jcspc = self.buffer2
3607 3622 jnoise = self.buffer3
3608 3623 self.buffer = dataOut.data_spc
3609 3624 self.buffer2 = dataOut.data_cspc
3610 3625 self.buffer3 = dataOut.noise
3611 3626 self.fint = 1
3612 3627 if numpy.any(jspc) :
3613 3628 jspc= numpy.reshape(jspc,(int(len(jspc)/nChannels),nChannels,nProf,nHeights))
3614 3629 jcspc= numpy.reshape(jcspc,(int(len(jcspc)/int(nChannels/2)),int(nChannels/2),nProf,nHeights))
3615 3630 jnoise= numpy.reshape(jnoise,(int(len(jnoise)/nChannels),nChannels))
3616 3631 else:
3617 3632 dataOut.flagNoData = True
3618 3633 return dataOut
3619 3634 else :
3620 3635 if (tini.tm_min % taver) == 0 : self.fint = 1
3621 3636 else : self.fint = 0
3622 3637 self.index += 1
3623 3638 if numpy.any(self.buffer):
3624 3639 self.buffer = numpy.concatenate((self.buffer,dataOut.data_spc), axis=0)
3625 3640 self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0)
3626 3641 self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0)
3627 3642 else:
3628 3643 self.buffer = dataOut.data_spc
3629 3644 self.buffer2 = dataOut.data_cspc
3630 3645 self.buffer3 = dataOut.noise
3631 3646 dataOut.flagNoData = True
3632 3647 return dataOut
3633 3648 if path != None:
3634 3649 sys.path.append(path)
3635 3650 self.library = importlib.import_module(file)
3636 3651 if filec != None:
3637 3652 self.weightf = importlib.import_module(filec)
3638 3653
3639 3654 #To be inserted as a parameter
3640 3655 groupArray = numpy.array(groupList)
3641 3656 #groupArray = numpy.array([[0,1],[2,3]])
3642 3657 dataOut.groupList = groupArray
3643 3658 nGroups = groupArray.shape[0]
3644 3659 nChannels = dataOut.nChannels
3645 3660 nHeights = dataOut.heightList.size
3646 3661
3647 3662 #Parameters Array
3648 3663 dataOut.data_param = None
3649 3664 dataOut.data_paramC = None
3650 3665 dataOut.clean_num_aver = None
3651 3666 dataOut.coh_num_aver = None
3652 3667 dataOut.tmp_spectra_i = None
3653 3668 dataOut.tmp_cspectra_i = None
3654 3669 dataOut.tmp_spectra_c = None
3655 3670 dataOut.tmp_cspectra_c = None
3656 3671 dataOut.index = None
3657 3672
3658 3673 #Set constants
3659 3674 constants = self.library.setConstants(dataOut)
3660 3675 dataOut.constants = constants
3661 3676 M = dataOut.normFactor
3662 3677 N = dataOut.nFFTPoints
3663 3678 ippSeconds = dataOut.ippSeconds
3664 3679 K = dataOut.nIncohInt
3665 3680 pairsArray = numpy.array(dataOut.pairsList)
3666 3681 snrth= 20
3667 3682 spectra = dataOut.data_spc
3668 3683 cspectra = dataOut.data_cspc
3669 3684 nProf = dataOut.nProfiles
3670 3685 heights = dataOut.heightList
3671 3686 nHei = len(heights)
3672 3687 channels = dataOut.channelList
3673 3688 nChan = len(channels)
3674 3689 nIncohInt = dataOut.nIncohInt
3675 3690 crosspairs = dataOut.groupList
3676 3691 noise = dataOut.noise
3677 3692 jnoise = jnoise/N
3678 3693 noise = numpy.nansum(jnoise,axis=0)#/len(jnoise)
3679 3694 power = numpy.sum(spectra, axis=1)
3680 3695 nPairs = len(crosspairs)
3681 3696 absc = dataOut.abscissaList[:-1]
3682 3697
3683 3698 if not self.isConfig:
3684 3699 self.isConfig = True
3685 3700
3686 3701 index = tini.tm_hour*12+tini.tm_min/taver
3687 3702 dataOut.index= index
3688 3703 jspc = jspc/N/N
3689 3704 jcspc = jcspc/N/N
3690 3705 tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2)
3691 3706 jspectra = tmp_spectra*len(jspc[:,0,0,0])
3692 3707 jcspectra = tmp_cspectra*len(jspc[:,0,0,0])
3693 3708 my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, snrth,coh_th, hei_th)
3694 3709 clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index)
3695 3710 dataOut.data_spc = incoh_spectra
3696 3711 dataOut.data_cspc = incoh_cspectra
3697 3712 clean_num_aver = incoh_aver*len(jspc[:,0,0,0])
3698 3713 coh_num_aver = clean_coh_aver*len(jspc[:,0,0,0])
3699 3714 dataOut.clean_num_aver = clean_num_aver
3700 3715 dataOut.coh_num_aver = coh_num_aver
3701 3716 dataOut.tmp_spectra_i = incoh_spectra
3702 3717 dataOut.tmp_cspectra_i = incoh_cspectra
3703 3718 dataOut.tmp_spectra_c = clean_coh_spectra
3704 3719 dataOut.tmp_cspectra_c = clean_coh_cspectra
3705 3720 #List of possible combinations
3706 3721 listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2)
3707 3722 indCross = numpy.zeros(len(list(listComb)), dtype = 'int')
3708 3723 if getSNR:
3709 3724 listChannels = groupArray.reshape((groupArray.size))
3710 3725 listChannels.sort()
3711 3726 dataOut.data_SNR = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise[listChannels])
3712 3727 else:
3713 3728 clean_num_aver = dataOut.clean_num_aver
3714 3729 coh_num_aver = dataOut.coh_num_aver
3715 3730 dataOut.data_spc = dataOut.tmp_spectra_i
3716 3731 dataOut.data_cspc = dataOut.tmp_cspectra_i
3717 3732 clean_coh_spectra = dataOut.tmp_spectra_c
3718 3733 clean_coh_cspectra = dataOut.tmp_cspectra_c
3719 3734 jspectra = dataOut.data_spc+clean_coh_spectra
3720 3735 nHeights = len(dataOut.heightList) # nhei
3721 3736 nProf = int(dataOut.nProfiles)
3722 3737 dataOut.nProfiles = nProf
3723 3738 dataOut.data_param = None
3724 3739 dataOut.data_paramC = None
3725 3740 dataOut.code = numpy.array([[-1.,-1.,1.],[1.,1.,-1.]])
3726 3741 #M=600
3727 3742 #N=200
3728 3743 dataOut.flagDecodeData=True
3729 3744 M = int(dataOut.normFactor)
3730 3745 N = int(dataOut.nFFTPoints)
3731 3746 dataOut.nFFTPoints = N
3732 3747 dataOut.nIncohInt= int(dataOut.nIncohInt)
3733 3748 dataOut.nProfiles = int(dataOut.nProfiles)
3734 3749 dataOut.nCohInt = int(dataOut.nCohInt)
3735 3750 #dataOut.nFFTPoints=nprofs
3736 3751 #dataOut.normFactor = nprofs
3737 3752 dataOut.channelList = channelList
3738 3753 #dataOut.ippFactor=1
3739 3754 #ipp = ipp/150*1.e-3
3740 3755 vmax = (300000000/49920000.0/2) / (dataOut.ippSeconds)
3741 3756 #dataOut.ippSeconds=ipp
3742 3757 absc = vmax*( numpy.arange(nProf,dtype='float')-nProf/2.)/nProf
3743 3758 if path != None:
3744 3759 sys.path.append(path)
3745 3760 self.library = importlib.import_module(file)
3746 3761 constants = self.library.setConstants(dataOut)
3747 3762 constants['M'] = M
3748 3763 dataOut.constants = constants
3749 3764
3750 3765 groupArray = numpy.array(groupList)
3751 3766 dataOut.groupList = groupArray
3752 3767 nGroups = groupArray.shape[0]
3753 3768 #List of possible combinations
3754 3769 listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2)
3755 3770 indCross = numpy.zeros(len(list(listComb)), dtype = 'int')
3756 3771 if dataOut.data_paramC is None:
3757 3772 dataOut.data_paramC = numpy.zeros((nGroups*4, nHeights,2))*numpy.nan
3758 3773 for i in range(nGroups):
3759 3774 coord = groupArray[i,:]
3760 3775 #Input data array
3761 3776 data = dataOut.data_spc[coord,:,:]/(M*N)
3762 3777 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
3763 3778
3764 3779 #Cross Spectra data array for Covariance Matrices
3765 3780 ind = 0
3766 3781 for pairs in listComb:
3767 3782 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
3768 3783 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
3769 3784 ind += 1
3770 3785 dataCross = dataOut.data_cspc[indCross,:,:]/(M*N)
3771 3786 dataCross = dataCross**2
3772 3787 nhei = nHeights
3773 3788 poweri = numpy.sum(dataOut.data_spc[:,1:nProf-0,:],axis=1)/clean_num_aver[:,:]
3774 3789 if i == 0 : my_noises = numpy.zeros(4,dtype=float)
3775 3790 n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(nProf-1)
3776 3791 n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(nProf-1)
3777 3792 n0 = n0i
3778 3793 n1= n1i
3779 3794 my_noises[2*i+0] = n0
3780 3795 my_noises[2*i+1] = n1
3781 3796 snrth = -15.0 # -4 -16 -25
3782 3797 snrth = 10**(snrth/10.0)
3783 3798 jvelr = numpy.zeros(nHeights, dtype = 'float')
3784 3799 hvalid = [0]
3785 3800 coh2 = abs(dataOut.data_cspc[i,1:nProf,:])**2/(dataOut.data_spc[0+i*2,1:nProf-0,:]*dataOut.data_spc[1+i*2,1:nProf-0,:])
3786 3801
3787 3802 for h in range(nHeights):
3788 3803 smooth = clean_num_aver[i+1,h]
3789 3804 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
3790 3805 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
3791 3806 signal0 = signalpn0-n0
3792 3807 signal1 = signalpn1-n1
3793 3808 snr0 = numpy.sum(signal0/n0)/(nProf-1)
3794 3809 snr1 = numpy.sum(signal1/n1)/(nProf-1)
3795 3810 gamma = coh2[:,h]
3796 3811 indxs = (numpy.isfinite(list(gamma))==True).nonzero()
3797 3812 if len(indxs) >0:
3798 3813 if numpy.nanmean(gamma) > 0.07:
3799 3814 maxp0 = numpy.argmax(signal0*gamma)
3800 3815 maxp1 = numpy.argmax(signal1*gamma)
3801 3816 #print('usa gamma',numpy.nanmean(gamma))
3802 3817 else:
3803 3818 maxp0 = numpy.argmax(signal0)
3804 3819 maxp1 = numpy.argmax(signal1)
3805 3820 jvelr[h] = (absc[maxp0]+absc[maxp1])/2.
3806 3821 else: jvelr[h] = absc[0]
3807 3822 if snr0 > 0.1 and snr1 > 0.1: hvalid = numpy.concatenate((hvalid,h), axis=None)
3808 3823 #print(maxp0,absc[maxp0],snr0,jvelr[h])
3809 3824
3810 3825 if len(hvalid)> 1: fd0 = numpy.median(jvelr[hvalid[1:]])*-1
3811 3826 else: fd0 = numpy.nan
3812 3827 for h in range(nHeights):
3813 3828 d = data[:,h]
3814 3829 smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:]
3815 3830 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
3816 3831 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
3817 3832 signal0 = signalpn0-n0
3818 3833 signal1 = signalpn1-n1
3819 3834 snr0 = numpy.sum(signal0/n0)/(nProf-1)
3820 3835 snr1 = numpy.sum(signal1/n1)/(nProf-1)
3821 3836 if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 :
3822 3837 #Covariance Matrix
3823 3838 D = numpy.diag(d**2)
3824 3839 ind = 0
3825 3840 for pairs in listComb:
3826 3841 #Coordinates in Covariance Matrix
3827 3842 x = pairs[0]
3828 3843 y = pairs[1]
3829 3844 #Channel Index
3830 3845 S12 = dataCross[ind,:,h]
3831 3846 D12 = numpy.diag(S12)
3832 3847 #Completing Covariance Matrix with Cross-Spectra
3833 3848 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
3834 3849 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
3835 3850 ind += 1
3836 3851 diagD = numpy.zeros(256)
3837 3852
3838 3853 try:
3839 3854 Dinv=numpy.linalg.inv(D)
3840 3855 L=numpy.linalg.cholesky(Dinv)
3841 3856 except:
3842 3857 Dinv = D*numpy.nan
3843 3858 L= D*numpy.nan
3844 3859 LT=L.T
3845 3860
3846 3861 dp = numpy.dot(LT,d)
3847 3862 #Initial values
3848 3863 data_spc = dataOut.data_spc[coord,:,h]
3849 3864 w = data_spc/data_spc
3850 3865 if filec != None:
3851 3866 w = self.weightf.weightfit(w,tini.tm_year,tini.tm_yday,index,h,i)
3852 3867 if (h>6)and(error1[3]<25):
3853 3868 p0 = dataOut.data_param[i,:,h-1].copy()
3854 3869 else:
3855 3870 p0 = numpy.array(self.library.initialValuesFunction(data_spc*w, constants))# sin el i(data_spc, constants, i)
3856 3871 p0[3] = fd0
3857 3872
3858 3873 if filec != None:
3859 3874 p0 = self.weightf.Vrfit(p0,tini.tm_year,tini.tm_yday,index,h,i)
3860 3875
3861 3876 try:
3862 3877 #Least Squares
3863 3878 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
3864 3879 #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
3865 3880 #Chi square error
3866 3881 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
3867 3882 #Error with Jacobian
3868 3883 error1 = self.library.errorFunction(minp,constants,LT)
3869 3884
3870 3885 except:
3871 3886 minp = p0*numpy.nan
3872 3887 error0 = numpy.nan
3873 3888 error1 = p0*numpy.nan
3874 3889 else :
3875 3890 data_spc = dataOut.data_spc[coord,:,h]
3876 3891 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))
3877 3892 minp = p0*numpy.nan
3878 3893 error0 = numpy.nan
3879 3894 error1 = p0*numpy.nan
3880 3895 if dataOut.data_param is None:
3881 3896 dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
3882 3897 dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
3883 3898 dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
3884 3899 dataOut.data_param[i,:,h] = minp
3885 3900
3886 3901 for ht in range(nHeights-1) :
3887 3902 smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam]
3888 3903 dataOut.data_paramC[4*i,ht,1] = smooth
3889 3904 signalpn0 = (clean_coh_spectra[i*2 ,1:(nProf-0),ht])/smooth #coh_spectra
3890 3905 signalpn1 = (clean_coh_spectra[i*2+1,1:(nProf-0),ht])/smooth
3891 3906 val0 = (signalpn0 > 0).nonzero()
3892 3907 val0 = val0[0]
3893 3908 if len(val0) == 0 : val0_npoints = nProf
3894 3909 else : val0_npoints = len(val0)
3895 3910
3896 3911 val1 = (signalpn1 > 0).nonzero()
3897 3912 val1 = val1[0]
3898 3913 if len(val1) == 0 : val1_npoints = nProf
3899 3914 else : val1_npoints = len(val1)
3900 3915
3901 3916 dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0
3902 3917 dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1
3903 3918
3904 3919 signal0 = (signalpn0-n0)
3905 3920 vali = (signal0 < 0).nonzero()
3906 3921 vali = vali[0]
3907 3922 if len(vali) > 0 : signal0[vali] = 0
3908 3923 signal1 = (signalpn1-n1)
3909 3924 vali = (signal1 < 0).nonzero()
3910 3925 vali = vali[0]
3911 3926 if len(vali) > 0 : signal1[vali] = 0
3912 3927 snr0 = numpy.sum(signal0/n0)/(nProf-1)
3913 3928 snr1 = numpy.sum(signal1/n1)/(nProf-1)
3914 3929 doppler = absc[1:]
3915 3930 if snr0 >= snrth and snr1 >= snrth and smooth :
3916 3931 signalpn0_n0 = signalpn0
3917 3932 signalpn0_n0[val0] = signalpn0[val0] - n0
3918 3933 mom0 = self.moments(doppler,signalpn0-n0,nProf)
3919 3934 signalpn1_n1 = signalpn1
3920 3935 signalpn1_n1[val1] = signalpn1[val1] - n1
3921 3936 mom1 = self.moments(doppler,signalpn1_n1,nProf)
3922 3937 dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2.
3923 3938 dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2.
3924 3939 dataOut.data_spc = jspectra
3925 3940 dataOut.spc_noise = my_noises*nProf*M
3926 3941 if numpy.any(proc): dataOut.spc_noise = my_noises*nProf*M
3927 3942 if getSNR:
3928 3943 listChannels = groupArray.reshape((groupArray.size))
3929 3944 listChannels.sort()
3930 3945
3931 3946 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], my_noises[listChannels])
3932 3947 return dataOut
3933 3948
3934 3949 def __residFunction(self, p, dp, LT, constants):
3935 3950
3936 3951 fm = self.library.modelFunction(p, constants)
3937 3952 fmp=numpy.dot(LT,fm)
3938 3953 return dp-fmp
3939 3954
3940 3955 def __getSNR(self, z, noise):
3941 3956
3942 3957 avg = numpy.average(z, axis=1)
3943 3958 SNR = (avg.T-noise)/noise
3944 3959 SNR = SNR.T
3945 3960 return SNR
3946 3961
3947 3962 def __chisq(self, p, chindex, hindex):
3948 3963 #similar to Resid but calculates CHI**2
3949 3964 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
3950 3965 dp=numpy.dot(LT,d)
3951 3966 fmp=numpy.dot(LT,fm)
3952 3967 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
3953 3968 return chisq
3954 3969
3955 3970 class WindProfiler(Operation):
3956 3971
3957 3972 __isConfig = False
3958 3973
3959 3974 __initime = None
3960 3975 __lastdatatime = None
3961 3976 __integrationtime = None
3962 3977
3963 3978 __buffer = None
3964 3979
3965 3980 __dataReady = False
3966 3981
3967 3982 __firstdata = None
3968 3983
3969 3984 n = None
3970 3985
3971 3986 def __init__(self):
3972 3987 Operation.__init__(self)
3973 3988
3974 3989 def __calculateCosDir(self, elev, azim):
3975 3990 zen = (90 - elev)*numpy.pi/180
3976 3991 azim = azim*numpy.pi/180
3977 3992 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
3978 3993 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
3979 3994
3980 3995 signX = numpy.sign(numpy.cos(azim))
3981 3996 signY = numpy.sign(numpy.sin(azim))
3982 3997
3983 3998 cosDirX = numpy.copysign(cosDirX, signX)
3984 3999 cosDirY = numpy.copysign(cosDirY, signY)
3985 4000 return cosDirX, cosDirY
3986 4001
3987 4002 def __calculateAngles(self, theta_x, theta_y, azimuth):
3988 4003
3989 4004 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
3990 4005 zenith_arr = numpy.arccos(dir_cosw)
3991 4006 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
3992 4007
3993 4008 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
3994 4009 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
3995 4010
3996 4011 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
3997 4012
3998 4013 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
3999 4014
4000 4015 if horOnly:
4001 4016 A = numpy.c_[dir_cosu,dir_cosv]
4002 4017 else:
4003 4018 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
4004 4019 A = numpy.asmatrix(A)
4005 4020 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
4006 4021
4007 4022 return A1
4008 4023
4009 4024 def __correctValues(self, heiRang, phi, velRadial, SNR):
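# Maps radial velocity and SNR measured along oblique beams onto a common vertical
# height grid: ranges are projected with cos(zenith), the grid of the most oblique
# beam is used (clipped below the first height of the most vertical one) and the
# values are interpolated with cubic splines.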
4010 4025 listPhi = phi.tolist()
4011 4026 maxid = listPhi.index(max(listPhi))
4012 4027 minid = listPhi.index(min(listPhi))
4013 4028
4014 4029 rango = list(range(len(phi)))
4015 4030
4016 4031 heiRang1 = heiRang*math.cos(phi[maxid])
4017 4032 heiRangAux = heiRang*math.cos(phi[minid])
4018 4033 indOut = (heiRang1 < heiRangAux[0]).nonzero()
4019 4034 heiRang1 = numpy.delete(heiRang1,indOut)
4020 4035
4021 4036 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
4022 4037 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
4023 4038
4024 4039 for i in rango:
4025 4040 x = heiRang*math.cos(phi[i])
4026 4041 y1 = velRadial[i,:]
4027 4042 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
4028 4043
4029 4044 x1 = heiRang1
4030 4045 y11 = f1(x1)
4031 4046
4032 4047 y2 = SNR[i,:]
4033 4048 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
4034 4049 y21 = f2(x1)
4035 4050
4036 4051 velRadial1[i,:] = y11
4037 4052 SNR1[i,:] = y21
4038 4053
4039 4054 return heiRang1, velRadial1, SNR1
4040 4055
4041 4056 def __calculateVelUVW(self, A, velRadial):
4042 4057
4043 4058 #Operacion Matricial
4044 4059 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
4045 4060 velUVW[:,:] = numpy.dot(A,velRadial)
4046 4061
4047 4062
4048 4063 return velUVW
4049 4064
4050 4065 def techniqueDBS(self, kwargs):
4051 4066 """
4052 4067 Function that implements Doppler Beam Swinging (DBS) technique.
4053 4068
4054 4069 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
4055 4070 Direction correction (if necessary), Ranges and SNR
4056 4071
4057 4072 Output: Winds estimation (Zonal, Meridional and Vertical)
4058 4073
4059 4074 Parameters affected: Winds, height range, SNR
4060 4075 """
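# The radial velocity of each beam relates to the wind vector through the beam
# direction cosines; __calculateMatA builds the least-squares pseudo-inverse
# (A^T A)^-1 A^T, so the winds follow from projecting the height-corrected radial
# velocities onto it.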
4061 4076 velRadial0 = kwargs['velRadial']
4062 4077 heiRang = kwargs['heightList']
4063 4078 SNR0 = kwargs['SNR']
4064 4079
4065 4080 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
4066 4081 theta_x = numpy.array(kwargs['dirCosx'])
4067 4082 theta_y = numpy.array(kwargs['dirCosy'])
4068 4083 else:
4069 4084 elev = numpy.array(kwargs['elevation'])
4070 4085 azim = numpy.array(kwargs['azimuth'])
4071 4086 theta_x, theta_y = self.__calculateCosDir(elev, azim)
4072 4087 azimuth = kwargs['correctAzimuth']
4073 4088 if 'horizontalOnly' in kwargs:
4074 4089 horizontalOnly = kwargs['horizontalOnly']
4075 4090 else: horizontalOnly = False
4076 4091 if 'correctFactor' in kwargs:
4077 4092 correctFactor = kwargs['correctFactor']
4078 4093 else: correctFactor = 1
4079 4094 if 'channelList' in kwargs:
4080 4095 channelList = kwargs['channelList']
4081 4096 if len(channelList) == 2:
4082 4097 horizontalOnly = True
4083 4098 arrayChannel = numpy.array(channelList)
4084 4099 velRadial0 = velRadial0[arrayChannel,:] #assumed intent: restrict the radial velocities to the selected channels (the original indexed an undefined 'param')
4085 4100 theta_x = theta_x[arrayChannel]
4086 4101 theta_y = theta_y[arrayChannel]
4087 4102
4088 4103 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4089 4104 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
4090 4105 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
4091 4106
4092 4107 #Compute the velocity components with DBS
4093 4108 winds = self.__calculateVelUVW(A,velRadial1)
4094 4109
4095 4110 return winds, heiRang1, SNR1
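
Assembling the kwargs dictionary that techniqueDBS expects can be sketched as follows. The shapes follow the code above (one row per beam/channel, one column per height), while the numeric values and the owning instance (here called windOp) are placeholders, not taken from this module.

import numpy as np

nBeams, nHeights = 3, 64
kwargs = {
    'velRadial': np.random.randn(nBeams, nHeights),        # radial velocity per beam and height
    'heightList': np.linspace(90.0, 180.0, nHeights),       # km
    'SNR': np.abs(np.random.randn(nBeams, nHeights)) + 1,   # linear SNR per beam and height
    'elevation': [75.0, 75.0, 90.0],                         # deg, one value per beam
    'azimuth': [0.0, 180.0, 0.0],                            # deg, one value per beam
    'correctAzimuth': 0.0,                                   # antenna azimuth correction (deg)
    'horizontalOnly': False,
    'correctFactor': 1,
}
# windOp is assumed to be an instance of the class that owns techniqueDBS
# winds, heiRang1, SNR1 = windOp.techniqueDBS(kwargs)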
4096 4111
4097 4112 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
4098 4113
4099 4114 nPairs = len(pairs_ccf)
4100 4115 posx = numpy.asarray(posx)
4101 4116 posy = numpy.asarray(posy)
4102 4117
4103 4118 #Inverse rotation to align with the azimuth
4104 4119 if azimuth is not None:
4105 4120 azimuth = azimuth*math.pi/180
4106 4121 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
4107 4122 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
4108 4123 else:
4109 4124 posx1 = posx
4110 4125 posy1 = posy
4111 4126
4112 4127 #Distance calculation
4113 4128 distx = numpy.zeros(nPairs)
4114 4129 disty = numpy.zeros(nPairs)
4115 4130 dist = numpy.zeros(nPairs)
4116 4131 ang = numpy.zeros(nPairs)
4117 4132
4118 4133 for i in range(nPairs):
4119 4134 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
4120 4135 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
4121 4136 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
4122 4137 ang[i] = numpy.arctan2(disty[i],distx[i])
4123 4138
4124 4139 return distx, disty, dist, ang
4125 4140 #Matrix calculation
4126 4141
4127 4142
4128 4143 def __calculateVelVer(self, phase, lagTRange, _lambda):
4129 4144
4130 4145 Ts = lagTRange[1] - lagTRange[0]
4131 4146 velW = -_lambda*phase/(4*math.pi*Ts)
4132 4147
4133 4148 return velW
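
A quick numeric check of the relation implemented in __calculateVelVer, velW = -lambda*phase/(4*pi*Ts): with an assumed 6 m wavelength (roughly 50 MHz), a 0.01 s lag spacing and a quarter-turn phase at the first lag, the vertical velocity is -75 m/s. All numbers are illustrative.

import numpy as np

_lambda = 6.0            # wavelength in m (~50 MHz), assumed value
Ts = 0.01                # lag spacing in s, assumed value
phase = np.pi / 2        # phase at the first lag, in rad

velW = -_lambda * phase / (4 * np.pi * Ts)
print(velW)              # -75.0 m/s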
4134 4149
4135 4150 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
4136 4151 nPairs = tau1.shape[0]
4137 4152 nHeights = tau1.shape[1]
4138 4153 vel = numpy.zeros((nPairs,3,nHeights))
4139 4154 dist1 = numpy.reshape(dist, (dist.size,1))
4140 4155
4141 4156 angCos = numpy.cos(ang)
4142 4157 angSin = numpy.sin(ang)
4143 4158
4144 4159 vel0 = dist1*tau1/(2*tau2**2)
4145 4160 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
4146 4161 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
4147 4162
4148 4163 ind = numpy.where(numpy.isinf(vel))
4149 4164 vel[ind] = numpy.nan
4150 4165
4151 4166 return vel
4152 4167
4153 4168 def techniqueSA(self, kwargs):
4154 4169
4155 4170 """
4156 4171 Function that implements Spaced Antenna (SA) technique.
4157 4172
4158 4173 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
4159 4174 Direction correction (if necessary), Ranges and SNR
4160 4175
4161 4176 Output: Winds estimation (Zonal, Meridional and Vertical)
4162 4177
4163 4178 Parameters affected: Winds
4164 4179 """
4165 4180 position_x = kwargs['positionX']
4166 4181 position_y = kwargs['positionY']
4167 4182 azimuth = kwargs['azimuth']
4168 4183
4169 4184 if 'correctFactor' in kwargs:
4170 4185 correctFactor = kwargs['correctFactor']
4171 4186 else:
4172 4187 correctFactor = 1
4173 4188
4174 4189 groupList = kwargs['groupList']
4175 4190 pairs_ccf = groupList[1]
4176 4191 tau = kwargs['tau']
4177 4192 _lambda = kwargs['_lambda']
4178 4193
4179 4194 #Cross Correlation pairs obtained
4180 4195
4181 4196 indtau = tau.shape[0]//2 #integer division so the slices below get integer indices
4182 4197 tau1 = tau[:indtau,:]
4183 4198 tau2 = tau[indtau:-1,:]
4184 4199 phase1 = tau[-1,:]
4185 4200
4186 4201 #---------------------------------------------------------------------
4187 4202 #Direct method
4188 4203 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
4189 4204 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
4190 4205 winds = numpy.nanmean(winds, axis=0) #scipy.stats.nanmean was removed from SciPy; numpy.nanmean is the equivalent here
4191 4206 #---------------------------------------------------------------------
4192 4207 #General method
4193 4208
4194 4209 #---------------------------------------------------------------------
4195 4210 winds[2,:] = self.__calculateVelVer(phase1, kwargs['lagTRange'], _lambda) #assumes the lag-time axis is passed as kwargs['lagTRange']; it was previously an undefined name
4196 4211 winds = correctFactor*winds
4197 4212 return winds
4198 4213
4199 4214 def __checkTime(self, currentTime, paramInterval, outputInterval):
4200 4215
4201 4216 dataTime = currentTime + paramInterval
4202 4217 deltaTime = dataTime - self.__initime
4203 4218
4204 4219 if deltaTime >= outputInterval or deltaTime < 0:
4205 4220 self.__dataReady = True
4206 4221 return
4207 4222
4208 4223 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
4209 4224 '''
4210 4225 Function that implements winds estimation technique with detected meteors.
4211 4226
4212 4227 Input: Detected meteors, minimum number of meteors required for wind estimation
4213 4228
4214 4229 Output: Winds estimation (Zonal and Meridional)
4215 4230
4216 4231 Parameters affected: Winds
4217 4232 '''
4218 4233 #Settings
4219 4234 nInt = (heightMax - heightMin)/2
4220 4235 nInt = int(nInt)
4221 4236 winds = numpy.zeros((2,nInt))*numpy.nan
4222 4237
4223 4238 #Filter errors
4224 4239 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
4225 4240 finalMeteor = arrayMeteor[error,:]
4226 4241
4227 4242 #Meteor Histogram
4228 4243 finalHeights = finalMeteor[:,2]
4229 4244 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
4230 4245 nMeteorsPerI = hist[0]
4231 4246 heightPerI = hist[1]
4232 4247
4233 4248 #Sort meteors by height
4234 4249 indSort = finalHeights.argsort()
4235 4250 finalMeteor2 = finalMeteor[indSort,:]
4236 4251
4237 4252 # Calculating winds
4238 4253 ind1 = 0
4239 4254 ind2 = 0
4240 4255
4241 4256 for i in range(nInt):
4242 4257 nMet = nMeteorsPerI[i]
4243 4258 ind1 = ind2
4244 4259 ind2 = ind1 + nMet
4245 4260
4246 4261 meteorAux = finalMeteor2[ind1:ind2,:]
4247 4262
4248 4263 if meteorAux.shape[0] >= meteorThresh:
4249 4264 vel = meteorAux[:, 6]
4250 4265 zen = meteorAux[:, 4]*numpy.pi/180
4251 4266 azim = meteorAux[:, 3]*numpy.pi/180
4252 4267
4253 4268 n = numpy.cos(zen)
4254 4269 l = numpy.sin(zen)*numpy.sin(azim)
4255 4270 m = numpy.sin(zen)*numpy.cos(azim)
4256 4271
4257 4272 A = numpy.vstack((l, m)).transpose()
4258 4273 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
4259 4274 windsAux = numpy.dot(A1, vel)
4260 4275
4261 4276 winds[0,i] = windsAux[0]
4262 4277 winds[1,i] = windsAux[1]
4263 4278
4264 4279 return winds, heightPerI[:-1]
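
Inside techniqueMeteors each height bin solves vel = l*u + m*v in the least-squares sense, with l = sin(zen)*sin(azim) and m = sin(zen)*cos(azim). The sketch below reproduces that normal-equation fit on synthetic meteor detections (all values illustrative) and recovers the zonal/meridional wind used to generate them.

import numpy as np

rng = np.random.default_rng(0)
nMet = 40
azim = rng.uniform(0, 2 * np.pi, nMet)      # meteor azimuths (rad)
zen = rng.uniform(0.2, 1.0, nMet)           # meteor zenith angles (rad)

l = np.sin(zen) * np.sin(azim)              # zonal direction cosine
m = np.sin(zen) * np.cos(azim)              # meridional direction cosine

u_true, v_true = 30.0, -10.0                # m/s, synthetic horizontal wind
vel = l * u_true + m * v_true + rng.normal(0, 0.5, nMet)   # noisy radial velocities

A = np.vstack((l, m)).T
A1 = np.dot(np.linalg.inv(np.dot(A.T, A)), A.T)   # same normal-equation form as the code above
winds = np.dot(A1, vel)
print(winds)                                # approximately [30, -10]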
4265 4280
4266 4281 def techniqueNSM_SA(self, **kwargs):
4267 4282 metArray = kwargs['metArray']
4268 4283 heightList = kwargs['heightList']
4269 4284 timeList = kwargs['timeList']
4270 4285
4271 4286 rx_location = kwargs['rx_location']
4272 4287 groupList = kwargs['groupList']
4273 4288 azimuth = kwargs['azimuth']
4274 4289 dfactor = kwargs['dfactor']
4275 4290 k = kwargs['k']
4276 4291
4277 4292 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
4278 4293 d = dist*dfactor
4279 4294 #Phase calculation
4280 4295 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
4281 4296
4282 4297 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
4283 4298
4284 4299 velEst = numpy.zeros((heightList.size,2))*numpy.nan
4285 4300 azimuth1 = azimuth1*numpy.pi/180
4286 4301
4287 4302 for i in range(heightList.size):
4288 4303 h = heightList[i]
4289 4304 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
4290 4305 metHeight = metArray1[indH,:]
4291 4306 if metHeight.shape[0] >= 2:
4292 4307 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
4293 4308 iazim = metHeight[:,1].astype(int)
4294 4309 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
4295 4310 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
4296 4311 A = numpy.asmatrix(A)
4297 4312 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
4298 4313 velHor = numpy.dot(A1,velAux)
4299 4314
4300 4315 velEst[i,:] = numpy.squeeze(velHor)
4301 4316 return velEst
4302 4317
4303 4318 def __getPhaseSlope(self, metArray, heightList, timeList):
4304 4319 meteorList = []
4305 4320 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
4306 4321 #Putting back together the meteor matrix
4307 4322 utctime = metArray[:,0]
4308 4323 uniqueTime = numpy.unique(utctime)
4309 4324
4310 4325 phaseDerThresh = 0.5
4311 4326 ippSeconds = timeList[1] - timeList[0]
4312 4327 sec = numpy.where(timeList>1)[0][0]
4313 4328 nPairs = metArray.shape[1] - 6
4314 4329 nHeights = len(heightList)
4315 4330
4316 4331 for t in uniqueTime:
4317 4332 metArray1 = metArray[utctime==t,:]
4318 4333 tmet = metArray1[:,1].astype(int)
4319 4334 hmet = metArray1[:,2].astype(int)
4320 4335
4321 4336 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
4322 4337 metPhase[:,:] = numpy.nan
4323 4338 metPhase[:,hmet,tmet] = metArray1[:,6:].T
4324 4339
4325 4340 #Delete short trails
4326 4341 metBool = ~numpy.isnan(metPhase[0,:,:])
4327 4342 heightVect = numpy.sum(metBool, axis = 1)
4328 4343 metBool[heightVect<sec,:] = False
4329 4344 metPhase[:,heightVect<sec,:] = numpy.nan
4330 4345
4331 4346 #Derivative
4332 4347 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
4333 4348 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
4334 4349 metPhase[phDerAux] = numpy.nan
4335 4350
4336 4351 #--------------------------METEOR DETECTION -----------------------------------------
4337 4352 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
4338 4353
4339 4354 for p in numpy.arange(nPairs):
4340 4355 phase = metPhase[p,:,:]
4341 4356 phDer = metDer[p,:,:]
4342 4357
4343 4358 for h in indMet:
4344 4359 height = heightList[h]
4345 4360 phase1 = phase[h,:] #82
4346 4361 phDer1 = phDer[h,:]
4347 4362
4348 4363 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
4349 4364
4350 4365 indValid = numpy.where(~numpy.isnan(phase1))[0]
4351 4366 initMet = indValid[0]
4352 4367 endMet = 0
4353 4368
4354 4369 for i in range(len(indValid)-1):
4355 4370
4356 4371 #Time difference
4357 4372 inow = indValid[i]
4358 4373 inext = indValid[i+1]
4359 4374 idiff = inext - inow
4360 4375 #Phase difference
4361 4376 phDiff = numpy.abs(phase1[inext] - phase1[inow])
4362 4377
4363 4378 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
4364 4379 sizeTrail = inow - initMet + 1
4365 4380 if sizeTrail>3*sec: #Discard trails shorter than 3*sec samples
4366 4381 x = numpy.arange(initMet,inow+1)*ippSeconds
4367 4382 y = phase1[initMet:inow+1]
4368 4383 ynnan = ~numpy.isnan(y)
4369 4384 x = x[ynnan]
4370 4385 y = y[ynnan]
4371 4386 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
4372 4387 ylin = x*slope + intercept
4373 4388 rsq = r_value**2
4374 4389 if rsq > 0.5:
4375 4390 vel = slope#*height*1000/(k*d)
4376 4391 estAux = numpy.array([t, p, height, vel, rsq]) #assumed intent: store the scalar time t of this meteor (utctime is the whole column)
4377 4392 meteorList.append(estAux)
4378 4393 initMet = inext
4379 4394 metArray2 = numpy.array(meteorList)
4380 4395
4381 4396 return metArray2
4382 4397
4383 4398 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
4384 4399
4385 4400 azimuth1 = numpy.zeros(len(pairslist))
4386 4401 dist = numpy.zeros(len(pairslist))
4387 4402
4388 4403 for i in range(len(pairslist)): #assumed intent: iterate over the antenna pairs indexed below
4389 4404 ch0 = pairslist[i][0]
4390 4405 ch1 = pairslist[i][1]
4391 4406
4392 4407 diffX = rx_location[ch0][0] - rx_location[ch1][0]
4393 4408 diffY = rx_location[ch0][1] - rx_location[ch1][1]
4394 4409 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
4395 4410 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
4396 4411
4397 4412 azimuth1 -= azimuth0
4398 4413 return azimuth1, dist
4399 4414
4400 4415 def techniqueNSM_DBS(self, **kwargs):
4401 4416 metArray = kwargs['metArray']
4402 4417 heightList = kwargs['heightList']
4403 4418 timeList = kwargs['timeList']
4404 4419 azimuth = kwargs['azimuth']
4405 4420 theta_x = numpy.array(kwargs['theta_x'])
4406 4421 theta_y = numpy.array(kwargs['theta_y'])
4407 4422
4408 4423 utctime = metArray[:,0]
4409 4424 cmet = metArray[:,1].astype(int)
4410 4425 hmet = metArray[:,3].astype(int)
4411 4426 SNRmet = metArray[:,4]
4412 4427 vmet = metArray[:,5]
4413 4428 spcmet = metArray[:,6]
4414 4429
4415 4430 nChan = numpy.max(cmet) + 1
4416 4431 nHeights = len(heightList)
4417 4432
4418 4433 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4419 4434 hmet = heightList[hmet]
4420 4435 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
4421 4436
4422 4437 velEst = numpy.zeros((heightList.size,2))*numpy.nan
4423 4438
4424 4439 for i in range(nHeights - 1):
4425 4440 hmin = heightList[i]
4426 4441 hmax = heightList[i + 1]
4427 4442
4428 4443 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
4429 4444 indthisH = numpy.where(thisH)
4430 4445
4431 4446 if numpy.size(indthisH) > 3:
4432 4447
4433 4448 vel_aux = vmet[thisH]
4434 4449 chan_aux = cmet[thisH]
4435 4450 cosu_aux = dir_cosu[chan_aux]
4436 4451 cosv_aux = dir_cosv[chan_aux]
4437 4452 cosw_aux = dir_cosw[chan_aux]
4438 4453
4439 4454 nch = numpy.size(numpy.unique(chan_aux))
4440 4455 if nch > 1:
4441 4456 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
4442 4457 velEst[i,:] = numpy.dot(A,vel_aux)
4443 4458
4444 4459 return velEst
4445 4460
4446 4461 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
4447 4462
4448 4463 param = dataOut.moments
4449 4464 if numpy.any(dataOut.abscissaList):
4450 4465 absc = dataOut.abscissaList[:-1]
4451 4466 # noise = dataOut.noise
4452 4467 heightList = dataOut.heightList
4453 4468 SNR = dataOut.data_snr
4454 4469
4455 4470 if technique == 'DBS':
4456 4471
4457 4472 kwargs['velRadial'] = param[:,1,:] #Radial velocity
4458 4473 kwargs['heightList'] = heightList
4459 4474 kwargs['SNR'] = SNR
4460 4475
4461 4476 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
4462 4477 dataOut.utctimeInit = dataOut.utctime
4463 4478 dataOut.outputInterval = dataOut.paramInterval
4464 4479
4465 4480 elif technique == 'SA':
4466 4481
4467 4482 #Parameters
4468 4483 kwargs['groupList'] = dataOut.groupList
4469 4484 kwargs['tau'] = dataOut.data_param
4470 4485 kwargs['_lambda'] = dataOut.C/dataOut.frequency
4471 4486 dataOut.data_output = self.techniqueSA(kwargs)
4472 4487 dataOut.utctimeInit = dataOut.utctime
4473 4488 dataOut.outputInterval = dataOut.timeInterval
4474 4489
4475 4490 elif technique == 'Meteors':
4476 4491 dataOut.flagNoData = True
4477 4492 self.__dataReady = False
4478 4493
4479 4494 if 'nHours' in kwargs:
4480 4495 nHours = kwargs['nHours']
4481 4496 else:
4482 4497 nHours = 1
4483 4498
4484 4499 if 'meteorsPerBin' in kwargs:
4485 4500 meteorThresh = kwargs['meteorsPerBin']
4486 4501 else:
4487 4502 meteorThresh = 6
4488 4503
4489 4504 if 'hmin' in kwargs:
4490 4505 hmin = kwargs['hmin']
4491 4506 else: hmin = 70
4492 4507 if 'hmax' in kwargs:
4493 4508 hmax = kwargs['hmax']
4494 4509 else: hmax = 110
4495 4510
4496 4511 dataOut.outputInterval = nHours*3600
4497 4512
4498 4513 if self.__isConfig == False:
4499 4514 #Get Initial LTC time
4500 4515 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
4501 4516 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
4502 4517
4503 4518 self.__isConfig = True
4504 4519
4505 4520 if self.__buffer is None:
4506 4521 self.__buffer = dataOut.data_param
4507 4522 self.__firstdata = copy.copy(dataOut)
4508 4523
4509 4524 else:
4510 4525 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
4511 4526
4512 4527 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
4513 4528
4514 4529 if self.__dataReady:
4515 4530 dataOut.utctimeInit = self.__initime
4516 4531
4517 4532 self.__initime += dataOut.outputInterval #to erase time offset
4518 4533
4519 4534 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
4520 4535 dataOut.flagNoData = False
4521 4536 self.__buffer = None
4522 4537
4523 4538 elif technique == 'Meteors1':
4524 4539 dataOut.flagNoData = True
4525 4540 self.__dataReady = False
4526 4541
4527 4542 if 'nMins' in kwargs:
4528 4543 nMins = kwargs['nMins']
4529 4544 else: nMins = 20
4530 4545 if 'rx_location' in kwargs:
4531 4546 rx_location = kwargs['rx_location']
4532 4547 else: rx_location = [(0,1),(1,1),(1,0)]
4533 4548 if 'azimuth' in kwargs:
4534 4549 azimuth = kwargs['azimuth']
4535 4550 else: azimuth = 51.06
4536 4551 if 'dfactor' in kwargs:
4537 4552 dfactor = kwargs['dfactor']
4538 4553 if 'mode' in kwargs:
4539 4554 mode = kwargs['mode']
4540 4555 else: mode = 'SA' #default mode; the else clause belongs to the mode check
4541 4556 if 'theta_x' in kwargs:
4542 4557 theta_x = kwargs['theta_x']
4543 4558 if 'theta_y' in kwargs:
4544 4559 theta_y = kwargs['theta_y']
4545 4560
4546 4561 #TODO: remove this later
4547 4562 if dataOut.groupList is None:
4548 4563 dataOut.groupList = [(0,1),(0,2),(1,2)]
4549 4564 groupList = dataOut.groupList
4550 4565 C = 3e8
4551 4566 freq = 50e6
4552 4567 lamb = C/freq
4553 4568 k = 2*numpy.pi/lamb
4554 4569
4555 4570 timeList = dataOut.abscissaList
4556 4571 heightList = dataOut.heightList
4557 4572
4558 4573 if self.__isConfig == False:
4559 4574 dataOut.outputInterval = nMins*60
4560 4575 #Get Initial LTC time
4561 4576 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
4562 4577 minuteAux = initime.minute
4563 4578 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
4564 4579 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
4565 4580
4566 4581 self.__isConfig = True
4567 4582
4568 4583 if self.__buffer is None:
4569 4584 self.__buffer = dataOut.data_param
4570 4585 self.__firstdata = copy.copy(dataOut)
4571 4586
4572 4587 else:
4573 4588 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
4574 4589
4575 4590 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
4576 4591
4577 4592 if self.__dataReady:
4578 4593 dataOut.utctimeInit = self.__initime
4579 4594 self.__initime += dataOut.outputInterval #to erase time offset
4580 4595
4581 4596 metArray = self.__buffer
4582 4597 if mode == 'SA':
4583 4598 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
4584 4599 elif mode == 'DBS':
4585 4600 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
4586 4601 dataOut.data_output = dataOut.data_output.T
4587 4602 dataOut.flagNoData = False
4588 4603 self.__buffer = None
4589 4604
4590 4605 return dataOut
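
Typical call patterns for the run() method above, shown only as a sketch: windOp is assumed to be an instance of this operation, dataOut an already-populated Parameters object, and the keyword names are the ones consumed by the corresponding technique branches.

# DBS winds, beams described by elevation/azimuth (values illustrative)
# dataOut = windOp.run(dataOut, technique='DBS',
#                      elevation=[75, 75, 90], azimuth=[0, 180, 0],
#                      correctAzimuth=0, horizontalOnly=False)

# Meteor winds accumulated over 1-hour buffers between 70 and 110 km
# dataOut = windOp.run(dataOut, technique='Meteors',
#                      nHours=1, meteorsPerBin=6, hmin=70, hmax=110)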
4591 4606
4592 4607 class EWDriftsEstimation(Operation):
4593 4608
4594 4609 def __init__(self):
4595 4610 Operation.__init__(self)
4596 4611
4597 4612 def __correctValues(self, heiRang, phi, velRadial, SNR):
4598 4613 listPhi = phi.tolist()
4599 4614 maxid = listPhi.index(max(listPhi))
4600 4615 minid = listPhi.index(min(listPhi))
4601 4616
4602 4617 rango = list(range(len(phi)))
4603 4618 heiRang1 = heiRang*math.cos(phi[maxid])
4604 4619 heiRangAux = heiRang*math.cos(phi[minid])
4605 4620 indOut = (heiRang1 < heiRangAux[0]).nonzero()
4606 4621 heiRang1 = numpy.delete(heiRang1,indOut)
4607 4622
4608 4623 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
4609 4624 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
4610 4625
4611 4626 for i in rango:
4612 4627 x = heiRang*math.cos(phi[i])
4613 4628 y1 = velRadial[i,:]
4614 4629 vali= (numpy.isfinite(y1)==True).nonzero()
4615 4630 y1=y1[vali]
4616 4631 x = x[vali]
4617 4632 f1 = interpolate.interp1d(x,y1,kind = 'cubic',bounds_error=False)
4618 4633 x1 = heiRang1
4619 4634 y11 = f1(x1)
4620 4635 y2 = SNR[i,:]
4621 4636 x = heiRang*math.cos(phi[i])
4622 4637 vali= (y2 != -1).nonzero()
4623 4638 y2 = y2[vali]
4624 4639 x = x[vali]
4625 4640 f2 = interpolate.interp1d(x,y2,kind = 'cubic',bounds_error=False)
4626 4641 y21 = f2(x1)
4627 4642
4628 4643 velRadial1[i,:] = y11
4629 4644 SNR1[i,:] = y21
4630 4645
4631 4646 return heiRang1, velRadial1, SNR1
4632 4647
4633 4648 def run(self, dataOut, zenith, zenithCorrection,fileDrifts):
4634 4649
4635 4650 heiRang = dataOut.heightList
4636 4651 velRadial = dataOut.data_param[:,3,:]
4637 4652 velRadialm = dataOut.data_param[:,2:4,:]*-1
4638 4653 rbufc=dataOut.data_paramC[:,:,0]
4639 4654 ebufc=dataOut.data_paramC[:,:,1]
4640 4655 SNR = dataOut.data_snr
4641 4656 velRerr = dataOut.data_error[:,4,:]
4642 4657 channels = dataOut.channelList
4643 4658 nChan = len(channels)
4644 4659 my_nbeams = nChan//2
4645 4660 if my_nbeams == 2:
4646 4661 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]],[velRadialm[1,:]],[velRadialm[1,:]]))
4647 4662 else :
4648 4663 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]]))
4649 4664 dataOut.moments=moments
4650 4665 # Coherent
4651 4666 smooth_wC = ebufc[0,:]
4652 4667 p_w0C = rbufc[0,:]
4653 4668 p_w1C = rbufc[1,:]
4654 4669 w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1)
4655 4670 t_wC = rbufc[3,:]
4656 4671 if my_nbeams == 1:
4657 4672 w = velRadial[0,:]
4658 4673 winds = velRadial.copy()
4659 4674 w_err = velRerr[0,:]
4660 4675 snr1 = 10*numpy.log10(SNR[0])
4661 4676 if my_nbeams == 2:
4662 4677 zenith = numpy.array(zenith)
4663 4678 zenith -= zenithCorrection
4664 4679 zenith *= numpy.pi/180
4665 4680 if zenithCorrection != 0 :
4666 4681 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
4667 4682 else :
4668 4683 heiRang1 = heiRang
4669 4684 velRadial1 = velRadial
4670 4685 SNR1 = SNR
4671 4686
4672 4687 alp = zenith[0]
4673 4688 bet = zenith[1]
4674 4689
4675 4690 w_w = velRadial1[0,:]
4676 4691 w_e = velRadial1[1,:]
4677 4692 w_w_err = velRerr[0,:]
4678 4693 w_e_err = velRerr[1,:]
4679 4694
4680 4695 val = (numpy.isfinite(w_w)==False).nonzero()
4681 4696 val = val[0]
4682 4697 bad = val
4683 4698 if len(bad) > 0 :
4684 4699 w_w[bad] = w_wC[bad]
4685 4700 w_w_err[bad]= numpy.nan
4686 4701 smooth_eC=ebufc[4,:]
4687 4702 p_e0C = rbufc[4,:]
4688 4703 p_e1C = rbufc[5,:]
4689 4704 w_eC = rbufc[6,:]*-1
4690 4705 t_eC = rbufc[7,:]
4691 4706 val = (numpy.isfinite(w_e)==False).nonzero()
4692 4707 val = val[0]
4693 4708 bad = val
4694 4709 if len(bad) > 0 :
4695 4710 w_e[bad] = w_eC[bad]
4696 4711 w_e_err[bad]= numpy.nan
4697 4712
4698 4713 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
4699 4714 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
4700 4715
4701 4716 w_err = numpy.sqrt((w_w_err*numpy.sin(bet))**2.+(w_e_err*numpy.sin(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
4702 4717 u_err = numpy.sqrt((w_w_err*numpy.cos(bet))**2.+(w_e_err*numpy.cos(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
4703 4718
4704 4719 winds = numpy.vstack((w,u))
4705 4720
4706 4721 dataOut.heightList = heiRang1
4707 4722 snr1 = 10*numpy.log10(SNR1[0])
4708 4723 dataOut.data_output = winds
4709 4724 #snr1 = 10*numpy.log10(SNR1[0])
4710 4725 dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0]))
4711 4726 dataOut.utctimeInit = dataOut.utctime
4712 4727 dataOut.outputInterval = dataOut.timeInterval
4713 4728
4714 4729 hei_aver0 = 218
4715 4730 jrange = 450 #900 for HA drifts
4716 4731 deltah = 15.0 #dataOut.spacing(0) 25 HAD
4717 4732 h0 = 0.0 #dataOut.first_height(0)
4718 4733 heights = dataOut.heightList
4719 4734 nhei = len(heights)
4720 4735
4721 4736 range1 = numpy.arange(nhei) * deltah + h0
4722 4737 jhei = (range1 >= hei_aver0).nonzero()
4723 4738 if len(jhei[0]) > 0 :
4724 4739 h0_index = jhei[0][0] # Initial height for getting averages 218km
4725 4740
4726 4741 mynhei = 7
4727 4742 nhei_avg = int(jrange/deltah)
4728 4743 h_avgs = int(nhei_avg/mynhei)
4729 4744 nhei_avg = h_avgs*(mynhei-1)+mynhei
4730 4745
4731 4746 navgs = numpy.zeros(mynhei,dtype='float')
4732 4747 delta_h = numpy.zeros(mynhei,dtype='float')
4733 4748 range_aver = numpy.zeros(mynhei,dtype='float')
4734 4749 for ih in range( mynhei-1 ):
4735 4750 range_aver[ih] = numpy.sum(range1[h0_index+h_avgs*ih:h0_index+h_avgs*(ih+1)-0])/h_avgs
4736 4751 navgs[ih] = h_avgs
4737 4752 delta_h[ih] = deltah*h_avgs
4738 4753
4739 4754 range_aver[mynhei-1] = numpy.sum(range1[h0_index:h0_index+6*h_avgs-0])/(6*h_avgs)
4740 4755 navgs[mynhei-1] = 6*h_avgs
4741 4756 delta_h[mynhei-1] = deltah*6*h_avgs
4742 4757
4743 4758 wA = w[h0_index:h0_index+nhei_avg-0]
4744 4759 wA_err = w_err[h0_index:h0_index+nhei_avg-0]
4745 4760 for i in range(5) :
4746 4761 vals = wA[i*h_avgs:(i+1)*h_avgs-0]
4747 4762 errs = wA_err[i*h_avgs:(i+1)*h_avgs-0]
4748 4763 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
4749 4764 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4750 4765 wA[6*h_avgs+i] = avg
4751 4766 wA_err[6*h_avgs+i] = sigma
4752 4767
4753 4768
4754 4769 vals = wA[0:6*h_avgs-0]
4755 4770 errs=wA_err[0:6*h_avgs-0]
4756 4771 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2)
4757 4772 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4758 4773 wA[nhei_avg-1] = avg
4759 4774 wA_err[nhei_avg-1] = sigma
4760 4775
4761 4776 wA = wA[6*h_avgs:nhei_avg-0]
4762 4777 wA_err=wA_err[6*h_avgs:nhei_avg-0]
4763 4778 if my_nbeams == 2 :
4764 4779 uA = u[h0_index:h0_index+nhei_avg]
4765 4780 uA_err=u_err[h0_index:h0_index+nhei_avg]
4766 4781
4767 4782 for i in range(5) :
4768 4783 vals = uA[i*h_avgs:(i+1)*h_avgs-0]
4769 4784 errs=uA_err[i*h_avgs:(i+1)*h_avgs-0]
4770 4785 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
4771 4786 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4772 4787 uA[6*h_avgs+i] = avg
4773 4788 uA_err[6*h_avgs+i]=sigma
4774 4789
4775 4790 vals = uA[0:6*h_avgs-0]
4776 4791 errs = uA_err[0:6*h_avgs-0]
4777 4792 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
4778 4793 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4779 4794 uA[nhei_avg-1] = avg
4780 4795 uA_err[nhei_avg-1] = sigma
4781 4796 uA = uA[6*h_avgs:nhei_avg-0]
4782 4797 uA_err = uA_err[6*h_avgs:nhei_avg-0]
4783 4798 dataOut.drifts_avg = numpy.vstack((wA,uA))
4784 4799 if my_nbeams == 1: dataOut.drifts_avg = wA
4785 4800 tini=time.localtime(dataOut.utctime)
4786 4801 datefile= str(tini[0]).zfill(4)+str(tini[1]).zfill(2)+str(tini[2]).zfill(2)
4787 4802 nfile = fileDrifts+'/jro'+datefile+'drifts.txt'
4788 4803 f1 = open(nfile,'a')
4789 4804 datedriftavg=str(tini[0])+' '+str(tini[1])+' '+str(tini[2])+' '+str(tini[3])+' '+str(tini[4])
4790 4805 driftavgstr=str(dataOut.drifts_avg)
4791 4806 numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i')
4792 4807 numpy.savetxt(f1,dataOut.drifts_avg,fmt='%10.2f')
4793 4808 f1.close()
4794 4809
4795 4810 return dataOut
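
The two-beam inversion above recovers the vertical (w) and zonal (u) drifts from the radial velocities of beams with zenith angles alp and bet. The following check forward-projects a known (w, u) onto both beams with synthetic, illustrative angles and applies the same expressions.

import numpy as np

alp = np.deg2rad(-2.5)       # west-beam zenith angle (rad), illustrative sign convention
bet = np.deg2rad(3.0)        # east-beam zenith angle (rad), illustrative
w_true, u_true = 0.5, 20.0   # vertical and zonal drifts, m/s

# Radial velocities seen by each beam under this geometry
w_w = w_true * np.cos(alp) + u_true * np.sin(alp)
w_e = w_true * np.cos(bet) + u_true * np.sin(bet)

# Same inversion as in EWDriftsEstimation.run
w = (w_w * np.sin(bet) - w_e * np.sin(alp)) / (np.cos(alp) * np.sin(bet) - np.cos(bet) * np.sin(alp))
u = (w_w * np.cos(bet) - w_e * np.cos(alp)) / (np.sin(alp) * np.cos(bet) - np.sin(bet) * np.cos(alp))
print(w, u)                  # ~0.5, ~20.0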
4796 4811
4797 4812 #--------------- Non Specular Meteor ----------------
4798 4813
4799 4814 class NonSpecularMeteorDetection(Operation):
4800 4815
4801 4816 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
4802 4817 data_acf = dataOut.data_pre[0]
4803 4818 data_ccf = dataOut.data_pre[1]
4804 4819 pairsList = dataOut.groupList[1]
4805 4820
4806 4821 lamb = dataOut.C/dataOut.frequency
4807 4822 tSamp = dataOut.ippSeconds*dataOut.nCohInt
4808 4823 paramInterval = dataOut.paramInterval
4809 4824
4810 4825 nChannels = data_acf.shape[0]
4811 4826 nLags = data_acf.shape[1]
4812 4827 nProfiles = data_acf.shape[2]
4813 4828 nHeights = dataOut.nHeights
4814 4829 nCohInt = dataOut.nCohInt
4815 4830 sec = numpy.round(nProfiles/dataOut.paramInterval)
4816 4831 heightList = dataOut.heightList
4817 4832 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
4818 4833 utctime = dataOut.utctime
4819 4834
4820 4835 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
4821 4836
4822 4837 #------------------------ SNR --------------------------------------
4823 4838 power = data_acf[:,0,:,:].real
4824 4839 noise = numpy.zeros(nChannels)
4825 4840 SNR = numpy.zeros(power.shape)
4826 4841 for i in range(nChannels):
4827 4842 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
4828 4843 SNR[i] = (power[i]-noise[i])/noise[i]
4829 4844 SNRm = numpy.nanmean(SNR, axis = 0)
4830 4845 SNRdB = 10*numpy.log10(SNR)
4831 4846
4832 4847 if mode == 'SA':
4833 4848 dataOut.groupList = dataOut.groupList[1]
4834 4849 nPairs = data_ccf.shape[0]
4835 4850 #---------------------- Coherence and Phase --------------------------
4836 4851 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
4837 4852 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
4838 4853
4839 4854 for p in range(nPairs):
4840 4855 ch0 = pairsList[p][0]
4841 4856 ch1 = pairsList[p][1]
4842 4857 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
4843 4858 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
4844 4859 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
4845 4860 coh = numpy.nanmax(coh1, axis = 0)
4846 4861 #---------------------- Radial Velocity ----------------------------
4847 4862 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
4848 4863 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
4849 4864
4850 4865 if allData:
4851 4866 boolMetFin = ~numpy.isnan(SNRm)
4852 4867 else:
4853 4868 #------------------------ Meteor mask ---------------------------------
4854 4869
4855 4870 #Coherence mask
4856 4871 boolMet1 = coh > 0.75
4857 4872 struc = numpy.ones((30,1))
4858 4873 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
4859 4874
4860 4875 #Derivative mask
4861 4876 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
4862 4877 boolMet2 = derPhase < 0.2
4863 4878 boolMet2 = ndimage.median_filter(boolMet2,size=5)
4864 4879 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
4865 4880 boolMetFin = boolMet1&boolMet2
4866 4881 #Creating data_param
4867 4882 coordMet = numpy.where(boolMetFin)
4868 4883
4869 4884 tmet = coordMet[0]
4870 4885 hmet = coordMet[1]
4871 4886
4872 4887 data_param = numpy.zeros((tmet.size, 6 + nPairs))
4873 4888 data_param[:,0] = utctime
4874 4889 data_param[:,1] = tmet
4875 4890 data_param[:,2] = hmet
4876 4891 data_param[:,3] = SNRm[tmet,hmet]
4877 4892 data_param[:,4] = velRad[tmet,hmet]
4878 4893 data_param[:,5] = coh[tmet,hmet]
4879 4894 data_param[:,6:] = phase[:,tmet,hmet].T
4880 4895
4881 4896 elif mode == 'DBS':
4882 4897 dataOut.groupList = numpy.arange(nChannels)
4883 4898
4884 4899 #Radial Velocities
4885 4900 phase = numpy.angle(data_acf[:,1,:,:])
4886 4901 velRad = phase*lamb/(4*numpy.pi*tSamp)
4887 4902
4888 4903 #Spectral width
4889 4904 acf1 = data_acf[:,1,:,:]
4890 4905 acf2 = data_acf[:,2,:,:]
4891 4906
4892 4907 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
4893 4908 if allData:
4894 4909 boolMetFin = ~numpy.isnan(SNRdB)
4895 4910 else:
4896 4911 #SNR
4897 4912 boolMet1 = (SNRdB>SNRthresh) #SNR mask
4898 4913 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
4899 4914
4900 4915 #Radial velocity
4901 4916 boolMet2 = numpy.abs(velRad) < 20
4902 4917 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
4903 4918
4904 4919 #Spectral Width
4905 4920 boolMet3 = spcWidth < 30
4906 4921 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
4907 4922 boolMetFin = boolMet1&boolMet2&boolMet3
4908 4923
4909 4924 #Creating data_param
4910 4925 coordMet = numpy.where(boolMetFin)
4911 4926
4912 4927 cmet = coordMet[0]
4913 4928 tmet = coordMet[1]
4914 4929 hmet = coordMet[2]
4915 4930
4916 4931 data_param = numpy.zeros((tmet.size, 7))
4917 4932 data_param[:,0] = utctime
4918 4933 data_param[:,1] = cmet
4919 4934 data_param[:,2] = tmet
4920 4935 data_param[:,3] = hmet
4921 4936 data_param[:,4] = SNR[cmet,tmet,hmet].T
4922 4937 data_param[:,5] = velRad[cmet,tmet,hmet].T
4923 4938 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
4924 4939
4925 4940 if len(data_param) == 0:
4926 4941 dataOut.flagNoData = True
4927 4942 else:
4928 4943 dataOut.data_param = data_param
4929 4944
4930 4945 def __erase_small(self, binArray, threshX, threshY):
4931 4946 labarray, numfeat = ndimage.measurements.label(binArray)
4932 4947 binArray1 = numpy.copy(binArray)
4933 4948
4934 4949 for i in range(1,numfeat + 1):
4935 4950 auxBin = (labarray==i)
4936 4951 auxSize = auxBin.sum()
4937 4952
4938 4953 x,y = numpy.where(auxBin)
4939 4954 widthX = x.max() - x.min()
4940 4955 widthY = y.max() - y.min()
4941 4956
4942 4957 #width X: 3 s -> 12.5*3
4943 4958 #width Y:
4944 4959
4945 4960 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
4946 4961 binArray1[auxBin] = False
4947 4962
4948 4963 return binArray1
4949 4964
4950 4965 #--------------- Specular Meteor ----------------
4951 4966
4952 4967 class SMDetection(Operation):
4953 4968 '''
4954 4969 Function DetectMeteors()
4955 4970 Implementation based on the paper:
4956 4971 HOLDSWORTH ET AL. 2004
4957 4972
4958 4973 Input:
4959 4974 self.dataOut.data_pre
4960 4975
4961 4976 centerReceiverIndex: Index of the channel corresponding to the center receiver
4962 4977
4963 4978 hei_ref: Height reference for the Beacon signal extraction
4964 4979 tauindex:
4965 4980 predefinedPhaseShifts: Predefined phase offsets for the voltage signals
4966 4981 
4967 4982 cohDetection: Whether or not to use coherent detection
4968 4983 cohDet_timeStep: Coherent Detection calculation time step
4969 4984 cohDet_thresh: Coherent Detection phase threshold to correct phases
4970 4985
4971 4986 noise_timeStep: Noise calculation time step
4972 4987 noise_multiple: Noise multiple to define signal threshold
4973 4988
4974 4989 multDet_timeLimit: Multiple Detection Removal time limit in seconds
4975 4990 multDet_rangeLimit: Multiple Detection Removal range limit in km
4976 4991
4977 4992 phaseThresh: Maximum phase difference between receivers for an echo to be considered a meteor
4978 4993 SNRThresh: Minimum SNR of the meteor signal for it to be considered a meteor
4979 4994
4980 4995 hmin: Minimum Height of the meteor to use it in the further wind estimations
4981 4996 hmax: Maximum Height of the meteor to use it in the further wind estimations
4982 4997 azimuth: Azimuth angle correction
4983 4998
4984 4999 Affected:
4985 5000 self.dataOut.data_param
4986 5001
4987 5002 Rejection Criteria (Errors):
4988 5003 0: No error; analysis OK
4989 5004 1: SNR < SNR threshold
4990 5005 2: angle of arrival (AOA) ambiguously determined
4991 5006 3: AOA estimate not feasible
4992 5007 4: Large difference in AOAs obtained from different antenna baselines
4993 5008 5: echo at start or end of time series
4994 5009 6: echo less than 5 samples long; too short for analysis
4995 5010 7: echo rise exceeds 0.3s
4996 5011 8: echo decay time less than twice rise time
4997 5012 9: large power level before echo
4998 5013 10: large power level after echo
4999 5014 11: poor fit to amplitude for estimation of decay time
5000 5015 12: poor fit to CCF phase variation for estimation of radial drift velocity
5001 5016 13: height unresolvable echo: not valid height within 70 to 110 km
5002 5017 14: height ambiguous echo: more than one possible height within 70 to 110 km
5003 5018 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
5004 5019 16: oscillatory echo, indicating the event is most likely not an underdense echo
5005 5020
5006 5021 17: phase difference in meteor Reestimation
5007 5022
5008 5023 Data Storage:
5009 5024 Meteors for Wind Estimation (8):
5010 5025 Utc Time | Range Height
5011 5026 Azimuth Zenith errorCosDir
5012 5027 VelRad errorVelRad
5013 5028 Phase0 Phase1 Phase2 Phase3
5014 5029 TypeError
5015 5030
5016 5031 '''
5017 5032
5018 5033 def run(self, dataOut, hei_ref = None, tauindex = 0,
5019 5034 phaseOffsets = None,
5020 5035 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
5021 5036 noise_timeStep = 4, noise_multiple = 4,
5022 5037 multDet_timeLimit = 1, multDet_rangeLimit = 3,
5023 5038 phaseThresh = 20, SNRThresh = 5,
5024 5039 hmin = 50, hmax=150, azimuth = 0,
5025 5040 channelPositions = None) :
5026 5041
5027 5042
5028 5043 #Getting Pairslist
5029 5044 if channelPositions is None:
5030 5045 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
5031 5046 meteorOps = SMOperations()
5032 5047 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
5033 5048 heiRang = dataOut.heightList
5034 5049 #Get Beacon signal - No Beacon signal anymore
5035 5050 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
5036 5051 # see if the user put in pre defined phase shifts
5037 5052 voltsPShift = dataOut.data_pre.copy()
5038 5053
5039 5054 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
5040 5055
5041 5056 #Remove DC
5042 5057 voltsDC = numpy.mean(voltsPShift,1)
5043 5058 voltsDC = numpy.mean(voltsDC,1)
5044 5059 for i in range(voltsDC.shape[0]):
5045 5060 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
5046 5061
5047 5062 #Don't consider the last heights; they're used to calculate the hardware phase shift
5048 5063
5049 5064 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
5050 5065 #Coherent Detection
5051 5066 if cohDetection:
5052 5067 #use coherent detection to get the net power
5053 5068 cohDet_thresh = cohDet_thresh*numpy.pi/180
5054 5069 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
5055 5070
5056 5071 #Non-coherent detection!
5057 5072 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
5058 5073 #********** END OF COH/NON-COH POWER CALCULATION**********************
5059 5074
5060 5075 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
5061 5076 #Get noise
5062 5077 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
5063 5078 #Get signal threshold
5064 5079 signalThresh = noise_multiple*noise
5065 5080 #Meteor echoes detection
5066 5081 listMeteors = self.__findMeteors(powerNet, signalThresh)
5067 5082 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CALCULATION **********
5068 5083
5069 5084 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
5070 5085 #Parameters
5071 5086 heiRange = dataOut.heightList
5072 5087 rangeInterval = heiRange[1] - heiRange[0]
5073 5088 rangeLimit = multDet_rangeLimit/rangeInterval
5074 5089 timeLimit = multDet_timeLimit/dataOut.timeInterval
5075 5090 #Multiple detection removals
5076 5091 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
5077 5092 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
5078 5093
5079 5094 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
5080 5095 #Parameters
5081 5096 phaseThresh = phaseThresh*numpy.pi/180
5082 5097 thresh = [phaseThresh, noise_multiple, SNRThresh]
5083 5098 #Meteor reestimation (Errors N 1, 6, 12, 17)
5084 5099 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
5085 5100 #Estimation of decay times (Errors N 7, 8, 11)
5086 5101 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
5087 5102 #******************* END OF METEOR REESTIMATION *******************
5088 5103
5089 5104 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
5090 5105 #Calculating Radial Velocity (Error N 15)
5091 5106 radialStdThresh = 10
5092 5107 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
5093 5108
5094 5109 if len(listMeteors4) > 0:
5095 5110 #Setting New Array
5096 5111 date = dataOut.utctime
5097 5112 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
5098 5113
5099 5114 #Correcting phase offset
5100 5115 if phaseOffsets is not None:
5101 5116 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
5102 5117 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
5103 5118
5104 5119 #Second Pairslist
5105 5120 pairsList = []
5106 5121 pairx = (0,1)
5107 5122 pairy = (2,3)
5108 5123 pairsList.append(pairx)
5109 5124 pairsList.append(pairy)
5110 5125
5111 5126 jph = numpy.array([0,0,0,0])
5112 5127 h = (hmin,hmax)
5113 5128 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
5114 5129 dataOut.data_param = arrayParameters
5115 5130
5116 5131 if arrayParameters is None:
5117 5132 dataOut.flagNoData = True
5118 5133 else:
5119 5134 dataOut.flagNoData = False
5120 5135
5121 5136 return
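
The rows of dataOut.data_param produced above follow the 'Data Storage' layout in the docstring, with the rejection code in the last column, so downstream code can keep only meteors that passed every criterion by selecting error == 0. A minimal sketch with a placeholder array:

import numpy as np

# arrayParameters: one row per detected meteor, last column is the rejection code
arrayParameters = np.zeros((5, 13))            # placeholder with 5 fake meteors
arrayParameters[:, -1] = [0, 1, 0, 15, 17]     # illustrative error codes

good = arrayParameters[arrayParameters[:, -1] == 0]   # keep only error-free meteors
print(good.shape[0])                                   # 2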
5122 5137
5123 5138 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
5124 5139
5125 5140 minIndex = min(newheis[0])
5126 5141 maxIndex = max(newheis[0])
5127 5142
5128 5143 voltage = voltage0[:,:,minIndex:maxIndex+1]
5129 5144 nLength = voltage.shape[1]//n #integer segment length, used as a slice index below
5130 5145 nMin = 0
5131 5146 nMax = 0
5132 5147 phaseOffset = numpy.zeros((len(pairslist),n))
5133 5148
5134 5149 for i in range(n):
5135 5150 nMax += nLength
5136 5151 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
5137 5152 phaseCCF = numpy.mean(phaseCCF, axis = 2)
5138 5153 phaseOffset[:,i] = phaseCCF.transpose()
5139 5154 nMin = nMax
5140 5155
5141 5156 #Remove Outliers
5142 5157 factor = 2
5143 5158 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
5144 5159 dw = numpy.std(wt,axis = 1)
5145 5160 dw = dw.reshape((dw.size,1))
5146 5161 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
5147 5162 phaseOffset[ind] = numpy.nan
5148 5163 phaseOffset = numpy.nanmean(phaseOffset, axis=1) #scipy.stats.nanmean was removed from SciPy; numpy.nanmean is the equivalent here
5149 5164
5150 5165 return phaseOffset
5151 5166
5152 5167 def __shiftPhase(self, data, phaseShift):
5153 5168 #this will shift the phase of a complex number
5154 5169 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
5155 5170 return dataShifted
5156 5171
5157 5172 def __estimatePhaseDifference(self, array, pairslist):
5158 5173 nChannel = array.shape[0]
5159 5174 nHeights = array.shape[2]
5160 5175 numPairs = len(pairslist)
5161 5176 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
5162 5177
5163 5178 #Correct phases
5164 5179 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
5165 5180 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
5166 5181
5167 5182 if indDer[0].shape[0] > 0:
5168 5183 for i in range(indDer[0].shape[0]):
5169 5184 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
5170 5185 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
5171 5186
5172 5187 #Linear
5173 5188 phaseInt = numpy.zeros((numPairs,1))
5174 5189 angAllCCF = phaseCCF[:,[0,1,3,4],0]
5175 5190 for j in range(numPairs):
5176 5191 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
5177 5192 phaseInt[j] = fit[1]
5178 5193 #Phase Differences
5179 5194 phaseDiff = phaseInt - phaseCCF[:,2,:]
5180 5195 phaseArrival = phaseInt.reshape(phaseInt.size)
5181 5196
5182 5197 #Dealias
5183 5198 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
5184 5199
5185 5200 return phaseDiff, phaseArrival
5186 5201
5187 5202 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
5188 5203 #this function runs the coherent detection used in Holdsworth et al. 2004 and returns the net power
5189 5204 #find the phase shifts of each channel over 1 second intervals
5190 5205 #only look at ranges below the beacon signal
5191 5206 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
5192 5207 numBlocks = int(volts.shape[1]/numProfPerBlock)
5193 5208 numHeights = volts.shape[2]
5194 5209 nChannel = volts.shape[0]
5195 5210 voltsCohDet = volts.copy()
5196 5211
5197 5212 pairsarray = numpy.array(pairslist)
5198 5213 indSides = pairsarray[:,1]
5199 5214 listBlocks = numpy.array_split(volts, numBlocks, 1)
5200 5215
5201 5216 startInd = 0
5202 5217 endInd = 0
5203 5218
5204 5219 for i in range(numBlocks):
5205 5220 startInd = endInd
5206 5221 endInd = endInd + listBlocks[i].shape[1]
5207 5222
5208 5223 arrayBlock = listBlocks[i]
5209 5224
5210 5225 #Estimate the Phase Difference
5211 5226 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
5212 5227 #Phase Difference RMS
5213 5228 arrayPhaseRMS = numpy.abs(phaseDiff)
5214 5229 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
5215 5230 indPhase = numpy.where(phaseRMSaux==4)
5216 5231 #Shifting
5217 5232 if indPhase[0].shape[0] > 0:
5218 5233 for j in range(indSides.size):
5219 5234 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
5220 5235 voltsCohDet[:,startInd:endInd,:] = arrayBlock
5221 5236
5222 5237 return voltsCohDet
5223 5238
5224 5239 def __calculateCCF(self, volts, pairslist ,laglist):
5225 5240
5226 5241 nHeights = volts.shape[2]
5227 5242 nPoints = volts.shape[1]
5228 5243 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
5229 5244
5230 5245 for i in range(len(pairslist)):
5231 5246 volts1 = volts[pairslist[i][0]]
5232 5247 volts2 = volts[pairslist[i][1]]
5233 5248
5234 5249 for t in range(len(laglist)):
5235 5250 idxT = laglist[t]
5236 5251 if idxT >= 0:
5237 5252 vStacked = numpy.vstack((volts2[idxT:,:],
5238 5253 numpy.zeros((idxT, nHeights),dtype='complex')))
5239 5254 else:
5240 5255 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
5241 5256 volts2[:(nPoints + idxT),:]))
5242 5257 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
5243 5258
5244 5259 vStacked = None
5245 5260 return voltsCCF
5246 5261
5247 5262 def __getNoise(self, power, timeSegment, timeInterval):
5248 5263 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
5249 5264 numBlocks = int(power.shape[0]/numProfPerBlock)
5250 5265 numHeights = power.shape[1]
5251 5266
5252 5267 listPower = numpy.array_split(power, numBlocks, 0)
5253 5268 noise = numpy.zeros((power.shape[0], power.shape[1]))
5254 5269 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
5255 5270
5256 5271 startInd = 0
5257 5272 endInd = 0
5258 5273
5259 5274 for i in range(numBlocks): #split by channel
5260 5275 startInd = endInd
5261 5276 endInd = endInd + listPower[i].shape[0]
5262 5277
5263 5278 arrayBlock = listPower[i]
5264 5279 noiseAux = numpy.mean(arrayBlock, 0)
5265 5280 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
5266 5281
5267 5282 noiseAux1 = numpy.mean(arrayBlock)
5268 5283 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
5269 5284
5270 5285 return noise, noise1
5271 5286
5272 5287 def __findMeteors(self, power, thresh):
5273 5288 nProf = power.shape[0]
5274 5289 nHeights = power.shape[1]
5275 5290 listMeteors = []
5276 5291
5277 5292 for i in range(nHeights):
5278 5293 powerAux = power[:,i]
5279 5294 threshAux = thresh[:,i]
5280 5295
5281 5296 indUPthresh = numpy.where(powerAux > threshAux)[0]
5282 5297 indDNthresh = numpy.where(powerAux <= threshAux)[0]
5283 5298
5284 5299 j = 0
5285 5300
5286 5301 while (j < indUPthresh.size - 2):
5287 5302 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
5288 5303 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
5289 5304 indDNthresh = indDNthresh[indDNAux]
5290 5305
5291 5306 if (indDNthresh.size > 0):
5292 5307 indEnd = indDNthresh[0] - 1
5293 5308 indInit = indUPthresh[j]
5294 5309
5295 5310 meteor = powerAux[indInit:indEnd + 1]
5296 5311 indPeak = meteor.argmax() + indInit
5297 5312 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
5298 5313
5299 5314 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #CHECK THIS!!!!!
5300 5315 j = numpy.where(indUPthresh == indEnd)[0] + 1
5301 5316 else: j+=1
5302 5317 else: j+=1
5303 5318
5304 5319 return listMeteors
5305 5320
5306 5321 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
5307 5322
5308 5323 arrayMeteors = numpy.asarray(listMeteors)
5309 5324 listMeteors1 = []
5310 5325
5311 5326 while arrayMeteors.shape[0] > 0:
5312 5327 FLAs = arrayMeteors[:,4]
5313 5328 maxFLA = FLAs.argmax()
5314 5329 listMeteors1.append(arrayMeteors[maxFLA,:])
5315 5330
5316 5331 MeteorInitTime = arrayMeteors[maxFLA,1]
5317 5332 MeteorEndTime = arrayMeteors[maxFLA,3]
5318 5333 MeteorHeight = arrayMeteors[maxFLA,0]
5319 5334
5320 5335 #Check neighborhood
5321 5336 maxHeightIndex = MeteorHeight + rangeLimit
5322 5337 minHeightIndex = MeteorHeight - rangeLimit
5323 5338 minTimeIndex = MeteorInitTime - timeLimit
5324 5339 maxTimeIndex = MeteorEndTime + timeLimit
5325 5340
5326 5341 #Check Heights
5327 5342 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
5328 5343 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
5329 5344 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
5330 5345
5331 5346 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
5332 5347
5333 5348 return listMeteors1
5334 5349
5335 5350 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
5336 5351 numHeights = volts.shape[2]
5337 5352 nChannel = volts.shape[0]
5338 5353
5339 5354 thresholdPhase = thresh[0]
5340 5355 thresholdNoise = thresh[1]
5341 5356 thresholdDB = float(thresh[2])
5342 5357
5343 5358 thresholdDB1 = 10**(thresholdDB/10)
5344 5359 pairsarray = numpy.array(pairslist)
5345 5360 indSides = pairsarray[:,1]
5346 5361
5347 5362 pairslist1 = list(pairslist)
5348 5363 pairslist1.append((0,1))
5349 5364 pairslist1.append((3,4))
5350 5365
5351 5366 listMeteors1 = []
5352 5367 listPowerSeries = []
5353 5368 listVoltageSeries = []
5354 5369 #volts holds the raw data
5355 5370
5356 5371 if frequency == 30e6:
5357 5372 timeLag = 45*10**-3
5358 5373 else:
5359 5374 timeLag = 15*10**-3
5360 5375 lag = int(numpy.ceil(timeLag/timeInterval)) #integer number of samples, used as an array index below
5361 5376
5362 5377 for i in range(len(listMeteors)):
5363 5378
5364 5379 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
5365 5380 meteorAux = numpy.zeros(16)
5366 5381
5367 5382 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
5368 5383 mHeight = listMeteors[i][0]
5369 5384 mStart = listMeteors[i][1]
5370 5385 mPeak = listMeteors[i][2]
5371 5386 mEnd = listMeteors[i][3]
5372 5387
5373 5388 #get the volt data between the start and end times of the meteor
5374 5389 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
5375 5390 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
5376 5391
5377 5392 #3.6. Phase Difference estimation
5378 5393 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
5379 5394
5380 5395 #3.7. Phase difference removal & meteor start, peak and end times reestimated
5381 5396 #meteorVolts0.- all Channels, all Profiles
5382 5397 meteorVolts0 = volts[:,:,mHeight]
5383 5398 meteorThresh = noise[:,mHeight]*thresholdNoise
5384 5399 meteorNoise = noise[:,mHeight]
5385 5400 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
5386 5401 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
5387 5402
5388 5403 #Times reestimation
5389 5404 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
5390 5405 if mStart1.size > 0:
5391 5406 mStart1 = mStart1[-1] + 1
5392 5407
5393 5408 else:
5394 5409 mStart1 = mPeak
5395 5410
5396 5411 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
5397 5412 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
5398 5413 if mEndDecayTime1.size == 0:
5399 5414 mEndDecayTime1 = powerNet0.size
5400 5415 else:
5401 5416 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
5402 5417
5403 5418 #meteorVolts1.- all Channels, from start to end
5404 5419 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
5405 5420 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
5406 5421 if meteorVolts2.shape[1] == 0:
5407 5422 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
5408 5423 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
5409 5424 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
5410 5425 ##################### END PARAMETERS REESTIMATION #########################
5411 5426
5412 5427 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
5413 5428 if meteorVolts2.shape[1] > 0:
5414 5429 #Phase Difference re-estimation
5415 5430 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
5416 5431 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
5417 5432 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
5418 5433 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
5419 5434
5420 5435 #Phase Difference RMS
5421 5436 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
5422 5437 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
5423 5438 #Data from Meteor
5424 5439 mPeak1 = powerNet1.argmax() + mStart1
5425 5440 mPeakPower1 = powerNet1.max()
5426 5441 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
5427 5442 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
5428 5443 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
5429 5444 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
5430 5445 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
5431 5446 #Vectorize
5432 5447 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
5433 5448 meteorAux[7:11] = phaseDiffint[0:4]
5434 5449
5435 5450 #Rejection Criterions
5436 5451 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
5437 5452 meteorAux[-1] = 17
5438 5453 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
5439 5454 meteorAux[-1] = 1
5440 5455
5441 5456
5442 5457 else:
5443 5458 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
5444 5459 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
5445 5460 PowerSeries = 0
5446 5461
5447 5462 listMeteors1.append(meteorAux)
5448 5463 listPowerSeries.append(PowerSeries)
5449 5464 listVoltageSeries.append(meteorVolts1)
5450 5465
5451 5466 return listMeteors1, listPowerSeries, listVoltageSeries
5452 5467
5453 5468 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
5454 5469
5455 5470 threshError = 10
5456 5471 #Depending if it is 30 or 50 MHz
5457 5472 if frequency == 30e6:
5458 5473 timeLag = 45*10**-3
5459 5474 else:
5460 5475 timeLag = 15*10**-3
5461 5476 lag = int(numpy.ceil(timeLag/timeInterval)) #integer number of samples, used as an array index below
5462 5477
5463 5478 listMeteors1 = []
5464 5479
5465 5480 for i in range(len(listMeteors)):
5466 5481 meteorPower = listPower[i]
5467 5482 meteorAux = listMeteors[i]
5468 5483
5469 5484 if meteorAux[-1] == 0:
5470 5485
5471 5486 try:
5472 5487 indmax = meteorPower.argmax()
5473 5488 indlag = indmax + lag
5474 5489
5475 5490 y = meteorPower[indlag:]
5476 5491 x = numpy.arange(0, y.size)*timeLag
5477 5492
5478 5493 #first guess
5479 5494 a = y[0]
5480 5495 tau = timeLag
5481 5496 #exponential fit
5482 5497 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
5483 5498 y1 = self.__exponential_function(x, *popt)
5484 5499 #error estimation
5485 5500 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
5486 5501
5487 5502 decayTime = popt[1]
5488 5503 riseTime = indmax*timeInterval
5489 5504 meteorAux[11:13] = [decayTime, error]
5490 5505
5491 5506 #Table items 7, 8 and 11
5492 5507 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
5493 5508 meteorAux[-1] = 7
5494 5509 elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than twice the rise time
5495 5510 meteorAux[-1] = 8
5496 5511 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
5497 5512 meteorAux[-1] = 11
5498 5513
5499 5514
5500 5515 except:
5501 5516 meteorAux[-1] = 11
5502 5517
5503 5518
5504 5519 listMeteors1.append(meteorAux)
5505 5520
5506 5521 return listMeteors1
5507 5522
5508 5523 #Exponential Function
5509 5524
5510 5525 def __exponential_function(self, x, a, tau):
5511 5526 y = a*numpy.exp(-x/tau)
5512 5527 return y
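    # Model used by __estimateDecayTime: the post-peak meteor power is fitted to
    # a*numpy.exp(-x/tau) with optimize.curve_fit, and tau (popt[1]) is reported as the decay time.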
5513 5528
5514 5529 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
5515 5530
5516 5531 pairslist1 = list(pairslist)
5517 5532 pairslist1.append((0,1))
5518 5533 pairslist1.append((3,4))
5519 5534 numPairs = len(pairslist1)
5520 5535 #Time Lag
5521 5536 timeLag = 45*10**-3
5522 5537 c = 3e8
5523 5538 lag = numpy.ceil(timeLag/timeInterval)
5524 5539 freq = 30e6
5525 5540
5526 5541 listMeteors1 = []
5527 5542
5528 5543 for i in range(len(listMeteors)):
5529 5544 meteorAux = listMeteors[i]
5530 5545 if meteorAux[-1] == 0:
5531 5546 mStart = listMeteors[i][1]
5532 5547 mPeak = listMeteors[i][2]
5533 5548 mLag = mPeak - mStart + lag
5534 5549
5535 5550 #get the volt data between the start and end times of the meteor
5536 5551 meteorVolts = listVolts[i]
5537 5552 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
5538 5553
5539 5554 #Get CCF
5540 5555 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
5541 5556
5542 5557 #Method 2
5543 5558 slopes = numpy.zeros(numPairs)
5544 5559 time = numpy.array([-2,-1,1,2])*timeInterval
5545 5560 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
5546 5561
5547 5562 #Correct phases
5548 5563 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
5549 5564 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
5550 5565
5551 5566 if indDer[0].shape[0] > 0:
5552 5567 for i in range(indDer[0].shape[0]):
5553 5568 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i]])
5554 5569 angAllCCF[indDer[0][i],indDer[1][i]+1:] += signo*2*numpy.pi
5555 5570
5556 5571 for j in range(numPairs):
5557 5572 fit = stats.linregress(time, angAllCCF[j,:])
5558 5573 slopes[j] = fit[0]
5559 5574
5560 5575                 #Radial velocity and error from the per-pair phase slopes
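                #The mean CCF phase slope (rad/s) is converted to radial velocity as
                #v_r = -(dphi/dt)*lambda/(4*pi); the factor (0.25/numpy.pi)*(c/freq) equals lambda/(4*pi).
                #The standard deviation of the per-pair slopes gives the radial velocity error.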
5561 5576 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
5562 5577 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
5563 5578 meteorAux[-2] = radialError
5564 5579 meteorAux[-3] = radialVelocity
5565 5580
5566 5581 #Setting Error
5567 5582 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
5568 5583 if numpy.abs(radialVelocity) > 200:
5569 5584 meteorAux[-1] = 15
5570 5585 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
5571 5586 elif radialError > radialStdThresh:
5572 5587 meteorAux[-1] = 12
5573 5588
5574 5589 listMeteors1.append(meteorAux)
5575 5590 return listMeteors1
5576 5591
5577 5592 def __setNewArrays(self, listMeteors, date, heiRang):
5578 5593
5579 5594 #New arrays
5580 5595 arrayMeteors = numpy.array(listMeteors)
5581 5596 arrayParameters = numpy.zeros((len(listMeteors), 13))
5582 5597
5583 5598 #Date inclusion
5584 5599 arrayDate = numpy.tile(date, (len(listMeteors)))
5585 5600
5586 5601 #Meteor array
5587 5602 #Parameters Array
5588 5603 arrayParameters[:,0] = arrayDate #Date
5589 5604 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
5590 5605 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
5591 5606 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
5592 5607 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
5593 5608
5594 5609
5595 5610 return arrayParameters
5596 5611
5597 5612 class CorrectSMPhases(Operation):
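    """Applies externally estimated inter-channel phase offsets (in degrees) to the
    specular-meteor parameter array and recomputes AOA and heights via SMOperations.

    A minimal wiring sketch, assuming the same addOperation pattern shown in the
    IGRFModel docstring further below (parameter names taken from run()):

        op = proc_unit.addOperation(name='CorrectSMPhases', optype='other')
    """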
5598 5613
5599 5614 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
5600 5615
5601 5616 arrayParameters = dataOut.data_param
5602 5617 pairsList = []
5603 5618 pairx = (0,1)
5604 5619 pairy = (2,3)
5605 5620 pairsList.append(pairx)
5606 5621 pairsList.append(pairy)
5607 5622 jph = numpy.zeros(4)
5608 5623
5609 5624 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
5610 5625 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
5611 5626 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
5612 5627
5613 5628 meteorOps = SMOperations()
5614 5629 if channelPositions is None:
5615 5630 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
5616 5631 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
5617 5632
5618 5633 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
5619 5634 h = (hmin,hmax)
5620 5635
5621 5636 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
5622 5637
5623 5638 dataOut.data_param = arrayParameters
5624 5639 return
5625 5640
5626 5641 class SMPhaseCalibration(Operation):
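    """Estimates the inter-channel phase offsets of a specular-meteor system.

    Meteor parameters are buffered for nHours; the offsets that maximize the number
    of meteors with valid AOA/height solutions are searched on a progressively
    refined grid (__getGammas, __getPhases) and returned in degrees in data_output.

    A usage sketch, assuming the addOperation pattern shown for IGRFModel below:

        op = proc_unit.addOperation(name='SMPhaseCalibration', optype='other')
    """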
5627 5642
5628 5643 __buffer = None
5629 5644
5630 5645 __initime = None
5631 5646
5632 5647 __dataReady = False
5633 5648
5634 5649 __isConfig = False
5635 5650
5636 5651 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
5637 5652
5638 5653 dataTime = currentTime + paramInterval
5639 5654 deltaTime = dataTime - initTime
5640 5655
5641 5656 if deltaTime >= outputInterval or deltaTime < 0:
5642 5657 return True
5643 5658
5644 5659 return False
5645 5660
5646 5661 def __getGammas(self, pairs, d, phases):
5647 5662 gammas = numpy.zeros(2)
5648 5663
5649 5664 for i in range(len(pairs)):
5650 5665
5651 5666 pairi = pairs[i]
5652 5667
5653 5668 phip3 = phases[:,pairi[0]]
5654 5669 d3 = d[pairi[0]]
5655 5670 phip2 = phases[:,pairi[1]]
5656 5671 d2 = d[pairi[1]]
5657 5672 #Calculating gamma
5658 5673 jgamma = -phip2*d3/d2 - phip3
5659 5674 jgamma = numpy.angle(numpy.exp(1j*jgamma))
5660 5675
5661 5676 #Revised distribution
5662 5677 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
5663 5678
5664 5679 #Histogram
5665 5680 nBins = 64
5666 5681 rmin = -0.5*numpy.pi
5667 5682 rmax = 0.5*numpy.pi
5668 5683 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
5669 5684
5670 5685 meteorsY = phaseHisto[0]
5671 5686 phasesX = phaseHisto[1][:-1]
5672 5687 width = phasesX[1] - phasesX[0]
5673 5688 phasesX += width/2
5674 5689
5675 5690             #Gaussian approximation
5676 5691 bpeak = meteorsY.argmax()
5677 5692 peak = meteorsY.max()
5678 5693 jmin = bpeak - 5
5679 5694 jmax = bpeak + 5 + 1
5680 5695
5681 5696 if jmin<0:
5682 5697 jmin = 0
5683 5698 jmax = 6
5684 5699 elif jmax > meteorsY.size:
5685 5700 jmin = meteorsY.size - 6
5686 5701 jmax = meteorsY.size
5687 5702
5688 5703 x0 = numpy.array([peak,bpeak,50])
5689 5704 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
5690 5705
5691 5706 #Gammas
5692 5707 gammas[i] = coeff[0][1]
5693 5708
5694 5709 return gammas
5695 5710
5696 5711 def __residualFunction(self, coeffs, y, t):
5697 5712
5698 5713 return y - self.__gauss_function(t, coeffs)
5699 5714
5700 5715 def __gauss_function(self, t, coeffs):
5701 5716
5702 5717 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
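    # __residualFunction/__gauss_function fit a Gaussian a*exp(-0.5*((t - coeffs[1])/coeffs[2])**2)
    # to the phase histogram around its peak (via optimize.leastsq); the fitted centre coeffs[1]
    # is the value stored as gamma in __getGammas.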
5703 5718
5704 5719 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
5705 5720 meteorOps = SMOperations()
5706 5721 nchan = 4
5707 5722 pairx = pairsList[0] #x es 0
5708 5723 pairy = pairsList[1] #y es 1
5709 5724 center_xangle = 0
5710 5725 center_yangle = 0
5711 5726 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
5712 5727 ntimes = len(range_angle)
5713 5728
5714 5729 nstepsx = 20
5715 5730 nstepsy = 20
5716 5731
5717 5732 for iz in range(ntimes):
5718 5733 min_xangle = -range_angle[iz]/2 + center_xangle
5719 5734 max_xangle = range_angle[iz]/2 + center_xangle
5720 5735 min_yangle = -range_angle[iz]/2 + center_yangle
5721 5736 max_yangle = range_angle[iz]/2 + center_yangle
5722 5737
5723 5738 inc_x = (max_xangle-min_xangle)/nstepsx
5724 5739 inc_y = (max_yangle-min_yangle)/nstepsy
5725 5740
5726 5741 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
5727 5742 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
5728 5743 penalty = numpy.zeros((nstepsx,nstepsy))
5729 5744 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
5730 5745 jph = numpy.zeros(nchan)
5731 5746
5732 5747 # Iterations looking for the offset
5733 5748 for iy in range(int(nstepsy)):
5734 5749 for ix in range(int(nstepsx)):
5735 5750 d3 = d[pairsList[1][0]]
5736 5751 d2 = d[pairsList[1][1]]
5737 5752 d5 = d[pairsList[0][0]]
5738 5753 d4 = d[pairsList[0][1]]
5739 5754
5740 5755 alp2 = alpha_y[iy] #gamma 1
5741 5756 alp4 = alpha_x[ix] #gamma 0
5742 5757
5743 5758 alp3 = -alp2*d3/d2 - gammas[1]
5744 5759 alp5 = -alp4*d5/d4 - gammas[0]
5745 5760 jph[pairsList[0][1]] = alp4
5746 5761 jph[pairsList[0][0]] = alp5
5747 5762 jph[pairsList[1][0]] = alp3
5748 5763 jph[pairsList[1][1]] = alp2
5749 5764 jph_array[:,ix,iy] = jph
5750 5765 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
5751 5766 error = meteorsArray1[:,-1]
5752 5767 ind1 = numpy.where(error==0)[0]
5753 5768 penalty[ix,iy] = ind1.size
5754 5769
5755 5770 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
5756 5771 phOffset = jph_array[:,i,j]
5757 5772
5758 5773 center_xangle = phOffset[pairx[1]]
5759 5774 center_yangle = phOffset[pairy[1]]
5760 5775
5761 5776 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
5762 5777 phOffset = phOffset*180/numpy.pi
5763 5778 return phOffset
5764 5779
5765 5780
5766 5781 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
5767 5782
5768 5783 dataOut.flagNoData = True
5769 5784 self.__dataReady = False
5770 5785 dataOut.outputInterval = nHours*3600
5771 5786
5772 5787 if self.__isConfig == False:
5773 5788             #Get initial UTC time
5774 5789 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5775 5790 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5776 5791
5777 5792 self.__isConfig = True
5778 5793
5779 5794 if self.__buffer is None:
5780 5795 self.__buffer = dataOut.data_param.copy()
5781 5796
5782 5797 else:
5783 5798 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5784 5799
5785 5800 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5786 5801
5787 5802 if self.__dataReady:
5788 5803 dataOut.utctimeInit = self.__initime
5789 5804 self.__initime += dataOut.outputInterval #to erase time offset
5790 5805
5791 5806 freq = dataOut.frequency
5792 5807 c = dataOut.C #m/s
5793 5808 lamb = c/freq
5794 5809 k = 2*numpy.pi/lamb
5795 5810 azimuth = 0
5796 5811 h = (hmin, hmax)
5797 5812
5798 5813 if channelPositions is None:
5799 5814 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
5800 5815 meteorOps = SMOperations()
5801 5816 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
5802 5817
5803 5818 #Checking correct order of pairs
5804 5819 pairs = []
5805 5820 if distances[1] > distances[0]:
5806 5821 pairs.append((1,0))
5807 5822 else:
5808 5823 pairs.append((0,1))
5809 5824
5810 5825 if distances[3] > distances[2]:
5811 5826 pairs.append((3,2))
5812 5827 else:
5813 5828 pairs.append((2,3))
5814 5829
5815 5830 meteorsArray = self.__buffer
5816 5831 error = meteorsArray[:,-1]
5817 5832 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
5818 5833 ind1 = numpy.where(boolError)[0]
5819 5834 meteorsArray = meteorsArray[ind1,:]
5820 5835 meteorsArray[:,-1] = 0
5821 5836 phases = meteorsArray[:,8:12]
5822 5837
5823 5838 #Calculate Gammas
5824 5839 gammas = self.__getGammas(pairs, distances, phases)
5825 5840 #Calculate Phases
5826 5841 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
5827 5842 phasesOff = phasesOff.reshape((1,phasesOff.size))
5828 5843 dataOut.data_output = -phasesOff
5829 5844 dataOut.flagNoData = False
5830 5845 self.__buffer = None
5831 5846
5832 5847
5833 5848 return
5834 5849
5835 5850 class SMOperations():
5836 5851
5837 5852 def __init__(self):
5838 5853
5839 5854 return
5840 5855
5841 5856 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
5842 5857
5843 5858 arrayParameters = arrayParameters0.copy()
5844 5859 hmin = h[0]
5845 5860 hmax = h[1]
5846 5861
5847 5862 #Calculate AOA (Error N 3, 4)
5848 5863 #JONES ET AL. 1998
5849 5864 AOAthresh = numpy.pi/8
5850 5865 error = arrayParameters[:,-1]
5851 5866 phases = -arrayParameters[:,8:12] + jph
5852 5867 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
5853 5868
5854 5869 #Calculate Heights (Error N 13 and 14)
5855 5870 error = arrayParameters[:,-1]
5856 5871 Ranges = arrayParameters[:,1]
5857 5872 zenith = arrayParameters[:,4]
5858 5873 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
5859 5874
5860 5875 #----------------------- Get Final data ------------------------------------
5861 5876
5862 5877 return arrayParameters
5863 5878
5864 5879 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
5865 5880
5866 5881 arrayAOA = numpy.zeros((phases.shape[0],3))
5867 5882 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
5868 5883
5869 5884 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
5870 5885 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
5871 5886 arrayAOA[:,2] = cosDirError
5872 5887
5873 5888 azimuthAngle = arrayAOA[:,0]
5874 5889 zenithAngle = arrayAOA[:,1]
5875 5890
5876 5891 #Setting Error
5877 5892 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
5878 5893 error[indError] = 0
5879 5894         #Number 3: AOA not feasible
5880 5895 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
5881 5896 error[indInvalid] = 3
5882 5897 #Number 4: Large difference in AOAs obtained from different antenna baselines
5883 5898 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
5884 5899 error[indInvalid] = 4
5885 5900 return arrayAOA, error
5886 5901
5887 5902 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
5888 5903
5889 5904 #Initializing some variables
5890 5905 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
5891 5906 ang_aux = ang_aux.reshape(1,ang_aux.size)
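        # ang_aux spans -8..+8 full cycles of 2*pi: the candidate unwrapping offsets used
        # below to resolve the 2*pi ambiguity of the (ph0 - ph1) direction-cosine estimate.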
5892 5907
5893 5908 cosdir = numpy.zeros((arrayPhase.shape[0],2))
5894 5909 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
5895 5910
5896 5911
5897 5912 for i in range(2):
5898 5913 ph0 = arrayPhase[:,pairsList[i][0]]
5899 5914 ph1 = arrayPhase[:,pairsList[i][1]]
5900 5915 d0 = distances[pairsList[i][0]]
5901 5916 d1 = distances[pairsList[i][1]]
5902 5917
5903 5918 ph0_aux = ph0 + ph1
5904 5919 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
5905 5920 #First Estimation
5906 5921 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
5907 5922
5908 5923 #Most-Accurate Second Estimation
5909 5924 phi1_aux = ph0 - ph1
5910 5925 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
5911 5926 #Direction Cosine 1
5912 5927 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
5913 5928
5914 5929 #Searching the correct Direction Cosine
5915 5930 cosdir0_aux = cosdir0[:,i]
5916 5931 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
5917 5932 #Minimum Distance
5918 5933 cosDiff = (cosdir1 - cosdir0_aux)**2
5919 5934 indcos = cosDiff.argmin(axis = 1)
5920 5935 #Saving Value obtained
5921 5936 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
5922 5937
5923 5938 return cosdir0, cosdir
5924 5939
5925 5940 def __calculateAOA(self, cosdir, azimuth):
5926 5941 cosdirX = cosdir[:,0]
5927 5942 cosdirY = cosdir[:,1]
5928 5943
5929 5944 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
5930 5945 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth#0 deg north, 90 deg east
5931 5946 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
5932 5947
5933 5948 return angles
5934 5949
5935 5950 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
5936 5951
5937 5952 Ramb = 375 #Ramb = c/(2*PRF)
5938 5953 Re = 6371 #Earth Radius
5939 5954 heights = numpy.zeros(Ranges.shape)
5940 5955
5941 5956 R_aux = numpy.array([0,1,2])*Ramb
5942 5957 R_aux = R_aux.reshape(1,R_aux.size)
5943 5958
5944 5959 Ranges = Ranges.reshape(Ranges.size,1)
5945 5960
5946 5961 Ri = Ranges + R_aux
5947 5962 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
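        # Law-of-cosines height above the Earth's surface for every range-ambiguity
        # candidate Ri = Ranges + n*Ramb (n = 0, 1, 2), with Re and Ramb in km:
        #   hi = sqrt(Re**2 + Ri**2 + 2*Re*Ri*cos(zenith)) - Re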
5948 5963
5949 5964         #Count candidate heights between minHeight and maxHeight (nominally 70 to 110 km)
5950 5965 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
5951 5966 ind_h = numpy.where(h_bool == 1)[0]
5952 5967
5953 5968 hCorr = hi[ind_h, :]
5954 5969 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
5955 5970
5956 5971 hCorr = hi[ind_hCorr][:len(ind_h)]
5957 5972 heights[ind_h] = hCorr
5958 5973
5959 5974 #Setting Error
5960 5975         #Number 13: Height unresolvable echo: no valid height within 70 to 110 km
5961 5976 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
5962 5977 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
5963 5978 error[indError] = 0
5964 5979 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
5965 5980 error[indInvalid2] = 14
5966 5981 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
5967 5982 error[indInvalid1] = 13
5968 5983
5969 5984 return heights, error
5970 5985
5971 5986 def getPhasePairs(self, channelPositions):
5972 5987 chanPos = numpy.array(channelPositions)
5973 5988 listOper = list(itertools.combinations(list(range(5)),2))
5974 5989
5975 5990 distances = numpy.zeros(4)
5976 5991 axisX = []
5977 5992 axisY = []
5978 5993 distX = numpy.zeros(3)
5979 5994 distY = numpy.zeros(3)
5980 5995 ix = 0
5981 5996 iy = 0
5982 5997
5983 5998 pairX = numpy.zeros((2,2))
5984 5999 pairY = numpy.zeros((2,2))
5985 6000
5986 6001 for i in range(len(listOper)):
5987 6002 pairi = listOper[i]
5988 6003
5989 6004 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
5990 6005
5991 6006 if posDif[0] == 0:
5992 6007 axisY.append(pairi)
5993 6008 distY[iy] = posDif[1]
5994 6009 iy += 1
5995 6010 elif posDif[1] == 0:
5996 6011 axisX.append(pairi)
5997 6012 distX[ix] = posDif[0]
5998 6013 ix += 1
5999 6014
6000 6015 for i in range(2):
6001 6016 if i==0:
6002 6017 dist0 = distX
6003 6018 axis0 = axisX
6004 6019 else:
6005 6020 dist0 = distY
6006 6021 axis0 = axisY
6007 6022
6008 6023 side = numpy.argsort(dist0)[:-1]
6009 6024 axis0 = numpy.array(axis0)[side,:]
6010 6025 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
6011 6026 axis1 = numpy.unique(numpy.reshape(axis0,4))
6012 6027 side = axis1[axis1 != chanC]
6013 6028 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
6014 6029 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
6015 6030 if diff1<0:
6016 6031 chan2 = side[0]
6017 6032 d2 = numpy.abs(diff1)
6018 6033 chan1 = side[1]
6019 6034 d1 = numpy.abs(diff2)
6020 6035 else:
6021 6036 chan2 = side[1]
6022 6037 d2 = numpy.abs(diff2)
6023 6038 chan1 = side[0]
6024 6039 d1 = numpy.abs(diff1)
6025 6040
6026 6041 if i==0:
6027 6042 chanCX = chanC
6028 6043 chan1X = chan1
6029 6044 chan2X = chan2
6030 6045 distances[0:2] = numpy.array([d1,d2])
6031 6046 else:
6032 6047 chanCY = chanC
6033 6048 chan1Y = chan1
6034 6049 chan2Y = chan2
6035 6050 distances[2:4] = numpy.array([d1,d2])
6036 6051
6037 6052 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
6038 6053
6039 6054 return pairslist, distances
6040 6055
6041 6056 class IGRFModel(Operation):
6042 6057 """Operation to calculate Geomagnetic parameters.
6043 6058
6044 6059 Parameters:
6045 6060 -----------
6046 6061 None
6047 6062
6048 6063 Example
6049 6064 --------
6050 6065
6051 6066 op = proc_unit.addOperation(name='IGRFModel', optype='other')
6052 6067
6053 6068 """
6054 6069
6055 6070 def __init__(self, **kwargs):
6056 6071
6057 6072 Operation.__init__(self, **kwargs)
6058 6073
6059 6074 self.aux=1
6060 6075
6061 6076 def run(self,dataOut):
6062 6077
6063 6078 try:
6064 6079 from schainpy.model.proc import mkfact_short_2020
6065 6080 except:
6066 6081 log.warning('You should install "mkfact_short_2020" module to process IGRF Model')
6067 6082
6068 6083 if self.aux==1:
6069 6084
6070 6085 #dataOut.TimeBlockSeconds_First_Time=time.mktime(time.strptime(dataOut.TimeBlockDate))
6071 6086 #### we do not use dataOut.datatime.ctime() because it's the time of the second (next) block
6072 6087 dataOut.TimeBlockSeconds_First_Time=dataOut.TimeBlockSeconds
6073 6088 dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_First_Time)
6074 6089 dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0
6075 6090 dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0
6076 6091
6077 6092 self.aux=0
6078 6093
6079 6094 dataOut.h=numpy.arange(0.0,15.0*dataOut.MAXNRANGENDT,15.0,dtype='float32')
6080 6095 dataOut.bfm=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
6081 6096 dataOut.bfm=numpy.array(dataOut.bfm,order='F')
6082 6097 dataOut.thb=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
6083 6098 dataOut.thb=numpy.array(dataOut.thb,order='F')
6084 6099 dataOut.bki=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
6085 6100 dataOut.bki=numpy.array(dataOut.bki,order='F')
6086 6101
6087 6102 mkfact_short_2020.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT)
6088 6103
6089 6104 return dataOut
6090 6105
6091 6106 class MergeProc(ProcessingUnit):
6092 6107
6093 6108 def __init__(self):
6094 6109 ProcessingUnit.__init__(self)
6095 6110
6096 6111 def run(self, attr_data, attr_data_2 = None, attr_data_3 = None, attr_data_4 = None, attr_data_5 = None, mode=0):
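        # Merge modes implemented below (a summary of the branches, not a formal spec):
        #   mode 0: concatenate attr_data from all input units
        #   mode 1: "Hybrid" - keep DP spectra (dataLag_spc/cspc) and LP spectra (dataLag_spc_LP/cspc_LP) side by side
        #   mode 2: "HAE 2022" - sum attr_data over inputs and double nIncohInt
        #   mode 4: "Hybrid LP-SSheightProfiles" - DP lag spectra from input 0, LP lag spectra and ACF from input 1
        #   mode 5: concatenate attr_data and attr_data_2 independently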
6097 6112
6098 6113 self.dataOut = getattr(self, self.inputs[0])
6099 6114 data_inputs = [getattr(self, attr) for attr in self.inputs]
6100 6115 #print(self.inputs)
6101 6116 #print(numpy.shape([getattr(data, attr_data) for data in data_inputs][1]))
6102 6117 #exit(1)
6103 6118 if mode==0:
6104 6119 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
6105 6120 setattr(self.dataOut, attr_data, data)
6106 6121
6107 6122 if mode==1: #Hybrid
6108 6123 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
6109 6124 #setattr(self.dataOut, attr_data, data)
6110 6125 setattr(self.dataOut, 'dataLag_spc', [getattr(data, attr_data) for data in data_inputs][0])
6111 6126 setattr(self.dataOut, 'dataLag_spc_LP', [getattr(data, attr_data) for data in data_inputs][1])
6112 6127 setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
6113 6128 setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
6114 6129 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
6115 6130 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
6116 6131 '''
6117 6132 print(self.dataOut.dataLag_spc_LP.shape)
6118 6133 print(self.dataOut.dataLag_cspc_LP.shape)
6119 6134 exit(1)
6120 6135 '''
6121 6136
6122 6137 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
6123 6138 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
6124 6139 '''
6125 6140 print("Merge")
6126 6141 print(numpy.shape(self.dataOut.dataLag_spc))
6127 6142 print(numpy.shape(self.dataOut.dataLag_spc_LP))
6128 6143 print(numpy.shape(self.dataOut.dataLag_cspc))
6129 6144 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
6130 6145 exit(1)
6131 6146 '''
6132 6147 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
6133 6148 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
6134 6149 #exit(1)
6135 6150 #print(self.dataOut.NDP)
6136 6151 #print(self.dataOut.nNoiseProfiles)
6137 6152
6138 6153 #self.dataOut.nIncohInt_LP = 128
6139 6154 self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
6140 6155 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt
6141 6156 self.dataOut.NLAG = 16
6142 6157 self.dataOut.NRANGE = 200
6143 6158 self.dataOut.NSCAN = 128
6144 6159 #print(numpy.shape(self.dataOut.data_spc))
6145 6160
6146 6161 #exit(1)
6147 6162
6148 6163 if mode==2: #HAE 2022
6149 6164 data = numpy.sum([getattr(data, attr_data) for data in data_inputs],axis=0)
6150 6165 setattr(self.dataOut, attr_data, data)
6151 6166
6152 6167 self.dataOut.nIncohInt *= 2
6153 6168 #meta = self.dataOut.getFreqRange(1)/1000.
6154 6169 self.dataOut.freqRange = self.dataOut.getFreqRange(1)/1000.
6155 6170
6156 6171 #exit(1)
6157 6172
6158 6173 if mode==4: #Hybrid LP-SSheightProfiles
6159 6174 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
6160 6175 #setattr(self.dataOut, attr_data, data)
6161 6176 setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[0], attr_data)) #DP
6162 6177 setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[0], attr_data_2)) #DP
6163 6178 setattr(self.dataOut, 'dataLag_spc_LP', getattr(data_inputs[1], attr_data_3)) #LP
6164 6179 #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP
6165 6180 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
6166 6181 setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
6167 6182 #print("Merge data_acf: ",self.dataOut.data_acf.shape)
6168 6183 #exit(1)
6169 6184 #print(self.dataOut.data_spc_LP.shape)
6170 6185 #print("Exit")
6171 6186 #exit(1)
6172 6187 #setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
6173 6188 #setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
6174 6189 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
6175 6190 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
6176 6191 '''
6177 6192 print(self.dataOut.dataLag_spc_LP.shape)
6178 6193 print(self.dataOut.dataLag_cspc_LP.shape)
6179 6194 exit(1)
6180 6195 '''
6181 6196 '''
6182 6197 print(self.dataOut.dataLag_spc_LP[0,:,100])
6183 6198 print(self.dataOut.dataLag_spc_LP[1,:,100])
6184 6199 exit(1)
6185 6200 '''
6186 6201 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
6187 6202 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
6188 6203 '''
6189 6204 print("Merge")
6190 6205 print(numpy.shape(self.dataOut.dataLag_spc))
6191 6206 print(numpy.shape(self.dataOut.dataLag_spc_LP))
6192 6207 print(numpy.shape(self.dataOut.dataLag_cspc))
6193 6208 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
6194 6209 exit(1)
6195 6210 '''
6196 6211 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
6197 6212 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
6198 6213 #exit(1)
6199 6214 #print(self.dataOut.NDP)
6200 6215 #print(self.dataOut.nNoiseProfiles)
6201 6216
6202 6217 #self.dataOut.nIncohInt_LP = 128
6203 6218 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
6204 6219 self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP
6205 6220 self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP
6206 6221 self.dataOut.NSCAN = 128
6207 6222 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN
6208 6223 #print("sahpi",self.dataOut.nIncohInt_LP)
6209 6224 #exit(1)
6210 6225 self.dataOut.NLAG = 16
6211 6226 self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1]
6212 6227
6213 6228 #print(numpy.shape(self.dataOut.data_spc))
6214 6229
6215 6230 #exit(1)
6216 6231 if mode==5:
6217 6232 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
6218 6233 setattr(self.dataOut, attr_data, data)
6219 6234 data = numpy.concatenate([getattr(data, attr_data_2) for data in data_inputs])
6220 6235 setattr(self.dataOut, attr_data_2, data)
6221 6236 #data = numpy.concatenate([getattr(data, attr_data_3) for data in data_inputs])
6222 6237 #setattr(self.dataOut, attr_data_3, data)
6223 6238             #print(self.dataOut.moments.shape,self.dataOut.data_snr.shape,self.dataOut.heightList.shape)