Fix Spectral Fitting
Alexander Valdez -
r1683:256acf43d60a
@@ -1,5231 +1,5224
1 1 import numpy
2 2 import math
3 3 from scipy import optimize, interpolate, signal, stats, ndimage
4 4 from scipy.fftpack import fft
5 5 import scipy
6 6 import re
7 7 import datetime
8 8 import copy
9 9 import sys
10 10 import importlib
11 11 import itertools
12 12 from multiprocessing import Pool, TimeoutError
13 13 from multiprocessing.pool import ThreadPool
14 14 import time
15 15 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state parameters
16 16 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
17 17 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
18 18 from schainpy.model.data.jrodata import Spectra
19 19 from scipy import asarray as ar,exp
20 20 from scipy.optimize import fmin, curve_fit
21 21 from schainpy.utils import log
22 22 import warnings
23 23 from numpy import NaN
24 24 from scipy.optimize.optimize import OptimizeWarning
25 25 warnings.filterwarnings('ignore')
26 import pdb
26 27
27 28
28 29 SPEED_OF_LIGHT = 299792458
29 30
30 31 '''solving pickling issue'''
31 32
32 33 def _pickle_method(method):
33 34 func_name = method.__func__.__name__
34 35 obj = method.__self__
35 36 cls = method.__self__.__class__
36 37 return _unpickle_method, (func_name, obj, cls)
37 38
38 39 def _unpickle_method(func_name, obj, cls):
39 40 for cls in cls.mro():
40 41 try:
41 42 func = cls.__dict__[func_name]
42 43 except KeyError:
43 44 pass
44 45 else:
45 46 break
46 47 return func.__get__(obj, cls)
47 48
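def _register_method_pickling():
    # Minimal sketch (hypothetical helper, not shown in the original module): the usual
    # copyreg recipe that would wire up the two helpers above, so that bound methods
    # (e.g. the per-channel FitGau calls handed to a multiprocessing.Pool) can be pickled.
    import copyreg
    import types
    copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)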
48 49
49 50 class ParametersProc(ProcessingUnit):
50 51
51 52 METHODS = {}
52 53 nSeconds = None
53 54
54 55 def __init__(self):
55 56 ProcessingUnit.__init__(self)
56 57
57 58 self.buffer = None
58 59 self.firstdatatime = None
59 60 self.profIndex = 0
60 61 self.dataOut = Parameters()
61 62 self.setupReq = False #Add to all processing units
62 63
63 64 def __updateObjFromInput(self):
64 65
65 66 self.dataOut.inputUnit = self.dataIn.type
66 67
67 68 self.dataOut.timeZone = self.dataIn.timeZone
68 69 self.dataOut.dstFlag = self.dataIn.dstFlag
69 70 self.dataOut.errorCount = self.dataIn.errorCount
70 71 self.dataOut.useLocalTime = self.dataIn.useLocalTime
71 72
72 73 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
73 74 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
74 75 self.dataOut.channelList = self.dataIn.channelList
75 76 self.dataOut.heightList = self.dataIn.heightList
76 77 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
77 78 # self.dataOut.nBaud = self.dataIn.nBaud
78 79 # self.dataOut.nCode = self.dataIn.nCode
79 80 # self.dataOut.code = self.dataIn.code
80 81 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
81 82 self.dataOut.utctime = self.dataIn.utctime
82 83 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData #assume the data is already decoded
83 84 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData #assume the data has not been flipped
84 85 self.dataOut.nCohInt = self.dataIn.nCohInt
85 86 self.dataOut.timeInterval1 = self.dataIn.timeInterval
86 87 self.dataOut.heightList = self.dataIn.heightList
87 88 self.dataOut.frequency = self.dataIn.frequency
88 89 self.dataOut.runNextUnit = self.dataIn.runNextUnit
89 90
90 91 def run(self, runNextUnit=0):
91 92
92 93 self.dataIn.runNextUnit = runNextUnit
93 94 #---------------------- Voltage Data ---------------------------
94 95
95 96 if self.dataIn.type == "Voltage":
96 97
97 98 self.__updateObjFromInput()
98 99 self.dataOut.data_pre = self.dataIn.data.copy()
99 100 self.dataOut.flagNoData = False
100 101 self.dataOut.utctimeInit = self.dataIn.utctime
101 102 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
102 103 if hasattr(self.dataIn, 'dataPP_POW'):
103 104 self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
104 105
105 106 if hasattr(self.dataIn, 'dataPP_POWER'):
106 107 self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
107 108
108 109 if hasattr(self.dataIn, 'dataPP_DOP'):
109 110 self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
110 111
111 112 if hasattr(self.dataIn, 'dataPP_SNR'):
112 113 self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
113 114
114 115 if hasattr(self.dataIn, 'dataPP_WIDTH'):
115 116 self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
116 117 return
117 118
118 119 #---------------------- Spectra Data ---------------------------
119 120
120 121 if self.dataIn.type == "Spectra":
121 122
122 123 self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
123 124 self.dataOut.data_spc = self.dataIn.data_spc
124 125 self.dataOut.data_cspc = self.dataIn.data_cspc
125 126 self.dataOut.nProfiles = self.dataIn.nProfiles
126 127 self.dataOut.nIncohInt = self.dataIn.nIncohInt
127 128 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
128 129 self.dataOut.ippFactor = self.dataIn.ippFactor
129 130 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
130 131 self.dataOut.spc_noise = self.dataIn.getNoise()
131 132 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
132 133 # self.dataOut.normFactor = self.dataIn.normFactor
133 134 self.dataOut.pairsList = self.dataIn.pairsList
134 135 self.dataOut.groupList = self.dataIn.pairsList
135 136 self.dataOut.flagNoData = False
136 137
137 138 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
138 139 self.dataOut.ChanDist = self.dataIn.ChanDist
139 140 else: self.dataOut.ChanDist = None
140 141
141 142 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
142 143 # self.dataOut.VelRange = self.dataIn.VelRange
143 144 #else: self.dataOut.VelRange = None
144 145
145 146 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
146 147 self.dataOut.RadarConst = self.dataIn.RadarConst
147 148
148 149 if hasattr(self.dataIn, 'NPW'): #NPW
149 150 self.dataOut.NPW = self.dataIn.NPW
150 151
151 152 if hasattr(self.dataIn, 'COFA'): #COFA
152 153 self.dataOut.COFA = self.dataIn.COFA
153 154
154 155
155 156
156 157 #---------------------- Correlation Data ---------------------------
157 158
158 159 if self.dataIn.type == "Correlation":
159 160 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
160 161
161 162 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
162 163 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
163 164 self.dataOut.groupList = (acf_pairs, ccf_pairs)
164 165
165 166 self.dataOut.abscissaList = self.dataIn.lagRange
166 167 self.dataOut.noise = self.dataIn.noise
167 168 self.dataOut.data_snr = self.dataIn.SNR
168 169 self.dataOut.flagNoData = False
169 170 self.dataOut.nAvg = self.dataIn.nAvg
170 171
171 172 #---------------------- Parameters Data ---------------------------
172 173
173 174 if self.dataIn.type == "Parameters":
174 175 self.dataOut.copy(self.dataIn)
175 176 self.dataOut.flagNoData = False
176 177
177 178 return True
178 179
179 180 self.__updateObjFromInput()
180 181 self.dataOut.utctimeInit = self.dataIn.utctime
181 182 self.dataOut.paramInterval = self.dataIn.timeInterval
182 183
183 184 return
184 185
185 186
186 187 def target(tups):
187 188
188 189 obj, args = tups
189 190
190 191 return obj.FitGau(args)
191 192
192 193 class RemoveWideGC(Operation):
193 194 ''' This class removes wide ground clutter and replaces it with simple interpolated points.
194 195 This mainly applies to the CLAIRE radar
195 196
196 197 ClutterWidth : Width to look for the clutter peak
197 198
198 199 Input:
199 200
200 201 self.dataOut.data_pre : SPC and CSPC
201 202 self.dataOut.spc_range : To select wind and rainfall velocities
202 203
203 204 Affected:
204 205
205 206 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
206 207
207 208 Written by D. Scipión 25.02.2021
208 209 '''
209 210 def __init__(self):
210 211 Operation.__init__(self)
211 212 self.i = 0
212 213 self.ich = 0
213 214 self.ir = 0
214 215
215 216 def run(self, dataOut, ClutterWidth=2.5):
216 217
217 218 self.spc = dataOut.data_pre[0].copy()
218 219 self.spc_out = dataOut.data_pre[0].copy()
219 220 self.Num_Chn = self.spc.shape[0]
220 221 self.Num_Hei = self.spc.shape[2]
221 222 VelRange = dataOut.spc_range[2][:-1]
222 223 dv = VelRange[1]-VelRange[0]
223 224
224 225 # Find the velocities that correspond to zero
225 226 gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))
226 227
227 228 # Removing novalid data from the spectra
228 229 for ich in range(self.Num_Chn) :
229 230 for ir in range(self.Num_Hei) :
230 231 # Estimate the noise at each range
231 232 HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)
232 233
233 234 # Removing the noise floor at each range
234 235 novalid = numpy.where(self.spc[ich,:,ir] < HSn)
235 236 self.spc[ich,novalid,ir] = HSn
236 237
237 238 junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
238 239 j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
239 240 j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
240 241 if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
241 242 continue
242 243 junk3 = numpy.squeeze(numpy.diff(j1index))
243 244 junk4 = numpy.squeeze(numpy.diff(j2index))
244 245
245 246 valleyindex = j2index[numpy.where(junk4>1)]
246 247 peakindex = j1index[numpy.where(junk3>1)]
247 248
248 249 isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
249 250 if numpy.size(isvalid) == 0 :
250 251 continue
251 252 if numpy.size(isvalid) >1 :
252 253 vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
253 254 isvalid = isvalid[vindex]
254 255
255 256 # clutter peak
256 257 gcpeak = peakindex[isvalid]
257 258 vl = numpy.where(valleyindex < gcpeak)
258 259 if numpy.size(vl) == 0:
259 260 continue
260 261 gcvl = valleyindex[vl[0][-1]]
261 262 vr = numpy.where(valleyindex > gcpeak)
262 263 if numpy.size(vr) == 0:
263 264 continue
264 265 gcvr = valleyindex[vr[0][0]]
265 266
266 267 # Removing the clutter
267 268 interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
268 269 gcindex = gc_values[gcvl+1:gcvr-1]
269 270 self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])
270 271
271 272 dataOut.data_pre[0] = self.spc_out
272 273
273 274 return dataOut
274 275
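def _interp_over_clutter_sketch():
    # Minimal sketch (hypothetical helper, not used by the chain) of the replacement idea
    # in RemoveWideGC above: spectral bins flagged as ground clutter are overwritten with a
    # linear interpolation between the two valley bins that bracket the clutter peak,
    # exactly as numpy.interp is used in run(). All numbers below are made up.
    vel = numpy.linspace(-5., 5., 11)      # velocity axis [m/s]
    spc = numpy.ones(11)                   # flat spectrum ...
    spc[4:7] = [5., 20., 6.]               # ... with a clutter peak around 0 m/s
    gcindex = numpy.array([4, 5, 6])       # clutter bins to overwrite
    interpindex = numpy.array([3, 7])      # bracketing valley bins
    spc[gcindex] = numpy.interp(vel[gcindex], vel[interpindex], spc[interpindex])
    return spc                             # clutter bins now lie on a straight line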
275 276 class SpectralFilters(Operation):
276 277 ''' This class replaces the novalid (invalid) values with noise for each channel.
277 278 This applies to CLAIRE RADAR
278 279
279 280 PositiveLimit : RightLimit of novalid data
280 281 NegativeLimit : LeftLimit of novalid data
281 282
282 283 Input:
283 284
284 285 self.dataOut.data_pre : SPC and CSPC
285 286 self.dataOut.spc_range : To select wind and rainfall velocities
286 287
287 288 Affected:
288 289
289 290 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
290 291
291 292 Written by D. Scipión 29.01.2021
292 293 '''
293 294 def __init__(self):
294 295 Operation.__init__(self)
295 296 self.i = 0
296 297
297 298 def run(self, dataOut, NegativeLimit=None, PositiveLimit=None):
298 299 
299 300 self.spc = dataOut.data_pre[0].copy()
300 301 self.Num_Chn = self.spc.shape[0]
301 302 VelRange = dataOut.spc_range[2][:-1]
302 303 
303 304 # novalid corresponds to data within the NegativeLimit and PositiveLimit
304 305 if (NegativeLimit is None) or (PositiveLimit is None): return dataOut # assumed behaviour: no limits given, nothing to replace
305 306 novalid = numpy.where((VelRange >= NegativeLimit) & (VelRange <= PositiveLimit))[0]
306 307 # Removing novalid data from the spectra
307 308 for i in range(self.Num_Chn):
308 309 self.spc[i,novalid,:] = dataOut.noise[i]
309 310 dataOut.data_pre[0] = self.spc
310 311 return dataOut
311 312
312 313
313 314 class GaussianFit(Operation):
314 315
315 316 '''
316 317 Function that fits one or two generalized Gaussians (gg) based
317 318 on the PSD shape across a "power band" identified from a cumsum of
318 319 the measured spectrum minus the noise.
319 320
320 321 Input:
321 322 self.dataOut.data_pre : SelfSpectra
322 323
323 324 Output:
324 325 self.dataOut.SPCparam : SPC_ch1, SPC_ch2
325 326
326 327 '''
327 328 def __init__(self):
328 329 Operation.__init__(self)
329 330 self.i=0
330 331
331 332
332 333 # def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points
333 334 def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
334 335 """This routine will find a couple of generalized Gaussians to a power spectrum
335 336 methods: generalized, squared
336 337 input: spc
337 338 output:
338 339 noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1
339 340 """
340 341 print ('Entering ',method,' double Gaussian fit')
341 342 self.spc = dataOut.data_pre[0].copy()
342 343 self.Num_Hei = self.spc.shape[2]
343 344 self.Num_Bin = self.spc.shape[1]
344 345 self.Num_Chn = self.spc.shape[0]
345 346
346 347 start_time = time.time()
347 348
348 349 pool = Pool(processes=self.Num_Chn)
349 350 args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
350 351 objs = [self for __ in range(self.Num_Chn)]
351 352 attrs = list(zip(objs, args))
352 353 DGauFitParam = pool.map(target, attrs)
353 354 # Parameters:
354 355 # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
355 356 dataOut.DGauFitParams = numpy.asarray(DGauFitParam)
356 357
357 358 # Double Gaussian Curves
358 359 gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
359 360 gau0[:] = numpy.NaN
360 361 gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
361 362 gau1[:] = numpy.NaN
362 363 x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
363 364 for iCh in range(self.Num_Chn):
364 365 N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
365 366 N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
366 367 A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
367 368 A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
368 369 v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
369 370 v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
370 371 s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
371 372 s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
372 373 if method == 'generalized':
373 374 p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
374 375 p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
375 376 elif method == 'squared':
376 377 p0 = 2.
377 378 p1 = 2.
378 379 gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
379 380 gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
380 381 dataOut.GaussFit0 = gau0
381 382 dataOut.GaussFit1 = gau1
382 383
383 384 print('Leaving ',method ,' double Gaussian fit')
384 385 return dataOut
385 386
386 387 def FitGau(self, X):
387 388 # print('Entering FitGau')
388 389 # Assigning the variables
389 390 Vrange, ch, wnoise, num_intg, SNRlimit = X
390 391 # Noise Limits
391 392 noisebl = wnoise * 0.9
392 393 noisebh = wnoise * 1.1
393 394 # Radar Velocity
394 395 Va = max(Vrange)
395 396 deltav = Vrange[1] - Vrange[0]
396 397 x = numpy.arange(self.Num_Bin)
397 398
398 399 # print ('stop 0')
399 400
400 401 # 5 parameters, 2 Gaussians
401 402 DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
402 403 DGauFitParam[:] = numpy.NaN
403 404
404 405 # SPCparam = []
405 406 # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei])
406 407 # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei])
407 408 # SPC_ch1[:] = 0 #numpy.NaN
408 409 # SPC_ch2[:] = 0 #numpy.NaN
409 410 # print ('stop 1')
410 411 for ht in range(self.Num_Hei):
411 412 # print (ht)
412 413 # print ('stop 2')
413 414 # Spectra at each range
414 415 spc = numpy.asarray(self.spc)[ch,:,ht]
415 416 snr = ( spc.mean() - wnoise ) / wnoise
416 417 snrdB = 10.*numpy.log10(snr)
417 418
418 419 #print ('stop 3')
419 420 if snrdB < SNRlimit :
420 421 # snr = numpy.NaN
421 422 # SPC_ch1[:,ht] = 0#numpy.NaN
422 423 # SPC_ch1[:,ht] = 0#numpy.NaN
423 424 # SPCparam = (SPC_ch1,SPC_ch2)
424 425 # print ('SNR less than SNRth')
425 426 continue
426 427 # wnoise = hildebrand_sekhon(spc,num_intg)
427 428 # print ('stop 2.01')
428 429 #############################################
429 430 # normalizing spc and noise
430 431 # This part differs from gg1
431 432 # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
432 433 #spc = spc / spc_norm_max
433 434 # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021
434 435 #############################################
435 436
436 437 # print ('stop 2.1')
437 438 fatspectra=1.0
438 439 # noise per channel.... we might want to use the noise at each range
439 440
440 441 # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021
441 442 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
442 443 #if wnoise>1.1*pnoise: # to be tested later
443 444 # wnoise=pnoise
444 445 # noisebl = wnoise*0.9
445 446 # noisebh = wnoise*1.1
446 447 spc = spc - wnoise # signal
447 448
448 449 # print ('stop 2.2')
449 450 minx = numpy.argmin(spc)
450 451 #spcs=spc.copy()
451 452 spcs = numpy.roll(spc,-minx)
452 453 cum = numpy.cumsum(spcs)
453 454 # tot_noise = wnoise * self.Num_Bin #64;
454 455
455 456 # print ('stop 2.3')
456 457 # snr = sum(spcs) / tot_noise
457 458 # snrdB = 10.*numpy.log10(snr)
458 459 #print ('stop 3')
459 460 # if snrdB < SNRlimit :
460 461 # snr = numpy.NaN
461 462 # SPC_ch1[:,ht] = 0#numpy.NaN
462 463 # SPC_ch1[:,ht] = 0#numpy.NaN
463 464 # SPCparam = (SPC_ch1,SPC_ch2)
464 465 # print ('SNR less than SNRth')
465 466 # continue
466 467
467 468
468 469 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
469 470 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
470 471 # print ('stop 4')
471 472 cummax = max(cum)
472 473 epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
473 474 cumlo = cummax * epsi
474 475 cumhi = cummax * (1-epsi)
475 476 powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
476 477
477 478 # print ('stop 5')
478 479 if len(powerindex) < 1:# case for powerindex 0
479 480 # print ('powerindex < 1')
480 481 continue
481 482 powerlo = powerindex[0]
482 483 powerhi = powerindex[-1]
483 484 powerwidth = powerhi-powerlo
484 485 if powerwidth <= 1:
485 486 # print('powerwidth <= 1')
486 487 continue
487 488
488 489 # print ('stop 6')
489 490 firstpeak = powerlo + powerwidth/10.# first gaussian energy location
490 491 secondpeak = powerhi - powerwidth/10. #second gaussian energy location
491 492 midpeak = (firstpeak + secondpeak)/2.
492 493 firstamp = spcs[int(firstpeak)]
493 494 secondamp = spcs[int(secondpeak)]
494 495 midamp = spcs[int(midpeak)]
495 496
496 497 y_data = spc + wnoise
497 498
498 499 ''' single Gaussian '''
499 500 shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
500 501 width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
501 502 power0 = 2.
502 503 amplitude0 = midamp
503 504 state0 = [shift0,width0,amplitude0,power0,wnoise]
504 505 bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
505 506 lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)
506 507 # print ('stop 7.1')
507 508 # print (bnds)
508 509
509 510 chiSq1=lsq1[1]
510 511
511 512 # print ('stop 8')
512 513 if fatspectra<1.0 and powerwidth<4:
513 514 choice=0
514 515 Amplitude0=lsq1[0][2]
515 516 shift0=lsq1[0][0]
516 517 width0=lsq1[0][1]
517 518 p0=lsq1[0][3]
518 519 Amplitude1=0.
519 520 shift1=0.
520 521 width1=0.
521 522 p1=0.
522 523 noise=lsq1[0][4]
523 524 #return (numpy.array([shift0,width0,Amplitude0,p0]),
524 525 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
525 526 # print ('stop 9')
526 527 ''' two Gaussians '''
527 528 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
528 529 shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
529 530 shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
530 531 width0 = powerwidth/6.
531 532 width1 = width0
532 533 power0 = 2.
533 534 power1 = power0
534 535 amplitude0 = firstamp
535 536 amplitude1 = secondamp
536 537 state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
537 538 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
538 539 bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
539 540 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
540 541
541 542 # print ('stop 10')
542 543 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
543 544
544 545 # print ('stop 11')
545 546 chiSq2 = lsq2[1]
546 547
547 548 # print ('stop 12')
548 549
549 550 oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
550 551
551 552 # print ('stop 13')
552 553 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
553 554 if oneG:
554 555 choice = 0
555 556 else:
556 557 w1 = lsq2[0][1]; w2 = lsq2[0][5]
557 558 a1 = lsq2[0][2]; a2 = lsq2[0][6]
558 559 p1 = lsq2[0][3]; p2 = lsq2[0][7]
559 560 s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
560 561 s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
561 562 gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each generalized gaussian with proper p scaling
562 563
563 564 if gp1>gp2:
564 565 if a1>0.7*a2:
565 566 choice = 1
566 567 else:
567 568 choice = 2
568 569 elif gp2>gp1:
569 570 if a2>0.7*a1:
570 571 choice = 2
571 572 else:
572 573 choice = 1
573 574 else:
574 575 choice = numpy.argmax([a1,a2])+1
575 576 #else:
576 577 #choice=argmin([std2a,std2b])+1
577 578
578 579 else: # with low SNR go to the most energetic peak
579 580 choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
580 581
581 582 # print ('stop 14')
582 583 shift0 = lsq2[0][0]
583 584 vel0 = Vrange[0] + shift0 * deltav
584 585 shift1 = lsq2[0][4]
585 586 # vel1=Vrange[0] + shift1 * deltav
586 587
587 588 # max_vel = 1.0
588 589 # Va = max(Vrange)
589 590 # deltav = Vrange[1]-Vrange[0]
590 591 # print ('stop 15')
591 592 #first peak will be 0, second peak will be 1
592 593 # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D.Scipión 19.03.2021
593 594 if vel0 > -Va and vel0 < Va : #first peak is in the correct range
594 595 shift0 = lsq2[0][0]
595 596 width0 = lsq2[0][1]
596 597 Amplitude0 = lsq2[0][2]
597 598 p0 = lsq2[0][3]
598 599
599 600 shift1 = lsq2[0][4]
600 601 width1 = lsq2[0][5]
601 602 Amplitude1 = lsq2[0][6]
602 603 p1 = lsq2[0][7]
603 604 noise = lsq2[0][8]
604 605 else:
605 606 shift1 = lsq2[0][0]
606 607 width1 = lsq2[0][1]
607 608 Amplitude1 = lsq2[0][2]
608 609 p1 = lsq2[0][3]
609 610
610 611 shift0 = lsq2[0][4]
611 612 width0 = lsq2[0][5]
612 613 Amplitude0 = lsq2[0][6]
613 614 p0 = lsq2[0][7]
614 615 noise = lsq2[0][8]
615 616
616 617 if Amplitude0<0.05: # in case the peak is noise
617 618 shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
618 619 if Amplitude1<0.05:
619 620 shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]
620 621
621 622 # print ('stop 16 ')
622 623 # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0)
623 624 # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1)
624 625 # SPCparam = (SPC_ch1,SPC_ch2)
625 626
626 627 DGauFitParam[0,ht,0] = noise
627 628 DGauFitParam[0,ht,1] = noise
628 629 DGauFitParam[1,ht,0] = Amplitude0
629 630 DGauFitParam[1,ht,1] = Amplitude1
630 631 DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
631 632 DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
632 633 DGauFitParam[3,ht,0] = width0 * deltav
633 634 DGauFitParam[3,ht,1] = width1 * deltav
634 635 DGauFitParam[4,ht,0] = p0
635 636 DGauFitParam[4,ht,1] = p1
636 637
637 638 return DGauFitParam
638 639
639 640 def y_model1(self,x,state):
640 641 shift0, width0, amplitude0, power0, noise = state
641 642 model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
642 643 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
643 644 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
644 645 return model0 + model0u + model0d + noise
645 646
646 647 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
647 648 shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
648 649 model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
649 650 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
650 651 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
651 652
652 653 model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
653 654 model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
654 655 model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
655 656 return model0 + model0u + model0d + model1 + model1u + model1d + noise
656 657
657 658 def misfit1(self,state,y_data,x,num_intg): # This function compares how close the real data is to the model data; the closer they are, the better.
658 659
659 660 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
660 661
661 662 def misfit2(self,state,y_data,x,num_intg):
662 663 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
663 664
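def _generalized_gaussian_sketch():
    # Minimal sketch (illustrative values only) of the model fitted by GaussianFit above:
    # a generalized Gaussian A*exp(-0.5*|(x-shift)/width|**p) plus noise, with aliased
    # copies at +/- Num_Bin as in y_model1/y_model2. p = 2 is an ordinary Gaussian;
    # p < 2 gives heavier tails, p > 2 a flatter top (p is bounded to [0.5, 3] in FitGau).
    Num_Bin = 64
    x = numpy.arange(Num_Bin)
    shift, width, amplitude, power, noise = 40., 5., 10., 2., 1.
    model = amplitude * numpy.exp(-0.5 * numpy.abs((x - shift) / width) ** power)
    model += amplitude * numpy.exp(-0.5 * numpy.abs((x - shift - Num_Bin) / width) ** power)
    model += amplitude * numpy.exp(-0.5 * numpy.abs((x - shift + Num_Bin) / width) ** power)
    return model + noise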
664 665 class Oblique_Gauss_Fit(Operation):
665 666
666 667 def __init__(self):
667 668 Operation.__init__(self)
668 669
669 670 def Gauss_fit(self,spc,x,nGauss):
670 671
671 672
672 673 def gaussian(x, a, b, c, d):
673 674 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
674 675 return val
675 676
676 677 if nGauss == 'first':
677 678 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
678 679 spc_2_aux = numpy.flip(spc_1_aux)
679 680 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
680 681
681 682 len_dif = len(x)-len(spc_3_aux)
682 683
683 684 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
684 685
685 686 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
686 687
687 688 y = spc_new
688 689
689 690 elif nGauss == 'second':
690 691 y = spc
691 692
692 693
693 694 # estimate starting values from the data
694 695 a = y.max()
695 696 b = x[numpy.argmax(y)]
696 697 if nGauss == 'first':
697 698 c = 1.#b#b#numpy.std(spc)
698 699 elif nGauss == 'second':
699 700 c = b
700 701 else:
701 702 print("ERROR")
702 703
703 704 d = numpy.mean(y[-100:])
704 705
705 706 # define a least squares function to optimize
706 707 def minfunc(params):
707 708 return sum((y-gaussian(x,params[0],params[1],params[2],params[3]))**2)
708 709
709 710 # fit
710 711 popt = fmin(minfunc,[a,b,c,d],disp=False)
711 712 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
712 713
713 714
714 715 return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
715 716
716 717
717 718 def Gauss_fit_2(self,spc,x,nGauss):
718 719
719 720
720 721 def gaussian(x, a, b, c, d):
721 722 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
722 723 return val
723 724
724 725 if nGauss == 'first':
725 726 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
726 727 spc_2_aux = numpy.flip(spc_1_aux)
727 728 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
728 729
729 730 len_dif = len(x)-len(spc_3_aux)
730 731
731 732 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
732 733
733 734 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
734 735
735 736 y = spc_new
736 737
737 738 elif nGauss == 'second':
738 739 y = spc
739 740
740 741
741 742 # estimate starting values from the data
742 743 a = y.max()
743 744 b = x[numpy.argmax(y)]
744 745 if nGauss == 'first':
745 746 c = 1.#b#b#numpy.std(spc)
746 747 elif nGauss == 'second':
747 748 c = b
748 749 else:
749 750 print("ERROR")
750 751
751 752 d = numpy.mean(y[-100:])
752 753
753 754 # define a least squares function to optimize
754 755 popt,pcov = curve_fit(gaussian,x,y,p0=[a,b,c,d])
755 756 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
756 757
757 758
758 759 #return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
759 760 return gaussian(x, popt[0], popt[1], popt[2], popt[3]),popt[0], popt[1], popt[2], popt[3]
760 761
761 762 def Double_Gauss_fit(self,spc,x,A1,B1,C1,A2,B2,C2,D):
762 763
763 764 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
764 765 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
765 766 return val
766 767
767 768
768 769 y = spc
769 770
770 771 # estimate starting values from the data
771 772 a1 = A1
772 773 b1 = B1
773 774 c1 = C1#numpy.std(spc)
774 775
775 776 a2 = A2#y.max()
776 777 b2 = B2#x[numpy.argmax(y)]
777 778 c2 = C2#numpy.std(spc)
778 779 d = D
779 780
780 781 # define a least squares function to optimize
781 782 def minfunc(params):
782 783 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2)
783 784
784 785 # fit
785 786 popt = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],disp=False)
786 787
787 788 return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
788 789
789 790 def Double_Gauss_fit_2(self,spc,x,A1,B1,C1,A2,B2,C2,D):
790 791
791 792 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
792 793 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
793 794 return val
794 795
795 796
796 797 y = spc
797 798
798 799 # estimate starting values from the data
799 800 a1 = A1
800 801 b1 = B1
801 802 c1 = C1#numpy.std(spc)
802 803
803 804 a2 = A2#y.max()
804 805 b2 = B2#x[numpy.argmax(y)]
805 806 c2 = C2#numpy.std(spc)
806 807 d = D
807 808
808 809 # fit
809 810
810 811 popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
811 812
812 813 error = numpy.sqrt(numpy.diag(pcov))
813 814
814 815 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
815 816
816 817 def run(self, dataOut):
817 818
818 819 pwcode = 1
819 820
820 821 if dataOut.flagDecodeData:
821 822 pwcode = numpy.sum(dataOut.code[0]**2)
822 823 #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
823 824 normFactor = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * pwcode * dataOut.windowOfFilter
824 825 factor = normFactor
825 826 z = dataOut.data_spc / factor
826 827 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
827 828 dataOut.power = numpy.average(z, axis=1)
828 829 dataOut.powerdB = 10 * numpy.log10(dataOut.power)
829 830
830 831
831 832 x = dataOut.getVelRange(0)
832 833
833 834 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
834 835 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
835 836
836 837 dataOut.VelRange = x
837 838
838 839
839 840 l1=range(22,36)
840 841 l2=range(58,99)
841 842
842 843 for hei in itertools.chain(l1, l2):
843 844
844 845 try:
845 846 spc = dataOut.data_spc[0,:,hei]
846 847
847 848 spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first')
848 849
849 850 spc_diff = spc - spc_fit
850 851 spc_diff[spc_diff < 0] = 0
851 852
852 853 spc_fit_diff, A2, B2, C2, D2 = self.Gauss_fit_2(spc_diff,x,'second')
853 854
854 855 D = (D1+D2)
855 856
856 857 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,0,hei],dataOut.Oblique_param_errors[0,1,hei],dataOut.Oblique_param_errors[0,2,hei],dataOut.Oblique_param_errors[0,3,hei],dataOut.Oblique_param_errors[0,4,hei],dataOut.Oblique_param_errors[0,5,hei],dataOut.Oblique_param_errors[0,6,hei] = self.Double_Gauss_fit_2(spc,x,A1,B1,C1,A2,B2,C2,D)
857 858 #spc_double_fit,dataOut.Oblique_params = self.Double_Gauss_fit(spc,x,A1,B1,C1,A2,B2,C2,D)
858 859
859 860 except:
860 861 ###dataOut.Oblique_params[0,:,hei] = dataOut.Oblique_params[0,:,hei]*numpy.NAN
861 862 pass
862 863
863 864 return dataOut
864 865
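def _double_gaussian_fit_sketch():
    # Minimal sketch (synthetic data, illustrative only) of the strategy used by
    # Oblique_Gauss_Fit.run above: fit one Gaussian, subtract it, fit a second Gaussian
    # to the clipped residual, then refine all seven parameters with a joint fit.
    def gaussian(x, a, b, c, d):
        return a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
    def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
        return gaussian(x, a1, b1, c1, 0.) + gaussian(x, a2, b2, c2, 0.) + d
    x = numpy.linspace(-10., 10., 200)
    y = double_gaussian(x, 5., -2., 1., 3., 4., 2., 0.5)
    p1, _ = curve_fit(gaussian, x, y, p0=[y.max(), x[numpy.argmax(y)], 1., y.min()])
    resid = numpy.clip(y - gaussian(x, *p1), 0., None)
    p2, _ = curve_fit(gaussian, x, resid, p0=[resid.max(), x[numpy.argmax(resid)], 1., 0.])
    popt, pcov = curve_fit(double_gaussian, x, y, p0=list(p1[:3]) + list(p2[:3]) + [p1[3] + p2[3]])
    return popt, numpy.sqrt(numpy.diag(pcov))  # parameters and their 1-sigma errors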
865 866 class PrecipitationProc(Operation):
866 867
867 868 '''
868 869 Operator that estimates the reflectivity factor (Z) and the rainfall rate (R)
869 870
870 871 Input:
871 872 self.dataOut.data_pre : SelfSpectra
872 873
873 874 Output:
874 875
875 876 self.dataOut.data_output : Reflectivity factor, rainfall Rate
876 877
877 878
878 879 Parameters affected:
879 880 '''
880 881
881 882 def __init__(self):
882 883 Operation.__init__(self)
883 884 self.i=0
884 885
885 886 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
886 887 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30,
887 888 channel=None):
888 889
889 890 # print ('Entering PrecepitationProc ... ')
890 891
891 892 if radar == "MIRA35C" :
892 893
893 894 self.spc = dataOut.data_pre[0].copy()
894 895 self.Num_Hei = self.spc.shape[2]
895 896 self.Num_Bin = self.spc.shape[1]
896 897 self.Num_Chn = self.spc.shape[0]
897 898 Ze = self.dBZeMODE2(dataOut)
898 899
899 900 else:
900 901
901 902 self.spc = dataOut.data_pre[0].copy()
902 903
903 904 #NOTE: the range gates covering the TX pulse must be removed
904 905 self.spc[:,:,0:7]= numpy.NaN
905 906
906 907 self.Num_Hei = self.spc.shape[2]
907 908 self.Num_Bin = self.spc.shape[1]
908 909 self.Num_Chn = self.spc.shape[0]
909 910
910 911 VelRange = dataOut.spc_range[2]
911 912
912 913 ''' The radar constant is obtained '''
913 914
914 915 self.Pt = Pt
915 916 self.Gt = Gt
916 917 self.Gr = Gr
917 918 self.Lambda = Lambda
918 919 self.aL = aL
919 920 self.tauW = tauW
920 921 self.ThetaT = ThetaT
921 922 self.ThetaR = ThetaR
922 923 self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
923 924 self.lt = 10**(1.67/10) # Tx cable losses 1.67 dB
924 925 self.lr = 10**(5.73/10) # Rx cable losses 5.73 dB
925 926
926 927 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
927 928 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
928 929 RadarConstant = 10e-26 * Numerator / Denominator #
929 930 ExpConstant = 10**(40/10) #Experimental constant
930 931
931 932 SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
932 933 for i in range(self.Num_Chn):
933 934 SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
934 935 SignalPower[numpy.where(SignalPower < 0)] = 1e-20
935 936
936 937 if channel is None:
937 938 SPCmean = numpy.mean(SignalPower, 0)
938 939 else:
939 940 SPCmean = SignalPower[channel]
940 941 Pr = SPCmean[:,:]/dataOut.normFactor
941 942
942 943 # Declaring auxiliary variables
943 944 Range = dataOut.heightList*1000. #Range in m
944 945 # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
945 946 rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
946 947 zMtrx = rMtrx+Altitude
947 948 # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
948 949 VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))
949 950
950 951 # height dependence to air density Foote and Du Toit (1969)
951 952 delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
952 953 VMtrx = VelMtrx / delv_z #Normalized velocity
953 954 VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
954 955 # Diameter is related to the fall speed of falling drops
955 956 D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
956 957 # Only valid for D>= 0.16 mm
957 958 D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN
958 959
959 960 #Calculate Radar Reflectivity ETAn
960 961 ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
961 962 ETAd = ETAn * 6.18 * exp( -0.6 * D_Vz ) * delv_z
962 963 # Radar Cross Section
963 964 sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
964 965 # Drop Size Distribution
965 966 DSD = ETAn / sigmaD
966 967 # Equivalent reflectivity
967 968 Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
968 969 Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
969 970 # RainFall Rate
970 971 RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr
971 972
972 973 # Censoring the data
973 974 # Removing data below the SNR threshold; the SNR should be considered per channel
974 975 SNRth = 10**(SNRdBlimit/10) #-30dB
975 976 novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition. Maybe OR condition better
976 977 W = numpy.nanmean(dataOut.data_dop,0)
977 978 W[novalid] = numpy.NaN
978 979 Ze_org[novalid] = numpy.NaN
979 980 RR[novalid] = numpy.NaN
980 981
981 982 dataOut.data_output = RR[8]
982 983 dataOut.data_param = numpy.ones([3,self.Num_Hei])
983 984 dataOut.channelList = [0,1,2]
984 985
985 986 dataOut.data_param[0]=10*numpy.log10(Ze_org)
986 987 dataOut.data_param[1]=-W
987 988 dataOut.data_param[2]=RR
988 989
989 990 # print ('Leaving PrecepitationProc ... ')
990 991 return dataOut
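# Worked example (illustrative numbers, not a calibration) of the fall-speed to drop-
# diameter relation used above: D = -1.667 * ln(0.9369 - 0.097087 * V), with V already
# normalized by delv_z. For V = 5 m/s this gives D = -1.667*ln(0.4515) ~= 1.33 mm; the
# relation is trusted only for D >= 0.16 mm (roughly V >= 0.3 m/s) and V <= 9.6 m/s.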
991 992
992 993 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
993 994
994 995 NPW = dataOut.NPW
995 996 COFA = dataOut.COFA
996 997
997 998 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
998 999 RadarConst = dataOut.RadarConst
999 1000 #frequency = 34.85*10**9
1000 1001
1001 1002 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
1002 1003 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
1003 1004
1004 1005 ETA = numpy.sum(SNR,1)
1005 1006
1006 1007 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
1007 1008
1008 1009 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
1009 1010
1010 1011 for r in range(self.Num_Hei):
1011 1012
1012 1013 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
1013 1014 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
1014 1015
1015 1016 return Ze
1016 1017
1017 1018 # def GetRadarConstant(self):
1018 1019 #
1019 1020 # """
1020 1021 # Constants:
1021 1022 #
1022 1023 # Pt: Transmission Power dB 5kW 5000
1023 1024 # Gt: Transmission Gain dB 24.7 dB 295.1209
1024 1025 # Gr: Reception Gain dB 18.5 dB 70.7945
1025 1026 # Lambda: Wavelenght m 0.6741 m 0.6741
1026 1027 # aL: Attenuation loses dB 4dB 2.5118
1027 1028 # tauW: Width of transmission pulse s 4us 4e-6
1028 1029 # ThetaT: Transmission antenna beam angle rad 0.1656317 rad 0.1656317
1029 1030 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
1030 1031 #
1031 1032 # """
1032 1033 #
1033 1034 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
1034 1035 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
1035 1036 # RadarConstant = Numerator / Denominator
1036 1037 #
1037 1038 # return RadarConstant
1038 1039
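def _radar_constant_sketch():
    # Minimal sketch (values copied from the run() defaults above; an illustration, not a
    # calibration) of the radar-constant expression used in PrecipitationProc, following
    # the commented-out GetRadarConstant() block.
    Pt, Gt, Gr = 5000., 295.1209, 70.7945        # transmit power [W], Tx/Rx gains (linear)
    Lambda, aL, tauW = 0.6741, 2.5118, 4e-06     # wavelength [m], attenuation losses, pulse width [s]
    ThetaT, ThetaR = 0.1656317, 0.36774087       # Tx/Rx beam angles [rad]
    Numerator = (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2)
    Denominator = Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR
    return 10e-26 * Numerator / Denominator      # same 10e-26 scaling as in run()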
1039 1040
1040 1041 class FullSpectralAnalysis(Operation):
1041 1042
1042 1043 """
1043 1044 Function that implements Full Spectral Analysis technique.
1044 1045
1045 1046 Input:
1046 1047 self.dataOut.data_pre : SelfSpectra and CrossSpectra data
1047 1048 self.dataOut.groupList : Pairlist of channels
1048 1049 self.dataOut.ChanDist : Physical distance between receivers
1049 1050
1050 1051
1051 1052 Output:
1052 1053
1053 1054 self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind
1054 1055
1055 1056
1056 1057 Parameters affected: Winds, height range, SNR
1057 1058
1058 1059 """
1059 1060 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,
1060 1061 minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
1061 1062
1062 1063 spc = dataOut.data_pre[0].copy()
1063 1064 cspc = dataOut.data_pre[1]
1064 1065 nHeights = spc.shape[2]
1065 1066
1066 1067 # first_height = 0.75 #km (ref: data header 20170822)
1067 1068 # resolution_height = 0.075 #km
1068 1069 '''
1069 1070 finding height range. check this when radar parameters are changed!
1070 1071 '''
1071 1072 if maxheight is not None:
1072 1073 # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
1073 1074 range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
1074 1075 else:
1075 1076 range_max = nHeights
1076 1077 if minheight is not None:
1077 1078 # range_min = int((minheight - first_height) / resolution_height) # theoretical
1078 1079 range_min = int(13.26 * minheight - 5) # empirical, works better
1079 1080 if range_min < 0:
1080 1081 range_min = 0
1081 1082 else:
1082 1083 range_min = 0
1083 1084
1084 1085 pairsList = dataOut.groupList
1085 1086 if dataOut.ChanDist is not None :
1086 1087 ChanDist = dataOut.ChanDist
1087 1088 else:
1088 1089 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
1089 1090
1090 1091 # 4 variables: zonal, meridional, vertical, and average SNR
1091 1092 data_param = numpy.zeros([4,nHeights]) * numpy.NaN
1092 1093 velocityX = numpy.zeros([nHeights]) * numpy.NaN
1093 1094 velocityY = numpy.zeros([nHeights]) * numpy.NaN
1094 1095 velocityZ = numpy.zeros([nHeights]) * numpy.NaN
1095 1096
1096 1097 dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))
1097 1098
1098 1099 '''***********************************************WIND ESTIMATION**************************************'''
1099 1100 for Height in range(nHeights):
1100 1101
1101 1102 if Height >= range_min and Height < range_max:
1102 1103 # error_code will be useful in future analysis
1103 1104 [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
1104 1105 ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)
1105 1106
1106 1107 if abs(Vzon) < 100. and abs(Vmer) < 100.:
1107 1108 velocityX[Height] = Vzon
1108 1109 velocityY[Height] = -Vmer
1109 1110 velocityZ[Height] = Vver
1110 1111
1111 1112 # Censoring data with SNR threshold
1112 1113 dbSNR [dbSNR < SNRdBlimit] = numpy.NaN
1113 1114
1114 1115 data_param[0] = velocityX
1115 1116 data_param[1] = velocityY
1116 1117 data_param[2] = velocityZ
1117 1118 data_param[3] = dbSNR
1118 1119 dataOut.data_param = data_param
1119 1120 return dataOut
1120 1121
1121 1122 def moving_average(self,x, N=2):
1122 1123 """ convolution for smoothenig data. note that last N-1 values are convolution with zeroes """
1123 1124 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
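# Worked example: moving_average([1, 2, 3, 4], N=2) convolves with [0.5, 0.5] and drops the
# leading edge, giving [1.5, 2.5, 3.5, 2.0]; the last N-1 = 1 value is the zero-padded edge.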
1124 1125
1125 1126 def gaus(self,xSamples,Amp,Mu,Sigma):
1126 1127 return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)
1127 1128
1128 1129 def Moments(self, ySamples, xSamples):
1129 1130 Power = numpy.nanmean(ySamples) # Power, 0th Moment
1130 1131 yNorm = ySamples / numpy.nansum(ySamples)
1131 1132 RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
1132 1133 Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
1133 1134 StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Standard deviation, spectral width
1134 1135 return numpy.array([Power,RadVel,StdDev])
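# Worked example (assumed numbers): for ySamples = [1, 4, 1] on xSamples = [-1, 0, 1] m/s the
# normalized weights are [1/6, 4/6, 1/6], so Power = mean(ySamples) = 2.0, RadVel = sum(x*w) = 0.0 m/s
# and StdDev = sqrt(sum(w*(x - RadVel)**2)) = sqrt(1/3) ~= 0.58 m/s.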
1135 1136
1136 1137 def StopWindEstimation(self, error_code):
1137 1138 Vzon = numpy.NaN
1138 1139 Vmer = numpy.NaN
1139 1140 Vver = numpy.NaN
1140 1141 return Vzon, Vmer, Vver, error_code
1141 1142
1142 1143 def AntiAliasing(self, interval, maxstep):
1143 1144 """
1144 1145 function to prevent errors from aliased values when computing the phase slope
1145 1146 """
1146 1147 antialiased = numpy.zeros(len(interval))
1147 1148 copyinterval = interval.copy()
1148 1149
1149 1150 antialiased[0] = copyinterval[0]
1150 1151
1151 1152 for i in range(1,len(antialiased)):
1152 1153 step = interval[i] - interval[i-1]
1153 1154 if step > maxstep:
1154 1155 copyinterval -= 2*numpy.pi
1155 1156 antialiased[i] = copyinterval[i]
1156 1157 elif step < maxstep*(-1):
1157 1158 copyinterval += 2*numpy.pi
1158 1159 antialiased[i] = copyinterval[i]
1159 1160 else:
1160 1161 antialiased[i] = copyinterval[i].copy()
1161 1162
1162 1163 return antialiased
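# Worked example: AntiAliasing(numpy.array([3.0, -3.0]), maxstep=4.5) sees a jump of -6.0 rad,
# adds 2*pi to the remaining samples and returns [3.0, 3.28], the same result numpy.unwrap
# would give; this keeps the phase-vs-frequency slope continuous before the linear fit below.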
1163 1164
1164 1165 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
1165 1166 """
1166 1167 Function that Calculates Zonal, Meridional and Vertical wind velocities.
1167 1168 Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.
1168 1169
1169 1170 Input:
1170 1171 spc, cspc : self spectra and cross spectra data. In Briggs notation something like S_i*(S_i)_conj, (S_j)_conj respectively.
1171 1172 pairsList : Pairlist of channels
1172 1173 ChanDist : array of xi_ij and eta_ij
1173 1174 Height : height at which data is processed
1174 1175 noise : noise in [channels] format for specific height
1175 1176 Abbsisarange : range of the frequencies or velocities
1176 1177 dbSNR, SNRlimit : signal to noise ratio in db, lower limit
1177 1178
1178 1179 Output:
1179 1180 Vzon, Vmer, Vver : wind velocities
1180 1181 error_code : int that states where code is terminated
1181 1182
1182 1183 0 : no error detected
1183 1184 1 : Gaussian of mean spc exceeds widthlimit
1184 1185 2 : no Gaussian of mean spc found
1185 1186 3 : SNR too low or velocity too high -> e.g. precipitation
1186 1187 4 : at least one Gaussian of cspc exceeds widthlimit
1187 1188 5 : zero out of three cspc Gaussian fits converged
1188 1189 6 : phase slope fit could not be found
1189 1190 7 : arrays used to fit phase have different length
1190 1191 8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
1191 1192
1192 1193 """
1193 1194
1194 1195 error_code = 0
1195 1196
1196 1197 nChan = spc.shape[0]
1197 1198 nProf = spc.shape[1]
1198 1199 nPair = cspc.shape[0]
1199 1200
1200 1201 SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
1201 1202 CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
1202 1203 phase = numpy.zeros([nPair, nProf]) # phase between channels
1203 1204 PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
1204 1205 PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
1205 1206 xFrec = AbbsisaRange[0][:-1] # frequency range
1206 1207 xVel = AbbsisaRange[2][:-1] # velocity range
1207 1208 xSamples = xFrec # the frequency range is taken
1208 1209 delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x
1209 1210
1210 1211 # only consider velocities with in NegativeLimit and PositiveLimit
1211 1212 if (NegativeLimit is None):
1212 1213 NegativeLimit = numpy.min(xVel)
1213 1214 if (PositiveLimit is None):
1214 1215 PositiveLimit = numpy.max(xVel)
1215 1216 xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
1216 1217 xSamples_zoom = xSamples[xvalid]
1217 1218
1218 1219 '''Getting Eij and Nij'''
1219 1220 Xi01, Xi02, Xi12 = ChanDist[:,0]
1220 1221 Eta01, Eta02, Eta12 = ChanDist[:,1]
1221 1222
1222 1223 # spwd limit - updated by D. Scipión 30.03.2021
1223 1224 widthlimit = 10
1224 1225 '''************************* SPC is normalized ********************************'''
1225 1226 spc_norm = spc.copy()
1226 1227 # For each channel
1227 1228 for i in range(nChan):
1228 1229 spc_sub = spc_norm[i,:] - noise[i] # only the signal power
1229 1230 SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)
1230 1231
1231 1232 '''********************** FITTING MEAN SPC GAUSSIAN **********************'''
1232 1233
1233 1234 """ the gaussian of the mean: first subtract noise, then normalize. this is legal because
1234 1235 you only fit the curve and don't need the absolute value of height for calculation,
1235 1236 only for estimation of width. for normalization of cross spectra, you need initial,
1236 1237 unnormalized self-spectra With noise.
1237 1238
1238 1239 Technically, you don't even need to normalize the self-spectra, as you only need the
1239 1240 width of the peak. However, it was left this way. Note that the normalization has a flaw:
1240 1241 due to subtraction of the noise, some values are below zero. Raw "spc" values should be
1241 1242 >= 0, as it is the modulus squared of the signals (complex * it's conjugate)
1242 1243 """
1243 1244 # initial conditions
1244 1245 popt = [1e-10,0,1e-10]
1245 1246 # Spectra average
1246 1247 SPCMean = numpy.average(SPC_Samples,0)
1247 1248 # Moments in frequency
1248 1249 SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)
1249 1250
1250 1251 # Gauss Fit SPC in frequency domain
1251 1252 if dbSNR > SNRlimit: # only if SNR > SNRth
1252 1253 try:
1253 1254 popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
1254 1255 if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
1255 1256 return self.StopWindEstimation(error_code = 1)
1256 1257 FitGauss = self.gaus(xSamples_zoom,*popt)
1257 1258 except :#RuntimeError:
1258 1259 return self.StopWindEstimation(error_code = 2)
1259 1260 else:
1260 1261 return self.StopWindEstimation(error_code = 3)
1261 1262
1262 1263 '''***************************** CSPC Normalization *************************
1263 1264 The Spc spectra are used to normalize the cross-spectra. Peaks from precipitation
1264 1265 influence the norm which is not desired. First, a range is identified where the
1265 1266 wind peak is estimated -> sum_wind is sum of those frequencies. Next, the area
1266 1267 around it gets cut off and values replaced by mean determined by the boundary
1267 1268 data -> sum_noise (spc is not normalized here, that's why the noise is important)
1268 1269
1269 1270 The sums are then added and multiplied by range/datapoints, because you need
1270 1271 an integral and not a sum for normalization.
1271 1272
1272 1273 A norm is found according to Briggs 92.
1273 1274 '''
1274 1275 # for each pair
1275 1276 for i in range(nPair):
1276 1277 cspc_norm = cspc[i,:].copy()
1277 1278 chan_index0 = pairsList[i][0]
1278 1279 chan_index1 = pairsList[i][1]
1279 1280 CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
1280 1281 phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
1281 1282
1282 1283 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
1283 1284 self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
1284 1285 self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])
1285 1286
1286 1287 popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
1287 1288 FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))
1288 1289
1289 1290 '''*******************************FIT GAUSS CSPC************************************'''
1290 1291 try:
1291 1292 popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
1292 1293 if popt01[2] > widthlimit: # CONDITION
1293 1294 return self.StopWindEstimation(error_code = 4)
1294 1295 popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
1295 1296 if popt02[2] > widthlimit: # CONDITION
1296 1297 return self.StopWindEstimation(error_code = 4)
1297 1298 popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
1298 1299 if popt12[2] > widthlimit: # CONDITION
1299 1300 return self.StopWindEstimation(error_code = 4)
1300 1301
1301 1302 FitGauss01 = self.gaus(xSamples_zoom, *popt01)
1302 1303 FitGauss02 = self.gaus(xSamples_zoom, *popt02)
1303 1304 FitGauss12 = self.gaus(xSamples_zoom, *popt12)
1304 1305 except:
1305 1306 return self.StopWindEstimation(error_code = 5)
1306 1307
1307 1308
1308 1309 '''************* Getting Fij ***************'''
1309 1310 # x-axis point of the gaussian where the center is located from GaussFit of spectra
1310 1311 GaussCenter = popt[1]
1311 1312 ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
1312 1313 PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]
1313 1314
1314 1315 # Point where e^-1 is located in the gaussian
1315 1316 PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
1316 1317 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to"Peminus1" in "FitGauss"
1317 1318 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
1318 1319 Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])
1319 1320
1320 1321 '''********** Taking frequency ranges from mean SPCs **********'''
1321 1322 GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
1322 1323 Range = numpy.empty(2)
1323 1324 Range[0] = GaussCenter - GauWidth
1324 1325 Range[1] = GaussCenter + GauWidth
1325 1326 # Point in x-axis where the bandwidth is located (min:max)
1326 1327 ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
1327 1328 ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
1328 1329 PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
1329 1330 PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
1330 1331 Range = numpy.array([ PointRangeMin, PointRangeMax ])
1331 1332 FrecRange = xSamples_zoom[ Range[0] : Range[1] ]
1332 1333
1333 1334 '''************************** Getting Phase Slope ***************************'''
1334 1335 for i in range(nPair):
1335 1336 if len(FrecRange) > 5:
1336 1337 PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
1337 1338 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
1338 1339 if len(FrecRange) == len(PhaseRange):
1339 1340 try:
1340 1341 slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
1341 1342 PhaseSlope[i] = slope
1342 1343 PhaseInter[i] = intercept
1343 1344 except:
1344 1345 return self.StopWindEstimation(error_code = 6)
1345 1346 else:
1346 1347 return self.StopWindEstimation(error_code = 7)
1347 1348 else:
1348 1349 return self.StopWindEstimation(error_code = 8)
1349 1350
1350 1351 '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
1351 1352
1352 1353 '''Getting constant C'''
1353 1354 cC=(Fij*numpy.pi)**2
1354 1355
1355 1356 '''****** Getting constants F and G ******'''
1356 1357 MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
1357 1358 # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
1358 1359 # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
1359 1360 MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
1360 1361 MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
1361 1362 # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
1362 1363 MijResults = numpy.array([MijResult1, MijResult2])
1363 1364 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
1364 1365
1365 1366 '''****** Getting constants A, B and H ******'''
1366 1367 W01 = numpy.nanmax( FitGauss01 )
1367 1368 W02 = numpy.nanmax( FitGauss02 )
1368 1369 W12 = numpy.nanmax( FitGauss12 )
1369 1370
1370 1371 WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
1371 1372 WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
1372 1373 WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
1373 1374 WijResults = numpy.array([WijResult01, WijResult02, WijResult12])
1374 1375
1375 1376 WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
1376 1377 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
1377 1378
1378 1379 VxVy = numpy.array([[cA,cH],[cH,cB]])
1379 1380 VxVyResults = numpy.array([-cF,-cG])
1380 1381 (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
1381 1382 Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
1382 1383 error_code = 0
1383 1384
1384 1385 return Vzon, Vmer, Vver, error_code
1385 1386
1386 1387 class SpectralMoments(Operation):
1387 1388
1388 1389 '''
1389 1390 Function SpectralMoments()
1390 1391
1391 1392 Calculates moments (power, mean, standard deviation) and SNR of the signal
1392 1393
1393 1394 Type of dataIn: Spectra
1394 1395
1395 1396 Configuration Parameters:
1396 1397
1397 1398 dirCosx : Cosine director in X axis
1398 1399 dirCosy : Cosine director in Y axis
1399 1400
1400 1401 elevation :
1401 1402 azimuth :
1402 1403
1403 1404 Input:
1404 1405 channelList : simple channel list to select e.g. [2,3,7]
1405 1406 self.dataOut.data_pre : Spectral data
1406 1407 self.dataOut.abscissaList : List of frequencies
1407 1408 self.dataOut.noise : Noise level per channel
1408 1409
1409 1410 Affected:
1410 1411 self.dataOut.moments : Parameters per channel
1411 1412 self.dataOut.data_snr : SNR per channel
1412 1413
1413 1414 '''
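# Usage sketch (assumption: the usual schainpy controller API with addOperation / addParameter;
# the unit name 'proc_spectra' is illustrative, not taken from this file):
#
# opObj = proc_spectra.addOperation(name='SpectralMoments', optype='other')
# opObj.addParameter(name='proc_type', value='1', format='int')
#
# With proc_type=1 the operation also performs the windowed Gaussian fit and fills dataOut.spcpar;
# with the default proc_type=0 only the classical moments (power, Doppler, width, SNR) are returned.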
1414 1415
1415 1416 def run(self, dataOut, proc_type=0):
1416 1417
1417 1418 absc = dataOut.abscissaList[:-1]
1418 1419 nChannel = dataOut.data_pre[0].shape[0]
1419 1420 nHei = dataOut.data_pre[0].shape[2]
1420 1421 data_param = numpy.zeros((nChannel, 4 + proc_type*3, nHei))
1421 1422
1422 1423 if proc_type == 1:
1423 1424 fwindow = numpy.zeros(absc.size) + 1
1424 1425 b=64
1425 1426 #b=16
1426 1427 fwindow[0:absc.size//2 - b] = 0
1427 1428 fwindow[absc.size//2 + b:] = 0
1428 1429 type1 = 1 # moments calculation & Gaussian fitting
1429 1430 nProfiles = dataOut.nProfiles
1430 1431 nCohInt = dataOut.nCohInt
1431 1432 nIncohInt = dataOut.nIncohInt
1432 1433 M = numpy.power(numpy.array(1/(nProfiles * nCohInt) ,dtype='float32'),2)
1433 1434 N = numpy.array(M / nIncohInt,dtype='float32')
1434 1435 data = dataOut.data_pre[0] * N
1435 1436 #noise = dataOut.noise * N
1436 1437 noise = numpy.zeros(nChannel)
1437 1438 for ind in range(nChannel):
1438 1439 noise[ind] = self.__NoiseByChannel(nProfiles, nIncohInt, data[ind,:,:])
1439 1440 smooth=3
1440 1441 else:
1441 1442 data = dataOut.data_pre[0]
1442 1443 noise = dataOut.noise
1443 1444 fwindow = None
1444 1445 type1 = 0
1445 1446 nIncohInt = None
1446 1447 smooth=None
1447 1448
1448 1449 for ind in range(nChannel):
1449 1450 data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind], nicoh=nIncohInt, smooth=smooth, type1=type1, fwindow=fwindow, id_ch=ind)
1450 1451
1451 1452 if proc_type == 1:
1452 1453 dataOut.moments = data_param[:,1:,:]
1453 1454 dataOut.data_dop = data_param[:,2]
1454 1455 dataOut.data_width = data_param[:,1]
1455 1456 dataOut.data_snr = data_param[:,0]
1456 1457 dataOut.data_pow = data_param[:,6] # to compare with type0 processing
1457 1458 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, data_param[:,3], data_param[:,4],data_param[:,5]),axis=2)
1458 1459 else:
1459 1460 dataOut.moments = data_param[:,1:,:]
1460 1461 dataOut.data_snr = data_param[:,0]
1461 1462 dataOut.data_pow = data_param[:,1]
1462 1463 dataOut.data_dop = data_param[:,2]
1463 1464 dataOut.data_width = data_param[:,3]
1464 1465 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, dataOut.data_pow),axis=2)
1465 1466
1466 1467 return dataOut
1467 1468
1468 1469 def __calculateMoments(self, oldspec, oldfreq, n0,
1469 1470 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None,id_ch=0):
1470 1471
1471 1472 def __GAUSSWINFIT1(A, flagPDER=0):
1472 1473 nonlocal truex, xvalid
1473 1474 nparams = 4
1474 1475 M=truex.size
1475 1476 mm=numpy.arange(M,dtype='f4')
1476 1477 delta = numpy.zeros(M,dtype='f4')
1477 1478 delta[0] = 1.0
1478 1479 Ts = numpy.array([1.0/(2*truex[0])],dtype='f4')[0]
1479 1480 jj = -1j
1480 1481 #if self.winauto is None: self.winauto = (1.0 - mm/M)
1481 1482 winauto = (1.0 - mm/M)
1482 1483 winauto = winauto/winauto.max() # Normalized to 1
1483 1484 #ON_ERROR,2 # IDL sentence: Return to caller if an error occurs
1484 1485 A[0] = numpy.abs(A[0])
1485 1486 A[2] = numpy.abs(A[2])
1486 1487 A[3] = numpy.abs(A[3])
1487 1488 pi=numpy.array([numpy.pi],dtype='f4')[0]
1488 1489 if A[2] != 0:
1489 1490 Z = numpy.exp(-2*numpy.power((pi*A[2]*mm*Ts),2,dtype='f4')+jj*2*pi*A[1]*mm*Ts, dtype='c8') # Get Z
1490 1491 else:
1491 1492 Z = mm*0.0
1492 1493 A[0] = 0.0
1493 1494 junkF = numpy.roll(2*fft(winauto*(A[0]*Z+A[3]*delta)).real - \
1494 1495 winauto[0]*(A[0]+A[3]), M//2) # *M scale for fft not needed in python
1495 1496 F = junkF[xvalid]
1496 1497 if flagPDER == 0: #NEED PARTIAL?
1497 1498 return F
1498 1499 PDER = numpy.zeros((M,nparams)) #YES, MAKE ARRAY.
1499 1500 PDER[:,0] = numpy.roll(2*(fft(winauto*Z)*M).real - winauto[0], M//2)
1500 1501 PDER[:,1] = numpy.roll(2*(fft(winauto*jj*2*numpy.pi*mm*Ts*A[0]*Z)*M).real, M//2)
1501 1502 PDER[:,2] = numpy.roll(2*(fft(winauto*(-4*numpy.power(numpy.pi*mm*Ts,2)*A[2]*A[0]*Z))*M).real, M//2)
1502 1503 PDER[:,3] = numpy.roll(2*(fft(winauto*delta)*M).real - winauto[0], M//2)
1503 1504 PDER = PDER[xvalid,:]
1504 1505 return F, PDER
1505 1506
1506 1507 def __curvefit_koki(y, a, Weights, FlagNoDerivative=1,
1507 1508 itmax=20, tol=None):
1508 1509 #ON_ERROR,2 IDL SENTENCE: RETURN TO THE CALLER IF ERROR
1509 1510 if tol == None:
1510 1511 tol = numpy.array([1.e-3],dtype='f4')[0]
1511 1512 typ=a.dtype
1512 1513 double = 1 if typ == numpy.float64 else 0
1513 1514 if typ != numpy.float32:
1514 1515 a=a.astype(numpy.float32) #Make params floating
1515 1516 # If we will be estimating partial derivatives, compute the machine precision
1516 1517 if FlagNoDerivative == 1:
1517 1518 res=numpy.MachAr(float_conv=numpy.float32)
1518 1519 eps=numpy.sqrt(res.eps)
1519 1520
1520 1521 nterms = a.size # Number of parameters
1521 1522 nfree=numpy.array([numpy.size(y) - nterms],dtype='f4')[0] # Degrees of freedom
1522 1523 if nfree <= 0: print('Curvefit - not enough data points.')
1523 1524 flambda= numpy.array([0.001],dtype='f4')[0] # Initial lambda
1524 1525 #diag=numpy.arange(nterms)*(nterms+1) # Subscripts of diagonal elements
1525 1526 # Use diag method in python
1526 1527 converge=1
1527 1528
1528 1529 #Define the partial derivative array
1529 1530 PDER = numpy.zeros((nterms,numpy.size(y)),dtype='f8') if double == 1 else numpy.zeros((nterms,numpy.size(y)),dtype='f4')
1530 1531
1531 1532 for Niter in range(itmax): #Iteration loop
1532 1533
1533 1534 if FlagNoDerivative == 1:
1534 1535 #Evaluate function and estimate partial derivatives
1535 1536 yfit = __GAUSSWINFIT1(a)
1536 1537 for term in range(nterms):
1537 1538 p=a.copy() # Copy current parameters
1538 1539 #Increment size for forward difference derivative
1539 1540 inc = eps * abs(p[term])
1540 1541 if inc == 0: inc = eps
1541 1542 p[term] = p[term] + inc
1542 1543 yfit1 = __GAUSSWINFIT1(p)
1543 1544 PDER[term,:] = (yfit1-yfit)/inc
1544 1545 else:
1545 1546 #The user's procedure will return partial derivatives
1546 1547 yfit,PDER=__GAUSSWINFIT1(a, flagPDER=1)
1547 1548
1548 1549 beta = numpy.dot(PDER,(y-yfit)*Weights)
1549 1550 alpha = numpy.dot(PDER * numpy.tile(Weights,(nterms,1)), numpy.transpose(PDER))
1550 1551 # save current values of return parameters
1551 1552 sigma1 = numpy.sqrt( 1.0 / numpy.diag(alpha) ) # Current sigma.
1552 1553 sigma = sigma1
1553 1554
1554 1555 chisq1 = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # Current chi squared.
1555 1556 chisq = chisq1
1556 1557 yfit1 = yfit
1557 1558 elev7=numpy.array([1.0e7],dtype='f4')[0]
1558 1559 compara =numpy.sum(abs(y))/elev7/nfree
1559 1560 done_early = chisq1 < compara
1560 1561
1561 1562 if done_early:
1562 1563 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
1563 1564 if done_early: Niter -= 1
1564 1565 #save_tp(chisq,Niter,yfit)
1565 1566 return yfit, a, converge, sigma, chisq, chi2 # return result (chi2 added so all exits share the same signature)
1566 1567 #c = numpy.dot(c, c) # this operator implemented at the next lines
1567 1568 c_tmp = numpy.sqrt(numpy.diag(alpha))
1568 1569 siz=len(c_tmp)
1569 1570 c=numpy.dot(c_tmp.reshape(siz,1),c_tmp.reshape(1,siz))
1570 1571 lambdaCount = 0
1571 1572 while True:
1572 1573 lambdaCount += 1
1573 1574 # Normalize alpha to have unit diagonal.
1574 1575 array = alpha / c
1575 1576 # Augment the diagonal.
1576 1577 one=numpy.array([1.],dtype='f4')[0]
1577 1578 numpy.fill_diagonal(array,numpy.diag(array)*(one+flambda))
1578 1579 # Invert modified curvature matrix to find new parameters.
1579 1580 try:
1580 1581 array = (1.0/array) if array.size == 1 else numpy.linalg.inv(array)
1581 1582 except Exception as e:
1582 1583 print(e)
1583 1584 array[:]=numpy.NaN
1584 1585
1585 1586 b = a + numpy.dot(numpy.transpose(beta),array/c) # New params
1586 1587 yfit = __GAUSSWINFIT1(b) # Evaluate function
1587 1588 chisq = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # New chisq
1588 1589 sigma = numpy.sqrt(numpy.diag(array)/numpy.diag(alpha)) # New sigma
1589 1590 if (numpy.isfinite(chisq) == 0) or \
1590 1591 (lambdaCount > 30 and chisq >= chisq1):
1591 1592 # Reject changes made this iteration, use old values.
1592 1593 yfit = yfit1
1593 1594 sigma = sigma1
1594 1595 chisq = chisq1
1595 1596 converge = 0
1596 1597 #print('Failed to converge.')
1597 1598 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
1598 1599 if done_early: Niter -= 1
1599 1600 #save_tp(chisq,Niter,yfit)
1600 1601 return yfit, a, converge, sigma, chisq, chi2 # return result
1601 1602 ten=numpy.array([10.0],dtype='f4')[0]
1602 1603 flambda *= ten # Assume fit got worse
1603 1604 if chisq <= chisq1:
1604 1605 break
1605 1606 hundred=numpy.array([100.0],dtype='f4')[0]
1606 1607 flambda /= hundred
1607 1608
1608 1609 a=b # Save new parameter estimate.
1609 1610 if ((chisq1-chisq)/chisq1) <= tol: # Finished?
1610 1611 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
1611 1612 if done_early: Niter -= 1
1612 1613 #save_tp(chisq,Niter,yfit)
1613 1614 return yfit, a, converge, sigma, chisq, chi2 # return result
1614 1615 converge = 0
1615 1616 chi2 = chisq
1616 1617 #print('Failed to converge.')
1617 1618 #save_tp(chisq,Niter,yfit)
1618 1619 return yfit, a, converge, sigma, chisq, chi2
1619 1620
1620 1621 if (nicoh is None): nicoh = 1
1621 1622 if (graph is None): graph = 0
1622 1623 if (smooth is None): smooth = 0
1623 1624 if (type1 is None): type1 = 0
1624 1625 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
1625 1626 if (snrth is None): snrth = -20.0
1626 1627 if (dc is None): dc = 0
1627 1628 if (aliasing is None): aliasing = 0
1628 1629 if (oldfd is None): oldfd = 0
1629 1630 if (wwauto is None): wwauto = 0
1630 1631
1631 1632 if (n0 < 1.e-20): n0 = 1.e-20
1632 1633
1633 1634 xvalid = numpy.where(fwindow == 1)[0]
1634 1635 freq = oldfreq
1635 1636 truex = oldfreq
1636 1637 vec_power = numpy.zeros(oldspec.shape[1])
1637 1638 vec_fd = numpy.zeros(oldspec.shape[1])
1638 1639 vec_w = numpy.zeros(oldspec.shape[1])
1639 1640 vec_snr = numpy.zeros(oldspec.shape[1])
1640 1641 vec_n1 = numpy.empty(oldspec.shape[1])
1641 1642 vec_fp = numpy.empty(oldspec.shape[1])
1642 1643 vec_sigma_fd = numpy.empty(oldspec.shape[1])
1643 1644
1644 1645 for ind in range(oldspec.shape[1]):
1645 1646
1646 1647 spec = oldspec[:,ind]
1647 1648 if (smooth == 0):
1648 1649 spec2 = spec
1649 1650 else:
1650 1651 spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
1651 1652
1652 1653 aux = spec2*fwindow
1653 1654 max_spec = aux.max()
1654 1655 m = aux.tolist().index(max_spec)
1655 1656
1656 1657 if m > 2 and m < oldfreq.size - 3:
1657 1658 newindex = m + numpy.array([-2,-1,0,1,2])
1658 1659 newfreq = numpy.arange(20)/20.0*(numpy.max(freq[newindex])-numpy.min(freq[newindex]))+numpy.min(freq[newindex])
1659 1660 #peakspec = SPLINE(,)
1660 1661 tck = interpolate.splrep(freq[newindex], spec2[newindex])
1661 1662 peakspec = interpolate.splev(newfreq, tck)
1662 1663 # max_spec = MAX(peakspec,)
1663 1664 max_spec = numpy.max(peakspec)
1664 1665 mnew = numpy.argmax(peakspec)
1665 1666 #fp = newfreq(mnew)
1666 1667 fp = newfreq[mnew]
1667 1668 else:
1668 1669 fp = freq[m]
1669 1670
1670 1671 if type1==0:
1671 1672
1672 1673 # Moments Estimation
1673 1674 bb = spec2[numpy.arange(m,spec2.size)]
1674 1675 bb = (bb<n0).nonzero()
1675 1676 bb = bb[0]
1676 1677
1677 1678 ss = spec2[numpy.arange(0,m + 1)]
1678 1679 ss = (ss<n0).nonzero()
1679 1680 ss = ss[0]
1680 1681
1681 1682 if (bb.size == 0):
1682 1683 bb0 = spec.size - 1 - m
1683 1684 else:
1684 1685 bb0 = bb[0] - 1
1685 1686 if (bb0 < 0):
1686 1687 bb0 = 0
1687 1688
1688 1689 if (ss.size == 0):
1689 1690 ss1 = 1
1690 1691 else:
1691 1692 ss1 = max(ss) + 1
1692 1693
1693 1694 if (ss1 > m):
1694 1695 ss1 = m
1695 1696
1696 1697 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
1697 1698
1698 1699 signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipión added with correct definition
1699 1700 total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipión added with correct definition
1700 1701 power = ((spec2[valid] - n0) * fwindow[valid]).sum()
1701 1702 fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
1702 1703 w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
1703 1704 snr = (spec2.mean()-n0)/n0
1704 1705 if (snr < 1.e-20): snr = 1.e-20
1705 1706
1706 1707 vec_power[ind] = total_power
1707 1708 vec_fd[ind] = fd
1708 1709 vec_w[ind] = w
1709 1710 vec_snr[ind] = snr
1710 1711 else:
1711 1712 # Noise by heights
1712 1713 n1, stdv = self.__get_noise2(spec, nicoh)
1713 1714 # Moments Estimation
1714 1715 bb = spec2[numpy.arange(m,spec2.size)]
1715 1716 bb = (bb<n1).nonzero()
1716 1717 bb = bb[0]
1717 1718
1718 1719 ss = spec2[numpy.arange(0,m + 1)]
1719 1720 ss = (ss<n1).nonzero()
1720 1721 ss = ss[0]
1721 1722
1722 1723 if (bb.size == 0):
1723 1724 bb0 = spec.size - 1 - m
1724 1725 else:
1725 1726 bb0 = bb[0] - 1
1726 1727 if (bb0 < 0):
1727 1728 bb0 = 0
1728 1729
1729 1730 if (ss.size == 0):
1730 1731 ss1 = 1
1731 1732 else:
1732 1733 ss1 = max(ss) + 1
1733 1734
1734 1735 if (ss1 > m):
1735 1736 ss1 = m
1736 1737
1737 1738 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
1738 1739 power = ((spec[valid] - n1)*fwindow[valid]).sum()
1739 1740 fd = ((spec[valid]- n1)*freq[valid]*fwindow[valid]).sum()/power
1740 1741 try:
1741 1742 w = numpy.sqrt(((spec[valid] - n1)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
1742 1743 except:
1743 1744 w = float("NaN")
1744 1745 snr = power/(n0*fwindow.sum())
1745 1746 if snr < 1.e-20: snr = 1.e-20
1746 1747
1747 1748 # Here starts the Gaussian fit
1748 1749
1749 1750 if snr > numpy.power(10,0.1*snrth):
1750 1751
1751 1752 a = numpy.zeros(4,dtype='f4')
1752 1753 a[0] = snr * n0
1753 1754 a[1] = fd
1754 1755 a[2] = w
1755 1756 a[3] = n0
1756 1757
1757 1758 np = spec.size
1758 1759 aold = a.copy()
1759 1760 spec2 = spec.copy()
1760 1761 oldxvalid = xvalid.copy()
1761 1762
1762 1763 for i in range(2):
1763 1764
1764 1765 ww = 1.0/(numpy.power(spec2,2)/nicoh)
1765 1766 ww[np//2] = 0.0
1766 1767
1767 1768 a = aold.copy()
1768 1769 xvalid = oldxvalid.copy()
1769 1770 #self.show_var(xvalid)
1770 1771
1771 1772 gaussfn = __curvefit_koki(spec[xvalid], a, ww[xvalid])
1772 1773 a = gaussfn[1]
1773 1774 converge = gaussfn[2]
1774 1775
1775 1776 xvalid = numpy.arange(np)
1776 1777 spec2 = __GAUSSWINFIT1(a)
1777 1778
1778 1779 xvalid = oldxvalid.copy()
1779 1780 power = a[0] * np
1780 1781 fd = a[1]
1781 1782 sigma_fd = gaussfn[3][1]
1782 1783 snr = max(power/ (max(a[3],n0) * len(oldxvalid)) * converge, 1e-20)
1783 1784 w = numpy.abs(a[2])
1784 1785 n1 = max(a[3], n0)
1785 1786
1786 1787 #gauss_adj=[fd,w,snr,n1,fp,sigma_fd]
1787 1788 else:
1788 1789 sigma_fd=numpy.nan # to avoid UnboundLocalError: local variable 'sigma_fd' referenced before assignment
1789 1790
1790 1791 vec_fd[ind] = fd
1791 1792 vec_w[ind] = w
1792 1793 vec_snr[ind] = snr
1793 1794 vec_n1[ind] = n1
1794 1795 vec_fp[ind] = fp
1795 1796 vec_sigma_fd[ind] = sigma_fd
1796 1797 vec_power[ind] = power # to compare with type 0 processing
1797 1798
1798 1799 if type1==1:
1799 1800 return numpy.vstack((vec_snr, vec_w, vec_fd, vec_n1, vec_fp, vec_sigma_fd, vec_power)) # snr and fd exchanged to compare doppler of both types
1800 1801 else:
1801 1802 return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
1802 1803
1803 1804 def __get_noise2(self,POWER, fft_avg, TALK=0):
1804 1805 '''
1805 1806 Routine to compute the noise level per height (n1). Similar to the IDL implementation.
1806 1807 '''
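# The acceptance rule below is a Hildebrand & Sekhon-type criterion: sorted spectral points
# are accumulated while their scatter stays consistent with pure noise, i.e. while
# SUMSQ*J <= (1 + 1/fft_avg)*SUM*SUM. A minimal standalone sketch of the same idea
# (illustrative only, not this exact routine):
#
# def noise_sorted(power, navg):
#     arr = numpy.sort(power.ravel())
#     s = sq = 0.0
#     j = 0
#     for v in arr:
#         # stop as soon as adding the next point would violate the white-noise test
#         if j > arr.size // 8 and (sq + v*v)*(j + 1) > (1.0 + 1.0/navg)*(s + v)**2:
#             break
#         s, sq, j = s + v, sq + v*v, j + 1
#     return s / j  # mean of the accepted (noise-only) points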
1807 1808 SPECT_PTS = len(POWER)
1808 1809 fft_avg = fft_avg*1.0
1809 1810 NOMIT = 0
1810 1811 NN = SPECT_PTS - NOMIT
1811 1812 N = NN//2
1812 1813 ARR = numpy.concatenate((POWER[0:N+1],POWER[N+NOMIT+1:SPECT_PTS]))
1813 1814 ARR = numpy.sort(ARR)
1814 1815 NUMS_MIN = (SPECT_PTS+7)//8
1815 1816 RTEST = (1.0+1.0/fft_avg)
1816 1817 SUM = 0.0
1817 1818 SUMSQ = 0.0
1818 1819 J = 0
1819 1820 for I in range(NN):
1820 1821 J = J + 1
1821 1822 SUM = SUM + ARR[I]
1822 1823 SUMSQ = SUMSQ + ARR[I]*ARR[I]
1823 1824 AVE = SUM*1.0/J
1824 1825 if J > NUMS_MIN:
1825 1826 if (SUMSQ*J <= RTEST*SUM*SUM): RNOISE = AVE
1826 1827 else:
1827 1828 if J == NUMS_MIN: RNOISE = AVE
1828 1829 if TALK == 1: print('Noise Power (2):%4.4f' %RNOISE)
1829 1830 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
1830 1831 return RNOISE, stdv
1831 1832
1832 1833 def __get_noise1(self, power, fft_avg, TALK=0):
1833 1834 '''
1834 1835 Routine to compute the noise level per height (n0). Similar to the IDL implementation.
1835 1836 '''
1836 1837 num_pts = numpy.size(power)
1837 1838 #print('num_pts',num_pts)
1838 1839 #print('power',power.shape)
1839 1840 #print(power[256:267,0:2])
1840 1841 fft_avg = fft_avg*1.0
1841 1842
1842 1843 ind = numpy.argsort(power, axis=None, kind='stable')
1843 1844 #ind = numpy.argsort(numpy.reshape(power,-1))
1844 1845 #print(ind.shape)
1845 1846 #print(ind[0:11])
1846 1847 #print(numpy.reshape(power,-1)[ind[0:11]])
1847 1848 ARR = numpy.reshape(power,-1)[ind]
1848 1849 #print('ARR',len(ARR))
1849 1850 #print('ARR',ARR.shape)
1850 1851 NUMS_MIN = num_pts//10
1851 1852 RTEST = (1.0+1.0/fft_avg)
1852 1853 SUM = 0.0
1853 1854 SUMSQ = 0.0
1854 1855 J = 0
1855 1856 cont = 1
1856 1857 while cont == 1 and J < num_pts:
1857 1858
1858 1859 SUM = SUM + ARR[J]
1859 1860 SUMSQ = SUMSQ + ARR[J]*ARR[J]
1860 1861 J = J + 1
1861 1862
1862 1863 if J > NUMS_MIN:
1863 1864 if (SUMSQ*J <= RTEST*SUM*SUM):
1864 1865 LNOISE = SUM*1.0/J
1865 1866 else:
1866 1867 J = J - 1
1867 1868 SUM = SUM - ARR[J]
1868 1869 SUMSQ = SUMSQ - ARR[J]*ARR[J]
1869 1870 cont = 0
1870 1871 else:
1871 1872 if J == NUMS_MIN: LNOISE = SUM*1.0/J
1872 1873 if TALK == 1: print('Noise Power (1):%8.8f' %LNOISE)
1873 1874 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
1874 1875 return LNOISE, stdv
1875 1876
1876 1877 def __NoiseByChannel(self, num_prof, num_incoh, spectra,talk=0):
1877 1878
1878 1879 val_frq = numpy.arange(num_prof-2)+1
1879 1880 val_frq[(num_prof-2)//2:] = val_frq[(num_prof-2)//2:] + 1
1880 1881 junkspc = numpy.sum(spectra[val_frq,:], axis=1)
1881 1882 junkid = numpy.argsort(junkspc)
1882 1883 noisezone = val_frq[junkid[0:num_prof//2]]
1883 1884 specnoise = spectra[noisezone,:]
1884 1885 noise, stdvnoise = self.__get_noise1(specnoise,num_incoh)
1885 1886
1886 1887 if talk:
1887 1888 print('noise =', noise)
1888 1889 return noise
1889 1890 #------------------ Get SA Parameters --------------------------
1890 1891
1891 1892 def GetSAParameters(self):
1892 1893 #SA en frecuencia
1893 1894 pairslist = self.dataOut.groupList
1894 1895 num_pairs = len(pairslist)
1895 1896
1896 1897 vel = self.dataOut.abscissaList
1897 1898 spectra = self.dataOut.data_pre
1898 1899 cspectra = self.dataIn.data_cspc
1899 1900 delta_v = vel[1] - vel[0]
1900 1901
1901 1902 #Calculating the power spectrum
1902 1903 spc_pow = numpy.sum(spectra, 3)*delta_v
1903 1904 #Normalizing Spectra
1904 1905 norm_spectra = spectra/spc_pow
1905 1906 #Calculating the norm_spectra at peak
1906 1907 max_spectra = numpy.max(norm_spectra, 3)
1907 1908
1908 1909 #Normalizing Cross Spectra
1909 1910 norm_cspectra = numpy.zeros(cspectra.shape)
1910 1911
1911 1912 for i in range(num_pairs):
1912 1913 norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])
1913 1914
1914 1915 max_cspectra = numpy.max(norm_cspectra,2)
1915 1916 max_cspectra_index = numpy.argmax(norm_cspectra, 2)
1916 1917
1917 1918 for i in range(num_pairs):
1918 1919 cspc_par[i,:,:] = self.__calculateMoments(norm_cspectra)
1919 1920 #------------------- Get Lags ----------------------------------
1920 1921
1921 1922 class JULIADriftsEstimation(Operation):
1922 1923
1923 1924 def __init__(self):
1924 1925 Operation.__init__(self)
1925 1926
1926 1927 def newtotal(self, data):
1927 1928 return numpy.nansum(data)
1928 1929
1929 1930 def data_filter(self, parm, snrth=-20, swth=20, wErrth=500):
1930 1931
1931 1932 Sz0 = parm.shape # Sz0: h,p
1932 1933 drift = parm[:,0]
1933 1934 sw = 2*parm[:,1]
1934 1935 snr = 10*numpy.log10(parm[:,2])
1935 1936 Sz = drift.shape # Sz: h
1936 1937 mask = numpy.ones((Sz[0]))
1937 1938 th=0
1938 1939 valid=numpy.where(numpy.isfinite(snr))
1939 1940 cvalid = len(valid[0])
1940 1941 if cvalid >= 1:
1941 1942 # Compute the mean SNR noise level for the i-th group of heights (see the commented sketch after this method)
1942 1943 nbins = int(numpy.max(snr)-numpy.min(snr))+1 # bin size = 1, similar to IDL
1943 1944 h = numpy.histogram(snr,bins=nbins)
1944 1945 hist = h[0]
1945 1946 values = numpy.round_(h[1])
1946 1947 moda = values[numpy.where(hist == numpy.max(hist))]
1947 1948 indNoise = numpy.where(numpy.abs(snr - numpy.min(moda)) < 3)[0]
1948 1949
1949 1950 noise = snr[indNoise]
1950 1951 noise_mean = numpy.sum(noise)/len(noise)
1951 1952 # Compute the median SNR
1952 1953 med = numpy.median(snr)
1953 1954 # Set the SNR threshold
1954 1955 if noise_mean > med + 3:
1955 1956 th = med
1956 1957 else:
1957 1958 th = noise_mean + 3
1958 1959 # Build the mask
1959 1960 novalid = numpy.where(snr <= th)[0]
1960 1961 mask[novalid] = numpy.nan
1961 1962 # Remove data that do not exceed the threshold: PARAMETER
1962 1963 novalid = numpy.where(snr <= snrth)
1963 1964 cnovalid = len(novalid[0])
1964 1965 if cnovalid > 0:
1965 1966 mask[novalid] = numpy.nan
1966 1967 novalid = numpy.where(numpy.isnan(snr))
1967 1968 cnovalid = len(novalid[0])
1968 1969 if cnovalid > 0:
1969 1970 mask[novalid] = numpy.nan
1970 1971 new_parm = numpy.zeros((Sz0[0],Sz0[1]))
1971 1972 for h in range(Sz0[0]):
1972 1973 for p in range(Sz0[1]):
1973 1974 if numpy.isnan(mask[h]):
1974 1975 new_parm[h,p]=numpy.nan
1975 1976 else:
1976 1977 new_parm[h,p]=parm[h,p]
1977 1978
1978 1979 return new_parm, th
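# Sketch of the thresholding used in data_filter above (hypothetical values, illustrative only):
# the histogram mode of the SNR in dB acts as a noise estimate and echoes near or below it
# are masked with NaN before the drift computation.
#
# snr_db = numpy.array([2.0, 2.5, 3.0, 2.2, 15.0, 18.0, 2.8]) # per-height SNR (made up)
# hist, edges = numpy.histogram(snr_db, bins=int(snr_db.max() - snr_db.min()) + 1)
# mode_db = numpy.round_(edges)[hist.argmax()] # ~2 dB: the noise population
# mask = numpy.where(snr_db <= mode_db + 3, numpy.nan, 1.0) # keeps only the 15 and 18 dB echoes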
1979 1980
1980 1981 def run(self, dataOut, zenith, zenithCorrection,heights=None, statistics=0, otype=0):
1981 1982
1982 1983 nCh=dataOut.spcpar.shape[0]
1983 1984 nHei=dataOut.spcpar.shape[1]
1984 1985 nParam=dataOut.spcpar.shape[2]
1985 1986 # Keep only the heights of interest
1986 1987 hei=dataOut.heightList
1987 1988 hvalid=numpy.where([hei >= heights[0]][0] & [hei <= heights[1]][0])[0]
1988 1989 nhvalid=len(hvalid)
1989 1990 parm = numpy.zeros((nCh,nhvalid,nParam))
1990 1991 parm = dataOut.spcpar[:,hvalid,:]
1991 1992 # First filtering: SNR threshold
1992 1993 for i in range(nCh):
1993 1994 dataOut.spcpar[i,hvalid,:] = self.data_filter(parm[i,:,:])[0]
1994 1995 zenith = numpy.array(zenith)
1995 1996 zenith -= zenithCorrection
1996 1997 zenith *= numpy.pi/180
1997 1998 alpha = zenith[0]
1998 1999 beta = zenith[1]
1999 2000
2000 2001 dopplerCH0 = dataOut.spcpar[0,:,0]
2001 2002 dopplerCH1 = dataOut.spcpar[1,:,0]
2002 2003 swCH0 = dataOut.spcpar[0,:,1]
2003 2004 swCH1 = dataOut.spcpar[1,:,1]
2004 2005 snrCH0 = 10*numpy.log10(dataOut.spcpar[0,:,2])
2005 2006 snrCH1 = 10*numpy.log10(dataOut.spcpar[1,:,2])
2006 2007 noiseCH0 = dataOut.spcpar[0,:,3]
2007 2008 noiseCH1 = dataOut.spcpar[1,:,3]
2008 2009 wErrCH0 = dataOut.spcpar[0,:,5]
2009 2010 wErrCH1 = dataOut.spcpar[1,:,5]
2010 2011
2011 2012 # Vertical and zonal calculation according to geometry
2012 2013 sinB_A = numpy.sin(beta)*numpy.cos(alpha) - numpy.sin(alpha)* numpy.cos(beta)
2013 2014 drift = -(dopplerCH0 * numpy.sin(beta) - dopplerCH1 * numpy.sin(alpha))/ sinB_A
2014 2015 zonal = (dopplerCH0 * numpy.cos(beta) - dopplerCH1 * numpy.cos(alpha))/ sinB_A
2015 2016 snr = (snrCH0 + snrCH1)/2
2016 2017 noise = (noiseCH0 + noiseCH1)/2
2017 2018 sw = (swCH0 + swCH1)/2
2018 2019 w_w_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.sin(beta)/numpy.abs(sinB_A),2) + numpy.power(wErrCH1 * numpy.sin(alpha)/numpy.abs(sinB_A),2))
2019 2020 w_e_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.cos(beta)/numpy.abs(-1*sinB_A),2) + numpy.power(wErrCH1 * numpy.cos(alpha)/numpy.abs(-1*sinB_A),2))
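# Geometry sketch (an assumption about the beam convention, not stated explicitly in this file):
# if each oblique beam measures the line-of-sight projection of the vertical (w) and zonal (u)
# components,
#   dopplerCH0 = w*cos(alpha) + u*sin(alpha)
#   dopplerCH1 = w*cos(beta) + u*sin(beta)
# then inverting this 2x2 system, whose determinant is sin(beta)*cos(alpha) - sin(alpha)*cos(beta)
# = sin(beta - alpha) = sinB_A, gives the drift and zonal expressions used above, up to the
# sign conventions chosen for 'drift' and 'zonal'.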
2020 2021
2021 2022 # for statistics150km
2022 2023 if statistics:
2023 2024 print('Implemented offline.')
2024 2025 if otype == 0:
2025 2026 winds = numpy.vstack((snr, drift, zonal, noise, sw, w_w_err, w_e_err)) # to process statistics drifts
2026 2027 elif otype == 3:
2027 2028 winds = numpy.vstack((snr, drift, zonal)) # to generic plot: 3 RTI's
2028 2029 elif otype == 4:
2029 2030 winds = numpy.vstack((snrCH0, drift, snrCH1, zonal)) # to generic plot: 4 RTI's
2030 2031
2031 2032 snr1 = numpy.vstack((snrCH0, snrCH1))
2032 2033 dataOut.data_output = winds
2033 2034 dataOut.data_snr = snr1
2034 2035
2035 2036 dataOut.utctimeInit = dataOut.utctime
2036 2037 dataOut.outputInterval = dataOut.timeInterval
2037 2038
2038 2039 return dataOut
2039 2040
2040 2041 class SALags(Operation):
2041 2042 '''
2042 2043 Function SALags(): computes spaced-antenna lag parameters (cross/auto correlation lag times and lag-1 phase)
2043 2044
2044 2045 Input:
2045 2046 self.dataOut.data_pre
2046 2047 self.dataOut.abscissaList
2047 2048 self.dataOut.noise
2048 2049 self.dataOut.normFactor
2049 2050 self.dataOut.data_snr
2050 2051 self.dataOut.groupList
2051 2052 self.dataOut.nChannels
2052 2053
2053 2054 Affected:
2054 2055 self.dataOut.data_param
2055 2056
2056 2057 '''
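# Usage sketch (assumption: the usual schainpy controller API; the operation itself takes no
# extra parameters in this file):
#
# opObj = proc_param.addOperation(name='SALags', optype='other')
#
# It expects dataOut.data_pre = (ACFs, CCFs), with normFactor and groupList split the same way,
# and fills dataOut.data_param with the cross/auto lag times and the lag-1 phase per height.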
2057 2058 def run(self, dataOut):
2058 2059 data_acf = dataOut.data_pre[0]
2059 2060 data_ccf = dataOut.data_pre[1]
2060 2061 normFactor_acf = dataOut.normFactor[0]
2061 2062 normFactor_ccf = dataOut.normFactor[1]
2062 2063 pairs_acf = dataOut.groupList[0]
2063 2064 pairs_ccf = dataOut.groupList[1]
2064 2065
2065 2066 nHeights = dataOut.nHeights
2066 2067 absc = dataOut.abscissaList
2067 2068 noise = dataOut.noise
2068 2069 SNR = dataOut.data_snr
2069 2070 nChannels = dataOut.nChannels
2070 2071 for l in range(len(pairs_acf)):
2071 2072 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
2072 2073
2073 2074 for l in range(len(pairs_ccf)):
2074 2075 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
2075 2076
2076 2077 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
2077 2078 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
2078 2079 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
2079 2080 return
2080 2081
2081 2082 def __calculateTaus(self, data_acf, data_ccf, lagRange):
2082 2083
2083 2084 lag0 = data_acf.shape[1]//2 # integer index of the zero lag
2084 2085 # Autocorrelation function
2085 2086 mean_acf = numpy.nanmean(data_acf, axis = 0) # scipy.stats.nanmean was removed from SciPy
2086 2087
2087 2088 # Get the TauCross index
2088 2089 ind_ccf = data_ccf.argmax(axis = 1)
2089 2090 # Get the TauAuto index
2090 2091 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
2091 2092 ccf_lag0 = data_ccf[:,lag0,:]
2092 2093
2093 2094 for i in range(ccf_lag0.shape[0]):
2094 2095 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
2095 2096
2096 2097 # Get TauCross and TauAuto
2097 2098 tau_ccf = lagRange[ind_ccf]
2098 2099 tau_acf = lagRange[ind_acf]
2099 2100
2100 2101 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
2101 2102
2102 2103 tau_ccf[Nan1,Nan2] = numpy.nan
2103 2104 tau_acf[Nan1,Nan2] = numpy.nan
2104 2105 tau = numpy.vstack((tau_ccf,tau_acf))
2105 2106
2106 2107 return tau
2107 2108
2108 2109 def __calculateLag1Phase(self, data, lagTRange):
2109 2110 data1 = numpy.nanmean(data, axis = 0)
2110 2111 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
2111 2112
2112 2113 phase = numpy.angle(data1[lag1,:])
2113 2114
2114 2115 return phase
2115 2116
2116 2117 def fit_func( x, a0, a1, a2): #, a3, a4, a5):
2117 2118 z = (x - a1) / a2
2118 2119 y = a0 * numpy.exp(-z**2 / a2) #+ a3 + a4 * x + a5 * x**2
2119 2120 return y
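# Minimal sketch of how fit_func is used further below (CleanRayleigh): a Gaussian-like curve is
# fitted to the histogram of the log-power distribution to locate its mode and spread. Synthetic
# data, illustrative only:
#
# x = numpy.linspace(-40.0, 40.0, 81)
# y = fit_func(x, 100.0, 5.0, 6.0) # fake histogram centred at 5
# popt, pcov = curve_fit(fit_func, x, y, p0=[y.max(), x[y.argmax()], 5.0])
# mode, stdv = popt[1], popt[2]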
2120 2121
2121 2122
2122 2123 class SpectralFitting(Operation):
2123 2124 '''
2124 2125 Operation SpectralFitting()
2125 2126 Fits a parametric spectral model to averaged, cleaned spectra using an external parameter library (path/file), after separating coherent and incoherent echoes.
2126 2127 Input: dataOut.data_spc, dataOut.data_cspc (plus noise and groupList)
2127 2128 Output: dataOut.data_param, dataOut.data_paramC
2128 2129 Variables modified: dataOut.data_spc, dataOut.data_cspc, dataOut.groupList, dataOut.constants
2129 2130 '''
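# Configuration sketch (assumption: the usual schainpy controller API with addOperation /
# addParameter; paths, module names, formats and the group list below are placeholders, not
# values taken from this file):
#
# op = proc_spectra.addOperation(name='SpectralFitting', optype='other')
# op.addParameter(name='path', value='/path/to/param/library', format='str')
# op.addParameter(name='file', value='fitting_library_module', format='str')
# op.addParameter(name='groupList', value='(0,1),(2,3)', format='multiList')
# op.addParameter(name='taver', value='5', format='int')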
2130 2131 isConfig = False
2131 2132 __dataReady = False
2132 2133 bloques = None
2133 2134 bloque0 = None
2134 2135 index = 0
2135 2136 fint = 0
2136 2137 buffer = 0
2137 2138 buffer2 = 0
2138 2139 buffer3 = 0
2139 2140
2140 2141 def __init__(self):
2141 2142 Operation.__init__(self)
2142 2143 self.i=0
2143 2144 self.isConfig = False
2144 2145
2145 2146
2146 2147 def setup(self,nChan,nProf,nHei,nBlocks):
2147 2148 self.__dataReady = False
2148 2149 self.bloques = numpy.zeros([2, nProf, nHei,nBlocks], dtype= complex)
2149 2150 self.bloque0 = numpy.zeros([nChan, nProf, nHei, nBlocks])
2150 2151
2151 2152 def __calculateMoments(self,oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
2152 2153
2153 2154 if (nicoh is None): nicoh = 1
2154 2155 if (graph is None): graph = 0
2155 2156 if (smooth is None): smooth = 0
2156 2157 elif (smooth < 3): smooth = 0
2157 2158
2158 2159 if (type1 is None): type1 = 0
2159 2160 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
2160 2161 if (snrth is None): snrth = -3
2161 2162 if (dc is None): dc = 0
2162 2163 if (aliasing is None): aliasing = 0
2163 2164 if (oldfd is None): oldfd = 0
2164 2165 if (wwauto is None): wwauto = 0
2165 2166
2166 2167 if (n0 < 1.e-20): n0 = 1.e-20
2167 2168
2168 2169 freq = oldfreq
2169 2170 vec_power = numpy.zeros(oldspec.shape[1])
2170 2171 vec_fd = numpy.zeros(oldspec.shape[1])
2171 2172 vec_w = numpy.zeros(oldspec.shape[1])
2172 2173 vec_snr = numpy.zeros(oldspec.shape[1])
2173 2174
2174 2175 oldspec = numpy.ma.masked_invalid(oldspec)
2175 2176
2176 2177 for ind in range(oldspec.shape[1]):
2177 2178
2178 2179 spec = oldspec[:,ind]
2179 2180 aux = spec*fwindow
2180 2181 max_spec = aux.max()
2181 2182 m = list(aux).index(max_spec)
2182 2183
2183 2184 #Smooth
2184 2185 if (smooth == 0): spec2 = spec
2185 2186 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
2186 2187
2187 2188 # Moments computation
2188 2189 bb = spec2[list(range(m,spec2.size))]
2189 2190 bb = (bb<n0).nonzero()
2190 2191 bb = bb[0]
2191 2192
2192 2193 ss = spec2[list(range(0,m + 1))]
2193 2194 ss = (ss<n0).nonzero()
2194 2195 ss = ss[0]
2195 2196
2196 2197 if (bb.size == 0):
2197 2198 bb0 = spec.size - 1 - m
2198 2199 else:
2199 2200 bb0 = bb[0] - 1
2200 2201 if (bb0 < 0):
2201 2202 bb0 = 0
2202 2203
2203 2204 if (ss.size == 0): ss1 = 1
2204 2205 else: ss1 = max(ss) + 1
2205 2206
2206 2207 if (ss1 > m): ss1 = m
2207 2208
2208 2209 valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1
2209 2210 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
2210 2211 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
2211 2212 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
2212 2213 snr = (spec2.mean()-n0)/n0
2213 2214
2214 2215 if (snr < 1.e-20) :
2215 2216 snr = 1.e-20
2216 2217
2217 2218 vec_power[ind] = power
2218 2219 vec_fd[ind] = fd
2219 2220 vec_w[ind] = w
2220 2221 vec_snr[ind] = snr
2221 2222
2222 2223 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
2223 2224 return moments
2224 2225
2225 2226 def __DiffCoherent(self, spectra, cspectra, dataOut, noise, snrth, coh_th, hei_th):
2226 2227
2227 2228 nProf = dataOut.nProfiles
2228 2229 heights = dataOut.heightList
2229 2230 nHei = len(heights)
2230 2231 channels = dataOut.channelList
2231 2232 nChan = len(channels)
2232 2233 crosspairs = dataOut.groupList
2233 2234 nPairs = len(crosspairs)
2234 2235 # Separate incoherent from coherent spectra (SNR > 20 dB)
2235 2236 snr_th = 10**(snrth/10.0)
2236 2237 my_incoh_spectra = numpy.zeros([nChan, nProf,nHei], dtype='float')
2237 2238 my_incoh_cspectra = numpy.zeros([nPairs,nProf, nHei], dtype='complex')
2238 2239 my_incoh_aver = numpy.zeros([nChan, nHei])
2239 2240 my_coh_aver = numpy.zeros([nChan, nHei])
2240 2241
2241 2242 coh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
2242 2243 coh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
2243 2244 coh_aver = numpy.zeros([nChan, nHei])
2244 2245
2245 2246 incoh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
2246 2247 incoh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
2247 2248 incoh_aver = numpy.zeros([nChan, nHei])
2248 2249 power = numpy.sum(spectra, axis=1)
2249 2250
2250 2251 if coh_th == None : coh_th = numpy.array([0.75,0.65,0.15]) # 0.65
2251 2252 if hei_th == None : hei_th = numpy.array([60,300,650])
2252 2253 for ic in range(nPairs):
2253 2254 pair = crosspairs[ic]
2254 2255 # If the SNR is greater than the SNR threshold, the data are taken as coherent
2255 2256 s_n0 = power[pair[0],:]/noise[pair[0]]
2256 2257 s_n1 = power[pair[1],:]/noise[pair[1]]
2257 2258 valid1 =(s_n0>=snr_th).nonzero()
2258 2259 valid2 = (s_n1>=snr_th).nonzero()
2259 2260 valid1 = numpy.array(valid1[0])
2260 2261 valid2 = numpy.array(valid2[0])
2261 2262 valid = valid1
2262 2263 for iv in range(len(valid2)):
2263 2264 indv = numpy.array((valid1 == valid2[iv]).nonzero())
2264 2265 if len(indv[0]) == 0 :
2265 2266 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
2266 2267 if len(valid)>0:
2267 2268 my_coh_aver[pair[0],valid]=1
2268 2269 my_coh_aver[pair[1],valid]=1
2269 2270 # If the coherence is greater than the coherence threshold, the data are taken as coherent
2270 2271 coh = numpy.squeeze(numpy.nansum(cspectra[ic,:,:], axis=0)/numpy.sqrt(numpy.nansum(spectra[pair[0],:,:], axis=0)*numpy.nansum(spectra[pair[1],:,:], axis=0)))
2271 2272 for ih in range(len(hei_th)):
2272 2273 hvalid = (heights>hei_th[ih]).nonzero()
2273 2274 hvalid = hvalid[0]
2274 2275 if len(hvalid)>0:
2275 2276 valid = (numpy.absolute(coh[hvalid])>coh_th[ih]).nonzero()
2276 2277 valid = valid[0]
2277 2278 if len(valid)>0:
2278 2279 my_coh_aver[pair[0],hvalid[valid]] =1
2279 2280 my_coh_aver[pair[1],hvalid[valid]] =1
2280 2281
2281 2282 coh_echoes = (my_coh_aver[pair[0],:] == 1).nonzero()
2282 2283 incoh_echoes = (my_coh_aver[pair[0],:] != 1).nonzero()
2283 2284 incoh_echoes = incoh_echoes[0]
2284 2285 if len(incoh_echoes) > 0:
2285 2286 my_incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
2286 2287 my_incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
2287 2288 my_incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
2288 2289 my_incoh_aver[pair[0],incoh_echoes] = 1
2289 2290 my_incoh_aver[pair[1],incoh_echoes] = 1
2290 2291
2291 2292
2292 2293 for ic in range(nPairs):
2293 2294 pair = crosspairs[ic]
2294 2295
2295 2296 valid1 =(my_coh_aver[pair[0],:]==1 ).nonzero()
2296 2297 valid2 = (my_coh_aver[pair[1],:]==1).nonzero()
2297 2298 valid1 = numpy.array(valid1[0])
2298 2299 valid2 = numpy.array(valid2[0])
2299 2300 valid = valid1
2300 2301
2301 2302 for iv in range(len(valid2)):
2302 2303
2303 2304 indv = numpy.array((valid1 == valid2[iv]).nonzero())
2304 2305 if len(indv[0]) == 0 :
2305 2306 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
2306 2307 valid1 =(my_coh_aver[pair[0],:] !=1 ).nonzero()
2307 2308 valid2 = (my_coh_aver[pair[1],:] !=1).nonzero()
2308 2309 valid1 = numpy.array(valid1[0])
2309 2310 valid2 = numpy.array(valid2[0])
2310 2311 incoh_echoes = valid1
2311 2312 for iv in range(len(valid2)):
2312 2313
2313 2314 indv = numpy.array((valid1 == valid2[iv]).nonzero())
2314 2315 if len(indv[0]) == 0 :
2315 2316 incoh_echoes = numpy.concatenate(( incoh_echoes,valid2[iv]), axis=None)
2316 2317
2317 2318 if len(valid)>0:
2318 2319 coh_spectra[pair[0],:,valid] = spectra[pair[0],:,valid]
2319 2320 coh_spectra[pair[1],:,valid] = spectra[pair[1],:,valid]
2320 2321 coh_cspectra[ic,:,valid] = cspectra[ic,:,valid]
2321 2322 coh_aver[pair[0],valid]=1
2322 2323 coh_aver[pair[1],valid]=1
2323 2324 if len(incoh_echoes)>0:
2324 2325 incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
2325 2326 incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
2326 2327 incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
2327 2328 incoh_aver[pair[0],incoh_echoes]=1
2328 2329 incoh_aver[pair[1],incoh_echoes]=1
2329 2330 return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver
2330 2331
2331 2332
2332 2333 def __CleanCoherent(self,snrth, spectra, cspectra, coh_aver,dataOut, noise,clean_coh_echoes,index):
2333 2334
2334 2335 nProf = dataOut.nProfiles
2335 2336 heights = dataOut.heightList
2336 2337 nHei = len(heights)
2337 2338 channels = dataOut.channelList
2338 2339 nChan = len(channels)
2339 2340 crosspairs = dataOut.groupList
2340 2341 nPairs = len(crosspairs)
2341 2342
2342 2343 absc = dataOut.abscissaList[:-1]
2343 2344 data_param = numpy.zeros((nChan, 4, spectra.shape[2]))
2344 2345 clean_coh_spectra = spectra.copy()
2345 2346 clean_coh_cspectra = cspectra.copy()
2346 2347 clean_coh_aver = coh_aver.copy()
2347 2348
2348 2349 spwd_th=[10,6] #spwd_th[0] --> For satellites ; spwd_th[1] --> For special events like SUN.
2349 2350 coh_th = 0.75
2350 2351
2351 2352 rtime0 = [6,18] # period without ESF
2352 2353 rtime1 = [10.5,13.5] # period with high coherence and high expected spectral width: SUN.
2353 2354
2354 2355 time = index*5./60 # based on 5-minute processing blocks
2355 2356 if clean_coh_echoes == 1 :
2356 2357 for ind in range(nChan):
2357 2358 data_param[ind,:,:] = self.__calculateMoments( spectra[ind,:,:] , absc , noise[ind] )
2358 2359 spwd = data_param[:,3]
2359 2360 # SPECB_JULIA,header=anal_header,jspectra=spectra,vel=velocities,hei=heights, num_aver=1, mode_fit=0,smoothing=smoothing,jvelr=velr,jspwd=spwd,jsnr=snr,jnoise=noise,jstdvnoise=stdvnoise
2360 2361 # to obtain spwd
2361 2362 for ic in range(nPairs):
2362 2363 pair = crosspairs[ic]
2363 2364 coh = numpy.squeeze(numpy.sum(cspectra[ic,:,:], axis=1)/numpy.sqrt(numpy.sum(spectra[pair[0],:,:], axis=1)*numpy.sum(spectra[pair[1],:,:], axis=1)))
2364 2365 for ih in range(nHei) :
2365 2366 # Considering heights higher than 200km in order to avoid removing phenomena like EEJ.
2366 2367 if heights[ih] >= 200 and coh_aver[pair[0],ih] == 1 and coh_aver[pair[1],ih] == 1 :
2367 2368 # Checking coherence
2368 2369 if (numpy.abs(coh[ih]) <= coh_th) or (time >= rtime0[0] and time <= rtime0[1]) :
2369 2370 # Checking spectral widths
2370 2371 if (spwd[pair[0],ih] > spwd_th[0]) or (spwd[pair[1],ih] > spwd_th[0]) :
2371 2372 # satellite
2372 2373 clean_coh_spectra[pair,ih,:] = 0.0
2373 2374 clean_coh_cspectra[ic,ih,:] = 0.0
2374 2375 clean_coh_aver[pair,ih] = 0
2375 2376 else :
2376 2377 if ((spwd[pair[0],ih] < spwd_th[1]) or (spwd[pair[1],ih] < spwd_th[1])) :
2377 2378 # Special event like the sun.
2378 2379 clean_coh_spectra[pair,ih,:] = 0.0
2379 2380 clean_coh_cspectra[ic,ih,:] = 0.0
2380 2381 clean_coh_aver[pair,ih] = 0
2381 2382
2382 2383 return clean_coh_spectra, clean_coh_cspectra, clean_coh_aver
2383 2384
2384 2385 def CleanRayleigh(self,dataOut,spectra,cspectra,save_drifts):
2385 2386
2386 2387 rfunc = cspectra.copy()
2387 2388 n_funct = len(rfunc[0,:,0,0])
2388 2389 val_spc = spectra*0.0
2389 2390 val_cspc = cspectra*0.0
2390 2391 in_sat_spectra = spectra.copy()
2391 2392 in_sat_cspectra = cspectra.copy()
2392 2393
2393 2394 min_hei = 200
2394 2395 nProf = dataOut.nProfiles
2395 2396 heights = dataOut.heightList
2396 2397 nHei = len(heights)
2397 2398 channels = dataOut.channelList
2398 2399 nChan = len(channels)
2399 2400 crosspairs = dataOut.groupList
2400 2401 nPairs = len(crosspairs)
2401 2402 hval=(heights >= min_hei).nonzero()
2402 2403 ih=hval[0]
2403 2404 for ih in range(hval[0][0],nHei):
2404 2405 for ifreq in range(nProf):
2405 2406 for ii in range(n_funct):
2406 2407
2407 2408 func2clean = 10*numpy.log10(numpy.absolute(rfunc[:,ii,ifreq,ih]))
2408 2409 val = (numpy.isfinite(func2clean)==True).nonzero()
2409 2410 if len(val)>0:
2410 2411 min_val = numpy.around(numpy.amin(func2clean)-2) #> (-40)
2411 2412 if min_val <= -40 : min_val = -40
2412 2413 max_val = numpy.around(numpy.amax(func2clean)+2) #< 200
2413 2414 if max_val >= 200 : max_val = 200
2414 2415 step = 1
2415 2416 #Getting bins and the histogram
2416 2417 x_dist = min_val + numpy.arange(1 + ((max_val-(min_val))/step))*step
2417 2418 y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step))
2418 2419 mean = numpy.sum(x_dist * y_dist) / numpy.sum(y_dist)
2419 2420 sigma = numpy.sqrt(numpy.sum(y_dist * (x_dist - mean)**2) / numpy.sum(y_dist))
2420 2421 parg = [numpy.amax(y_dist),mean,sigma]
2421 2422 try :
2422 2423 gauss_fit, covariance = curve_fit(fit_func, x_dist, y_dist,p0=parg)
2423 2424 mode = gauss_fit[1]
2424 2425 stdv = gauss_fit[2]
2425 2426 except:
2426 2427 mode = mean
2427 2428 stdv = sigma
2428 2429
2429 2430 #Removing echoes deviating more than factor_stdv standard deviations from the mode
2430 2431 factor_stdv = 2.5
2431 2432 noval = (abs(func2clean - mode)>=(factor_stdv*stdv)).nonzero()
2432 2433
2433 2434 if len(noval[0]) > 0:
2434 2435 novall = ((func2clean - mode) >= (factor_stdv*stdv)).nonzero()
2435 2436 cross_pairs = crosspairs[ii]
2436 2437 #Getting coherent echoes which are removed.
2437 2438 if len(novall[0]) > 0:
2438 2439 val_spc[novall[0],cross_pairs[0],ifreq,ih] = 1
2439 2440 val_spc[novall[0],cross_pairs[1],ifreq,ih] = 1
2440 2441 val_cspc[novall[0],ii,ifreq,ih] = 1
2441 2442 #Removing coherent from ISR data
2442 2443 spectra[noval,cross_pairs[0],ifreq,ih] = numpy.nan
2443 2444 spectra[noval,cross_pairs[1],ifreq,ih] = numpy.nan
2444 2445 cspectra[noval,ii,ifreq,ih] = numpy.nan
2445 2446
2446 2447 #Getting average of the spectra and cross-spectra from incoherent echoes.
2447 2448 out_spectra = numpy.zeros([nChan,nProf,nHei], dtype=float) #+numpy.nan
2448 2449 out_cspectra = numpy.zeros([nPairs,nProf,nHei], dtype=complex) #+numpy.nan
2449 2450 for ih in range(nHei):
2450 2451 for ifreq in range(nProf):
2451 2452 for ich in range(nChan):
2452 2453 tmp = spectra[:,ich,ifreq,ih]
2453 2454 valid = (numpy.isfinite(tmp[:])==True).nonzero()
2454 2455 if len(valid[0]) >0 :
2455 2456 out_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
2456 2457 for icr in range(nPairs):
2457 2458 tmp = numpy.squeeze(cspectra[:,icr,ifreq,ih])
2458 2459 valid = (numpy.isfinite(tmp)==True).nonzero()
2459 2460 if len(valid[0]) > 0:
2460 2461 out_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
2461 2462 #Removing fake coherent echoes (at least 4 points around the point)
2462 2463 val_spectra = numpy.sum(val_spc,0)
2463 2464 val_cspectra = numpy.sum(val_cspc,0)
2464 2465
2465 2466 val_spectra = self.REM_ISOLATED_POINTS(val_spectra,4)
2466 2467 val_cspectra = self.REM_ISOLATED_POINTS(val_cspectra,4)
2467 2468
2468 2469 for i in range(nChan):
2469 2470 for j in range(nProf):
2470 2471 for k in range(nHei):
2471 2472 if numpy.isfinite(val_spectra[i,j,k]) and val_spectra[i,j,k] < 1 :
2472 2473 val_spc[:,i,j,k] = 0.0
2473 2474 for i in range(nPairs):
2474 2475 for j in range(nProf):
2475 2476 for k in range(nHei):
2476 2477 if numpy.isfinite(val_cspectra[i,j,k]) and val_cspectra[i,j,k] < 1 :
2477 2478 val_cspc[:,i,j,k] = 0.0
2478 2479
2479 2480 tmp_sat_spectra = spectra.copy()
2480 2481 tmp_sat_spectra = tmp_sat_spectra*numpy.nan
2481 2482 tmp_sat_cspectra = cspectra.copy()
2482 2483 tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan
2483 2484 val = (val_spc > 0).nonzero()
2484 2485 if len(val[0]) > 0:
2485 2486 tmp_sat_spectra[val] = in_sat_spectra[val]
2486 2487
2487 2488 val = (val_cspc > 0).nonzero()
2488 2489 if len(val[0]) > 0:
2489 2490 tmp_sat_cspectra[val] = in_sat_cspectra[val]
2490 2491
2491 2492 #Getting average of the spectra and cross-spectra from the removed (satellite/coherent) echoes.
2492 2493 sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
2493 2494 sat_cspectra = numpy.zeros((nPairs,nProf,nHei), dtype=complex)
2494 2495 for ih in range(nHei):
2495 2496 for ifreq in range(nProf):
2496 2497 for ich in range(nChan):
2497 2498 tmp = numpy.squeeze(tmp_sat_spectra[:,ich,ifreq,ih])
2498 2499 valid = (numpy.isfinite(tmp)).nonzero()
2499 2500 if len(valid[0]) > 0:
2500 2501 sat_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
2501 2502
2502 2503 for icr in range(nPairs):
2503 2504 tmp = numpy.squeeze(tmp_sat_cspectra[:,icr,ifreq,ih])
2504 2505 valid = (numpy.isfinite(tmp)).nonzero()
2505 2506 if len(valid[0]) > 0:
2506 2507 sat_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
2507 2508 return out_spectra, out_cspectra,sat_spectra,sat_cspectra
2508 2509 def REM_ISOLATED_POINTS(self,array,rth):
2509 2510 if rth == None : rth = 4
2510 2511 num_prof = len(array[0,:,0])
2511 2512 num_hei = len(array[0,0,:])
2512 2513 n2d = len(array[:,0,0])
2513 2514
2514 2515 for ii in range(n2d) :
2515 2516 tmp = array[ii,:,:]
2516 2517 tmp = numpy.reshape(tmp,num_prof*num_hei)
2517 2518 indxs1 = (numpy.isfinite(tmp)==True).nonzero()
2518 2519 indxs2 = (tmp > 0).nonzero()
2519 2520 indxs1 = (indxs1[0])
2520 2521 indxs2 = indxs2[0]
2521 2522 indxs = None
2522 2523 for iv in range(len(indxs2)):
2523 2524 indv = numpy.array((indxs1 == indxs2[iv]).nonzero())
2524 2525 if len(indv[0]) > 0 :
2525 2526 indxs = numpy.concatenate((indxs,indxs2[iv]), axis=None)
2526 2527 indxs = indxs[1:]
2527 2528 if len(indxs) < 4 :
2528 2529 array[ii,:,:] = 0.
2529 2530 continue # skip to the next channel instead of returning None to the caller
2530 2531
2531 2532 xpos = numpy.mod(indxs ,num_hei)
2532 2533 ypos = (indxs // num_hei) # integer division so positions can be used as indices
2533 2534 sx = numpy.argsort(xpos) # Ordering respect to "x" (time)
2534 2535 xpos = xpos[sx]
2535 2536 ypos = ypos[sx]
2536 2537 # *********************************** Cleaning isolated points **********************************
2537 2538 ic = 0
2538 2539 while True :
2539 2540 r = numpy.sqrt(list(numpy.power((xpos[ic]-xpos),2)+ numpy.power((ypos[ic]-ypos),2)))
2540 2541 no_coh1 = (numpy.isfinite(r)==True).nonzero()
2541 2542 no_coh2 = (r <= rth).nonzero()
2542 2543 no_coh1 = numpy.array(no_coh1[0])
2543 2544 no_coh2 = numpy.array(no_coh2[0])
2544 2545 no_coh = None
2545 2546 for iv in range(len(no_coh2)):
2546 2547 indv = numpy.array((no_coh1 == no_coh2[iv]).nonzero())
2547 2548 if len(indv[0]) > 0 :
2548 2549 no_coh = numpy.concatenate((no_coh,no_coh2[iv]), axis=None)
2549 2550 no_coh = no_coh[1:]
2550 2551 if len(no_coh) < 4 :
2551 2552 xpos[ic] = numpy.nan
2552 2553 ypos[ic] = numpy.nan
2553 2554
2554 2555 ic = ic + 1
2555 2556 if (ic == len(indxs)) :
2556 2557 break
2557 2558 indxs = (numpy.isfinite(list(xpos))==True).nonzero()
2558 2559 if len(indxs[0]) < 4 :
2559 2560 array[ii,:,:] = 0.
2560 2561 continue # skip to the next channel instead of returning None to the caller
2561 2562
2562 2563 xpos = xpos[indxs[0]]
2563 2564 ypos = ypos[indxs[0]]
2564 2565 for i in range(0,len(ypos)):
2565 2566 ypos[i]=int(ypos[i])
2566 2567 junk = tmp
2567 2568 tmp = junk*0.0
2568 2569
2569 2570 tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))]
2570 2571 array[ii,:,:] = numpy.reshape(tmp,(num_prof,num_hei))
2571 2572 return array
2572 2573
2573 2574 def moments(self,doppler,yarray,npoints):
2574 2575 ytemp = yarray
2575 2576 val = (ytemp > 0).nonzero()
2576 2577 val = val[0]
2577 2578 if len(val) == 0 : val = range(npoints-1)
2578 2579
2579 2580 ynew = 0.5*(ytemp[val[0]]+ytemp[val[len(val)-1]])
2580 2581 ytemp = numpy.append(ytemp, ynew) # extend by one sample (slice assignment cannot grow a numpy array)
2581 2582
2582 2583 index = 0
2583 2584 index = numpy.argmax(ytemp)
2584 2585 ytemp = numpy.roll(ytemp,int(npoints/2)-1-index)
2585 2586 ytemp = ytemp[0:npoints-1]
2586 2587
2587 2588 fmom = numpy.sum(doppler*ytemp)/numpy.sum(ytemp)+(index-(npoints/2-1))*numpy.abs(doppler[1]-doppler[0])
2588 2589 smom = numpy.sum(doppler*doppler*ytemp)/numpy.sum(ytemp)
2589 2590 return [fmom,numpy.sqrt(smom)]
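# Numeric sketch of moments() above (illustrative only): for a synthetic line centred near
# +20 units the first returned value approximates the Doppler shift and the second is a
# width-like second-moment estimate.
#
# doppler = numpy.linspace(-100.0, 100.0, 63) # npoints-1 velocity bins
# spc = numpy.exp(-0.5*((doppler - 20.0)/15.0)**2)
# fd, sw = SpectralFitting().moments(doppler, spc, 64) # fd comes out roughly at +20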
2590 2591
2591 2592
2592 2593
2593 2594
2594 2595
2595 2596 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None, filec=None,coh_th=None, hei_th=None,taver=None,proc=None,nhei=None,nprofs=None,ipp=None,channelList=None):
2596 2597 if not numpy.any(proc):
2597 2598 nChannels = dataOut.nChannels
2598 2599 nHeights= dataOut.heightList.size
2599 2600 nProf = dataOut.nProfiles
2600 2601 if numpy.any(taver): taver=int(taver)
2601 2602 else : taver = 5
2602 2603 tini=time.localtime(dataOut.utctime)
2603 2604 if (tini.tm_min % taver) == 0 and (tini.tm_sec < 5 and self.fint==0):
2604 2605 self.index = 0
2605 2606 jspc = self.buffer
2606 2607 jcspc = self.buffer2
2607 2608 jnoise = self.buffer3
2608 2609 self.buffer = dataOut.data_spc
2609 2610 self.buffer2 = dataOut.data_cspc
2610 2611 self.buffer3 = dataOut.noise
2611 2612 self.fint = 1
2612 2613 if numpy.any(jspc) :
2613 2614 jspc= numpy.reshape(jspc,(int(len(jspc)/nChannels),nChannels,nProf,nHeights))
2614 2615 jcspc= numpy.reshape(jcspc,(int(len(jcspc)/int(nChannels/2)),int(nChannels/2),nProf,nHeights))
2615 2616 jnoise= numpy.reshape(jnoise,(int(len(jnoise)/nChannels),nChannels))
2616 2617 else:
2617 2618 dataOut.flagNoData = True
2618 2619 return dataOut
2619 2620 else :
2620 2621 if (tini.tm_min % taver) == 0 : self.fint = 1
2621 2622 else : self.fint = 0
2622 2623 self.index += 1
2623 2624 if numpy.any(self.buffer):
2624 2625 self.buffer = numpy.concatenate((self.buffer,dataOut.data_spc), axis=0)
2625 2626 self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0)
2626 2627 self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0)
2627 2628 else:
2628 2629 self.buffer = dataOut.data_spc
2629 2630 self.buffer2 = dataOut.data_cspc
2630 2631 self.buffer3 = dataOut.noise
2631 2632 dataOut.flagNoData = True
2632 2633 return dataOut
2633 2634 if path != None:
2634 2635 sys.path.append(path)
2635 2636 self.library = importlib.import_module(file)
2636 2637 if filec != None:
2637 2638 self.weightf = importlib.import_module(filec)
2638 2639
2639 2640 #To be inserted as a parameter
2640 2641 groupArray = numpy.array(groupList)
2641 2642 #groupArray = numpy.array([[0,1],[2,3]])
2642 2643 dataOut.groupList = groupArray
2643 2644 nGroups = groupArray.shape[0]
2644 2645 nChannels = dataOut.nChannels
2645 2646 nHeights = dataOut.heightList.size
2646 2647
2647 2648 #Parameters Array
2648 2649 dataOut.data_param = None
2649 2650 dataOut.data_paramC = None
2650 2651 dataOut.clean_num_aver = None
2651 2652 dataOut.coh_num_aver = None
2652 2653 dataOut.tmp_spectra_i = None
2653 2654 dataOut.tmp_cspectra_i = None
2654 2655 dataOut.tmp_spectra_c = None
2655 2656 dataOut.tmp_cspectra_c = None
2656 2657 dataOut.index = None
2657 2658
2658 2659 #Set constants
2659 2660 constants = self.library.setConstants(dataOut)
2660 2661 dataOut.constants = constants
2661 2662 M = dataOut.normFactor
2662 2663 N = dataOut.nFFTPoints
2663 2664 ippSeconds = dataOut.ippSeconds
2664 2665 K = dataOut.nIncohInt
2665 2666 pairsArray = numpy.array(dataOut.pairsList)
2666 2667 snrth= 20
2667 2668 spectra = dataOut.data_spc
2668 2669 cspectra = dataOut.data_cspc
2669 2670 nProf = dataOut.nProfiles
2670 2671 heights = dataOut.heightList
2671 2672 nHei = len(heights)
2672 2673 channels = dataOut.channelList
2673 2674 nChan = len(channels)
2674 2675 nIncohInt = dataOut.nIncohInt
2675 2676 crosspairs = dataOut.groupList
2676 2677 noise = dataOut.noise
2677 2678 jnoise = jnoise/N
2678 2679 noise = numpy.nansum(jnoise,axis=0)#/len(jnoise)
2679 2680 power = numpy.sum(spectra, axis=1)
2680 2681 nPairs = len(crosspairs)
2681 2682 absc = dataOut.abscissaList[:-1]
2682 2683
2683 2684 if not self.isConfig:
2684 2685 self.isConfig = True
2685 2686
2686 2687 index = tini.tm_hour*12+tini.tm_min/taver
2687 2688 dataOut.index= index
2688 2689 jspc = jspc/N/N
2689 2690 jcspc = jcspc/N/N
2690 2691 tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2)
2691 2692 jspectra = tmp_spectra*len(jspc[:,0,0,0])
2692 2693 jcspectra = tmp_cspectra*len(jspc[:,0,0,0])
2693 2694 my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, snrth,coh_th, hei_th)
2694 2695 clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index)
2695 2696 dataOut.data_spc = incoh_spectra
2696 2697 dataOut.data_cspc = incoh_cspectra
2697 2698 clean_num_aver = incoh_aver*len(jspc[:,0,0,0])
2698 2699 coh_num_aver = clean_coh_aver*len(jspc[:,0,0,0])
2699 2700 dataOut.clean_num_aver = clean_num_aver
2700 2701 dataOut.coh_num_aver = coh_num_aver
2701 2702 dataOut.tmp_spectra_i = incoh_spectra
2702 2703 dataOut.tmp_cspectra_i = incoh_cspectra
2703 2704 dataOut.tmp_spectra_c = clean_coh_spectra
2704 2705 dataOut.tmp_cspectra_c = clean_coh_cspectra
2705 2706 #List of possible combinations
2706 2707 listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2)
2707 2708 indCross = numpy.zeros(len(list(listComb)), dtype = 'int')
2708 2709 if getSNR:
2709 2710 listChannels = groupArray.reshape((groupArray.size))
2710 2711 listChannels.sort()
2711 2712 dataOut.data_SNR = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise[listChannels])
2712 2713 else:
2713 2714 clean_num_aver = dataOut.clean_num_aver
2714 2715 coh_num_aver = dataOut.coh_num_aver
2715 2716 dataOut.data_spc = dataOut.tmp_spectra_i
2716 2717 dataOut.data_cspc = dataOut.tmp_cspectra_i
2717 2718 clean_coh_spectra = dataOut.tmp_spectra_c
2718 2719 clean_coh_cspectra = dataOut.tmp_cspectra_c
2719 2720 jspectra = dataOut.data_spc+clean_coh_spectra
2720 2721 nHeights = len(dataOut.heightList) # nhei
2721 2722 nProf = int(dataOut.nProfiles)
2722 2723 dataOut.nProfiles = nProf
2723 2724 dataOut.data_param = None
2724 2725 dataOut.data_paramC = None
2725 2726 dataOut.code = numpy.array([[-1.,-1.,1.],[1.,1.,-1.]])
2726 2727 #M=600
2727 2728 #N=200
2728 2729 dataOut.flagDecodeData=True
2729 2730 M = int(dataOut.normFactor)
2730 2731 N = int(dataOut.nFFTPoints)
2731 2732 dataOut.nFFTPoints = N
2732 2733 dataOut.nIncohInt= int(dataOut.nIncohInt)
2733 2734 dataOut.nProfiles = int(dataOut.nProfiles)
2734 2735 dataOut.nCohInt = int(dataOut.nCohInt)
2735 print('sale',dataOut.nProfiles,dataOut.nHeights)
2736 2736 #dataOut.nFFTPoints=nprofs
2737 2737 #dataOut.normFactor = nprofs
2738 2738 dataOut.channelList = channelList
2739 2739 #dataOut.ippFactor=1
2740 2740 #ipp = ipp/150*1.e-3
2741 2741 vmax = (300000000/49920000.0/2) / (dataOut.ippSeconds)
2742 2742 #dataOut.ippSeconds=ipp
2743 2743 absc = vmax*( numpy.arange(nProf,dtype='float')-nProf/2.)/nProf
2744 print('sale 2',dataOut.ippSeconds,M,N)
2745 print('Empieza procesamiento offline')
2746 2744 if path != None:
2747 2745 sys.path.append(path)
2748 2746 self.library = importlib.import_module(file)
2749 2747 constants = self.library.setConstants(dataOut)
2750 2748 constants['M'] = M
2751 2749 dataOut.constants = constants
2752 2750
2753 2751 groupArray = numpy.array(groupList)
2754 2752 dataOut.groupList = groupArray
2755 2753 nGroups = groupArray.shape[0]
2756 2754 #List of possible combinations
2757 2755 listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2)) # materialized so it can be iterated more than once
2758 2756 indCross = numpy.zeros(len(listComb), dtype = 'int')
2759 2757 if dataOut.data_paramC is None:
2760 2758 dataOut.data_paramC = numpy.zeros((nGroups*4, nHeights,2))*numpy.nan
2761 2759 for i in range(nGroups):
2762 2760 coord = groupArray[i,:]
2763 2761 #Input data array
2764 2762 data = dataOut.data_spc[coord,:,:]/(M*N)
2765 2763 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
2766 2764
2767 2765 #Cross Spectra data array for Covariance Matrixes
2768 2766 ind = 0
2769 2767 for pairs in listComb:
2770 2768 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]]) # use the pair indices (x and y are not defined in this loop)
2771 2769 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
2772 2770 ind += 1
2773 2771 dataCross = dataOut.data_cspc[indCross,:,:]/(M*N)
2774 2772 dataCross = dataCross**2
2775 2773 nhei = nHeights
2776 2774 poweri = numpy.sum(dataOut.data_spc[:,1:nProf-0,:],axis=1)/clean_num_aver[:,:]
2777 2775 if i == 0 : my_noises = numpy.zeros(4,dtype=float)
2778 2776 n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(nProf-1)
2779 2777 n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(nProf-1)
2780 2778 n0 = n0i
2781 2779 n1= n1i
2782 2780 my_noises[2*i+0] = n0
2783 2781 my_noises[2*i+1] = n1
2784 2782 snrth = -15.0 # -4 -16 -25
2785 2783 snrth = 10**(snrth/10.0)
2786 2784 jvelr = numpy.zeros(nHeights, dtype = 'float')
2787 2785 hvalid = [0]
2788 2786 coh2 = abs(dataOut.data_cspc[i,1:nProf,:])**2/(dataOut.data_spc[0+i*2,1:nProf-0,:]*dataOut.data_spc[1+i*2,1:nProf-0,:])
2787
2789 2788 for h in range(nHeights):
2790 2789 smooth = clean_num_aver[i+1,h]
2791 2790 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
2792 2791 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
2793 2792 signal0 = signalpn0-n0
2794 2793 signal1 = signalpn1-n1
2795 2794 snr0 = numpy.sum(signal0/n0)/(nProf-1)
2796 2795 snr1 = numpy.sum(signal1/n1)/(nProf-1)
2797 2796 gamma = coh2[:,h]
2798 2797 indxs = (numpy.isfinite(list(gamma))==True).nonzero()
2799 2798 if len(indxs) >0:
2800 2799 if numpy.nanmean(gamma) > 0.07:
2801 2800 maxp0 = numpy.argmax(signal0*gamma)
2802 2801 maxp1 = numpy.argmax(signal1*gamma)
2803 2802 #print('usa gamma',numpy.nanmean(gamma))
2804 2803 else:
2805 2804 maxp0 = numpy.argmax(signal0)
2806 2805 maxp1 = numpy.argmax(signal1)
2807 2806 jvelr[h] = (absc[maxp0]+absc[maxp1])/2.
2808 2807 else: jvelr[h] = absc[0]
2809 2808 if snr0 > 0.1 and snr1 > 0.1: hvalid = numpy.concatenate((hvalid,h), axis=None)
2810 2809 #print(maxp0,absc[maxp0],snr0,jvelr[h])
2811 2810
2812 2811 if len(hvalid)> 1: fd0 = numpy.median(jvelr[hvalid[1:]])*-1
2813 2812 else: fd0 = numpy.nan
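# Doppler seed: jvelr[h] holds the velocity of the (coherence-weighted) spectral peak at each
# height, and the median over the heights with usable SNR gives fd0 (sign flipped), which is
# used below as the initial value p0[3] of the least-squares fit.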
2814 2813 for h in range(nHeights):
2815 2814 d = data[:,h]
2816 2815 smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:]
2817 2816 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
2818 2817 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
2819 2818 signal0 = signalpn0-n0
2820 2819 signal1 = signalpn1-n1
2821 2820 snr0 = numpy.sum(signal0/n0)/(nProf-1)
2822 2821 snr1 = numpy.sum(signal1/n1)/(nProf-1)
2823 2822 if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 :
2824 2823 #Covariance Matrix
2825 2824 D = numpy.diag(d**2)
2826 2825 ind = 0
2827 2826 for pairs in listComb:
2828 2827 #Coordinates in Covariance Matrix
2829 2828 x = pairs[0]
2830 2829 y = pairs[1]
2831 2830 #Channel Index
2832 2831 S12 = dataCross[ind,:,h]
2833 2832 D12 = numpy.diag(S12)
2834 2833 #Completing Covariance Matrix with Cross Spectra
2835 2834 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
2836 2835 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
2837 2836 ind += 1
2838 2837 diagD = numpy.zeros(256)
2839 2838
2840 2839 try:
2841 2840 Dinv=numpy.linalg.inv(D)
2842 2841 L=numpy.linalg.cholesky(Dinv)
2843 2842 except:
2844 2843 Dinv = D*numpy.nan
2845 2844 L= D*numpy.nan
2846 2845 LT=L.T
2847 2846
2848 2847 dp = numpy.dot(LT,d)
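# Whitening: with Dinv = L*L.T from the Cholesky factorization of the inverse covariance,
# dp = L.T*d and L.T*f(p) have approximately unit covariance, so the ordinary least squares
# on dp - L.T*f(p) below is equivalent to a fit weighted by the covariance matrix D built above.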
2849 2848 #Initial values
2850 2849 data_spc = dataOut.data_spc[coord,:,h]
2851 2850 w = data_spc/data_spc
2852 2851 if filec != None:
2853 2852 w = self.weightf.weightfit(w,tini.tm_year,tini.tm_yday,index,h,i)
2854 2853 if (h>6)and(error1[3]<25):
2855 p0 = dataOut.data_param[i,:,h-1]
2854 p0 = dataOut.data_param[i,:,h-1].copy()
2856 2855 else:
2857 2856 p0 = numpy.array(self.library.initialValuesFunction(data_spc*w, constants)) # without the i (data_spc, constants, i)
2858 #print("AQUI REEMPLAZAS CON EL fd0",fd0)
2859 2857 p0[3] = fd0
2858
2860 2859 if filec != None:
2861 2860 p0 = self.weightf.Vrfit(p0,tini.tm_year,tini.tm_yday,index,h,i)
2862 2861
2863 #print("minP0-ANTES DE OPTIMIZE")
2864 #print(p0)
2865 2862 try:
2866 2863 #Least Squares
2867 2864 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
2868 print("VERIFICAR SI ESTA VARIANDO minp--------------------------------", minp[3])
2869 print("COMPARACION ---- p0[3],fd0",p0[3],fd0)
2870 2865 #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
2871 2866 #Chi square error
2872 2867 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
2873 2868 #Error with Jacobian
2874 2869 error1 = self.library.errorFunction(minp,constants,LT)
2875 2870
2876 2871 except:
2877 2872 minp = p0*numpy.nan
2878 2873 error0 = numpy.nan
2879 2874 error1 = p0*numpy.nan
2880 2875 else :
2881 2876 data_spc = dataOut.data_spc[coord,:,h]
2882 2877 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))
2883 2878 minp = p0*numpy.nan
2884 2879 error0 = numpy.nan
2885 2880 error1 = p0*numpy.nan
2886 2881 if dataOut.data_param is None:
2887 2882 dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
2888 2883 dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
2889 print("CLASE SF minp- ?????",minp)
2890 2884 dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
2891 2885 dataOut.data_param[i,:,h] = minp
2886
2892 2887 for ht in range(nHeights-1) :
2893 2888 smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam]
2894 2889 dataOut.data_paramC[4*i,ht,1] = smooth
2895 2890 signalpn0 = (clean_coh_spectra[i*2 ,1:(nProf-0),ht])/smooth #coh_spectra
2896 2891 signalpn1 = (clean_coh_spectra[i*2+1,1:(nProf-0),ht])/smooth
2897 2892 val0 = (signalpn0 > 0).nonzero()
2898 2893 val0 = val0[0]
2899 2894 if len(val0) == 0 : val0_npoints = nProf
2900 2895 else : val0_npoints = len(val0)
2901 2896
2902 2897 val1 = (signalpn1 > 0).nonzero()
2903 2898 val1 = val1[0]
2904 2899 if len(val1) == 0 : val1_npoints = nProf
2905 2900 else : val1_npoints = len(val1)
2906 2901
2907 2902 dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0
2908 2903 dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1
2909 2904
2910 2905 signal0 = (signalpn0-n0)
2911 2906 vali = (signal0 < 0).nonzero()
2912 2907 vali = vali[0]
2913 2908 if len(vali) > 0 : signal0[vali] = 0
2914 2909 signal1 = (signalpn1-n1)
2915 2910 vali = (signal1 < 0).nonzero()
2916 2911 vali = vali[0]
2917 2912 if len(vali) > 0 : signal1[vali] = 0
2918 2913 snr0 = numpy.sum(signal0/n0)/(nProf-1)
2919 2914 snr1 = numpy.sum(signal1/n1)/(nProf-1)
2920 2915 doppler = absc[1:]
2921 2916 if snr0 >= snrth and snr1 >= snrth and smooth :
2922 2917 signalpn0_n0 = signalpn0
2923 2918 signalpn0_n0[val0] = signalpn0[val0] - n0
2924 2919 mom0 = self.moments(doppler,signalpn0-n0,nProf)
2925 2920 signalpn1_n1 = signalpn1
2926 2921 signalpn1_n1[val1] = signalpn1[val1] - n1
2927 2922 mom1 = self.moments(doppler,signalpn1_n1,nProf)
2928 2923 dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2.
2929 2924 dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2.
2930
2931 2925 dataOut.data_spc = jspectra
2932 2926 dataOut.spc_noise = my_noises*nProf*M
2933 2927 if numpy.any(proc): dataOut.spc_noise = my_noises*nProf*M
2934 2928 if getSNR:
2935 2929 listChannels = groupArray.reshape((groupArray.size))
2936 2930 listChannels.sort()
2937 2931
2938 2932 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], my_noises[listChannels])
2939 2933 return dataOut
2940 2934
2941 2935 def __residFunction(self, p, dp, LT, constants):
2942 2936
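# Returns the whitened residual L.T*d - L.T*f(p); optimize.leastsq minimizes its sum of
# squares, which is why error0 above is computed as sum(fvec**2)/(2*N).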
2943 2937 fm = self.library.modelFunction(p, constants)
2944 2938 fmp=numpy.dot(LT,fm)
2945 2939 return dp-fmp
2946 2940
2947 2941 def __getSNR(self, z, noise):
2948 2942
2949 2943 avg = numpy.average(z, axis=1)
2950 2944 SNR = (avg.T-noise)/noise
2951 2945 SNR = SNR.T
2952 2946 return SNR
2953 2947
2954 2948 def __chisq(self, p, chindex, hindex):
2955 2949 #similar to Resid but calculates CHI**2
2956 2950 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
2957 2951 dp=numpy.dot(LT,d)
2958 2952 fmp=numpy.dot(LT,fm)
2959 2953 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
2960 2954 return chisq
2961 2955
2962 2956 class WindProfiler(Operation):
2963 2957
2964 2958 __isConfig = False
2965 2959
2966 2960 __initime = None
2967 2961 __lastdatatime = None
2968 2962 __integrationtime = None
2969 2963
2970 2964 __buffer = None
2971 2965
2972 2966 __dataReady = False
2973 2967
2974 2968 __firstdata = None
2975 2969
2976 2970 n = None
2977 2971
2978 2972 def __init__(self):
2979 2973 Operation.__init__(self)
2980 2974
2981 2975 def __calculateCosDir(self, elev, azim):
2982 2976 zen = (90 - elev)*numpy.pi/180
2983 2977 azim = azim*numpy.pi/180
2984 2978 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
2985 2979 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
2986 2980
2987 2981 signX = numpy.sign(numpy.cos(azim))
2988 2982 signY = numpy.sign(numpy.sin(azim))
2989 2983
2990 2984 cosDirX = numpy.copysign(cosDirX, signX)
2991 2985 cosDirY = numpy.copysign(cosDirY, signY)
2992 2986 return cosDirX, cosDirY
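# Equivalent closed form of the expressions above (the copysign calls restore the quadrant):
# cosDirX = sin(zen)*cos(azim) and cosDirY = sin(zen)*sin(azim), with zen = 90 - elev.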
2993 2987
2994 2988 def __calculateAngles(self, theta_x, theta_y, azimuth):
2995 2989
2996 2990 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
2997 2991 zenith_arr = numpy.arccos(dir_cosw)
2998 2992 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
2999 2993
3000 2994 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
3001 2995 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
3002 2996
3003 2997 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
3004 2998
3005 2999 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
3006 3000
3007 3001 if horOnly:
3008 3002 A = numpy.c_[dir_cosu,dir_cosv]
3009 3003 else:
3010 3004 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
3011 3005 A = numpy.asmatrix(A)
3012 3006 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
3013 3007
3014 3008 return A1
3015 3009
3016 3010 def __correctValues(self, heiRang, phi, velRadial, SNR):
3017 3011 listPhi = phi.tolist()
3018 3012 maxid = listPhi.index(max(listPhi))
3019 3013 minid = listPhi.index(min(listPhi))
3020 3014
3021 3015 rango = list(range(len(phi)))
3022 3016
3023 3017 heiRang1 = heiRang*math.cos(phi[maxid])
3024 3018 heiRangAux = heiRang*math.cos(phi[minid])
3025 3019 indOut = (heiRang1 < heiRangAux[0]).nonzero()
3026 3020 heiRang1 = numpy.delete(heiRang1,indOut)
3027 3021
3028 3022 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
3029 3023 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
3030 3024
3031 3025 for i in rango:
3032 3026 x = heiRang*math.cos(phi[i])
3033 3027 y1 = velRadial[i,:]
3034 3028 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
3035 3029
3036 3030 x1 = heiRang1
3037 3031 y11 = f1(x1)
3038 3032
3039 3033 y2 = SNR[i,:]
3040 3034 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
3041 3035 y21 = f2(x1)
3042 3036
3043 3037 velRadial1[i,:] = y11
3044 3038 SNR1[i,:] = y21
3045 3039
3046 3040 return heiRang1, velRadial1, SNR1
3047 3041
3048 3042 def __calculateVelUVW(self, A, velRadial):
3049 3043
3050 3044 #Matrix operation
3051 3045 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
3052 3046 velUVW[:,:] = numpy.dot(A,velRadial)
3053 3047
3054 3048
3055 3049 return velUVW
3056 3050
3057 3051 def techniqueDBS(self, kwargs):
3058 3052 """
3059 3053 Function that implements Doppler Beam Swinging (DBS) technique.
3060 3054
3061 3055 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
3062 3056 Direction correction (if necessary), Ranges and SNR
3063 3057
3064 3058 Output: Winds estimation (Zonal, Meridional and Vertical)
3065 3059
3066 3060 Parameters affected: Winds, height range, SNR
3067 3061 """
3068 3062 velRadial0 = kwargs['velRadial']
3069 3063 heiRang = kwargs['heightList']
3070 3064 SNR0 = kwargs['SNR']
3071 3065
3072 3066 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
3073 3067 theta_x = numpy.array(kwargs['dirCosx'])
3074 3068 theta_y = numpy.array(kwargs['dirCosy'])
3075 3069 else:
3076 3070 elev = numpy.array(kwargs['elevation'])
3077 3071 azim = numpy.array(kwargs['azimuth'])
3078 3072 theta_x, theta_y = self.__calculateCosDir(elev, azim)
3079 3073 azimuth = kwargs['correctAzimuth']
3080 3074 if 'horizontalOnly' in kwargs:
3081 3075 horizontalOnly = kwargs['horizontalOnly']
3082 3076 else: horizontalOnly = False
3083 3077 if 'correctFactor' in kwargs:
3084 3078 correctFactor = kwargs['correctFactor']
3085 3079 else: correctFactor = 1
3086 3080 if 'channelList' in kwargs:
3087 3081 channelList = kwargs['channelList']
3088 3082 if len(channelList) == 2:
3089 3083 horizontalOnly = True
3090 3084 arrayChannel = numpy.array(channelList)
3091 3085 param = param[arrayChannel,:,:]
3092 3086 theta_x = theta_x[arrayChannel]
3093 3087 theta_y = theta_y[arrayChannel]
3094 3088
3095 3089 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
3096 3090 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
3097 3091 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
3098 3092
3099 3093 #Computing the velocity components with DBS
3100 3094 winds = self.__calculateVelUVW(A,velRadial1)
3101 3095
3102 3096 return winds, heiRang1, SNR1
3103 3097
3104 3098 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
3105 3099
3106 3100 nPairs = len(pairs_ccf)
3107 3101 posx = numpy.asarray(posx)
3108 3102 posy = numpy.asarray(posy)
3109 3103
3110 3104 #Inverse rotation to align with the azimuth
3111 3105 if azimuth!= None:
3112 3106 azimuth = azimuth*math.pi/180
3113 3107 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
3114 3108 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
3115 3109 else:
3116 3110 posx1 = posx
3117 3111 posy1 = posy
3118 3112
3119 3113 #Distance computation
3120 3114 distx = numpy.zeros(nPairs)
3121 3115 disty = numpy.zeros(nPairs)
3122 3116 dist = numpy.zeros(nPairs)
3123 3117 ang = numpy.zeros(nPairs)
3124 3118
3125 3119 for i in range(nPairs):
3126 3120 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
3127 3121 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
3128 3122 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
3129 3123 ang[i] = numpy.arctan2(disty[i],distx[i])
3130 3124
3131 3125 return distx, disty, dist, ang
3132 3126 #Matrix computation
3133 3127
3134 3128
3135 3129 def __calculateVelVer(self, phase, lagTRange, _lambda):
3136 3130
3137 3131 Ts = lagTRange[1] - lagTRange[0]
3138 3132 velW = -_lambda*phase/(4*math.pi*Ts)
3139 3133
3140 3134 return velW
3141 3135
3142 3136 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
3143 3137 nPairs = tau1.shape[0]
3144 3138 nHeights = tau1.shape[1]
3145 3139 vel = numpy.zeros((nPairs,3,nHeights))
3146 3140 dist1 = numpy.reshape(dist, (dist.size,1))
3147 3141
3148 3142 angCos = numpy.cos(ang)
3149 3143 angSin = numpy.sin(ang)
3150 3144
3151 3145 vel0 = dist1*tau1/(2*tau2**2)
3152 3146 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
3153 3147 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
3154 3148
3155 3149 ind = numpy.where(numpy.isinf(vel))
3156 3150 vel[ind] = numpy.nan
3157 3151
3158 3152 return vel
3159 3153
3160 3154 def techniqueSA(self, kwargs):
3161 3155
3162 3156 """
3163 3157 Function that implements Spaced Antenna (SA) technique.
3164 3158
3165 3159 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
3166 3160 Direction correction (if necessary), Ranges and SNR
3167 3161
3168 3162 Output: Winds estimation (Zonal, Meridional and Vertical)
3169 3163
3170 3164 Parameters affected: Winds
3171 3165 """
3172 3166 position_x = kwargs['positionX']
3173 3167 position_y = kwargs['positionY']
3174 3168 azimuth = kwargs['azimuth']
3175 3169
3176 3170 if 'correctFactor' in kwargs:
3177 3171 correctFactor = kwargs['correctFactor']
3178 3172 else:
3179 3173 correctFactor = 1
3180 3174
3181 3175 groupList = kwargs['groupList']
3182 3176 pairs_ccf = groupList[1]
3183 3177 tau = kwargs['tau']
3184 3178 _lambda = kwargs['_lambda']
3185 3179
3186 3180 #Cross Correlation pairs obtained
3187 3181
3188 3182 indtau = tau.shape[0]//2
3189 3183 tau1 = tau[:indtau,:]
3190 3184 tau2 = tau[indtau:-1,:]
3191 3185 phase1 = tau[-1,:]
3192 3186
3193 3187 #---------------------------------------------------------------------
3194 3188 #Direct method
3195 3189 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
3196 3190 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
3197 3191 winds = stats.nanmean(winds, axis=0)
3198 3192 #---------------------------------------------------------------------
3199 3193 #General method
3200 3194
3201 3195 #---------------------------------------------------------------------
3202 3196 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
3203 3197 winds = correctFactor*winds
3204 3198 return winds
3205 3199
3206 3200 def __checkTime(self, currentTime, paramInterval, outputInterval):
3207 3201
3208 3202 dataTime = currentTime + paramInterval
3209 3203 deltaTime = dataTime - self.__initime
3210 3204
3211 3205 if deltaTime >= outputInterval or deltaTime < 0:
3212 3206 self.__dataReady = True
3213 3207 return
3214 3208
3215 3209 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
3216 3210 '''
3217 3211 Function that implements winds estimation technique with detected meteors.
3218 3212
3219 3213 Input: Detected meteors, Minimum meteor quantity to wind estimation
3220 3214
3221 3215 Output: Winds estimation (Zonal and Meridional)
3222 3216
3223 3217 Parameters affected: Winds
3224 3218 '''
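# Sketch of the fit (assuming the vertical wind contribution of each meteor is negligible):
# a meteor with zenith angle zen and azimuth azim gives vel ~ u*l + v*m with
# l = sin(zen)*sin(azim) and m = sin(zen)*cos(azim); for every 2 km height bin with at least
# meteorThresh meteors, (u, v) is taken as the least-squares solution of A*[u, v] = vel.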
3225 3219 #Settings
3226 3220 nInt = (heightMax - heightMin)/2
3227 3221 nInt = int(nInt)
3228 3222 winds = numpy.zeros((2,nInt))*numpy.nan
3229 3223
3230 3224 #Filter errors
3231 3225 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
3232 3226 finalMeteor = arrayMeteor[error,:]
3233 3227
3234 3228 #Meteor Histogram
3235 3229 finalHeights = finalMeteor[:,2]
3236 3230 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
3237 3231 nMeteorsPerI = hist[0]
3238 3232 heightPerI = hist[1]
3239 3233
3240 3234 #Sort of meteors
3241 3235 indSort = finalHeights.argsort()
3242 3236 finalMeteor2 = finalMeteor[indSort,:]
3243 3237
3244 3238 # Calculating winds
3245 3239 ind1 = 0
3246 3240 ind2 = 0
3247 3241
3248 3242 for i in range(nInt):
3249 3243 nMet = nMeteorsPerI[i]
3250 3244 ind1 = ind2
3251 3245 ind2 = ind1 + nMet
3252 3246
3253 3247 meteorAux = finalMeteor2[ind1:ind2,:]
3254 3248
3255 3249 if meteorAux.shape[0] >= meteorThresh:
3256 3250 vel = meteorAux[:, 6]
3257 3251 zen = meteorAux[:, 4]*numpy.pi/180
3258 3252 azim = meteorAux[:, 3]*numpy.pi/180
3259 3253
3260 3254 n = numpy.cos(zen)
3261 3255 l = numpy.sin(zen)*numpy.sin(azim)
3262 3256 m = numpy.sin(zen)*numpy.cos(azim)
3263 3257
3264 3258 A = numpy.vstack((l, m)).transpose()
3265 3259 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
3266 3260 windsAux = numpy.dot(A1, vel)
3267 3261
3268 3262 winds[0,i] = windsAux[0]
3269 3263 winds[1,i] = windsAux[1]
3270 3264
3271 3265 return winds, heightPerI[:-1]
3272 3266
3273 3267 def techniqueNSM_SA(self, **kwargs):
3274 3268 metArray = kwargs['metArray']
3275 3269 heightList = kwargs['heightList']
3276 3270 timeList = kwargs['timeList']
3277 3271
3278 3272 rx_location = kwargs['rx_location']
3279 3273 groupList = kwargs['groupList']
3280 3274 azimuth = kwargs['azimuth']
3281 3275 dfactor = kwargs['dfactor']
3282 3276 k = kwargs['k']
3283 3277
3284 3278 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
3285 3279 d = dist*dfactor
3286 3280 #Phase calculation
3287 3281 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
3288 3282
3289 3283 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
3290 3284
3291 3285 velEst = numpy.zeros((heightList.size,2))*numpy.nan
3292 3286 azimuth1 = azimuth1*numpy.pi/180
3293 3287
3294 3288 for i in range(heightList.size):
3295 3289 h = heightList[i]
3296 3290 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
3297 3291 metHeight = metArray1[indH,:]
3298 3292 if metHeight.shape[0] >= 2:
3299 3293 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
3300 3294 iazim = metHeight[:,1].astype(int)
3301 3295 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
3302 3296 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
3303 3297 A = numpy.asmatrix(A)
3304 3298 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
3305 3299 velHor = numpy.dot(A1,velAux)
3306 3300
3307 3301 velEst[i,:] = numpy.squeeze(velHor)
3308 3302 return velEst
3309 3303
3310 3304 def __getPhaseSlope(self, metArray, heightList, timeList):
3311 3305 meteorList = []
3312 3306 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
3313 3307 #Putting back together the meteor matrix
3314 3308 utctime = metArray[:,0]
3315 3309 uniqueTime = numpy.unique(utctime)
3316 3310
3317 3311 phaseDerThresh = 0.5
3318 3312 ippSeconds = timeList[1] - timeList[0]
3319 3313 sec = numpy.where(timeList>1)[0][0]
3320 3314 nPairs = metArray.shape[1] - 6
3321 3315 nHeights = len(heightList)
3322 3316
3323 3317 for t in uniqueTime:
3324 3318 metArray1 = metArray[utctime==t,:]
3325 3319 tmet = metArray1[:,1].astype(int)
3326 3320 hmet = metArray1[:,2].astype(int)
3327 3321
3328 3322 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
3329 3323 metPhase[:,:] = numpy.nan
3330 3324 metPhase[:,hmet,tmet] = metArray1[:,6:].T
3331 3325
3332 3326 #Delete short trails
3333 3327 metBool = ~numpy.isnan(metPhase[0,:,:])
3334 3328 heightVect = numpy.sum(metBool, axis = 1)
3335 3329 metBool[heightVect<sec,:] = False
3336 3330 metPhase[:,heightVect<sec,:] = numpy.nan
3337 3331
3338 3332 #Derivative
3339 3333 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
3340 3334 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
3341 3335 metPhase[phDerAux] = numpy.nan
3342 3336
3343 3337 #--------------------------METEOR DETECTION -----------------------------------------
3344 3338 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
3345 3339
3346 3340 for p in numpy.arange(nPairs):
3347 3341 phase = metPhase[p,:,:]
3348 3342 phDer = metDer[p,:,:]
3349 3343
3350 3344 for h in indMet:
3351 3345 height = heightList[h]
3352 3346 phase1 = phase[h,:] #82
3353 3347 phDer1 = phDer[h,:]
3354 3348
3355 3349 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
3356 3350
3357 3351 indValid = numpy.where(~numpy.isnan(phase1))[0]
3358 3352 initMet = indValid[0]
3359 3353 endMet = 0
3360 3354
3361 3355 for i in range(len(indValid)-1):
3362 3356
3363 3357 #Time difference
3364 3358 inow = indValid[i]
3365 3359 inext = indValid[i+1]
3366 3360 idiff = inext - inow
3367 3361 #Phase difference
3368 3362 phDiff = numpy.abs(phase1[inext] - phase1[inow])
3369 3363
3370 3364 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
3371 3365 sizeTrail = inow - initMet + 1
3372 3366 if sizeTrail>3*sec: #Too short meteors
3373 3367 x = numpy.arange(initMet,inow+1)*ippSeconds
3374 3368 y = phase1[initMet:inow+1]
3375 3369 ynnan = ~numpy.isnan(y)
3376 3370 x = x[ynnan]
3377 3371 y = y[ynnan]
3378 3372 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
3379 3373 ylin = x*slope + intercept
3380 3374 rsq = r_value**2
3381 3375 if rsq > 0.5:
3382 3376 vel = slope#*height*1000/(k*d)
3383 3377 estAux = numpy.array([utctime,p,height, vel, rsq])
3384 3378 meteorList.append(estAux)
3385 3379 initMet = inext
3386 3380 metArray2 = numpy.array(meteorList)
3387 3381
3388 3382 return metArray2
3389 3383
3390 3384 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
3391 3385
3392 3386 azimuth1 = numpy.zeros(len(pairslist))
3393 3387 dist = numpy.zeros(len(pairslist))
3394 3388
3395 3389 for i in range(len(rx_location)):
3396 3390 ch0 = pairslist[i][0]
3397 3391 ch1 = pairslist[i][1]
3398 3392
3399 3393 diffX = rx_location[ch0][0] - rx_location[ch1][0]
3400 3394 diffY = rx_location[ch0][1] - rx_location[ch1][1]
3401 3395 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
3402 3396 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
3403 3397
3404 3398 azimuth1 -= azimuth0
3405 3399 return azimuth1, dist
3406 3400
3407 3401 def techniqueNSM_DBS(self, **kwargs):
3408 3402 metArray = kwargs['metArray']
3409 3403 heightList = kwargs['heightList']
3410 3404 timeList = kwargs['timeList']
3411 3405 azimuth = kwargs['azimuth']
3412 3406 theta_x = numpy.array(kwargs['theta_x'])
3413 3407 theta_y = numpy.array(kwargs['theta_y'])
3414 3408
3415 3409 utctime = metArray[:,0]
3416 3410 cmet = metArray[:,1].astype(int)
3417 3411 hmet = metArray[:,3].astype(int)
3418 3412 SNRmet = metArray[:,4]
3419 3413 vmet = metArray[:,5]
3420 3414 spcmet = metArray[:,6]
3421 3415
3422 3416 nChan = numpy.max(cmet) + 1
3423 3417 nHeights = len(heightList)
3424 3418
3425 3419 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
3426 3420 hmet = heightList[hmet]
3427 3421 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
3428 3422
3429 3423 velEst = numpy.zeros((heightList.size,2))*numpy.nan
3430 3424
3431 3425 for i in range(nHeights - 1):
3432 3426 hmin = heightList[i]
3433 3427 hmax = heightList[i + 1]
3434 3428
3435 3429 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
3436 3430 indthisH = numpy.where(thisH)
3437 3431
3438 3432 if numpy.size(indthisH) > 3:
3439 3433
3440 3434 vel_aux = vmet[thisH]
3441 3435 chan_aux = cmet[thisH]
3442 3436 cosu_aux = dir_cosu[chan_aux]
3443 3437 cosv_aux = dir_cosv[chan_aux]
3444 3438 cosw_aux = dir_cosw[chan_aux]
3445 3439
3446 3440 nch = numpy.size(numpy.unique(chan_aux))
3447 3441 if nch > 1:
3448 3442 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
3449 3443 velEst[i,:] = numpy.dot(A,vel_aux)
3450 3444
3451 3445 return velEst
3452 3446
3453 3447 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
3454 3448
3455 3449 param = dataOut.moments
3456 3450 if numpy.any(dataOut.abscissaList):
3457 3451 absc = dataOut.abscissaList[:-1]
3458 3452 # noise = dataOut.noise
3459 3453 heightList = dataOut.heightList
3460 3454 SNR = dataOut.data_snr
3461 3455
3462 3456 if technique == 'DBS':
3463 3457
3464 3458 kwargs['velRadial'] = param[:,1,:] #Radial velocity
3465 3459 kwargs['heightList'] = heightList
3466 3460 kwargs['SNR'] = SNR
3467 3461
3468 3462 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
3469 3463 dataOut.utctimeInit = dataOut.utctime
3470 3464 dataOut.outputInterval = dataOut.paramInterval
3471 3465
3472 3466 elif technique == 'SA':
3473 3467
3474 3468 #Parameters
3475 3469 kwargs['groupList'] = dataOut.groupList
3476 3470 kwargs['tau'] = dataOut.data_param
3477 3471 kwargs['_lambda'] = dataOut.C/dataOut.frequency
3478 3472 dataOut.data_output = self.techniqueSA(kwargs)
3479 3473 dataOut.utctimeInit = dataOut.utctime
3480 3474 dataOut.outputInterval = dataOut.timeInterval
3481 3475
3482 3476 elif technique == 'Meteors':
3483 3477 dataOut.flagNoData = True
3484 3478 self.__dataReady = False
3485 3479
3486 3480 if 'nHours' in kwargs:
3487 3481 nHours = kwargs['nHours']
3488 3482 else:
3489 3483 nHours = 1
3490 3484
3491 3485 if 'meteorsPerBin' in kwargs:
3492 3486 meteorThresh = kwargs['meteorsPerBin']
3493 3487 else:
3494 3488 meteorThresh = 6
3495 3489
3496 3490 if 'hmin' in kwargs:
3497 3491 hmin = kwargs['hmin']
3498 3492 else: hmin = 70
3499 3493 if 'hmax' in kwargs:
3500 3494 hmax = kwargs['hmax']
3501 3495 else: hmax = 110
3502 3496
3503 3497 dataOut.outputInterval = nHours*3600
3504 3498
3505 3499 if self.__isConfig == False:
3506 3500 #Get Initial LTC time
3507 3501 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
3508 3502 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
3509 3503
3510 3504 self.__isConfig = True
3511 3505
3512 3506 if self.__buffer is None:
3513 3507 self.__buffer = dataOut.data_param
3514 3508 self.__firstdata = copy.copy(dataOut)
3515 3509
3516 3510 else:
3517 3511 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
3518 3512
3519 3513 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
3520 3514
3521 3515 if self.__dataReady:
3522 3516 dataOut.utctimeInit = self.__initime
3523 3517
3524 3518 self.__initime += dataOut.outputInterval #to erase time offset
3525 3519
3526 3520 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
3527 3521 dataOut.flagNoData = False
3528 3522 self.__buffer = None
3529 3523
3530 3524 elif technique == 'Meteors1':
3531 3525 dataOut.flagNoData = True
3532 3526 self.__dataReady = False
3533 3527
3534 3528 if 'nMins' in kwargs:
3535 3529 nMins = kwargs['nMins']
3536 3530 else: nMins = 20
3537 3531 if 'rx_location' in kwargs:
3538 3532 rx_location = kwargs['rx_location']
3539 3533 else: rx_location = [(0,1),(1,1),(1,0)]
3540 3534 if 'azimuth' in kwargs:
3541 3535 azimuth = kwargs['azimuth']
3542 3536 else: azimuth = 51.06
3543 3537 if 'dfactor' in kwargs:
3544 3538 dfactor = kwargs['dfactor']
3545 3539 if 'mode' in kwargs:
3546 3540 mode = kwargs['mode']
3547 3541 if 'theta_x' in kwargs:
3548 3542 theta_x = kwargs['theta_x']
3549 3543 if 'theta_y' in kwargs:
3550 3544 theta_y = kwargs['theta_y']
3551 3545 else: mode = 'SA'
3552 3546
3553 3547 #Delete this later
3554 3548 if dataOut.groupList is None:
3555 3549 dataOut.groupList = [(0,1),(0,2),(1,2)]
3556 3550 groupList = dataOut.groupList
3557 3551 C = 3e8
3558 3552 freq = 50e6
3559 3553 lamb = C/freq
3560 3554 k = 2*numpy.pi/lamb
3561 3555
3562 3556 timeList = dataOut.abscissaList
3563 3557 heightList = dataOut.heightList
3564 3558
3565 3559 if self.__isConfig == False:
3566 3560 dataOut.outputInterval = nMins*60
3567 3561 #Get Initial LTC time
3568 3562 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
3569 3563 minuteAux = initime.minute
3570 3564 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
3571 3565 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
3572 3566
3573 3567 self.__isConfig = True
3574 3568
3575 3569 if self.__buffer is None:
3576 3570 self.__buffer = dataOut.data_param
3577 3571 self.__firstdata = copy.copy(dataOut)
3578 3572
3579 3573 else:
3580 3574 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
3581 3575
3582 3576 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
3583 3577
3584 3578 if self.__dataReady:
3585 3579 dataOut.utctimeInit = self.__initime
3586 3580 self.__initime += dataOut.outputInterval #to erase time offset
3587 3581
3588 3582 metArray = self.__buffer
3589 3583 if mode == 'SA':
3590 3584 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
3591 3585 elif mode == 'DBS':
3592 3586 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
3593 3587 dataOut.data_output = dataOut.data_output.T
3594 3588 dataOut.flagNoData = False
3595 3589 self.__buffer = None
3596 3590
3597 3591 return dataOut
3598 3592
3599 3593 class EWDriftsEstimation(Operation):
3600 3594
3601 3595 def __init__(self):
3602 3596 Operation.__init__(self)
3603 3597
3604 3598 def __correctValues(self, heiRang, phi, velRadial, SNR):
3605 3599 listPhi = phi.tolist()
3606 3600 maxid = listPhi.index(max(listPhi))
3607 3601 minid = listPhi.index(min(listPhi))
3608 3602
3609 3603 rango = list(range(len(phi)))
3610 3604 heiRang1 = heiRang*math.cos(phi[maxid])
3611 3605 heiRangAux = heiRang*math.cos(phi[minid])
3612 3606 indOut = (heiRang1 < heiRangAux[0]).nonzero()
3613 3607 heiRang1 = numpy.delete(heiRang1,indOut)
3614 3608
3615 3609 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
3616 3610 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
3617 3611
3618 3612 for i in rango:
3619 3613 x = heiRang*math.cos(phi[i])
3620 3614 y1 = velRadial[i,:]
3621 3615 vali= (numpy.isfinite(y1)==True).nonzero()
3622 3616 y1=y1[vali]
3623 3617 x = x[vali]
3624 3618 f1 = interpolate.interp1d(x,y1,kind = 'cubic',bounds_error=False)
3625 3619 x1 = heiRang1
3626 3620 y11 = f1(x1)
3627 3621 y2 = SNR[i,:]
3628 3622 x = heiRang*math.cos(phi[i])
3629 3623 vali= (y2 != -1).nonzero()
3630 3624 y2 = y2[vali]
3631 3625 x = x[vali]
3632 3626 f2 = interpolate.interp1d(x,y2,kind = 'cubic',bounds_error=False)
3633 3627 y21 = f2(x1)
3634 3628
3635 3629 velRadial1[i,:] = y11
3636 3630 SNR1[i,:] = y21
3637 3631
3638 3632 return heiRang1, velRadial1, SNR1
3639 3633
3640 3634 def run(self, dataOut, zenith, zenithCorrection,fileDrifts):
3641 3635
3642 3636 heiRang = dataOut.heightList
3643 3637 velRadial = dataOut.data_param[:,3,:]
3644 3638 velRadialm = dataOut.data_param[:,2:4,:]*-1
3645 3639 rbufc=dataOut.data_paramC[:,:,0]
3646 3640 ebufc=dataOut.data_paramC[:,:,1]
3647 3641 SNR = dataOut.data_snr
3648 3642 velRerr = dataOut.data_error[:,4,:]
3649 3643 channels = dataOut.channelList
3650 3644 nChan = len(channels)
3651 3645 my_nbeams = nChan/2
3652 3646 if my_nbeams == 2:
3653 3647 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]],[velRadialm[1,:]],[velRadialm[1,:]]))
3654 3648 else :
3655 3649 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]]))
3656 3650 dataOut.moments=moments
3657 print("CLASE EWD Estimation",moments)
3658 3651 # Coherent
3659 3652 smooth_wC = ebufc[0,:]
3660 3653 p_w0C = rbufc[0,:]
3661 3654 p_w1C = rbufc[1,:]
3662 3655 w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1)
3663 3656 t_wC = rbufc[3,:]
3664 3657 if my_nbeams == 1:
3665 3658 w = velRadial[0,:]
3666 3659 winds = velRadial.copy()
3667 3660 w_err = velRerr[0,:]
3668 3661 snr1 = 10*numpy.log10(SNR[0])
3669 3662 if my_nbeams == 2:
3670 3663 zenith = numpy.array(zenith)
3671 3664 zenith -= zenithCorrection
3672 3665 zenith *= numpy.pi/180
3673 3666 if zenithCorrection != 0 :
3674 3667 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
3675 3668 else :
3676 3669 heiRang1 = heiRang
3677 3670 velRadial1 = velRadial
3678 3671 SNR1 = SNR
3679 3672
3680 3673 alp = zenith[0]
3681 3674 bet = zenith[1]
3682 3675
3683 3676 w_w = velRadial1[0,:]
3684 3677 w_e = velRadial1[1,:]
3685 3678 w_w_err = velRerr[0,:]
3686 3679 w_e_err = velRerr[1,:]
3687 3680
3688 3681 val = (numpy.isfinite(w_w)==False).nonzero()
3689 3682 val = val[0]
3690 3683 bad = val
3691 3684 if len(bad) > 0 :
3692 3685 w_w[bad] = w_wC[bad]
3693 3686 w_w_err[bad]= numpy.nan
3694 3687 smooth_eC=ebufc[4,:]
3695 3688 p_e0C = rbufc[4,:]
3696 3689 p_e1C = rbufc[5,:]
3697 3690 w_eC = rbufc[6,:]*-1
3698 3691 t_eC = rbufc[7,:]
3699 3692 val = (numpy.isfinite(w_e)==False).nonzero()
3700 3693 val = val[0]
3701 3694 bad = val
3702 3695 if len(bad) > 0 :
3703 3696 w_e[bad] = w_eC[bad]
3704 3697 w_e_err[bad]= numpy.nan
3705 3698
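# Two-beam decomposition (assuming each radial velocity is w*cos(zenith) + u*sin(zenith) for
# its beam, with zenith angles alp and bet): solving that 2x2 system gives the vertical (w)
# and zonal (u) drifts below; the error estimates propagate the same coefficients.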
3706 3699 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
3707 3700 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
3708 3701
3709 3702 w_err = numpy.sqrt((w_w_err*numpy.sin(bet))**2.+(w_e_err*numpy.sin(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
3710 3703 u_err = numpy.sqrt((w_w_err*numpy.cos(bet))**2.+(w_e_err*numpy.cos(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
3711 3704
3712 3705 winds = numpy.vstack((w,u))
3713 3706
3714 3707 dataOut.heightList = heiRang1
3715 3708 snr1 = 10*numpy.log10(SNR1[0])
3716 3709 dataOut.data_output = winds
3717 3710 #snr1 = 10*numpy.log10(SNR1[0])
3718 3711 dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0]))
3719 3712 dataOut.utctimeInit = dataOut.utctime
3720 3713 dataOut.outputInterval = dataOut.timeInterval
3721 3714
3722 3715 hei_aver0 = 218
3723 3716 jrange = 450 #900 for HA drifts
3724 3717 deltah = 15.0 #dataOut.spacing(0) 25 HAD
3725 3718 h0 = 0.0 #dataOut.first_height(0)
3726 3719 heights = dataOut.heightList
3727 3720 nhei = len(heights)
3728 3721
3729 3722 range1 = numpy.arange(nhei) * deltah + h0
3730 3723 jhei = (range1 >= hei_aver0).nonzero()
3731 3724 if len(jhei[0]) > 0 :
3732 3725 h0_index = jhei[0][0] # Initial height for getting averages 218km
3733 3726
3734 3727 mynhei = 7
3735 3728 nhei_avg = int(jrange/deltah)
3736 3729 h_avgs = int(nhei_avg/mynhei)
3737 3730 nhei_avg = h_avgs*(mynhei-1)+mynhei
3738 3731
3739 3732 navgs = numpy.zeros(mynhei,dtype='float')
3740 3733 delta_h = numpy.zeros(mynhei,dtype='float')
3741 3734 range_aver = numpy.zeros(mynhei,dtype='float')
3742 3735 for ih in range( mynhei-1 ):
3743 3736 range_aver[ih] = numpy.sum(range1[h0_index+h_avgs*ih:h0_index+h_avgs*(ih+1)-0])/h_avgs
3744 3737 navgs[ih] = h_avgs
3745 3738 delta_h[ih] = deltah*h_avgs
3746 3739
3747 3740 range_aver[mynhei-1] = numpy.sum(range1[h0_index:h0_index+6*h_avgs-0])/(6*h_avgs)
3748 3741 navgs[mynhei-1] = 6*h_avgs
3749 3742 delta_h[mynhei-1] = deltah*6*h_avgs
3750 3743
3751 3744 wA = w[h0_index:h0_index+nhei_avg-0]
3752 3745 wA_err = w_err[h0_index:h0_index+nhei_avg-0]
3753 3746 for i in range(5) :
3754 3747 vals = wA[i*h_avgs:(i+1)*h_avgs-0]
3755 3748 errs = wA_err[i*h_avgs:(i+1)*h_avgs-0]
3756 3749 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
3757 3750 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
3758 3751 wA[6*h_avgs+i] = avg
3759 3752 wA_err[6*h_avgs+i] = sigma
3760 3753
3761 3754
3762 3755 vals = wA[0:6*h_avgs-0]
3763 3756 errs=wA_err[0:6*h_avgs-0]
3764 3757 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2)
3765 3758 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
3766 3759 wA[nhei_avg-1] = avg
3767 3760 wA_err[nhei_avg-1] = sigma
3768 3761
3769 3762 wA = wA[6*h_avgs:nhei_avg-0]
3770 3763 wA_err=wA_err[6*h_avgs:nhei_avg-0]
3771 3764 if my_nbeams == 2 :
3772 3765 uA = u[h0_index:h0_index+nhei_avg]
3773 3766 uA_err=u_err[h0_index:h0_index+nhei_avg]
3774 3767
3775 3768 for i in range(5) :
3776 3769 vals = uA[i*h_avgs:(i+1)*h_avgs-0]
3777 3770 errs=uA_err[i*h_avgs:(i+1)*h_avgs-0]
3778 3771 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
3779 3772 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
3780 3773 uA[6*h_avgs+i] = avg
3781 3774 uA_err[6*h_avgs+i]=sigma
3782 3775
3783 3776 vals = uA[0:6*h_avgs-0]
3784 3777 errs = uA_err[0:6*h_avgs-0]
3785 3778 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
3786 3779 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
3787 3780 uA[nhei_avg-1] = avg
3788 3781 uA_err[nhei_avg-1] = sigma
3789 3782 uA = uA[6*h_avgs:nhei_avg-0]
3790 3783 uA_err = uA_err[6*h_avgs:nhei_avg-0]
3791 3784 dataOut.drifts_avg = numpy.vstack((wA,uA))
3792 3785 if my_nbeams == 1: dataOut.drifts_avg = wA
3793 3786 tini=time.localtime(dataOut.utctime)
3794 3787 datefile= str(tini[0]).zfill(4)+str(tini[1]).zfill(2)+str(tini[2]).zfill(2)
3795 3788 nfile = fileDrifts+'/jro'+datefile+'drifts.txt'
3796 3789 f1 = open(nfile,'a')
3797 3790 datedriftavg=str(tini[0])+' '+str(tini[1])+' '+str(tini[2])+' '+str(tini[3])+' '+str(tini[4])
3798 3791 driftavgstr=str(dataOut.drifts_avg)
3799 3792 numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i')
3800 3793 numpy.savetxt(f1,dataOut.drifts_avg,fmt='%10.2f')
3801 3794 f1.close()
3802 3795
3803 3796 return dataOut
3804 3797
3805 3798 #--------------- Non Specular Meteor ----------------
3806 3799
3807 3800 class NonSpecularMeteorDetection(Operation):
3808 3801
3809 3802 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
3810 3803 data_acf = dataOut.data_pre[0]
3811 3804 data_ccf = dataOut.data_pre[1]
3812 3805 pairsList = dataOut.groupList[1]
3813 3806
3814 3807 lamb = dataOut.C/dataOut.frequency
3815 3808 tSamp = dataOut.ippSeconds*dataOut.nCohInt
3816 3809 paramInterval = dataOut.paramInterval
3817 3810
3818 3811 nChannels = data_acf.shape[0]
3819 3812 nLags = data_acf.shape[1]
3820 3813 nProfiles = data_acf.shape[2]
3821 3814 nHeights = dataOut.nHeights
3822 3815 nCohInt = dataOut.nCohInt
3823 3816 sec = numpy.round(nProfiles/dataOut.paramInterval)
3824 3817 heightList = dataOut.heightList
3825 3818 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
3826 3819 utctime = dataOut.utctime
3827 3820
3828 3821 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
3829 3822
3830 3823 #------------------------ SNR --------------------------------------
3831 3824 power = data_acf[:,0,:,:].real
3832 3825 noise = numpy.zeros(nChannels)
3833 3826 SNR = numpy.zeros(power.shape)
3834 3827 for i in range(nChannels):
3835 3828 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
3836 3829 SNR[i] = (power[i]-noise[i])/noise[i]
3837 3830 SNRm = numpy.nanmean(SNR, axis = 0)
3838 3831 SNRdB = 10*numpy.log10(SNR)
3839 3832
3840 3833 if mode == 'SA':
3841 3834 dataOut.groupList = dataOut.groupList[1]
3842 3835 nPairs = data_ccf.shape[0]
3843 3836 #---------------------- Coherence and Phase --------------------------
3844 3837 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
3845 3838 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
3846 3839
3847 3840 for p in range(nPairs):
3848 3841 ch0 = pairsList[p][0]
3849 3842 ch1 = pairsList[p][1]
3850 3843 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
3851 3844 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
3852 3845 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
3853 3846 coh = numpy.nanmax(coh1, axis = 0)
3854 3847 #---------------------- Radial Velocity ----------------------------
3855 3848 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
3856 3849 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
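# Pulse-pair estimate: the phase of the lag-1 ACF gives the mean Doppler shift, so
# v_r = lambda*arg(R(tau))/(4*pi*tau) with tau = tSamp (the same relation is used in the DBS branch).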
3857 3850
3858 3851 if allData:
3859 3852 boolMetFin = ~numpy.isnan(SNRm)
3860 3853 else:
3861 3854 #------------------------ Meteor mask ---------------------------------
3862 3855
3863 3856 #Coherence mask
3864 3857 boolMet1 = coh > 0.75
3865 3858 struc = numpy.ones((30,1))
3866 3859 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
3867 3860
3868 3861 #Derivative mask
3869 3862 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
3870 3863 boolMet2 = derPhase < 0.2
3871 3864 boolMet2 = ndimage.median_filter(boolMet2,size=5)
3872 3865 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
3873 3866 boolMetFin = boolMet1&boolMet2
3874 3867 #Creating data_param
3875 3868 coordMet = numpy.where(boolMetFin)
3876 3869
3877 3870 tmet = coordMet[0]
3878 3871 hmet = coordMet[1]
3879 3872
3880 3873 data_param = numpy.zeros((tmet.size, 6 + nPairs))
3881 3874 data_param[:,0] = utctime
3882 3875 data_param[:,1] = tmet
3883 3876 data_param[:,2] = hmet
3884 3877 data_param[:,3] = SNRm[tmet,hmet]
3885 3878 data_param[:,4] = velRad[tmet,hmet]
3886 3879 data_param[:,5] = coh[tmet,hmet]
3887 3880 data_param[:,6:] = phase[:,tmet,hmet].T
3888 3881
3889 3882 elif mode == 'DBS':
3890 3883 dataOut.groupList = numpy.arange(nChannels)
3891 3884
3892 3885 #Radial Velocities
3893 3886 phase = numpy.angle(data_acf[:,1,:,:])
3894 3887 velRad = phase*lamb/(4*numpy.pi*tSamp)
3895 3888
3896 3889 #Spectral width
3897 3890 acf1 = data_acf[:,1,:,:]
3898 3891 acf2 = data_acf[:,2,:,:]
3899 3892
3900 3893 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
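# Width estimator (assuming a Gaussian Doppler spectrum): |R(tau)| ~ S*exp(-8*pi^2*sigma_v^2*tau^2/lambda^2),
# so the ratio of the ACF magnitudes at lags tau and 2*tau gives
# sigma_v = lambda*sqrt(ln(R1/R2))/(2*sqrt(6)*pi*tau), which is the expression used above.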
3901 3894 if allData:
3902 3895 boolMetFin = ~numpy.isnan(SNRdB)
3903 3896 else:
3904 3897 #SNR
3905 3898 boolMet1 = (SNRdB>SNRthresh) #SNR mask
3906 3899 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
3907 3900
3908 3901 #Radial velocity
3909 3902 boolMet2 = numpy.abs(velRad) < 20
3910 3903 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
3911 3904
3912 3905 #Spectral Width
3913 3906 boolMet3 = spcWidth < 30
3914 3907 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
3915 3908 boolMetFin = boolMet1&boolMet2&boolMet3
3916 3909
3917 3910 #Creating data_param
3918 3911 coordMet = numpy.where(boolMetFin)
3919 3912
3920 3913 cmet = coordMet[0]
3921 3914 tmet = coordMet[1]
3922 3915 hmet = coordMet[2]
3923 3916
3924 3917 data_param = numpy.zeros((tmet.size, 7))
3925 3918 data_param[:,0] = utctime
3926 3919 data_param[:,1] = cmet
3927 3920 data_param[:,2] = tmet
3928 3921 data_param[:,3] = hmet
3929 3922 data_param[:,4] = SNR[cmet,tmet,hmet].T
3930 3923 data_param[:,5] = velRad[cmet,tmet,hmet].T
3931 3924 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
3932 3925
3933 3926 if len(data_param) == 0:
3934 3927 dataOut.flagNoData = True
3935 3928 else:
3936 3929 dataOut.data_param = data_param
3937 3930
3938 3931 def __erase_small(self, binArray, threshX, threshY):
3939 3932 labarray, numfeat = ndimage.measurements.label(binArray)
3940 3933 binArray1 = numpy.copy(binArray)
3941 3934
3942 3935 for i in range(1,numfeat + 1):
3943 3936 auxBin = (labarray==i)
3944 3937 auxSize = auxBin.sum()
3945 3938
3946 3939 x,y = numpy.where(auxBin)
3947 3940 widthX = x.max() - x.min()
3948 3941 widthY = y.max() - y.min()
3949 3942
3950 3943 #width X: 3 seg -> 12.5*3
3951 3944 #width Y:
3952 3945
3953 3946 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
3954 3947 binArray1[auxBin] = False
3955 3948
3956 3949 return binArray1
3957 3950
3958 3951 #--------------- Specular Meteor ----------------
3959 3952
3960 3953 class SMDetection(Operation):
3961 3954 '''
3962 3955 Function DetectMeteors()
3963 3956 Project developed with paper:
3964 3957 HOLDSWORTH ET AL. 2004
3965 3958
3966 3959 Input:
3967 3960 self.dataOut.data_pre
3968 3961
3969 3962 centerReceiverIndex: Index of the center receiver among the channels
3970 3963
3971 3964 hei_ref: Height reference for the Beacon signal extraction
3972 3965 tauindex:
3973 3966 predefinedPhaseShifts: Predefined phase offset for the voltage signals
3974 3967
3975 3968 cohDetection: Whether to use coherent detection or not
3976 3969 cohDet_timeStep: Coherent Detection calculation time step
3977 3970 cohDet_thresh: Coherent Detection phase threshold to correct phases
3978 3971
3979 3972 noise_timeStep: Noise calculation time step
3980 3973 noise_multiple: Noise multiple to define signal threshold
3981 3974
3982 3975 multDet_timeLimit: Multiple Detection Removal time limit in seconds
3983 3976 multDet_rangeLimit: Multiple Detection Removal range limit in km
3984 3977
3985 3978 phaseThresh: Maximum phase difference between receivers to be considered a meteor
3986 3979 SNRThresh: Minimum SNR threshold of the meteor signal to be considered a meteor
3987 3980
3988 3981 hmin: Minimum Height of the meteor to use it in the further wind estimations
3989 3982 hmax: Maximum Height of the meteor to use it in the further wind estimations
3990 3983 azimuth: Azimuth angle correction
3991 3984
3992 3985 Affected:
3993 3986 self.dataOut.data_param
3994 3987
3995 3988 Rejection Criteria (Errors):
3996 3989 0: No error; analysis OK
3997 3990 1: SNR < SNR threshold
3998 3991 2: angle of arrival (AOA) ambiguously determined
3999 3992 3: AOA estimate not feasible
4000 3993 4: Large difference in AOAs obtained from different antenna baselines
4001 3994 5: echo at start or end of time series
4002 3995 6: echo less than 5 examples long; too short for analysis
4003 3996 7: echo rise exceeds 0.3s
4004 3997 8: echo decay time less than twice rise time
4005 3998 9: large power level before echo
4006 3999 10: large power level after echo
4007 4000 11: poor fit to amplitude for estimation of decay time
4008 4001 12: poor fit to CCF phase variation for estimation of radial drift velocity
4009 4002 13: height unresolvable echo: not valid height within 70 to 110 km
4010 4003 14: height ambiguous echo: more than one possible height within 70 to 110 km
4011 4004 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
4012 4005 16: oscillatory echo, indicating event most likely not an underdense echo
4013 4006
4014 4007 17: phase difference in meteor Reestimation
4015 4008
4016 4009 Data Storage:
4017 4010 Meteors for Wind Estimation (8):
4018 4011 Utc Time | Range Height
4019 4012 Azimuth Zenith errorCosDir
4020 4013 VelRad errorVelRad
4021 4014 Phase0 Phase1 Phase2 Phase3
4022 4015 TypeError
4023 4016
4024 4017 '''
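# Summary of the processing flow implemented in run() below (following Holdsworth et al. 2004):
# 1) optional coherent detection to build the net power, 2) noise level and signal threshold,
# 3) meteor echo search per height, 4) removal of multiple detections of the same echo,
# 5) meteor re-estimation and decay-time fit, 6) radial velocity from the CCF phase variation,
# 7) AOA, height and wind parameters via SMOperations.getMeteorParams.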
4025 4018
4026 4019 def run(self, dataOut, hei_ref = None, tauindex = 0,
4027 4020 phaseOffsets = None,
4028 4021 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
4029 4022 noise_timeStep = 4, noise_multiple = 4,
4030 4023 multDet_timeLimit = 1, multDet_rangeLimit = 3,
4031 4024 phaseThresh = 20, SNRThresh = 5,
4032 4025 hmin = 50, hmax=150, azimuth = 0,
4033 4026 channelPositions = None) :
4034 4027
4035 4028
4036 4029 #Getting Pairslist
4037 4030 if channelPositions is None:
4038 4031 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
4039 4032 meteorOps = SMOperations()
4040 4033 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
4041 4034 heiRang = dataOut.heightList
4042 4035 #Get Beacon signal - No Beacon signal anymore
4043 4036 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
4044 4037 # see if the user put in pre defined phase shifts
4045 4038 voltsPShift = dataOut.data_pre.copy()
4046 4039
4047 4040 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
4048 4041
4049 4042 #Remove DC
4050 4043 voltsDC = numpy.mean(voltsPShift,1)
4051 4044 voltsDC = numpy.mean(voltsDC,1)
4052 4045 for i in range(voltsDC.shape[0]):
4053 4046 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
4054 4047
4055 4048 #Don't consider the last heights; they're used to calculate the hardware phase shift
4056 4049
4057 4050 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
4058 4051 #Coherent Detection
4059 4052 if cohDetection:
4060 4053 #use coherent detection to get the net power
4061 4054 cohDet_thresh = cohDet_thresh*numpy.pi/180
4062 4055 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
4063 4056
4064 4057 #Non-coherent detection!
4065 4058 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
4066 4059 #********** END OF COH/NON-COH POWER CALCULATION**********************
4067 4060
4068 4061 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
4069 4062 #Get noise
4070 4063 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
4071 4064 #Get signal threshold
4072 4065 signalThresh = noise_multiple*noise
4073 4066 #Meteor echoes detection
4074 4067 listMeteors = self.__findMeteors(powerNet, signalThresh)
4075 4068 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CALCULATION **********
4076 4069
4077 4070 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
4078 4071 #Parameters
4079 4072 heiRange = dataOut.heightList
4080 4073 rangeInterval = heiRange[1] - heiRange[0]
4081 4074 rangeLimit = multDet_rangeLimit/rangeInterval
4082 4075 timeLimit = multDet_timeLimit/dataOut.timeInterval
4083 4076 #Multiple detection removals
4084 4077 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
4085 4078 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
4086 4079
4087 4080 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
4088 4081 #Parameters
4089 4082 phaseThresh = phaseThresh*numpy.pi/180
4090 4083 thresh = [phaseThresh, noise_multiple, SNRThresh]
4091 4084 #Meteor reestimation (Errors N 1, 6, 12, 17)
4092 4085 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
4093 4086 #Estimation of decay times (Errors N 7, 8, 11)
4094 4087 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
4095 4088 #******************* END OF METEOR REESTIMATION *******************
4096 4089
4097 4090 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
4098 4091 #Calculating Radial Velocity (Error N 15)
4099 4092 radialStdThresh = 10
4100 4093 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
4101 4094
4102 4095 if len(listMeteors4) > 0:
4103 4096 #Setting New Array
4104 4097 date = dataOut.utctime
4105 4098 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
4106 4099
4107 4100 #Correcting phase offset
4108 4101 if phaseOffsets != None:
4109 4102 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
4110 4103 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
4111 4104
4112 4105 #Second Pairslist
4113 4106 pairsList = []
4114 4107 pairx = (0,1)
4115 4108 pairy = (2,3)
4116 4109 pairsList.append(pairx)
4117 4110 pairsList.append(pairy)
4118 4111
4119 4112 jph = numpy.array([0,0,0,0])
4120 4113 h = (hmin,hmax)
4121 4114 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
4122 4115 dataOut.data_param = arrayParameters
4123 4116
4124 4117 if arrayParameters is None:
4125 4118 dataOut.flagNoData = True
4126 4119 else:
4127 4120 dataOut.flagNoData = True
4128 4121
4129 4122 return
4130 4123
4131 4124 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
4132 4125
4133 4126 minIndex = min(newheis[0])
4134 4127 maxIndex = max(newheis[0])
4135 4128
4136 4129 voltage = voltage0[:,:,minIndex:maxIndex+1]
4137 4130 nLength = voltage.shape[1]/n
4138 4131 nMin = 0
4139 4132 nMax = 0
4140 4133 phaseOffset = numpy.zeros((len(pairslist),n))
4141 4134
4142 4135 for i in range(n):
4143 4136 nMax += nLength
4144 4137 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
4145 4138 phaseCCF = numpy.mean(phaseCCF, axis = 2)
4146 4139 phaseOffset[:,i] = phaseCCF.transpose()
4147 4140 nMin = nMax
4148 4141
4149 4142 #Remove Outliers
4150 4143 factor = 2
4151 4144 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
4152 4145 dw = numpy.std(wt,axis = 1)
4153 4146 dw = dw.reshape((dw.size,1))
4154 4147 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
4155 4148 phaseOffset[ind] = numpy.nan
4156 4149 phaseOffset = stats.nanmean(phaseOffset, axis=1)
4157 4150
4158 4151 return phaseOffset
4159 4152
4160 4153 def __shiftPhase(self, data, phaseShift):
4161 4154 #this will shift the phase of a complex number
4162 4155 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
4163 4156 return dataShifted
4164 4157
4165 4158 def __estimatePhaseDifference(self, array, pairslist):
4166 4159 nChannel = array.shape[0]
4167 4160 nHeights = array.shape[2]
4168 4161 numPairs = len(pairslist)
4169 4162 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
4170 4163
4171 4164 #Correct phases
4172 4165 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
4173 4166 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
4174 4167
4175 4168 if indDer[0].shape[0] > 0:
4176 4169 for i in range(indDer[0].shape[0]):
4177 4170 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
4178 4171 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
4179 4172
4180 4173 #Linear
4181 4174 phaseInt = numpy.zeros((numPairs,1))
4182 4175 angAllCCF = phaseCCF[:,[0,1,3,4],0]
4183 4176 for j in range(numPairs):
4184 4177 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
4185 4178 phaseInt[j] = fit[1]
4186 4179 #Phase Differences
4187 4180 phaseDiff = phaseInt - phaseCCF[:,2,:]
4188 4181 phaseArrival = phaseInt.reshape(phaseInt.size)
4189 4182
4190 4183 #Dealias
4191 4184 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
4192 4185
4193 4186 return phaseDiff, phaseArrival
4194 4187
4195 4188 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
4196 4189 #this function will run the coherent detection used in Holdsworth et al. 2004 and return the net power
4197 4190 #find the phase shifts of each channel over 1 second intervals
4198 4191 #only look at ranges below the beacon signal
4199 4192 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
4200 4193 numBlocks = int(volts.shape[1]/numProfPerBlock)
4201 4194 numHeights = volts.shape[2]
4202 4195 nChannel = volts.shape[0]
4203 4196 voltsCohDet = volts.copy()
4204 4197
4205 4198 pairsarray = numpy.array(pairslist)
4206 4199 indSides = pairsarray[:,1]
4207 4200 listBlocks = numpy.array_split(volts, numBlocks, 1)
4208 4201
4209 4202 startInd = 0
4210 4203 endInd = 0
4211 4204
4212 4205 for i in range(numBlocks):
4213 4206 startInd = endInd
4214 4207 endInd = endInd + listBlocks[i].shape[1]
4215 4208
4216 4209 arrayBlock = listBlocks[i]
4217 4210
4218 4211 #Estimate the Phase Difference
4219 4212 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
4220 4213 #Phase Difference RMS
4221 4214 arrayPhaseRMS = numpy.abs(phaseDiff)
4222 4215 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
4223 4216 indPhase = numpy.where(phaseRMSaux==4)
4224 4217 #Shifting
4225 4218 if indPhase[0].shape[0] > 0:
4226 4219 for j in range(indSides.size):
4227 4220 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
4228 4221 voltsCohDet[:,startInd:endInd,:] = arrayBlock
4229 4222
4230 4223 return voltsCohDet
4231 4224
4232 4225 def __calculateCCF(self, volts, pairslist ,laglist):
4233 4226
4234 4227 nHeights = volts.shape[2]
4235 4228 nPoints = volts.shape[1]
4236 4229 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
4237 4230
4238 4231 for i in range(len(pairslist)):
4239 4232 volts1 = volts[pairslist[i][0]]
4240 4233 volts2 = volts[pairslist[i][1]]
4241 4234
4242 4235 for t in range(len(laglist)):
4243 4236 idxT = laglist[t]
4244 4237 if idxT >= 0:
4245 4238 vStacked = numpy.vstack((volts2[idxT:,:],
4246 4239 numpy.zeros((idxT, nHeights),dtype='complex')))
4247 4240 else:
4248 4241 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
4249 4242 volts2[:(nPoints + idxT),:]))
4250 4243 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
4251 4244
4252 4245 vStacked = None
4253 4246 return voltsCCF
4254 4247
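# Illustrative sketch (not part of the processing chain): for each pair and lag,
# the CCF accumulates conj(v1[t])*v2[t+lag] over profiles, with the shifted
# series zero-padded at the end. The 1-D demo below is hypothetical.
#
#   import numpy
#   v1 = numpy.exp(1j*0.1*numpy.arange(8))
#   v2 = numpy.exp(1j*(0.1*numpy.arange(8) + 0.4))   # same signal, phase offset 0.4 rad
#   lag = 1
#   v2shift = numpy.concatenate((v2[lag:], numpy.zeros(lag, dtype='complex')))
#   ccf = numpy.sum(numpy.conjugate(v1)*v2shift)
#   # numpy.angle(ccf) gives the mean phase difference between the series at this lag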
4255 4248 def __getNoise(self, power, timeSegment, timeInterval):
4256 4249 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
4257 4250 numBlocks = int(power.shape[0]/numProfPerBlock)
4258 4251 numHeights = power.shape[1]
4259 4252
4260 4253 listPower = numpy.array_split(power, numBlocks, 0)
4261 4254 noise = numpy.zeros((power.shape[0], power.shape[1]))
4262 4255 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
4263 4256
4264 4257 startInd = 0
4265 4258 endInd = 0
4266 4259
4267 4260 for i in range(numBlocks): #loop over the split blocks
4268 4261 startInd = endInd
4269 4262 endInd = endInd + listPower[i].shape[0]
4270 4263
4271 4264 arrayBlock = listPower[i]
4272 4265 noiseAux = numpy.mean(arrayBlock, 0)
4273 4266 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
4274 4267
4275 4268 noiseAux1 = numpy.mean(arrayBlock)
4276 4269 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
4277 4270
4278 4271 return noise, noise1
4279 4272
4280 4273 def __findMeteors(self, power, thresh):
4281 4274 nProf = power.shape[0]
4282 4275 nHeights = power.shape[1]
4283 4276 listMeteors = []
4284 4277
4285 4278 for i in range(nHeights):
4286 4279 powerAux = power[:,i]
4287 4280 threshAux = thresh[:,i]
4288 4281
4289 4282 indUPthresh = numpy.where(powerAux > threshAux)[0]
4290 4283 indDNthresh = numpy.where(powerAux <= threshAux)[0]
4291 4284
4292 4285 j = 0
4293 4286
4294 4287 while (j < indUPthresh.size - 2):
4295 4288 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
4296 4289 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
4297 4290 indDNthresh = indDNthresh[indDNAux]
4298 4291
4299 4292 if (indDNthresh.size > 0):
4300 4293 indEnd = indDNthresh[0] - 1
4301 4294 indInit = indUPthresh[j]
4302 4295
4303 4296 meteor = powerAux[indInit:indEnd + 1]
4304 4297 indPeak = meteor.argmax() + indInit
4305 4298 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
4306 4299
4307 4300 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #CHECK THIS
4308 4301 j = numpy.where(indUPthresh == indEnd)[0] + 1
4309 4302 else: j+=1
4310 4303 else: j+=1
4311 4304
4312 4305 return listMeteors
4313 4306
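# Illustrative sketch (not part of the processing chain): meteors are found as
# runs of profiles whose power stays above a per-height threshold; the start,
# peak and end indices of each run are stored. A simple 1-D run finder with
# hypothetical data:
#
#   import numpy
#   power_demo = numpy.array([1., 1., 9., 12., 10., 8., 1., 1.])
#   above = power_demo > 5.0
#   edges = numpy.diff(above.astype(int))
#   starts = numpy.where(edges == 1)[0] + 1          # [2]
#   ends = numpy.where(edges == -1)[0]               # [5]
#   # one event from index 2 to 5; its peak is power_demo[2:6].argmax() + 2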
4314 4307 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
4315 4308
4316 4309 arrayMeteors = numpy.asarray(listMeteors)
4317 4310 listMeteors1 = []
4318 4311
4319 4312 while arrayMeteors.shape[0] > 0:
4320 4313 FLAs = arrayMeteors[:,4]
4321 4314 maxFLA = FLAs.argmax()
4322 4315 listMeteors1.append(arrayMeteors[maxFLA,:])
4323 4316
4324 4317 MeteorInitTime = arrayMeteors[maxFLA,1]
4325 4318 MeteorEndTime = arrayMeteors[maxFLA,3]
4326 4319 MeteorHeight = arrayMeteors[maxFLA,0]
4327 4320
4328 4321 #Check neighborhood
4329 4322 maxHeightIndex = MeteorHeight + rangeLimit
4330 4323 minHeightIndex = MeteorHeight - rangeLimit
4331 4324 minTimeIndex = MeteorInitTime - timeLimit
4332 4325 maxTimeIndex = MeteorEndTime + timeLimit
4333 4326
4334 4327 #Check Heights
4335 4328 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
4336 4329 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
4337 4330 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
4338 4331
4339 4332 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
4340 4333
4341 4334 return listMeteors1
4342 4335
4343 4336 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
4344 4337 numHeights = volts.shape[2]
4345 4338 nChannel = volts.shape[0]
4346 4339
4347 4340 thresholdPhase = thresh[0]
4348 4341 thresholdNoise = thresh[1]
4349 4342 thresholdDB = float(thresh[2])
4350 4343
4351 4344 thresholdDB1 = 10**(thresholdDB/10)
4352 4345 pairsarray = numpy.array(pairslist)
4353 4346 indSides = pairsarray[:,1]
4354 4347
4355 4348 pairslist1 = list(pairslist)
4356 4349 pairslist1.append((0,1))
4357 4350 pairslist1.append((3,4))
4358 4351
4359 4352 listMeteors1 = []
4360 4353 listPowerSeries = []
4361 4354 listVoltageSeries = []
4362 4355 #volts holds the raw voltage data
4363 4356
4364 4357 if frequency == 30e6:
4365 4358 timeLag = 45*10**-3
4366 4359 else:
4367 4360 timeLag = 15*10**-3
4368 4361 lag = numpy.ceil(timeLag/timeInterval)
4369 4362
4370 4363 for i in range(len(listMeteors)):
4371 4364
4372 4365 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
4373 4366 meteorAux = numpy.zeros(16)
4374 4367
4375 4368 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
4376 4369 mHeight = listMeteors[i][0]
4377 4370 mStart = listMeteors[i][1]
4378 4371 mPeak = listMeteors[i][2]
4379 4372 mEnd = listMeteors[i][3]
4380 4373
4381 4374 #get the volt data between the start and end times of the meteor
4382 4375 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
4383 4376 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
4384 4377
4385 4378 #3.6. Phase Difference estimation
4386 4379 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
4387 4380
4388 4381 #3.7. Phase difference removal & meteor start, peak and end times reestimated
4389 4382 #meteorVolts0.- all Channels, all Profiles
4390 4383 meteorVolts0 = volts[:,:,mHeight]
4391 4384 meteorThresh = noise[:,mHeight]*thresholdNoise
4392 4385 meteorNoise = noise[:,mHeight]
4393 4386 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
4394 4387 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
4395 4388
4396 4389 #Times reestimation
4397 4390 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
4398 4391 if mStart1.size > 0:
4399 4392 mStart1 = mStart1[-1] + 1
4400 4393
4401 4394 else:
4402 4395 mStart1 = mPeak
4403 4396
4404 4397 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
4405 4398 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
4406 4399 if mEndDecayTime1.size == 0:
4407 4400 mEndDecayTime1 = powerNet0.size
4408 4401 else:
4409 4402 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
4410 4403
4411 4404 #meteorVolts1.- all Channels, from start to end
4412 4405 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
4413 4406 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
4414 4407 if meteorVolts2.shape[1] == 0:
4415 4408 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
4416 4409 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
4417 4410 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
4418 4411 ##################### END PARAMETERS REESTIMATION #########################
4419 4412
4420 4413 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
4421 4414 if meteorVolts2.shape[1] > 0:
4422 4415 #Phase Difference re-estimation
4423 4416 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
4424 4417 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
4425 4418 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
4426 4419 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
4427 4420
4428 4421 #Phase Difference RMS
4429 4422 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
4430 4423 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
4431 4424 #Data from Meteor
4432 4425 mPeak1 = powerNet1.argmax() + mStart1
4433 4426 mPeakPower1 = powerNet1.max()
4434 4427 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
4435 4428 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
4436 4429 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
4437 4430 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
4438 4431 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
4439 4432 #Vectorize
4440 4433 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
4441 4434 meteorAux[7:11] = phaseDiffint[0:4]
4442 4435
4443 4436 #Rejection Criterions
4444 4437 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
4445 4438 meteorAux[-1] = 17
4446 4439 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
4447 4440 meteorAux[-1] = 1
4448 4441
4449 4442
4450 4443 else:
4451 4444 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
4452 4445 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
4453 4446 PowerSeries = 0
4454 4447
4455 4448 listMeteors1.append(meteorAux)
4456 4449 listPowerSeries.append(PowerSeries)
4457 4450 listVoltageSeries.append(meteorVolts1)
4458 4451
4459 4452 return listMeteors1, listPowerSeries, listVoltageSeries
4460 4453
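# Illustrative sketch (not part of the processing chain): the SNR stored per
# meteor is (total power minus total noise) over total noise across the
# re-estimated start..end profiles, and it is compared against the linear
# threshold 10**(thresholdDB/10). Hypothetical numbers:
#
#   import numpy
#   powerNet_demo = numpy.array([20., 40., 30.])     # power summed over channels
#   noise_demo = numpy.array([10., 10., 10.])
#   snr_demo = (powerNet_demo.sum() - noise_demo.sum())/noise_demo.sum()   # 2.0
#   snr_db = 10*numpy.log10(snr_demo)                # ~3 dB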
4461 4454 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
4462 4455
4463 4456 threshError = 10
4464 4457 #Depending if it is 30 or 50 MHz
4465 4458 if frequency == 30e6:
4466 4459 timeLag = 45*10**-3
4467 4460 else:
4468 4461 timeLag = 15*10**-3
4469 4462 lag = numpy.ceil(timeLag/timeInterval)
4470 4463
4471 4464 listMeteors1 = []
4472 4465
4473 4466 for i in range(len(listMeteors)):
4474 4467 meteorPower = listPower[i]
4475 4468 meteorAux = listMeteors[i]
4476 4469
4477 4470 if meteorAux[-1] == 0:
4478 4471
4479 4472 try:
4480 4473 indmax = meteorPower.argmax()
4481 4474 indlag = indmax + lag
4482 4475
4483 4476 y = meteorPower[indlag:]
4484 4477 x = numpy.arange(0, y.size)*timeLag
4485 4478
4486 4479 #first guess
4487 4480 a = y[0]
4488 4481 tau = timeLag
4489 4482 #exponential fit
4490 4483 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
4491 4484 y1 = self.__exponential_function(x, *popt)
4492 4485 #error estimation
4493 4486 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
4494 4487
4495 4488 decayTime = popt[1]
4496 4489 riseTime = indmax*timeInterval
4497 4490 meteorAux[11:13] = [decayTime, error]
4498 4491
4499 4492 #Table items 7, 8 and 11
4500 4493 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
4501 4494 meteorAux[-1] = 7
4502 4495 elif (decayTime < 2*riseTime): #Number 8: Echo decay time less than twice the rise time
4503 4496 meteorAux[-1] = 8
4504 4497 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
4505 4498 meteorAux[-1] = 11
4506 4499
4507 4500
4508 4501 except:
4509 4502 meteorAux[-1] = 11
4510 4503
4511 4504
4512 4505 listMeteors1.append(meteorAux)
4513 4506
4514 4507 return listMeteors1
4515 4508
4516 4509 #Exponential Function
4517 4510
4518 4511 def __exponential_function(self, x, a, tau):
4519 4512 y = a*numpy.exp(-x/tau)
4520 4513 return y
4521 4514
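# Illustrative sketch (not part of the processing chain): the decay time is the
# tau of an a*exp(-x/tau) fit to the post-peak power, and the fit quality is the
# residual sum of squares normalized by the sample variance and degrees of
# freedom. The synthetic data below are hypothetical.
#
#   import numpy
#   from scipy import optimize
#   def expdecay(x, a, tau):
#       return a*numpy.exp(-x/tau)
#   x = numpy.arange(20)*0.01                        # seconds
#   y = expdecay(x, 5.0, 0.05) + 0.01*numpy.random.randn(x.size)
#   popt, pcov = optimize.curve_fit(expdecay, x, y, p0=[y[0], 0.045])
#   err = numpy.sum((y - expdecay(x, *popt))**2)/(numpy.var(y)*(y.size - popt.size))
#   # popt[1] ~ 0.05 s is the decay time; err above the threshold flags error 11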
4522 4515 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
4523 4516
4524 4517 pairslist1 = list(pairslist)
4525 4518 pairslist1.append((0,1))
4526 4519 pairslist1.append((3,4))
4527 4520 numPairs = len(pairslist1)
4528 4521 #Time Lag
4529 4522 timeLag = 45*10**-3
4530 4523 c = 3e8
4531 4524 lag = numpy.ceil(timeLag/timeInterval)
4532 4525 freq = 30e6
4533 4526
4534 4527 listMeteors1 = []
4535 4528
4536 4529 for i in range(len(listMeteors)):
4537 4530 meteorAux = listMeteors[i]
4538 4531 if meteorAux[-1] == 0:
4539 4532 mStart = listMeteors[i][1]
4540 4533 mPeak = listMeteors[i][2]
4541 4534 mLag = mPeak - mStart + lag
4542 4535
4543 4536 #get the volt data between the start and end times of the meteor
4544 4537 meteorVolts = listVolts[i]
4545 4538 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
4546 4539
4547 4540 #Get CCF
4548 4541 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
4549 4542
4550 4543 #Method 2
4551 4544 slopes = numpy.zeros(numPairs)
4552 4545 time = numpy.array([-2,-1,1,2])*timeInterval
4553 4546 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
4554 4547
4555 4548 #Correct phases
4556 4549 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
4557 4550 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
4558 4551
4559 4552 if indDer[0].shape[0] > 0:
4560 4553 for i in range(indDer[0].shape[0]):
4561 4554 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i]])
4562 4555 angAllCCF[indDer[0][i],indDer[1][i]+1:] += signo*2*numpy.pi
4563 4556
4564 4557 for j in range(numPairs):
4565 4558 fit = stats.linregress(time, angAllCCF[j,:])
4566 4559 slopes[j] = fit[0]
4567 4560
4568 4561 #Remove Outlier
4569 4562 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
4570 4563 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
4571 4564 meteorAux[-2] = radialError
4572 4565 meteorAux[-3] = radialVelocity
4573 4566
4574 4567 #Setting Error
4575 4568 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
4576 4569 if numpy.abs(radialVelocity) > 200:
4577 4570 meteorAux[-1] = 15
4578 4571 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
4579 4572 elif radialError > radialStdThresh:
4580 4573 meteorAux[-1] = 12
4581 4574
4582 4575 listMeteors1.append(meteorAux)
4583 4576 return listMeteors1
4584 4577
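# Illustrative sketch (not part of the processing chain): the radial velocity
# follows from the slope of the CCF phase versus lag time,
# v = -(lambda/(4*pi))*dphi/dt, averaged over antenna pairs; the standard
# deviation over pairs is the rejection error. Hypothetical numbers:
#
#   import numpy
#   c, freq = 3e8, 30e6                              # lambda = c/freq = 10 m
#   slope_demo = 40.0                                # phase slope in rad/s
#   v_radial = -slope_demo*(0.25/numpy.pi)*(c/freq)  # ~ -31.8 m/s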
4585 4578 def __setNewArrays(self, listMeteors, date, heiRang):
4586 4579
4587 4580 #New arrays
4588 4581 arrayMeteors = numpy.array(listMeteors)
4589 4582 arrayParameters = numpy.zeros((len(listMeteors), 13))
4590 4583
4591 4584 #Date inclusion
4592 4585 arrayDate = numpy.tile(date, (len(listMeteors)))
4593 4586
4594 4587 #Meteor array
4595 4588 #Parameters Array
4596 4589 arrayParameters[:,0] = arrayDate #Date
4597 4590 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
4598 4591 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
4599 4592 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
4600 4593 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
4601 4594
4602 4595
4603 4596 return arrayParameters
4604 4597
4605 4598 class CorrectSMPhases(Operation):
4606 4599
4607 4600 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
4608 4601
4609 4602 arrayParameters = dataOut.data_param
4610 4603 pairsList = []
4611 4604 pairx = (0,1)
4612 4605 pairy = (2,3)
4613 4606 pairsList.append(pairx)
4614 4607 pairsList.append(pairy)
4615 4608 jph = numpy.zeros(4)
4616 4609
4617 4610 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
4618 4611 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
4619 4612 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
4620 4613
4621 4614 meteorOps = SMOperations()
4622 4615 if channelPositions is None:
4623 4616 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
4624 4617 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella (star) configuration
4625 4618
4626 4619 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
4627 4620 h = (hmin,hmax)
4628 4621
4629 4622 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
4630 4623
4631 4624 dataOut.data_param = arrayParameters
4632 4625 return
4633 4626
4634 4627 class SMPhaseCalibration(Operation):
4635 4628
4636 4629 __buffer = None
4637 4630
4638 4631 __initime = None
4639 4632
4640 4633 __dataReady = False
4641 4634
4642 4635 __isConfig = False
4643 4636
4644 4637 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
4645 4638
4646 4639 dataTime = currentTime + paramInterval
4647 4640 deltaTime = dataTime - initTime
4648 4641
4649 4642 if deltaTime >= outputInterval or deltaTime < 0:
4650 4643 return True
4651 4644
4652 4645 return False
4653 4646
4654 4647 def __getGammas(self, pairs, d, phases):
4655 4648 gammas = numpy.zeros(2)
4656 4649
4657 4650 for i in range(len(pairs)):
4658 4651
4659 4652 pairi = pairs[i]
4660 4653
4661 4654 phip3 = phases[:,pairi[0]]
4662 4655 d3 = d[pairi[0]]
4663 4656 phip2 = phases[:,pairi[1]]
4664 4657 d2 = d[pairi[1]]
4665 4658 #Calculating gamma
4666 4659 jgamma = -phip2*d3/d2 - phip3
4667 4660 jgamma = numpy.angle(numpy.exp(1j*jgamma))
4668 4661
4669 4662 #Revised distribution
4670 4663 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
4671 4664
4672 4665 #Histogram
4673 4666 nBins = 64
4674 4667 rmin = -0.5*numpy.pi
4675 4668 rmax = 0.5*numpy.pi
4676 4669 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
4677 4670
4678 4671 meteorsY = phaseHisto[0]
4679 4672 phasesX = phaseHisto[1][:-1]
4680 4673 width = phasesX[1] - phasesX[0]
4681 4674 phasesX += width/2
4682 4675
4683 4676 #Gaussian approximation
4684 4677 bpeak = meteorsY.argmax()
4685 4678 peak = meteorsY.max()
4686 4679 jmin = bpeak - 5
4687 4680 jmax = bpeak + 5 + 1
4688 4681
4689 4682 if jmin<0:
4690 4683 jmin = 0
4691 4684 jmax = 6
4692 4685 elif jmax > meteorsY.size:
4693 4686 jmin = meteorsY.size - 6
4694 4687 jmax = meteorsY.size
4695 4688
4696 4689 x0 = numpy.array([peak,bpeak,50])
4697 4690 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
4698 4691
4699 4692 #Gammas
4700 4693 gammas[i] = coeff[0][1]
4701 4694
4702 4695 return gammas
4703 4696
4704 4697 def __residualFunction(self, coeffs, y, t):
4705 4698
4706 4699 return y - self.__gauss_function(t, coeffs)
4707 4700
4708 4701 def __gauss_function(self, t, coeffs):
4709 4702
4710 4703 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
4711 4704
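# Illustrative sketch (not part of the processing chain): each gamma offset is
# found by histogramming the candidate phases and least-squares fitting the
# Gaussian peak*exp(-0.5*((t-center)/width)**2); the fitted center is the
# offset. The synthetic histogram below is hypothetical.
#
#   import numpy
#   from scipy import optimize
#   def gauss(t, c):
#       return c[0]*numpy.exp(-0.5*((t - c[1])/c[2])**2)
#   def residual(c, y, t):
#       return y - gauss(t, c)
#   t = numpy.linspace(-1.0, 1.0, 32)
#   y = gauss(t, [10.0, 0.2, 0.3])
#   coeff = optimize.leastsq(residual, numpy.array([8.0, 0.0, 0.5]), args=(y, t))
#   # coeff[0][1] ~ 0.2 recovers the peak position (the gamma offset)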
4712 4705 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
4713 4706 meteorOps = SMOperations()
4714 4707 nchan = 4
4715 4708 pairx = pairsList[0] #x es 0
4716 4709 pairy = pairsList[1] #y es 1
4717 4710 center_xangle = 0
4718 4711 center_yangle = 0
4719 4712 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
4720 4713 ntimes = len(range_angle)
4721 4714
4722 4715 nstepsx = 20
4723 4716 nstepsy = 20
4724 4717
4725 4718 for iz in range(ntimes):
4726 4719 min_xangle = -range_angle[iz]/2 + center_xangle
4727 4720 max_xangle = range_angle[iz]/2 + center_xangle
4728 4721 min_yangle = -range_angle[iz]/2 + center_yangle
4729 4722 max_yangle = range_angle[iz]/2 + center_yangle
4730 4723
4731 4724 inc_x = (max_xangle-min_xangle)/nstepsx
4732 4725 inc_y = (max_yangle-min_yangle)/nstepsy
4733 4726
4734 4727 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
4735 4728 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
4736 4729 penalty = numpy.zeros((nstepsx,nstepsy))
4737 4730 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
4738 4731 jph = numpy.zeros(nchan)
4739 4732
4740 4733 # Iterations looking for the offset
4741 4734 for iy in range(int(nstepsy)):
4742 4735 for ix in range(int(nstepsx)):
4743 4736 d3 = d[pairsList[1][0]]
4744 4737 d2 = d[pairsList[1][1]]
4745 4738 d5 = d[pairsList[0][0]]
4746 4739 d4 = d[pairsList[0][1]]
4747 4740
4748 4741 alp2 = alpha_y[iy] #gamma 1
4749 4742 alp4 = alpha_x[ix] #gamma 0
4750 4743
4751 4744 alp3 = -alp2*d3/d2 - gammas[1]
4752 4745 alp5 = -alp4*d5/d4 - gammas[0]
4753 4746 jph[pairsList[0][1]] = alp4
4754 4747 jph[pairsList[0][0]] = alp5
4755 4748 jph[pairsList[1][0]] = alp3
4756 4749 jph[pairsList[1][1]] = alp2
4757 4750 jph_array[:,ix,iy] = jph
4758 4751 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
4759 4752 error = meteorsArray1[:,-1]
4760 4753 ind1 = numpy.where(error==0)[0]
4761 4754 penalty[ix,iy] = ind1.size
4762 4755
4763 4756 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
4764 4757 phOffset = jph_array[:,i,j]
4765 4758
4766 4759 center_xangle = phOffset[pairx[1]]
4767 4760 center_yangle = phOffset[pairy[1]]
4768 4761
4769 4762 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
4770 4763 phOffset = phOffset*180/numpy.pi
4771 4764 return phOffset
4772 4765
4773 4766
4774 4767 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
4775 4768
4776 4769 dataOut.flagNoData = True
4777 4770 self.__dataReady = False
4778 4771 dataOut.outputInterval = nHours*3600
4779 4772
4780 4773 if self.__isConfig == False:
4781 4774 #Get initial UTC time
4782 4775 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
4783 4776 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
4784 4777
4785 4778 self.__isConfig = True
4786 4779
4787 4780 if self.__buffer is None:
4788 4781 self.__buffer = dataOut.data_param.copy()
4789 4782
4790 4783 else:
4791 4784 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
4792 4785
4793 4786 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
4794 4787
4795 4788 if self.__dataReady:
4796 4789 dataOut.utctimeInit = self.__initime
4797 4790 self.__initime += dataOut.outputInterval #to erase time offset
4798 4791
4799 4792 freq = dataOut.frequency
4800 4793 c = dataOut.C #m/s
4801 4794 lamb = c/freq
4802 4795 k = 2*numpy.pi/lamb
4803 4796 azimuth = 0
4804 4797 h = (hmin, hmax)
4805 4798
4806 4799 if channelPositions is None:
4807 4800 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella (star) configuration
4808 4801 meteorOps = SMOperations()
4809 4802 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
4810 4803
4811 4804 #Checking correct order of pairs
4812 4805 pairs = []
4813 4806 if distances[1] > distances[0]:
4814 4807 pairs.append((1,0))
4815 4808 else:
4816 4809 pairs.append((0,1))
4817 4810
4818 4811 if distances[3] > distances[2]:
4819 4812 pairs.append((3,2))
4820 4813 else:
4821 4814 pairs.append((2,3))
4822 4815
4823 4816 meteorsArray = self.__buffer
4824 4817 error = meteorsArray[:,-1]
4825 4818 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
4826 4819 ind1 = numpy.where(boolError)[0]
4827 4820 meteorsArray = meteorsArray[ind1,:]
4828 4821 meteorsArray[:,-1] = 0
4829 4822 phases = meteorsArray[:,8:12]
4830 4823
4831 4824 #Calculate Gammas
4832 4825 gammas = self.__getGammas(pairs, distances, phases)
4833 4826 #Calculate Phases
4834 4827 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
4835 4828 phasesOff = phasesOff.reshape((1,phasesOff.size))
4836 4829 dataOut.data_output = -phasesOff
4837 4830 dataOut.flagNoData = False
4838 4831 self.__buffer = None
4839 4832
4840 4833
4841 4834 return
4842 4835
4843 4836 class SMOperations():
4844 4837
4845 4838 def __init__(self):
4846 4839
4847 4840 return
4848 4841
4849 4842 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
4850 4843
4851 4844 arrayParameters = arrayParameters0.copy()
4852 4845 hmin = h[0]
4853 4846 hmax = h[1]
4854 4847
4855 4848 #Calculate AOA (Error N 3, 4)
4856 4849 #JONES ET AL. 1998
4857 4850 AOAthresh = numpy.pi/8
4858 4851 error = arrayParameters[:,-1]
4859 4852 phases = -arrayParameters[:,8:12] + jph
4860 4853 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
4861 4854
4862 4855 #Calculate Heights (Error N 13 and 14)
4863 4856 error = arrayParameters[:,-1]
4864 4857 Ranges = arrayParameters[:,1]
4865 4858 zenith = arrayParameters[:,4]
4866 4859 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
4867 4860
4868 4861 #----------------------- Get Final data ------------------------------------
4869 4862
4870 4863 return arrayParameters
4871 4864
4872 4865 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
4873 4866
4874 4867 arrayAOA = numpy.zeros((phases.shape[0],3))
4875 4868 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
4876 4869
4877 4870 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
4878 4871 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
4879 4872 arrayAOA[:,2] = cosDirError
4880 4873
4881 4874 azimuthAngle = arrayAOA[:,0]
4882 4875 zenithAngle = arrayAOA[:,1]
4883 4876
4884 4877 #Setting Error
4885 4878 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
4886 4879 error[indError] = 0
4887 4880 #Number 3: AOA not feasible
4888 4881 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
4889 4882 error[indInvalid] = 3
4890 4883 #Number 4: Large difference in AOAs obtained from different antenna baselines
4891 4884 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
4892 4885 error[indInvalid] = 4
4893 4886 return arrayAOA, error
4894 4887
4895 4888 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
4896 4889
4897 4890 #Initializing some variables
4898 4891 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
4899 4892 ang_aux = ang_aux.reshape(1,ang_aux.size)
4900 4893
4901 4894 cosdir = numpy.zeros((arrayPhase.shape[0],2))
4902 4895 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
4903 4896
4904 4897
4905 4898 for i in range(2):
4906 4899 ph0 = arrayPhase[:,pairsList[i][0]]
4907 4900 ph1 = arrayPhase[:,pairsList[i][1]]
4908 4901 d0 = distances[pairsList[i][0]]
4909 4902 d1 = distances[pairsList[i][1]]
4910 4903
4911 4904 ph0_aux = ph0 + ph1
4912 4905 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
4913 4906 #First Estimation
4914 4907 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
4915 4908
4916 4909 #Most-Accurate Second Estimation
4917 4910 phi1_aux = ph0 - ph1
4918 4911 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
4919 4912 #Direction Cosine 1
4920 4913 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
4921 4914
4922 4915 #Searching the correct Direction Cosine
4923 4916 cosdir0_aux = cosdir0[:,i]
4924 4917 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
4925 4918 #Minimum Distance
4926 4919 cosDiff = (cosdir1 - cosdir0_aux)**2
4927 4920 indcos = cosDiff.argmin(axis = 1)
4928 4921 #Saving Value obtained
4929 4922 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
4930 4923
4931 4924 return cosdir0, cosdir
4932 4925
4933 4926 def __calculateAOA(self, cosdir, azimuth):
4934 4927 cosdirX = cosdir[:,0]
4935 4928 cosdirY = cosdir[:,1]
4936 4929
4937 4930 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
4938 4931 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
4939 4932 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
4940 4933
4941 4934 return angles
4942 4935
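# Illustrative sketch (not part of the processing chain): given direction
# cosines (cx, cy), the zenith angle is arccos(sqrt(1 - cx**2 - cy**2)) and the
# azimuth is arctan2(cx, cy) plus the array azimuth (0 deg north, 90 deg east).
# The values below are hypothetical.
#
#   import numpy
#   cx, cy = 0.2, 0.1
#   zenith = numpy.arccos(numpy.sqrt(1 - cx**2 - cy**2))*180/numpy.pi   # ~12.9 deg
#   azim = numpy.arctan2(cx, cy)*180/numpy.pi + 45.0                    # ~108.4 deg (45 deg array rotation assumed)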
4943 4936 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
4944 4937
4945 4938 Ramb = 375 #Ramb = c/(2*PRF)
4946 4939 Re = 6371 #Earth Radius
4947 4940 heights = numpy.zeros(Ranges.shape)
4948 4941
4949 4942 R_aux = numpy.array([0,1,2])*Ramb
4950 4943 R_aux = R_aux.reshape(1,R_aux.size)
4951 4944
4952 4945 Ranges = Ranges.reshape(Ranges.size,1)
4953 4946
4954 4947 Ri = Ranges + R_aux
4955 4948 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
4956 4949
4957 4950 #Check if there is a height between 70 and 110 km
4958 4951 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
4959 4952 ind_h = numpy.where(h_bool == 1)[0]
4960 4953
4961 4954 hCorr = hi[ind_h, :]
4962 4955 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
4963 4956
4964 4957 hCorr = hi[ind_hCorr][:len(ind_h)]
4965 4958 heights[ind_h] = hCorr
4966 4959
4967 4960 #Setting Error
4968 4961 #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
4969 4962 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
4970 4963 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
4971 4964 error[indError] = 0
4972 4965 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
4973 4966 error[indInvalid2] = 14
4974 4967 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
4975 4968 error[indInvalid1] = 13
4976 4969
4977 4970 return heights, error
4978 4971
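# Illustrative sketch (not part of the processing chain): each measured range is
# de-aliased with multiples of Ramb = c/(2*PRF) and converted to height with
# h = sqrt(Re**2 + R**2 + 2*Re*R*cos(zenith)) - Re; only candidates inside
# (minHeight, maxHeight) survive, otherwise errors 13/14 are set. Hypothetical
# numbers below.
#
#   import numpy
#   Re, Ramb = 6371.0, 375.0                         # km
#   zenith = 30.0*numpy.pi/180
#   R = 120.0 + numpy.array([0, 1, 2])*Ramb          # aliased range candidates, km
#   h = numpy.sqrt(Re**2 + R**2 + 2*Re*R*numpy.cos(zenith)) - Re
#   # exactly one candidate (~104 km) falls in 70-110 km, so it is accepted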
4979 4972 def getPhasePairs(self, channelPositions):
4980 4973 chanPos = numpy.array(channelPositions)
4981 4974 listOper = list(itertools.combinations(list(range(5)),2))
4982 4975
4983 4976 distances = numpy.zeros(4)
4984 4977 axisX = []
4985 4978 axisY = []
4986 4979 distX = numpy.zeros(3)
4987 4980 distY = numpy.zeros(3)
4988 4981 ix = 0
4989 4982 iy = 0
4990 4983
4991 4984 pairX = numpy.zeros((2,2))
4992 4985 pairY = numpy.zeros((2,2))
4993 4986
4994 4987 for i in range(len(listOper)):
4995 4988 pairi = listOper[i]
4996 4989
4997 4990 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
4998 4991
4999 4992 if posDif[0] == 0:
5000 4993 axisY.append(pairi)
5001 4994 distY[iy] = posDif[1]
5002 4995 iy += 1
5003 4996 elif posDif[1] == 0:
5004 4997 axisX.append(pairi)
5005 4998 distX[ix] = posDif[0]
5006 4999 ix += 1
5007 5000
5008 5001 for i in range(2):
5009 5002 if i==0:
5010 5003 dist0 = distX
5011 5004 axis0 = axisX
5012 5005 else:
5013 5006 dist0 = distY
5014 5007 axis0 = axisY
5015 5008
5016 5009 side = numpy.argsort(dist0)[:-1]
5017 5010 axis0 = numpy.array(axis0)[side,:]
5018 5011 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
5019 5012 axis1 = numpy.unique(numpy.reshape(axis0,4))
5020 5013 side = axis1[axis1 != chanC]
5021 5014 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
5022 5015 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
5023 5016 if diff1<0:
5024 5017 chan2 = side[0]
5025 5018 d2 = numpy.abs(diff1)
5026 5019 chan1 = side[1]
5027 5020 d1 = numpy.abs(diff2)
5028 5021 else:
5029 5022 chan2 = side[1]
5030 5023 d2 = numpy.abs(diff2)
5031 5024 chan1 = side[0]
5032 5025 d1 = numpy.abs(diff1)
5033 5026
5034 5027 if i==0:
5035 5028 chanCX = chanC
5036 5029 chan1X = chan1
5037 5030 chan2X = chan2
5038 5031 distances[0:2] = numpy.array([d1,d2])
5039 5032 else:
5040 5033 chanCY = chanC
5041 5034 chan1Y = chan1
5042 5035 chan2Y = chan2
5043 5036 distances[2:4] = numpy.array([d1,d2])
5044 5037
5045 5038 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
5046 5039
5047 5040 return pairslist, distances
5048 5041
5049 5042 class IGRFModel(Operation):
5050 5043 """Operation to calculate Geomagnetic parameters.
5051 5044
5052 5045 Parameters:
5053 5046 -----------
5054 5047 None
5055 5048
5056 5049 Example
5057 5050 --------
5058 5051
5059 5052 op = proc_unit.addOperation(name='IGRFModel', optype='other')
5060 5053
5061 5054 """
5062 5055
5063 5056 def __init__(self, **kwargs):
5064 5057
5065 5058 Operation.__init__(self, **kwargs)
5066 5059
5067 5060 self.aux=1
5068 5061
5069 5062 def run(self,dataOut):
5070 5063
5071 5064 try:
5072 5065 from schainpy.model.proc import mkfact_short_2020
5073 5066 except:
5074 5067 log.warning('You should install "mkfact_short_2020" module to process IGRF Model')
5075 5068
5076 5069 if self.aux==1:
5077 5070
5078 5071 #dataOut.TimeBlockSeconds_First_Time=time.mktime(time.strptime(dataOut.TimeBlockDate))
5079 5072 #### we do not use dataOut.datatime.ctime() because it's the time of the second (next) block
5080 5073 dataOut.TimeBlockSeconds_First_Time=dataOut.TimeBlockSeconds
5081 5074 dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_First_Time)
5082 5075 dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0
5083 5076 dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0
5084 5077
5085 5078 self.aux=0
5086 5079
5087 5080 dataOut.h=numpy.arange(0.0,15.0*dataOut.MAXNRANGENDT,15.0,dtype='float32')
5088 5081 dataOut.bfm=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
5089 5082 dataOut.bfm=numpy.array(dataOut.bfm,order='F')
5090 5083 dataOut.thb=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
5091 5084 dataOut.thb=numpy.array(dataOut.thb,order='F')
5092 5085 dataOut.bki=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
5093 5086 dataOut.bki=numpy.array(dataOut.bki,order='F')
5094 5087
5095 5088 mkfact_short_2020.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT)
5096 5089
5097 5090 return dataOut
5098 5091
5099 5092 class MergeProc(ProcessingUnit):
5100 5093
5101 5094 def __init__(self):
5102 5095 ProcessingUnit.__init__(self)
5103 5096
5104 5097 def run(self, attr_data, attr_data_2 = None, attr_data_3 = None, attr_data_4 = None, attr_data_5 = None, mode=0):
5105 5098
5106 5099 self.dataOut = getattr(self, self.inputs[0])
5107 5100 data_inputs = [getattr(self, attr) for attr in self.inputs]
5108 5101 #print(self.inputs)
5109 5102 #print(numpy.shape([getattr(data, attr_data) for data in data_inputs][1]))
5110 5103 #exit(1)
5111 5104 if mode==0:
5112 5105 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
5113 5106 setattr(self.dataOut, attr_data, data)
5114 5107
5115 5108 if mode==1: #Hybrid
5116 5109 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
5117 5110 #setattr(self.dataOut, attr_data, data)
5118 5111 setattr(self.dataOut, 'dataLag_spc', [getattr(data, attr_data) for data in data_inputs][0])
5119 5112 setattr(self.dataOut, 'dataLag_spc_LP', [getattr(data, attr_data) for data in data_inputs][1])
5120 5113 setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
5121 5114 setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
5122 5115 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
5123 5116 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
5124 5117 '''
5125 5118 print(self.dataOut.dataLag_spc_LP.shape)
5126 5119 print(self.dataOut.dataLag_cspc_LP.shape)
5127 5120 exit(1)
5128 5121 '''
5129 5122
5130 5123 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
5131 5124 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
5132 5125 '''
5133 5126 print("Merge")
5134 5127 print(numpy.shape(self.dataOut.dataLag_spc))
5135 5128 print(numpy.shape(self.dataOut.dataLag_spc_LP))
5136 5129 print(numpy.shape(self.dataOut.dataLag_cspc))
5137 5130 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
5138 5131 exit(1)
5139 5132 '''
5140 5133 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
5141 5134 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
5142 5135 #exit(1)
5143 5136 #print(self.dataOut.NDP)
5144 5137 #print(self.dataOut.nNoiseProfiles)
5145 5138
5146 5139 #self.dataOut.nIncohInt_LP = 128
5147 5140 self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
5148 5141 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt
5149 5142 self.dataOut.NLAG = 16
5150 5143 self.dataOut.NRANGE = 200
5151 5144 self.dataOut.NSCAN = 128
5152 5145 #print(numpy.shape(self.dataOut.data_spc))
5153 5146
5154 5147 #exit(1)
5155 5148
5156 5149 if mode==2: #HAE 2022
5157 5150 data = numpy.sum([getattr(data, attr_data) for data in data_inputs],axis=0)
5158 5151 setattr(self.dataOut, attr_data, data)
5159 5152
5160 5153 self.dataOut.nIncohInt *= 2
5161 5154 #meta = self.dataOut.getFreqRange(1)/1000.
5162 5155 self.dataOut.freqRange = self.dataOut.getFreqRange(1)/1000.
5163 5156
5164 5157 #exit(1)
5165 5158
5166 5159 if mode==4: #Hybrid LP-SSheightProfiles
5167 5160 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
5168 5161 #setattr(self.dataOut, attr_data, data)
5169 5162 setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[0], attr_data)) #DP
5170 5163 setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[0], attr_data_2)) #DP
5171 5164 setattr(self.dataOut, 'dataLag_spc_LP', getattr(data_inputs[1], attr_data_3)) #LP
5172 5165 #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP
5173 5166 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
5174 5167 setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
5175 5168 #print("Merge data_acf: ",self.dataOut.data_acf.shape)
5176 5169 #exit(1)
5177 5170 #print(self.dataOut.data_spc_LP.shape)
5178 5171 #print("Exit")
5179 5172 #exit(1)
5180 5173 #setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
5181 5174 #setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
5182 5175 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
5183 5176 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
5184 5177 '''
5185 5178 print(self.dataOut.dataLag_spc_LP.shape)
5186 5179 print(self.dataOut.dataLag_cspc_LP.shape)
5187 5180 exit(1)
5188 5181 '''
5189 5182 '''
5190 5183 print(self.dataOut.dataLag_spc_LP[0,:,100])
5191 5184 print(self.dataOut.dataLag_spc_LP[1,:,100])
5192 5185 exit(1)
5193 5186 '''
5194 5187 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
5195 5188 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
5196 5189 '''
5197 5190 print("Merge")
5198 5191 print(numpy.shape(self.dataOut.dataLag_spc))
5199 5192 print(numpy.shape(self.dataOut.dataLag_spc_LP))
5200 5193 print(numpy.shape(self.dataOut.dataLag_cspc))
5201 5194 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
5202 5195 exit(1)
5203 5196 '''
5204 5197 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
5205 5198 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
5206 5199 #exit(1)
5207 5200 #print(self.dataOut.NDP)
5208 5201 #print(self.dataOut.nNoiseProfiles)
5209 5202
5210 5203 #self.dataOut.nIncohInt_LP = 128
5211 5204 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
5212 5205 self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP
5213 5206 self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP
5214 5207 self.dataOut.NSCAN = 128
5215 5208 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN
5216 5209 #print("sahpi",self.dataOut.nIncohInt_LP)
5217 5210 #exit(1)
5218 5211 self.dataOut.NLAG = 16
5219 5212 self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1]
5220 5213
5221 5214 #print(numpy.shape(self.dataOut.data_spc))
5222 5215
5223 5216 #exit(1)
5224 5217 if mode==5:
5225 5218 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
5226 5219 setattr(self.dataOut, attr_data, data)
5227 5220 data = numpy.concatenate([getattr(data, attr_data_2) for data in data_inputs])
5228 5221 setattr(self.dataOut, attr_data_2, data)
5229 5222 #data = numpy.concatenate([getattr(data, attr_data_3) for data in data_inputs])
5230 5223 #setattr(self.dataOut, attr_data_3, data)
5231 5224 #print(self.dataOut.moments.shape,self.dataOut.data_snr.shape,self.dataOut.heightList.shape)