Expose amountdata from the statistics150km method as a user parameter. The default is 3, and it represents the minimum number of velocity samples required for processing.
imanay - r1799:72fde67b5cb4 v3.0-devel
@@ -1,7510 +1,7510 @@
1 1 # v3.0-devel
2 2 import numpy
3 3 import math
4 4 from scipy import optimize, interpolate, signal, stats, ndimage
5 5 from scipy.fftpack import fft
6 6 import scipy
7 7 from scipy.optimize import least_squares
8 8 import re
9 9 import datetime
10 10 import copy
11 11 import sys
12 12 import importlib
13 13 import itertools
14 14 from multiprocessing import Pool, TimeoutError
15 15 from multiprocessing.pool import ThreadPool
16 16 import time
17 17
18 18 import matplotlib.pyplot as plt
19 19 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state parameters
20 20 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
21 21 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
22 22 from scipy import asarray as ar,exp
23 23 from scipy.optimize import fmin, curve_fit
24 24 from schainpy.utils import log
25 25 import warnings
26 26 from numpy import NaN
27 27 from scipy.optimize.optimize import OptimizeWarning
28 28 warnings.filterwarnings('ignore')
29 29
30 30
31 31 SPEED_OF_LIGHT = 299792458
32 32
33 33 '''solving pickling issue'''
34 34
35 35 def _pickle_method(method):
36 36 func_name = method.__func__.__name__
37 37 obj = method.__self__
38 38 cls = method.__self__.__class__
39 39 return _unpickle_method, (func_name, obj, cls)
40 40
41 41 def _unpickle_method(func_name, obj, cls):
42 42 for cls in cls.mro():
43 43 try:
44 44 func = cls.__dict__[func_name]
45 45 except KeyError:
46 46 pass
47 47 else:
48 48 break
49 49 return func.__get__(obj, cls)
50 50
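# Editorial note (not part of the original module): the two helpers above are the
# classic workaround for pickling bound methods so that multiprocessing.Pool can
# ship them to worker processes. A minimal sketch of how they would typically be
# registered, assuming only the standard library:
#
#     import copyreg, types
#     copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)
#
# In this module the Pool.map call in GaussianFit.run instead goes through the
# module-level target() function defined further below, which is picklable on its own.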
51 51 # @MPDecorator
52 52 class ParametersProc(ProcessingUnit):
53 53
54 54 METHODS = {}
55 55 nSeconds = None
56 56
57 57 def __init__(self):
58 58 ProcessingUnit.__init__(self)
59 59
60 60 self.buffer = None
61 61 self.firstdatatime = None
62 62 self.profIndex = 0
63 63 self.dataOut = Parameters()
64 64 self.setupReq = False # add this to all processing units
65 65
66 66 def __updateObjFromInput(self):
67 67
68 68 self.dataOut.inputUnit = self.dataIn.type
69 69
70 70 self.dataOut.timeZone = self.dataIn.timeZone
71 71 self.dataOut.dstFlag = self.dataIn.dstFlag
72 72 self.dataOut.errorCount = self.dataIn.errorCount
73 73 self.dataOut.useLocalTime = self.dataIn.useLocalTime
74 74
75 75 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
76 76 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
77 77 self.dataOut.channelList = self.dataIn.channelList
78 78 self.dataOut.heightList = self.dataIn.heightList
79 79 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
80 80 # self.dataOut.nHeights = self.dataIn.nHeights
81 81 # self.dataOut.nChannels = self.dataIn.nChannels
82 82 # self.dataOut.nBaud = self.dataIn.nBaud
83 83 # self.dataOut.nCode = self.dataIn.nCode
84 84 # self.dataOut.code = self.dataIn.code
85 85 # self.dataOut.nProfiles = self.dataOut.nFFTPoints
86 86 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
87 87 # self.dataOut.utctime = self.firstdatatime
88 88 self.dataOut.utctime = self.dataIn.utctime
89 89 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData # assume the data is already decoded
90 90 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData # assume the data has not been flipped
91 91 self.dataOut.nCohInt = self.dataIn.nCohInt
92 92 # self.dataOut.nIncohInt = 1
93 93 # self.dataOut.ippSeconds = self.dataIn.ippSeconds
94 94 # self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
95 95 self.dataOut.timeInterval1 = self.dataIn.timeInterval
96 96 self.dataOut.heightList = self.dataIn.heightList
97 97 self.dataOut.frequency = self.dataIn.frequency
98 98 #self.dataOut.noise = self.dataIn.noise
99 99
100 100 def run(self):
101 101
102 102 #---------------------- Voltage Data ---------------------------
103 103
104 104 if self.dataIn.type == "Voltage":
105 105
106 106 self.__updateObjFromInput()
107 107 self.dataOut.data_pre = self.dataIn.data.copy()
108 108 self.dataOut.flagNoData = False
109 109 self.dataOut.utctimeInit = self.dataIn.utctime
110 110 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
111 111 if hasattr(self.dataIn, 'dataPP_POW'):
112 112 self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
113 113
114 114 if hasattr(self.dataIn, 'dataPP_POWER'):
115 115 self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
116 116
117 117 if hasattr(self.dataIn, 'dataPP_DOP'):
118 118 self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
119 119
120 120 if hasattr(self.dataIn, 'dataPP_SNR'):
121 121 self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
122 122
123 123 if hasattr(self.dataIn, 'dataPP_WIDTH'):
124 124 self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
125 125 return
126 126
127 127 #---------------------- Spectra Data ---------------------------
128 128
129 129 if self.dataIn.type == "Spectra":
130 130
131 131 self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
132 132 self.dataOut.data_spc = self.dataIn.data_spc
133 133 self.dataOut.data_cspc = self.dataIn.data_cspc
134 134 # for JULIA processing
135 135 self.dataOut.data_diffcspc = self.dataIn.data_diffcspc
136 136 self.dataOut.nDiffIncohInt = self.dataIn.nDiffIncohInt
137 137 # for JULIA processing
138 138 self.dataOut.nProfiles = self.dataIn.nProfiles
139 139 self.dataOut.nIncohInt = self.dataIn.nIncohInt
140 140 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
141 141 self.dataOut.ippFactor = self.dataIn.ippFactor
142 142 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
143 143 self.dataOut.spc_noise = self.dataIn.getNoise()
144 144 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
145 145 # self.dataOut.normFactor = self.dataIn.normFactor
146 146 self.dataOut.pairsList = self.dataIn.pairsList
147 147 self.dataOut.groupList = self.dataIn.pairsList
148 148 self.dataOut.flagNoData = False
149 149
150 150 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
151 151 self.dataOut.ChanDist = self.dataIn.ChanDist
152 152 else: self.dataOut.ChanDist = None
153 153
154 154 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
155 155 # self.dataOut.VelRange = self.dataIn.VelRange
156 156 #else: self.dataOut.VelRange = None
157 157
158 158 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
159 159 self.dataOut.RadarConst = self.dataIn.RadarConst
160 160
161 161 if hasattr(self.dataIn, 'NPW'): #NPW
162 162 self.dataOut.NPW = self.dataIn.NPW
163 163
164 164 if hasattr(self.dataIn, 'COFA'): #COFA
165 165 self.dataOut.COFA = self.dataIn.COFA
166 166
167 167
168 168
169 169 #---------------------- Correlation Data ---------------------------
170 170
171 171 if self.dataIn.type == "Correlation":
172 172 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
173 173
174 174 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
175 175 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
176 176 self.dataOut.groupList = (acf_pairs, ccf_pairs)
177 177
178 178 self.dataOut.abscissaList = self.dataIn.lagRange
179 179 self.dataOut.noise = self.dataIn.noise
180 180 self.dataOut.data_snr = self.dataIn.SNR
181 181 self.dataOut.flagNoData = False
182 182 self.dataOut.nAvg = self.dataIn.nAvg
183 183
184 184 #---------------------- Parameters Data ---------------------------
185 185
186 186 if self.dataIn.type == "Parameters":
187 187 self.dataOut.copy(self.dataIn)
188 188 self.dataOut.flagNoData = False
189 189
190 190 return True
191 191
192 192 self.__updateObjFromInput()
193 193 self.dataOut.utctimeInit = self.dataIn.utctime
194 194 self.dataOut.paramInterval = self.dataIn.timeInterval
195 195
196 196 return
197 197
198 198
199 199 def target(tups):
200 200
201 201 obj, args = tups
202 202
203 203 return obj.FitGau(args)
204 204
205 205 class RemoveWideGC(Operation):
206 206 ''' This class removes wide ground clutter and replaces it with a simple interpolation between neighboring points
207 207 This mainly applies to the CLAIRE radar
208 208
209 209 ClutterWidth : velocity width around zero within which to look for the clutter peak
210 210
211 211 Input:
212 212
213 213 self.dataOut.data_pre : SPC and CSPC
214 214 self.dataOut.spc_range : To select wind and rainfall velocities
215 215
216 216 Affected:
217 217
218 218 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
219 219
220 220 Written by D. Scipión 25.02.2021
221 221 '''
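# Minimal, self-contained illustration (an editorial sketch, not part of the original
# class) of the "replace clutter with a simple interpolation" idea implemented in
# run() below: the bins flagged as ground clutter are overwritten with a straight
# line drawn between the valley bins found on either side of the clutter peak.
#
#     import numpy
#     vel = numpy.linspace(-10., 10., 21)            # velocity axis (m/s)
#     spc = numpy.ones(21); spc[9:12] = 50.          # artificial clutter around 0 m/s
#     gcindex = numpy.array([9, 10, 11])             # clutter bins to replace
#     edges = numpy.array([8, 12])                   # valleys on either side
#     spc[gcindex] = numpy.interp(vel[gcindex], vel[edges], spc[edges])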
222 222 def __init__(self):
223 223 Operation.__init__(self)
224 224 self.i = 0
225 225 self.ich = 0
226 226 self.ir = 0
227 227
228 228 def run(self, dataOut, ClutterWidth=2.5):
229 229
230 230 self.spc = dataOut.data_pre[0].copy()
231 231 self.spc_out = dataOut.data_pre[0].copy()
232 232 self.Num_Chn = self.spc.shape[0]
233 233 self.Num_Hei = self.spc.shape[2]
234 234 VelRange = dataOut.spc_range[2][:-1]
235 235 dv = VelRange[1]-VelRange[0]
236 236
237 237 # Find the velocity bins within ClutterWidth of zero
238 238 gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))
239 239
240 240 # Removing novalid data from the spectra
241 241 for ich in range(self.Num_Chn) :
242 242 for ir in range(self.Num_Hei) :
243 243 # Estimate the noise at each range
244 244 HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)
245 245
246 246 # Removing the noise floor at each range
247 247 novalid = numpy.where(self.spc[ich,:,ir] < HSn)
248 248 self.spc[ich,novalid,ir] = HSn
249 249
250 250 junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
251 251 j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
252 252 j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
253 253 if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
254 254 continue
255 255 junk3 = numpy.squeeze(numpy.diff(j1index))
256 256 junk4 = numpy.squeeze(numpy.diff(j2index))
257 257
258 258 valleyindex = j2index[numpy.where(junk4>1)]
259 259 peakindex = j1index[numpy.where(junk3>1)]
260 260
261 261 isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
262 262 if numpy.size(isvalid) == 0 :
263 263 continue
264 264 if numpy.size(isvalid) >1 :
265 265 vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
266 266 isvalid = isvalid[vindex]
267 267
268 268 # clutter peak
269 269 gcpeak = peakindex[isvalid]
270 270 vl = numpy.where(valleyindex < gcpeak)
271 271 if numpy.size(vl) == 0:
272 272 continue
273 273 gcvl = valleyindex[vl[0][-1]]
274 274 vr = numpy.where(valleyindex > gcpeak)
275 275 if numpy.size(vr) == 0:
276 276 continue
277 277 gcvr = valleyindex[vr[0][0]]
278 278
279 279 # Removing the clutter
280 280 interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
281 281 gcindex = gc_values[gcvl+1:gcvr-1]
282 282 self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])
283 283
284 284 dataOut.data_pre[0] = self.spc_out
285 285
286 286 return dataOut
287 287
288 288 class SpectralFilters(Operation):
289 289 ''' This class replaces the invalid (novalid) values with the channel noise, for each channel
290 290 This applies to the CLAIRE radar
291 291
292 292 PositiveLimit : right (upper) velocity limit of the invalid-data band
293 293 NegativeLimit : left (lower) velocity limit of the invalid-data band
294 294
295 295 Input:
296 296
297 297 self.dataOut.data_pre : SPC and CSPC
298 298 self.dataOut.spc_range : To select wind and rainfall velocities
299 299
300 300 Affected:
301 301
302 302 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
303 303
304 304 Written by D. Scipión 29.01.2021
305 305 '''
306 306 def __init__(self):
307 307 Operation.__init__(self)
308 308 self.i = 0
309 309
310 310 def run(self, dataOut, NegativeLimit=None, PositiveLimit=None):
311 311
312 312 self.spc = dataOut.data_pre[0].copy()
313 313 self.Num_Chn = self.spc.shape[0]
314 314 VelRange = dataOut.spc_range[2][:-1] # velocity axis matching the spectral bins (as in RemoveWideGC)
315 315
316 316 # novalid corresponds to data within the Negative and PositiveLimit
# (reconstructed: the original left `novalid` undefined; both limits must be supplied by the user, in the same units as VelRange)
if NegativeLimit is None or PositiveLimit is None: return dataOut
novalid = numpy.where((VelRange >= NegativeLimit) & (VelRange <= PositiveLimit))[0]
318 318
319 319 # Removing novalid data from the spectra
320 320 for i in range(self.Num_Chn):
321 321 self.spc[i,novalid,:] = dataOut.noise[i]
322 322 dataOut.data_pre[0] = self.spc
323 323 return dataOut
324 324
325 325
326 326
327 327 class GaussianFit(Operation):
328 328
329 329 '''
330 330 Function that fits one or two generalized Gaussians (gg) based
331 331 on the PSD shape across a 'power band' identified from the cumulative sum of
332 332 the measured spectrum minus the noise.
333 333
334 334 Input:
335 335 self.dataOut.data_pre : SelfSpectra
336 336
337 337 Output:
338 338 self.dataOut.DGauFitParams : fitted parameters; self.dataOut.GaussFit0, self.dataOut.GaussFit1 : reconstructed curves
339 339
340 340 '''
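# Editorial sketch (not part of the original class): the shape fitted per channel and
# range gate is a generalized Gaussian plus a noise floor, the same expression used to
# rebuild GaussFit0/GaussFit1 in run() and in y_model1/y_model2 below:
#
#     import numpy
#     def generalized_gaussian(v, A, v0, sigma, p, N0):
#         return A * numpy.exp(-0.5 * numpy.abs((v - v0) / sigma)**p) + N0
#
# With p = 2 this is an ordinary Gaussian (the 'squared' method); the 'generalized'
# method leaves p free within the fit bounds (0.5, 3.0).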
341 341 def __init__(self):
342 342 Operation.__init__(self)
343 343 self.i=0
344 344
345 345
346 346 # def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points
347 347 def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
348 348 """This routine will find a couple of generalized Gaussians to a power spectrum
349 349 methods: generalized, squared
350 350 input: spc
351 351 output:
352 352 noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1
353 353 """
354 354 print ('Entering ',method,' double Gaussian fit')
355 355 self.spc = dataOut.data_pre[0].copy()
356 356 self.Num_Hei = self.spc.shape[2]
357 357 self.Num_Bin = self.spc.shape[1]
358 358 self.Num_Chn = self.spc.shape[0]
359 359
360 360 start_time = time.time()
361 361
362 362 pool = Pool(processes=self.Num_Chn)
363 363 args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
364 364 objs = [self for __ in range(self.Num_Chn)]
365 365 attrs = list(zip(objs, args))
366 366 DGauFitParam = pool.map(target, attrs)
367 367 # Parameters:
368 368 # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
369 369 dataOut.DGauFitParams = numpy.asarray(DGauFitParam)
370 370
371 371 # Double Gaussian Curves
372 372 gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
373 373 gau0[:] = numpy.NaN
374 374 gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
375 375 gau1[:] = numpy.NaN
376 376 x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
377 377 for iCh in range(self.Num_Chn):
378 378 N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
379 379 N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
380 380 A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
381 381 A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
382 382 v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
383 383 v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
384 384 s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
385 385 s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
386 386 if method == 'generalized':
387 387 p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
388 388 p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
389 389 elif method == 'squared':
390 390 p0 = 2.
391 391 p1 = 2.
392 392 gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
393 393 gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
394 394 dataOut.GaussFit0 = gau0
395 395 dataOut.GaussFit1 = gau1
396 396
397 397 print('Leaving ',method ,' double Gaussian fit')
398 398 return dataOut
399 399
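# Editorial note on the container filled above: pool.map returns one FitGau result per
# channel, so dataOut.DGauFitParams has shape (nChannels, 5, nHeights, 2) with
#   axis 1 -> [noise, amplitude, Doppler velocity (m/s), spectral width (m/s), power p]
#   axis 3 -> [first Gaussian, second Gaussian]
# as assigned at the end of FitGau() below.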
400 400 def FitGau(self, X):
401 401 # print('Entering FitGau')
402 402 # Assigning the variables
403 403 Vrange, ch, wnoise, num_intg, SNRlimit = X
404 404 # Noise Limits
405 405 noisebl = wnoise * 0.9
406 406 noisebh = wnoise * 1.1
407 407 # Radar Velocity
408 408 Va = max(Vrange)
409 409 deltav = Vrange[1] - Vrange[0]
410 410 x = numpy.arange(self.Num_Bin)
411 411
412 412 # print ('stop 0')
413 413
414 414 # 5 parameters, 2 Gaussians
415 415 DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
416 416 DGauFitParam[:] = numpy.NaN
417 417
418 418 # SPCparam = []
419 419 # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei])
420 420 # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei])
421 421 # SPC_ch1[:] = 0 #numpy.NaN
422 422 # SPC_ch2[:] = 0 #numpy.NaN
423 423 # print ('stop 1')
424 424 for ht in range(self.Num_Hei):
425 425 # print (ht)
426 426 # print ('stop 2')
427 427 # Spectra at each range
428 428 spc = numpy.asarray(self.spc)[ch,:,ht]
429 429 snr = ( spc.mean() - wnoise ) / wnoise
430 430 snrdB = 10.*numpy.log10(snr)
431 431
432 432 #print ('stop 3')
433 433 if snrdB < SNRlimit :
434 434 # snr = numpy.NaN
435 435 # SPC_ch1[:,ht] = 0#numpy.NaN
436 436 # SPC_ch1[:,ht] = 0#numpy.NaN
437 437 # SPCparam = (SPC_ch1,SPC_ch2)
438 438 # print ('SNR less than SNRth')
439 439 continue
440 440 # wnoise = hildebrand_sekhon(spc,num_intg)
441 441 # print ('stop 2.01')
442 442 #############################################
443 443 # normalizing spc and noise
444 444 # This part differs from gg1
445 445 # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
446 446 #spc = spc / spc_norm_max
447 447 # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021
448 448 #############################################
449 449
450 450 # print ('stop 2.1')
451 451 fatspectra=1.0
452 452 # noise per channel.... we might want to use the noise at each range
453 453
454 454 # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021
455 455 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
456 456 #if wnoise>1.1*pnoise: # to be tested later
457 457 # wnoise=pnoise
458 458 # noisebl = wnoise*0.9
459 459 # noisebh = wnoise*1.1
460 460 spc = spc - wnoise # signal
461 461
462 462 # print ('stop 2.2')
463 463 minx = numpy.argmin(spc)
464 464 #spcs=spc.copy()
465 465 spcs = numpy.roll(spc,-minx)
466 466 cum = numpy.cumsum(spcs)
467 467 # tot_noise = wnoise * self.Num_Bin #64;
468 468
469 469 # print ('stop 2.3')
470 470 # snr = sum(spcs) / tot_noise
471 471 # snrdB = 10.*numpy.log10(snr)
472 472 #print ('stop 3')
473 473 # if snrdB < SNRlimit :
474 474 # snr = numpy.NaN
475 475 # SPC_ch1[:,ht] = 0#numpy.NaN
476 476 # SPC_ch1[:,ht] = 0#numpy.NaN
477 477 # SPCparam = (SPC_ch1,SPC_ch2)
478 478 # print ('SNR less than SNRth')
479 479 # continue
480 480
481 481
482 482 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
483 483 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
484 484 # print ('stop 4')
485 485 cummax = max(cum)
486 486 epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
487 487 cumlo = cummax * epsi
488 488 cumhi = cummax * (1-epsi)
489 489 powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
490 490
491 491 # print ('stop 5')
492 492 if len(powerindex) < 1:# case for powerindex 0
493 493 # print ('powerindex < 1')
494 494 continue
495 495 powerlo = powerindex[0]
496 496 powerhi = powerindex[-1]
497 497 powerwidth = powerhi-powerlo
498 498 if powerwidth <= 1:
499 499 # print('powerwidth <= 1')
500 500 continue
501 501
502 502 # print ('stop 6')
503 503 firstpeak = powerlo + powerwidth/10.# first gaussian energy location
504 504 secondpeak = powerhi - powerwidth/10. #second gaussian energy location
505 505 midpeak = (firstpeak + secondpeak)/2.
506 506 firstamp = spcs[int(firstpeak)]
507 507 secondamp = spcs[int(secondpeak)]
508 508 midamp = spcs[int(midpeak)]
509 509
510 510 y_data = spc + wnoise
511 511
512 512 ''' single Gaussian '''
513 513 shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
514 514 width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
515 515 power0 = 2.
516 516 amplitude0 = midamp
517 517 state0 = [shift0,width0,amplitude0,power0,wnoise]
518 518 bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
519 519 lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)
520 520 # print ('stop 7.1')
521 521 # print (bnds)
522 522
523 523 chiSq1=lsq1[1]
524 524
525 525 # print ('stop 8')
526 526 if fatspectra<1.0 and powerwidth<4:
527 527 choice=0
528 528 Amplitude0=lsq1[0][2]
529 529 shift0=lsq1[0][0]
530 530 width0=lsq1[0][1]
531 531 p0=lsq1[0][3]
532 532 Amplitude1=0.
533 533 shift1=0.
534 534 width1=0.
535 535 p1=0.
536 536 noise=lsq1[0][4]
537 537 #return (numpy.array([shift0,width0,Amplitude0,p0]),
538 538 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
539 539 # print ('stop 9')
540 540 ''' two Gaussians '''
541 541 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
542 542 shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
543 543 shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
544 544 width0 = powerwidth/6.
545 545 width1 = width0
546 546 power0 = 2.
547 547 power1 = power0
548 548 amplitude0 = firstamp
549 549 amplitude1 = secondamp
550 550 state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
551 551 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
552 552 bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
553 553 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
554 554
555 555 # print ('stop 10')
556 556 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
557 557
558 558 # print ('stop 11')
559 559 chiSq2 = lsq2[1]
560 560
561 561 # print ('stop 12')
562 562
563 563 oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
564 564
565 565 # print ('stop 13')
566 566 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
567 567 if oneG:
568 568 choice = 0
569 569 else:
570 570 w1 = lsq2[0][1]; w2 = lsq2[0][5]
571 571 a1 = lsq2[0][2]; a2 = lsq2[0][6]
572 572 p1 = lsq2[0][3]; p2 = lsq2[0][7]
573 573 s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
574 574 s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
575 575 gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each generalized Gaussian with proper p scaling
576 576
577 577 if gp1>gp2:
578 578 if a1>0.7*a2:
579 579 choice = 1
580 580 else:
581 581 choice = 2
582 582 elif gp2>gp1:
583 583 if a2>0.7*a1:
584 584 choice = 2
585 585 else:
586 586 choice = 1
587 587 else:
588 588 choice = numpy.argmax([a1,a2])+1
589 589 #else:
590 590 #choice=argmin([std2a,std2b])+1
591 591
592 592 else: # with low SNR go to the most energetic peak
593 593 choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
594 594
595 595 # print ('stop 14')
596 596 shift0 = lsq2[0][0]
597 597 vel0 = Vrange[0] + shift0 * deltav
598 598 shift1 = lsq2[0][4]
599 599 # vel1=Vrange[0] + shift1 * deltav
600 600
601 601 # max_vel = 1.0
602 602 # Va = max(Vrange)
603 603 # deltav = Vrange[1]-Vrange[0]
604 604 # print ('stop 15')
605 605 #first peak will be 0, second peak will be 1
606 606 # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D.Scipión 19.03.2021
607 607 if vel0 > -Va and vel0 < Va : #first peak is in the correct range
608 608 shift0 = lsq2[0][0]
609 609 width0 = lsq2[0][1]
610 610 Amplitude0 = lsq2[0][2]
611 611 p0 = lsq2[0][3]
612 612
613 613 shift1 = lsq2[0][4]
614 614 width1 = lsq2[0][5]
615 615 Amplitude1 = lsq2[0][6]
616 616 p1 = lsq2[0][7]
617 617 noise = lsq2[0][8]
618 618 else:
619 619 shift1 = lsq2[0][0]
620 620 width1 = lsq2[0][1]
621 621 Amplitude1 = lsq2[0][2]
622 622 p1 = lsq2[0][3]
623 623
624 624 shift0 = lsq2[0][4]
625 625 width0 = lsq2[0][5]
626 626 Amplitude0 = lsq2[0][6]
627 627 p0 = lsq2[0][7]
628 628 noise = lsq2[0][8]
629 629
630 630 if Amplitude0<0.05: # in case the peak is noise
631 631 shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
632 632 if Amplitude1<0.05:
633 633 shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]
634 634
635 635 # print ('stop 16 ')
636 636 # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0)
637 637 # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1)
638 638 # SPCparam = (SPC_ch1,SPC_ch2)
639 639
640 640 DGauFitParam[0,ht,0] = noise
641 641 DGauFitParam[0,ht,1] = noise
642 642 DGauFitParam[1,ht,0] = Amplitude0
643 643 DGauFitParam[1,ht,1] = Amplitude1
644 644 DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
645 645 DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
646 646 DGauFitParam[3,ht,0] = width0 * deltav
647 647 DGauFitParam[3,ht,1] = width1 * deltav
648 648 DGauFitParam[4,ht,0] = p0
649 649 DGauFitParam[4,ht,1] = p1
650 650
651 651 return DGauFitParam
652 652
653 653 def y_model1(self,x,state):
654 654 shift0, width0, amplitude0, power0, noise = state
655 655 model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
656 656 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
657 657 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
658 658 return model0 + model0u + model0d + noise
659 659
660 660 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
661 661 shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
662 662 model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
663 663 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
664 664 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
665 665
666 666 model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
667 667 model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
668 668 model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
669 669 return model0 + model0u + model0d + model1 + model1u + model1d + noise
670 670
671 671 def misfit1(self,state,y_data,x,num_intg): # Compares how close the real data is to the model data; the closer it is, the smaller the misfit.
672 672
673 673 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
674 674
675 675 def misfit2(self,state,y_data,x,num_intg):
676 676 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
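# Editorial note on the model/misfit pair above: y_model1 and y_model2 add aliased
# copies of each Gaussian shifted by +/- Num_Bin so that a peak near the Nyquist edge
# wraps around the spectrum instead of being truncated. The misfit functions compare
# data and model in log space, scaled by the number of incoherent integrations,
#
#     chi2 ~ num_intg * sum( (ln S_data - ln S_model)**2 ),
#
# which is the usual approximation when the standard deviation of a spectral estimate
# is proportional to its mean (relative error ~ 1/sqrt(num_intg)).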
677 677
678 678 class Oblique_Gauss_Fit(Operation):
679 679 '''
680 680 Written by R. Flores
681 681 '''
682 682 def __init__(self):
683 683 Operation.__init__(self)
684 684
685 685 def Gauss_fit(self,spc,x,nGauss):
686 686
687 687
688 688 def gaussian(x, a, b, c, d):
689 689 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
690 690 return val
691 691
692 692 if nGauss == 'first':
693 693 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
694 694 spc_2_aux = numpy.flip(spc_1_aux)
695 695 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
696 696
697 697 len_dif = len(x)-len(spc_3_aux)
698 698
699 699 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
700 700
701 701 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
702 702
703 703 y = spc_new
704 704
705 705 elif nGauss == 'second':
706 706 y = spc
707 707
708 708
709 709 # estimate starting values from the data
710 710 a = y.max()
711 711 b = x[numpy.argmax(y)]
712 712 if nGauss == 'first':
713 713 c = 1.#b#b#numpy.std(spc)
714 714 elif nGauss == 'second':
715 715 c = b
716 716 else:
717 717 print("ERROR")
718 718
719 719 d = numpy.mean(y[-100:])
720 720
721 721 # define a least squares function to optimize
722 722 def minfunc(params):
723 723 return sum((y-gaussian(x,params[0],params[1],params[2],params[3]))**2)
724 724
725 725 # fit
726 726 popt = fmin(minfunc,[a,b,c,d],disp=False)
727 727 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
728 728
729 729
730 730 return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
731 731
732 732 def Gauss_fit_2(self,spc,x,nGauss):
733 733
734 734
735 735 def gaussian(x, a, b, c, d):
736 736 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
737 737 return val
738 738
739 739 if nGauss == 'first':
740 740 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
741 741 spc_2_aux = numpy.flip(spc_1_aux)
742 742 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
743 743
744 744 len_dif = len(x)-len(spc_3_aux)
745 745
746 746 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
747 747
748 748 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
749 749
750 750 y = spc_new
751 751
752 752 elif nGauss == 'second':
753 753 y = spc
754 754
755 755
756 756 # estimate starting values from the data
757 757 a = y.max()
758 758 b = x[numpy.argmax(y)]
759 759 if nGauss == 'first':
760 760 c = 1.#b#b#numpy.std(spc)
761 761 elif nGauss == 'second':
762 762 c = b
763 763 else:
764 764 print("ERROR")
765 765
766 766 d = numpy.mean(y[-100:])
767 767
768 768 # define a least squares function to optimize
769 769 popt,pcov = curve_fit(gaussian,x,y,p0=[a,b,c,d])
770 770 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
771 771
772 772
773 773 #return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
774 774 return gaussian(x, popt[0], popt[1], popt[2], popt[3]),popt[0], popt[1], popt[2], popt[3]
775 775
776 776 def Double_Gauss_fit(self,spc,x,A1,B1,C1,A2,B2,C2,D):
777 777
778 778 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
779 779 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
780 780 return val
781 781
782 782
783 783 y = spc
784 784
785 785 # estimate starting values from the data
786 786 a1 = A1
787 787 b1 = B1
788 788 c1 = C1#numpy.std(spc)
789 789
790 790 a2 = A2#y.max()
791 791 b2 = B2#x[numpy.argmax(y)]
792 792 c2 = C2#numpy.std(spc)
793 793 d = D
794 794
795 795 # define a least squares function to optimize
796 796 def minfunc(params):
797 797 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2)
798 798
799 799 # fit
800 800 popt = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],disp=False)
801 801
802 802 return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
803 803
804 804 def Double_Gauss_fit_2(self,spc,x,A1,B1,C1,A2,B2,C2,D):
805 805
806 806 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
807 807 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
808 808 return val
809 809
810 810
811 811 y = spc
812 812
813 813 # estimate starting values from the data
814 814 a1 = A1
815 815 b1 = B1
816 816 c1 = C1#numpy.std(spc)
817 817
818 818 a2 = A2#y.max()
819 819 b2 = B2#x[numpy.argmax(y)]
820 820 c2 = C2#numpy.std(spc)
821 821 d = D
822 822
823 823 # fit
824 824
825 825 popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
826 826
827 827 error = numpy.sqrt(numpy.diag(pcov))
828 828
829 829 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
830 830
831 831 def windowing_double(self,spc,x,A1,B1,C1,A2,B2,C2,D):
832 832 from scipy.optimize import curve_fit,fmin
833 833
834 834 def R_gaussian(x, a, b, c):
835 835 N = int(numpy.shape(x)[0])
836 836 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
837 837 return val
838 838
839 839 def T(x,N):
840 840 T = 1-abs(x)/N
841 841 return T
842 842
843 843 def R_T_spc_fun(x, a1, b1, c1, a2, b2, c2, d):
844 844
845 845 N = int(numpy.shape(x)[0])
846 846
847 847 x_max = x[-1]
848 848
849 849 x_pos = x[1600:]
850 850 x_neg = x[:1600]
851 851
852 852 R_T_neg_1 = R_gaussian(x, a1, b1, c1)[:1600]*T(x_neg,-x[0])
853 853 R_T_pos_1 = R_gaussian(x, a1, b1, c1)[1600:]*T(x_pos,x[-1])
854 854 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
855 855 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
856 856 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
857 857 max_val_1 = numpy.max(R_T_spc_1)
858 858 R_T_spc_1 = R_T_spc_1*a1/max_val_1
859 859
860 860 R_T_neg_2 = R_gaussian(x, a2, b2, c2)[:1600]*T(x_neg,-x[0])
861 861 R_T_pos_2 = R_gaussian(x, a2, b2, c2)[1600:]*T(x_pos,x[-1])
862 862 R_T_sum_2 = R_T_pos_2 + R_T_neg_2
863 863 R_T_spc_2 = numpy.fft.fft(R_T_sum_2).real
864 864 R_T_spc_2 = numpy.fft.fftshift(R_T_spc_2)
865 865 max_val_2 = numpy.max(R_T_spc_2)
866 866 R_T_spc_2 = R_T_spc_2*a2/max_val_2
867 867
868 868 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
869 869 R_T_d_neg = R_T_d[:1600]*T(x_neg,-x[0])
870 870 R_T_d_pos = R_T_d[1600:]*T(x_pos,x[-1])
871 871 R_T_d_sum = R_T_d_pos + R_T_d_neg
872 872 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
873 873 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
874 874
875 875 R_T_final = R_T_spc_1 + R_T_spc_2 + R_T_spc_3
876 876
877 877 return R_T_final
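# Editorial note (not part of the original method): R_T_spc_fun above builds the
# *windowed* spectrum by working in the lag domain. Each Gaussian line is represented
# by its complex autocorrelation R_gaussian, multiplied by the triangular (Bartlett)
# lag window T(x, N) = 1 - |x|/N applied separately to the negative and positive lags,
# and transformed back with an FFT; the constant offset d is treated the same way
# through a unit impulse. The hard-coded 1600/3200 sample counts are tied to the
# ippSeconds and FFT length set up further below.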
878 878
879 879 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
880 880
881 881 from scipy.stats import norm
882 882 mean,std=norm.fit(spc)
883 883
884 884 # estimate starting values from the data
885 885 a1 = A1
886 886 b1 = B1
887 887 c1 = C1#numpy.std(spc)
888 888
889 889 a2 = A2#y.max()
890 890 b2 = B2#x[numpy.argmax(y)]
891 891 c2 = C2#numpy.std(spc)
892 892 d = D
893 893
894 894 ippSeconds = 250*20*1.e-6/3
895 895
896 896 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
897 897
898 898 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
899 899
900 900 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
901 901 x_freq = numpy.fft.fftshift(x_freq)
902 902
903 903 # define a least squares function to optimize
904 904 def minfunc(params):
905 905 #print(params[2])
906 906 #print(numpy.shape(params[2]))
907 907 return sum((y-R_T_spc_fun(x_t,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/1)#y**2)
908 908
909 909 # fit
910 910
911 911 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],full_output=True)
912 912 #print("nIter", popt_full[2])
913 913 popt = popt_full[0]
914 914
915 915 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
916 916 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
917 917
918 918 def Double_Gauss_fit_weight(self,spc,x,A1,B1,C1,A2,B2,C2,D):
919 919 from scipy.optimize import curve_fit,fmin
920 920
921 921 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
922 922 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
923 923 return val
924 924
925 925 y = spc
926 926
927 927 from scipy.stats import norm
928 928 mean,std=norm.fit(spc)
929 929
930 930 # estimate starting values from the data
931 931 a1 = A1
932 932 b1 = B1
933 933 c1 = C1#numpy.std(spc)
934 934
935 935 a2 = A2#y.max()
936 936 b2 = B2#x[numpy.argmax(y)]
937 937 c2 = C2#numpy.std(spc)
938 938 d = D
939 939
940 940 y_clean = signal.medfilt(y)
941 941 # define a least squares function to optimize
942 942 def minfunc(params):
943 943 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/(y_clean**2/1))
944 944
945 945 # fit
946 946 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d], disp =False, full_output=True)
947 947 #print("nIter", popt_full[2])
948 948 popt = popt_full[0]
949 949 #popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
950 950
951 951 #return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
952 952 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
953 953
954 954 def DH_mode(self,spectra,VelRange):
955 955
956 956 from scipy.optimize import curve_fit
957 957
958 958 def double_gauss(x, a1,b1,c1, a2,b2,c2, d):
959 959 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
960 960 return val
961 961
962 962 spec = (spectra.copy()).flatten()
963 963 amp=spec.max()
964 964 params=numpy.array([amp,-400,30,amp/4,-200,150,1.0e7])
965 965 #try:
966 966 popt,pcov=curve_fit(double_gauss, VelRange, spec, p0=params,bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf]))
967 967
968 968 error = numpy.sqrt(numpy.diag(pcov))
969 969 #doppler_2=popt[4]
970 970 #err_2 = numpy.sqrt(pcov[4][4])
971 971
972 972 #except:
973 973 #pass
974 974 #doppler_2=numpy.NAN
975 975 #err_2 = numpy.NAN
976 976
977 977 #return doppler_2, err_2
978 978
979 979 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
980 980
981 981 def Tri_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d):
982 982
983 983 from scipy.optimize import least_squares
984 984
985 985 freq_max = numpy.max(numpy.abs(freq))
986 986 spc_max = numpy.max(spc)
987 987
988 988 def tri_gaussian(x, a1, b1, c1, a2, b2, c2, a3, b3, c3, d):
989 989 z1 = (x-b1)/c1
990 990 z2 = (x-b2)/c2
991 991 z3 = (x-b3)/c3
992 992 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + a3 * numpy.exp(-z3**2/2) + d
993 993 return val
994 994
995 995 from scipy.signal import medfilt
996 996 Nincoh = 20
997 997 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
998 998 c1 = abs(c1)
999 999 c2 = abs(c2)
1000 1000
1001 1001 # define a least squares function to optimize
1002 1002 def lsq_func(params):
1003 1003 return (spc-tri_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9]))/spcm
1004 1004
1005 1005 # fit
1006 1006 #bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1007 1007 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,600,numpy.inf,numpy.inf])
1008 1008 #bounds=([0,-180,0,0,-100,30,0,110,0,0],[numpy.inf,-110,20,numpy.inf,33,80,numpy.inf,150,16,numpy.inf])
1009 1009 #bounds=([0,-540,0,0,-300,100,0,330,0,0],[numpy.inf,-330,60,numpy.inf,100,240,numpy.inf,450,80,numpy.inf])
1010 1010
1011 1011 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1012 1012 #print(a1,b1,c1,a2,b2,c2,d)
1013 1013 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,a2/4,-b1,c1,d],x_scale=params_scale,bounds=bounds)
1014 1014
1015 1015 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1016 1016 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1017 1017 A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
1018 1018 Df = popt.x[9]
1019 1019
1020 1020 return A1f, B1f, C1f, A2f, B2f, C2f, Df
1021 1021
1022 1022 def Tri_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d): # NOTE: this second definition shadows the Tri_Marco above; it actually fits two Gaussians plus an offset
1023 1023
1024 1024 from scipy.optimize import least_squares
1025 1025
1026 1026 freq_max = numpy.max(numpy.abs(freq))
1027 1027 spc_max = numpy.max(spc)
1028 1028
1029 1029 def duo_gaussian(x, a1, b1, c1, a2, b2, c2, d):
1030 1030 z1 = (x-b1)/c1
1031 1031 z2 = (x-b2)/c2
1032 1032 #z3 = (x-b3)/c3
1033 1033 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
1034 1034 return val
1035 1035
1036 1036 from scipy.signal import medfilt
1037 1037 Nincoh = 20
1038 1038 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1039 1039 c1 = abs(c1)
1040 1040 c2 = abs(c2)
1041 1041
1042 1042 # define a least squares function to optimize
1043 1043 def lsq_func(params):
1044 1044 return (spc-duo_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm # (was tri_gaussian, which is not defined in this method)
1045 1045
1046 1046 # fit
1047 1047 #bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1048 1048 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1049 1049 #bounds=([0,-180,0,0,-100,30,0,110,0,0],[numpy.inf,-110,20,numpy.inf,33,80,numpy.inf,150,16,numpy.inf])
1050 1050 #bounds=([0,-540,0,0,-300,100,0,330,0,0],[numpy.inf,-330,60,numpy.inf,100,240,numpy.inf,450,80,numpy.inf])
1051 1051
1052 1052 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1053 1053 #print(a1,b1,c1,a2,b2,c2,d)
1054 1054 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,d],x_scale=params_scale,bounds=bounds)
1055 1055
1056 1056 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1057 1057 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1058 1058 #A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
1059 1059 Df = popt.x[6] # (was popt.x[9], out of range for the 7-parameter fit above)
1060 1060
1061 1061 return A1f, B1f, C1f, A2f, B2f, C2f, Df
1062 1062
1063 1063 def double_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, d):
1064 1064 #from scipy import special
1065 1065 z1 = (x-b1)/c1
1066 1066 z2 = (x-b2)/c2
1067 1067 h2 = 1-k2*z2
1068 1068 h2[h2<0] = 0
1069 1069 y2 = -1/k2*numpy.log(h2)
1070 1070 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1071 1071 return val
1072 1072
1073 1073 def gaussian(self, x, a, b, c, d):
1074 1074 z = (x-b)/c
1075 1075 val = a * numpy.exp(-z**2/2) + d
1076 1076 return val
1077 1077
1078 1078 def double_gaussian(self, x, a1, b1, c1, a2, b2, c2, d):
1079 1079 z1 = (x-b1)/c1
1080 1080 z2 = (x-b2)/c2
1081 1081 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
1082 1082 return val
1083 1083
1084 1084 def double_gaussian_double_skew(self,x, a1, b1, c1, k1, a2, b2, c2, k2, d):
1085 1085
1086 1086 z1 = (x-b1)/c1
1087 1087 h1 = 1-k1*z1
1088 1088 h1[h1<0] = 0
1089 1089 y1 = -1/k1*numpy.log(h1)
1090 1090
1091 1091 z2 = (x-b2)/c2
1092 1092 h2 = 1-k2*z2
1093 1093 h2[h2<0] = 0
1094 1094 y2 = -1/k2*numpy.log(h2)
1095 1095
1096 1096 val = a1 * numpy.exp(-y1**2/2)/(1-k1*z1) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1097 1097 return val
1098 1098
1099 1099 def gaussian_skew(self,x, a2, b2, c2, k2, d):
1100 1100 #from scipy import special
1101 1101 z2 = (x-b2)/c2
1102 1102 h2 = 1-k2*z2
1103 1103 h2[h2<0] = 0
1104 1104 y2 = -1/k2*numpy.log(h2)
1105 1105 val = a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1106 1106 return val
1107 1107
1108 1108 def triple_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, a3, b3, c3, k3, d):
1109 1109 #from scipy import special
1110 1110 z1 = (x-b1)/c1
1111 1111 z2 = (x-b2)/c2
1112 1112 z3 = (x-b3)/c3
1113 1113 h2 = 1-k2*z2
1114 1114 h2[h2<0] = 0
1115 1115 y2 = -1/k2*numpy.log(h2)
1116 1116 h3 = 1-k3*z3
1117 1117 h3[h3<0] = 0
1118 1118 y3 = -1/k3*numpy.log(h3)
1119 1119 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + a3 * numpy.exp(-y3**2/2)/(1-k3*z3) + d
1120 1120 return val
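# Editorial note on the *_skew shapes above: with z = (x - b)/c and
# y = -ln(1 - k*z)/k, the factor exp(-y**2/2)/(1 - k*z) is a skewed Gaussian profile;
# in the limit k -> 0 it reduces to the symmetric exp(-z**2/2), so k acts purely as a
# skewness parameter. The model only exists where 1 - k*z > 0, which is why the code
# clips h = 1 - k*z at zero before taking the logarithm (outside that support the
# term contributes essentially nothing to the profile).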
1121 1121
1122 1122 def Double_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1123 1123
1124 1124 from scipy.optimize import least_squares
1125 1125
1126 1126 freq_max = numpy.max(numpy.abs(freq))
1127 1127 spc_max = numpy.max(spc)
1128 1128
1129 1129 from scipy.signal import medfilt
1130 1130 Nincoh = 20
1131 1131 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1132 1132
1133 1133 # define a least squares function to optimize
1134 1134 def lsq_func(params):
1135 1135 return (spc-self.double_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7]))/spcm
1136 1136
1137 1137 # fit
1138 1138 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1139 1139 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1140 1140 #print(a1,b1,c1,a2,b2,c2,k2,d)
1141 1141 bounds=([0,-numpy.inf,0,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1142 1142 #print(bounds)
1143 1143 #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1144 1144 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max]
1145 1145 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,1.0e7])
1146 1146 #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1)
1147 1147 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1148 1148 # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1)
1149 1149
1150 1150 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1151 1151 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1152 1152 Df = popt.x[7]
1153 1153
1154 1154 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1155 1155 doppler = freq[numpy.argmax(aux)]
1156 1156
1157 1157 #return A1f, B1f, C1f, A2f, B2f, C2f, K2f, Df, doppler
1158 1158 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, Df, doppler
1159 1159
1160 1160 def Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh,hei):
1161 1161
1162 1162 from scipy.optimize import least_squares
1163 1163
1164 1164 freq_max = numpy.max(numpy.abs(freq))
1165 1165 spc_max = numpy.max(spc)
1166 1166
1167 1167 #from scipy.signal import medfilt
1168 1168 #Nincoh = 20
1169 1169 #Nincoh = 80
1170 1170 Nincoh = Nincoh
1171 1171 #spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1172 1172 spcm = spc/numpy.sqrt(Nincoh)
1173 1173
1174 1174 # define a least squares function to optimize
1175 1175 def lsq_func(params):
1176 1176 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1177 1177
1178 1178 # fit
1179 1179 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1180 1180 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1181 1181 #print(a1,b1,c1,a2,b2,c2,k2,d)
1182 1182 #bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1183 1183 #bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-140,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1184 1184 bounds=([0,-numpy.inf,0,-5,0,-400,0,0,0],[numpy.inf,-200,numpy.inf,5,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1185 1185
1186 1186 #print(bounds)
1187 1187 #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1188 1188 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1189 1189 ####################x0_value = numpy.array([spc_max,-400,30,-.1,spc_max/4,-200,150,1,1.0e7])
1190 1190
1191 1191 dop1_x0 = freq[numpy.argmax(spc)]
1192 1192 ####dop1_x0 = freq[numpy.argmax(spcm)]
1193 1193 if dop1_x0 < 0:
1194 1194 dop2_x0 = dop1_x0 + 100
1195 1195 if dop1_x0 >= 0: # (was `> 0`; ensures dop2_x0 is defined when dop1_x0 == 0)
1196 1196 dop2_x0 = dop1_x0 - 100
1197 1197
1198 1198 ###########x0_value = numpy.array([spc_max,-200.5,30,-.1,spc_max/4,-100.5,150,1,1.0e7])
1199 1199 x0_value = numpy.array([spc_max,dop1_x0,30,-.1,spc_max/4, dop2_x0,150,1,1.0e7])
1200 1200 #x0_value = numpy.array([spc_max,-400.5,30,-.1,spc_max/4,-200.5,150,1,1.0e7])
1201 1201 #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1)
1202 1202 '''
1203 1203 print("INSIDE 1")
1204 1204 print("x0_value: ", x0_value)
1205 1205 print("boundaries: ", bounds)
1206 1206 import matplotlib.pyplot as plt
1207 1207 plt.plot(freq,spc)
1208 1208 plt.plot(freq,self.double_gaussian_double_skew(freq,x0_value[0],x0_value[1],x0_value[2],x0_value[3],x0_value[4],x0_value[5],x0_value[6],x0_value[7],x0_value[8]))
1209 1209 plt.title(hei)
1210 1210 plt.show()
1211 1211 '''
1212 1212 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1213 1213 # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1)
1214 1214 #print(popt)
1215 1215 #########print("INSIDE 2")
1216 1216 J = popt.jac
1217 1217
1218 1218 try:
1219 1219 cov = numpy.linalg.inv(J.T.dot(J))
1220 1220 error = numpy.sqrt(numpy.diagonal(cov))
1221 1221 except:
1222 1222 error = numpy.ones((9))*numpy.NAN
1223 1223 #print("error_inside",error)
1224 1224 #exit(1)
1225 1225
1226 1226 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1227 1227 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1228 1228 Df = popt.x[8]
1229 1229 '''
1230 1230 A1f_err = error.x[0]; B1f_err= error.x[1]; C1f_err = error.x[2]; K1f_err = error.x[3]
1231 1231 A2f_err = error.x[4]; B2f_err = error.x[5]; C2f_err = error.x[6]; K2f_err = error.x[7]
1232 1232 Df_err = error.x[8]
1233 1233 '''
1234 1234 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1235 1235 doppler1 = freq[numpy.argmax(aux1)]
1236 1236
1237 1237 aux2 = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1238 1238 doppler2 = freq[numpy.argmax(aux2)]
1239 1239 #print("error",error)
1240 1240 #exit(1)
1241 1241
1242 1242
1243 1243 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler1, doppler2, error
1244 1244
1245 1245 def Double_Gauss_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1246 1246
1247 1247 from scipy.optimize import least_squares
1248 1248
1249 1249 freq_max = numpy.max(numpy.abs(freq))
1250 1250 spc_max = numpy.max(spc)
1251 1251
1252 1252 from scipy.signal import medfilt
1253 1253 Nincoh = 20
1254 1254 Nincoh = 80
1255 1255 Nincoh = Nincoh # NOTE: the two hard-coded assignments above leave Nincoh = 80, overriding the Nincoh argument
1256 1256 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1257 1257
1258 1258 # define a least squares function to optimize
1259 1259 def lsq_func(params):
1260 1260 return (spc-self.double_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm
1261 1261
1262 1262 # fit
1263 1263 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1264 1264 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1265 1265 #print(a1,b1,c1,a2,b2,c2,k2,d)
1266 1266
1267 1267 dop1_x0 = freq[numpy.argmax(spcm)]
1268 1268
1269 1269 #####bounds=([0,-numpy.inf,0,0,-400,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1270 1270 #####bounds=([0,-numpy.inf,0,0,dop1_x0-50,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1271 1271 bounds=([0,-numpy.inf,0,0,dop1_x0-50,0,0],[numpy.inf,-300,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1272 1272 #####bounds=([0,-numpy.inf,0,0,-500,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1273 1273 #bounds=([0,-numpy.inf,0,-numpy.inf,0,-500,0,0,0],[numpy.inf,-240,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1274 1274 #print(bounds)
1275 1275 #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1276 1276 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1277 1277 #x0_value = numpy.array([spc_max,-400.5,30,spc_max/4,-200.5,150,1.0e7])
1278 1278 x0_value = numpy.array([spc_max,-400.5,30,spc_max/4,dop1_x0,150,1.0e7])
1279 1279 #x0_value = numpy.array([spc_max,-420.5,30,-.1,spc_max/4,-50,150,.1,numpy.mean(spc[-50:])])
1280 1280 #print("before popt")
1281 1281 #print(x0_value)
1282 1282 #print("freq: ",freq)
1283 1283 #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1)
1284 1284 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1285 1285 # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1)
1286 1286 #print("after popt")
1287 1287 J = popt.jac
1288 1288
1289 1289 try:
1290 1290 cov = numpy.linalg.inv(J.T.dot(J))
1291 1291 error = numpy.sqrt(numpy.diagonal(cov))
1292 1292 except:
1293 1293 error = numpy.ones((7))*numpy.NAN
1294 1294
1295 1295 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1296 1296 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1297 1297 Df = popt.x[6]
1298 1298 #print("before return")
1299 1299 return A1f, B1f, C1f, A2f, B2f, C2f, Df, error
1300 1300
1301 1301 def Double_Gauss_Double_Skew_fit_weight_bound_with_inputs(self, spc, freq, a1, b1, c1, a2, b2, c2, k2, d):
1302 1302
1303 1303 from scipy.optimize import least_squares
1304 1304
1305 1305 freq_max = numpy.max(numpy.abs(freq))
1306 1306 spc_max = numpy.max(spc)
1307 1307
1308 1308 from scipy.signal import medfilt
1309 1309 Nincoh = 20 # (reconstructed: the original read dataOut.nIncohInt, but dataOut is not in scope here; 20 matches the sibling fit methods)
1310 1310 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1311 1311
1312 1312 # define a least squares function to optimize
1313 1313 def lsq_func(params):
1314 1314 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1315 1315
1316 1316
1317 1317 bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1318 1318
1319 1319 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1320 1320
1321 1321 x0_value = numpy.array([a1,b1,c1,-.1,a2,b2,c2,k2,d])
1322 1322
1323 1323 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1324 1324
1325 1325 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1326 1326 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1327 1327 Df = popt.x[8]
1328 1328
1329 1329 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1330 1330 doppler = freq[numpy.argmax(aux)] # (was x[numpy.argmax(aux)], but x is not defined in this method)
1331 1331
1332 1332 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler
1333 1333
1334 1334 def Triple_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1335 1335
1336 1336 from scipy.optimize import least_squares
1337 1337
1338 1338 freq_max = numpy.max(numpy.abs(freq))
1339 1339 spc_max = numpy.max(spc)
1340 1340
1341 1341 from scipy.signal import medfilt
1342 1342 Nincoh = 20
1343 1343 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1344 1344
1345 1345 # define a least squares function to optimize
1346 1346 def lsq_func(params):
1347 1347 return (spc-self.triple_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9],params[10],params[11]))/spcm
1348 1348
1349 1349 # fit
1350 1350 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1351 1351 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1352 1352 #print(a1,b1,c1,a2,b2,c2,k2,d)
1353 1353 bounds=([0,-numpy.inf,0,0,-400,0,0,0,0,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1354 1354 #print(bounds)
1355 1355 #bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1356 1356 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1357 1357 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,spc_max/4,400,150,1,1.0e7])
1358 1358 #popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=1)
1359 1359 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1360 1360 # popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,k2,d],x_scale=params_scale,verbose=1)
1361 1361
1362 1362 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1363 1363 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1364 1364 A3f = popt.x[7]; B3f = popt.x[8]; C3f = popt.x[9]; K3f = popt.x[10]
1365 1365 Df = popt.x[11]
1366 1366
1367 1367 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1368 1368 doppler = freq[numpy.argmax(aux)]
1369 1369
1370 1370 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, A3f, B3f, C3f, K3f, Df, doppler
1371 1371
1372 1372 def CEEJ_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1373 1373
1374 1374 from scipy.optimize import least_squares
1375 1375
1376 1376 freq_max = numpy.max(numpy.abs(freq))
1377 1377 spc_max = numpy.max(spc)
1378 1378
1379 1379 from scipy.signal import medfilt
1380 1380 Nincoh = 20
1381 1381 Nincoh = 80
1382 1382 Nincoh = Nincoh
1383 1383 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1384 1384
1385 1385 # define a least squares function to optimize
1386 1386 def lsq_func(params):
1387 1387 return (spc-self.gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4]))#/spcm
1388 1388
1389 1389
1390 1390 bounds=([0,0,0,-numpy.inf,0],[numpy.inf,numpy.inf,numpy.inf,0,numpy.inf])
1391 1391
1392 1392 params_scale = [spc_max,freq_max,freq_max,1,spc_max]
1393 1393
1394 1394 x0_value = numpy.array([spc_max,freq[numpy.argmax(spc)],30,-.1,numpy.mean(spc[:50])])
1395 1395
1396 1396 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1397 1397
1398 1398 J = popt.jac
1399 1399
1400 1400 try:
1401 1401 error = numpy.ones((9))*numpy.NAN
1402 1402 cov = numpy.linalg.inv(J.T.dot(J))
1403 1403 error[:4] = numpy.sqrt(numpy.diagonal(cov))[:4]
1404 1404 error[-1] = numpy.sqrt(numpy.diagonal(cov))[-1]
1405 1405 except:
1406 1406 error = numpy.ones((9))*numpy.NAN
1407 1407
1408 1408 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1409 1409 Df = popt.x[4]
1410 1410
1411 1411 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1412 1412 doppler1 = freq[numpy.argmax(aux1)]
1413 1413 #print("CEEJ ERROR:",error)
1414 1414
1415 1415 return A1f, B1f, C1f, K1f, numpy.NAN, numpy.NAN, numpy.NAN, numpy.NAN, Df, doppler1, numpy.NAN, error
1416 1416
1417 1417 def CEEJ_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1418 1418
1419 1419 from scipy.optimize import least_squares
1420 1420
1421 1421 freq_max = numpy.max(numpy.abs(freq))
1422 1422 spc_max = numpy.max(spc)
1423 1423
1424 1424 from scipy.signal import medfilt
1428 1428 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1429 1429
1430 1430 # define a least squares function to optimize
1431 1431 def lsq_func(params):
1432 1432 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))#/spcm
1433 1433
1434 1434
1435 1435 bounds=([0,0,0,0],[numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1436 1436
1437 1437 params_scale = [spc_max,freq_max,freq_max,spc_max]
1438 1438
1439 1439 x0_value = numpy.array([spc_max,freq[numpy.argmax(spcm)],30,numpy.mean(spc[:50])])
1440 1440
1441 1441 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1442 1442
1443 1443 J = popt.jac
1444 1444
1445 1445 try:
1446 1446 error = numpy.ones((4))*numpy.NAN
1447 1447 cov = numpy.linalg.inv(J.T.dot(J))
1448 1448 error = numpy.sqrt(numpy.diagonal(cov))
1449 1449 except:
1450 1450 error = numpy.ones((4))*numpy.NAN
1451 1451
1452 1452 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1453 1453 Df = popt.x[3]
1454 1454
1455 1455 return A1f, B1f, C1f, Df, error
1456 1456
1457 1457 def Simple_fit_bound(self,spc,freq,Nincoh):
1458 1458
1459 1459 freq_max = numpy.max(numpy.abs(freq))
1460 1460 spc_max = numpy.max(spc)
1461 1461
1463 1463
1464 1464 def lsq_func(params):
1465 1465 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))
1466 1466
1467 1467 bounds=([0,-50,0,0],[numpy.inf,+50,numpy.inf,numpy.inf])
1468 1468
1469 1469 params_scale = [spc_max,freq_max,freq_max,spc_max]
1470 1470
1471 1471 x0_value = numpy.array([spc_max,-20.5,5,1.0e7])
1472 1472
1473 1473 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1474 1474
1475 1475 J = popt.jac
1476 1476
1477 1477 try:
1478 1478 cov = numpy.linalg.inv(J.T.dot(J))
1479 1479 error = numpy.sqrt(numpy.diagonal(cov))
1480 1480 except:
1481 1481 error = numpy.ones((4))*numpy.NAN
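        # Note (descriptive only): cov ~ (J^T J)^(-1) above is the standard Gauss-Newton
        # covariance approximation assuming unit-variance residuals; the reported errors
        # are sqrt(diag(cov)) and share the units of the fitted parameters.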
1482 1482
1483 1483 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1484 1484 Df = popt.x[3]
1485 1485
1486 1486 return A1f, B1f, C1f, Df, error
1487 1487
1488 1488 def clean_outliers(self,param):
1489 1489
1490 1490 threshold = 700
1491 1491
1492 1492 param = numpy.where(param < -threshold, numpy.nan, param)
1493 1493 param = numpy.where(param > +threshold, numpy.nan, param)
1494 1494
1495 1495 return param
1496 1496
1497 1497 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1498 1498 from scipy.optimize import curve_fit,fmin
1499 1499
1500 1500 def R_gaussian(x, a, b, c):
1501 1501 N = int(numpy.shape(x)[0])
1502 1502 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1503 1503 return val
1504 1504
1505 1505 def T(x,N):
1506 1506 T = 1-abs(x)/N
1507 1507 return T
1508 1508
1509 1509 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
1510 1510
1511 1511 N = int(numpy.shape(x)[0])
1512 1512
1513 1513 x_max = x[-1]
1514 1514
1515 1515 x_pos = x[int(nFFTPoints/2):]
1516 1516 x_neg = x[:int(nFFTPoints/2)]
1517 1517
1518 1518 R_T_neg_1 = R_gaussian(x, a, b, c)[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1519 1519 R_T_pos_1 = R_gaussian(x, a, b, c)[int(nFFTPoints/2):]*T(x_pos,x[-1])
1520 1520 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1521 1521 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1522 1522 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1523 1523 max_val_1 = numpy.max(R_T_spc_1)
1524 1524 R_T_spc_1 = R_T_spc_1*a/max_val_1
1525 1525
1526 1526 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1527 1527 R_T_d_neg = R_T_d[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1528 1528 R_T_d_pos = R_T_d[int(nFFTPoints/2):]*T(x_pos,x[-1])
1529 1529 R_T_d_sum = R_T_d_pos + R_T_d_neg
1530 1530 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1531 1531 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1532 1532
1533 1533 R_T_final = R_T_spc_1 + R_T_spc_3
1534 1534
1535 1535 return R_T_final
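        # R_T_spc_fun above models the spectrum as the FFT of a Gaussian ACF (R_gaussian)
        # multiplied by the triangular lag window T (finite-lag correlation), plus a
        # lag-windowed delta term that represents the flat noise floor d; the signal part
        # is rescaled so that its spectral peak equals the amplitude a.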
1536 1536
1537 1537 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1538 1538
1539 1539 from scipy.stats import norm
1540 1540 mean,std=norm.fit(spc)
1541 1541
1542 1542 # estimate starting values from the data
1543 1543 a = A
1544 1544 b = B
1545 1545 c = C#numpy.std(spc)
1546 1546 d = D
1547 1547 '''
1548 1548 ippSeconds = 250*20*1.e-6/3
1549 1549
1550 1550 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
1551 1551
1552 1552 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1553 1553
1554 1554 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1555 1555 x_freq = numpy.fft.fftshift(x_freq)
1556 1556 '''
1557 1557 # define a least squares function to optimize
1558 1558 def minfunc(params):
1559 1559             return sum((y-R_T_spc_fun(x,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
1560 1560
1561 1561 # fit
1562 1562
1563 1563 popt_full = fmin(minfunc,[a,b,c,d],full_output=True)
1564 1564 #print("nIter", popt_full[2])
1565 1565 popt = popt_full[0]
1566 1566
1567 1567 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1568 1568 return popt[0], popt[1], popt[2], popt[3]
1569 1569
1570 1570 def run(self, dataOut, mode = 0, Hmin1 = None, Hmax1 = None, Hmin2 = None, Hmax2 = None, Dop = 'Shift'):
1571 1571
1572 1572 pwcode = 1
1573 1573
1574 1574 if dataOut.flagDecodeData:
1575 1575 pwcode = numpy.sum(dataOut.code[0]**2)
1576 1576 #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
1577 1577 normFactor = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * pwcode * dataOut.windowOfFilter
1578 1578 factor = normFactor
1579 1579 z = dataOut.data_spc / factor
1580 1580 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
1581 1581 dataOut.power = numpy.average(z, axis=1)
1582 1582 dataOut.powerdB = 10 * numpy.log10(dataOut.power)
1583 1583
1584 1584 x = dataOut.getVelRange(0)
1585 1585
1586 1586 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1587 1587 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1588 1588 dataOut.dplr_2_u = numpy.ones((1,1,dataOut.nHeights))*numpy.NAN
1589 1589
1590 1590 if mode == 6:
1591 1591 dataOut.Oblique_params = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1592 1592 elif mode == 7:
1593 1593 dataOut.Oblique_params = numpy.ones((1,13,dataOut.nHeights))*numpy.NAN
1594 1594 elif mode == 8:
1595 1595 dataOut.Oblique_params = numpy.ones((1,10,dataOut.nHeights))*numpy.NAN
1596 1596 elif mode == 9:
1597 1597 dataOut.Oblique_params = numpy.ones((1,11,dataOut.nHeights))*numpy.NAN
1598 1598 dataOut.Oblique_param_errors = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1599 1599 elif mode == 11:
1600 1600 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1601 1601 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1602 1602 elif mode == 10: #150 km
1603 1603 dataOut.Oblique_params = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1604 1604 dataOut.Oblique_param_errors = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1605 1605 dataOut.snr_log10 = numpy.ones((1,dataOut.nHeights))*numpy.NAN
1606 1606
1607 1607 dataOut.VelRange = x
1608 1608
1609 1609
1610 1610
1611 1611 #l1=range(22,36) #+62
1612 1612 #l1=range(32,36)
1613 1613 #l2=range(58,99) #+62
1614 1614
1615 1615 #if Hmin1 == None or Hmax1 == None or Hmin2 == None or Hmax2 == None:
1616 1616
1617 1617 minHei1 = 105.
1619 1619 maxHei1 = 130.5
1620 1620
1621 1621 if mode == 10: #150 km
1622 1622 minHei1 = 100
1623 1623 maxHei1 = 100
1624 1624
1625 1625 inda1 = numpy.where(dataOut.heightList >= minHei1)
1626 1626 indb1 = numpy.where(dataOut.heightList <= maxHei1)
1627 1627
1628 1628 minIndex1 = inda1[0][0]
1629 1629 maxIndex1 = indb1[0][-1]
1630 1630
1631 1631 minHei2 = 150.
1633 1633 maxHei2 = 225.3
1634 1634
1635 1635 if mode == 10: #150 km
1636 1636 minHei2 = 110
1637 1637 maxHei2 = 165
1638 1638
1639 1639 inda2 = numpy.where(dataOut.heightList >= minHei2)
1640 1640 indb2 = numpy.where(dataOut.heightList <= maxHei2)
1641 1641
1642 1642 minIndex2 = inda2[0][0]
1643 1643 maxIndex2 = indb2[0][-1]
1644 1644
1645 1645 l1=range(minIndex1,maxIndex1)
1646 1646 l2=range(minIndex2,maxIndex2)
1647 1647
1648 1648 if mode == 4:
1649 1649 '''
1650 1650 for ind in range(dataOut.nHeights):
1651 1651 if(dataOut.heightList[ind]>=168 and dataOut.heightList[ind]<188):
1652 1652 try:
1653 1653 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
1654 1654 except:
1655 1655 pass
1656 1656 '''
1657 1657 for ind in itertools.chain(l1, l2):
1658 1658
1659 1659 try:
1660 1660 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
1661 1661 dataOut.dplr_2_u[0,0,ind] = dataOut.Oblique_params[0,4,ind]/numpy.sin(numpy.arccos(102/dataOut.heightList[ind]))
1662 1662 except:
1663 1663 pass
1664 1664
1665 1665 else:
1666 1666 #print("After: ", dataOut.data_snr[0])
1667 1667 #######import matplotlib.pyplot as plt
1668 1668 #######plt.plot(dataOut.data_snr[0],dataOut.heightList,marker='*',linestyle='--')
1669 1669 #######plt.show()
1670 1670 #print("l1: ", dataOut.heightList[l1])
1671 1671 #print("l2: ", dataOut.heightList[l2])
1672 1672 for hei in itertools.chain(l1, l2):
1673 1673 #for hei in range(79,81):
1674 1674 #if numpy.isnan(dataOut.data_snr[0,hei]) or numpy.isnan(numpy.log10(dataOut.data_snr[0,hei])):
1675 1675 if numpy.isnan(dataOut.snl[0,hei]):# or dataOut.snl[0,hei]<.0:
1676 1676
1677 1677 continue #Avoids the analysis when there is only noise
1678 1678
1679 1679 try:
1680 1680 spc = dataOut.data_spc[0,:,hei]
1681 1681
1682 1682 if mode == 6: #Skew Weighted Bounded
1683 1683 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei] = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1684 1684 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,8,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1685 1685
1686 1686 elif mode == 7: #Triple Skew Weighted Bounded
1687 1687 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_params[0,11,hei],dataOut.Oblique_params[0,12,hei] = self.Triple_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1688 1688 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,12,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1689 1689
1690 1690 elif mode == 8: #Double Skewed Weighted Bounded with inputs
1691 1691 a1, b1, c1, a2, b2, c2, k2, d, dopp = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1692 1692 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei] = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x, a1, b1, c1, a2, b2, c2, k2, d)
1693 1693 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,9,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1694 1694
1695 1695 elif mode == 9: #Double Skewed Weighted Bounded no inputs
1696 1696 #if numpy.max(spc) <= 0:
1697 1697 from scipy.signal import medfilt
1698 1698 spcm = medfilt(spc,11)
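                        # A peak at a non-positive velocity is fitted as EEJ (double skewed
                        # Gaussian); a positive-velocity peak is fitted as counter electrojet
                        # (CEEJ, single skewed Gaussian), as in the two branches below.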
1699 1699 if x[numpy.argmax(spcm)] <= 0:
1700 1700 #print("EEJ", dataOut.heightList[hei], hei)
1701 1701 #if hei != 70:
1702 1702 #continue
1703 1703 #else:
1704 1704 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt,dataOut.heightList[hei])
1705 1705 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1706 1706 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1707 1707 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1708 1708
1709 1709 else:
1710 1710 #print("CEEJ")
1711 1711 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt)
1712 1712 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1713 1713 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1714 1714 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1715 1715 elif mode == 11: #Double Weighted Bounded no inputs
1716 1716 #if numpy.max(spc) <= 0:
1717 1717 from scipy.signal import medfilt
1718 1718 spcm = medfilt(spc,11)
1719 1719
1720 1720 if x[numpy.argmax(spcm)] <= 0:
1721 1721 #print("EEJ")
1722 1722 #print("EEJ",dataOut.heightList[hei])
1723 1723 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1724 1724 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1725 1725 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1726 1726 else:
1727 1727 #print("CEEJ",dataOut.heightList[hei])
1728 1728 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1729 1729
1730 1730 elif mode == 10: #150km
1731 1731 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Simple_fit_bound(spc,x,dataOut.nIncohInt)
1732 1732 snr = (dataOut.power[0,hei]*factor - dataOut.Oblique_params[0,3,hei])/dataOut.Oblique_params[0,3,hei]
1733 1733 dataOut.snr_log10[0,hei] = numpy.log10(snr)
1734 1734
1735 1735 else:
1736 1736 spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first')
1737 1737
1738 1738 spc_diff = spc - spc_fit
1739 1739 spc_diff[spc_diff < 0] = 0
1740 1740
1741 1741 spc_fit_diff, A2, B2, C2, D2 = self.Gauss_fit_2(spc_diff,x,'second')
1742 1742
1743 1743 D = (D1+D2)
1744 1744
1745 1745 if mode == 0: #Double Fit
1746 1746 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,0,hei],dataOut.Oblique_param_errors[0,1,hei],dataOut.Oblique_param_errors[0,2,hei],dataOut.Oblique_param_errors[0,3,hei],dataOut.Oblique_param_errors[0,4,hei],dataOut.Oblique_param_errors[0,5,hei],dataOut.Oblique_param_errors[0,6,hei] = self.Double_Gauss_fit_2(spc,x,A1,B1,C1,A2,B2,C2,D)
1747 1747 #spc_double_fit,dataOut.Oblique_params = self.Double_Gauss_fit(spc,x,A1,B1,C1,A2,B2,C2,D)
1748 1748
1749 1749 elif mode == 1: #Double Fit Windowed
1750 1750 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.windowing_double(spc,dataOut.getFreqRange(0),A1,B1,C1,A2,B2,C2,D)
1751 1751
1752 1752 elif mode == 2: #Double Fit Weight
1753 1753 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1754 1754
1755 1755 elif mode == 3: #Simple Fit
1756 1756 dataOut.Oblique_params[0,0,hei] = A1
1757 1757 dataOut.Oblique_params[0,1,hei] = B1
1758 1758 dataOut.Oblique_params[0,2,hei] = C1
1759 1759 dataOut.Oblique_params[0,3,hei] = A2
1760 1760 dataOut.Oblique_params[0,4,hei] = B2
1761 1761 dataOut.Oblique_params[0,5,hei] = C2
1762 1762 dataOut.Oblique_params[0,6,hei] = D
1763 1763
1764 1764 elif mode == 5: #Triple Fit Weight
1765 1765 if hei in l1:
1766 1766 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.duo_Marco(spc,x,A1,B1,C1,A2,B2,C2,D)
1767 1767 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1768 1768 #print(dataOut.Oblique_params[0,0,hei])
1769 1769 #print(dataOut.dplr_2_u[0,0,hei])
1770 1770 else:
1771 1771 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1772 1772 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1773 1773
1774 1774
1775 1775 except:
1776 1776 ###dataOut.Oblique_params[0,:,hei] = dataOut.Oblique_params[0,:,hei]*numpy.NAN
1777 1777 pass
1778 1778
1779 1779 #exit(1)
1780 1780 dataOut.paramInterval = dataOut.nProfiles*dataOut.nCohInt*dataOut.ippSeconds
1781 1781 dataOut.lat=-11.95
1782 1782 dataOut.lon=-76.87
1783 1783 '''
1784 1784 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params<-700, numpy.nan, dop_t1)
1785 1785 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params<+700, numpy.nan, dop_t1)
1786 1786         Here the amplitudes should be exempted from this thresholding
1787 1787 '''
1788 1788 if mode == 9: #Double Skew Gaussian
1789 1789 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1790 1790 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1791 1791 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1792 1792 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1793 1793 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1794 1794 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,6,:]
1795 1795 if Dop == 'Shift':
1796 1796 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1797 1797 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1798 1798 elif Dop == 'Max':
1799 1799 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1800 1800 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1801 1801
1802 1802             dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:] #Is this actually the error?
1803 1803 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1804 1804             dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,5,:] #Is this actually the error?
1805 1805 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,6,:]
1806 1806
1807 1807 elif mode == 11: #Double Gaussian
1808 1808 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:]
1809 1809 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1810 1810 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,4,:]
1811 1811 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,5,:]
1812 1812
1813 1813 dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:]
1814 1814 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1815 1815 dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,4,:]
1816 1816 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,5,:]
1817 1817
1818 1818 #print("Before: ", dataOut.Dop_EEJ_T2)
1819 1819 dataOut.Spec_W_T1 = self.clean_outliers(dataOut.Spec_W_T1)
1820 1820 dataOut.Spec_W_T2 = self.clean_outliers(dataOut.Spec_W_T2)
1821 1821 dataOut.Dop_EEJ_T1 = self.clean_outliers(dataOut.Dop_EEJ_T1)
1822 1822 dataOut.Dop_EEJ_T2 = self.clean_outliers(dataOut.Dop_EEJ_T2)
1823 1823 #print("After: ", dataOut.Dop_EEJ_T2)
1824 1824 dataOut.Err_Spec_W_T1 = self.clean_outliers(dataOut.Err_Spec_W_T1)
1825 1825 dataOut.Err_Spec_W_T2 = self.clean_outliers(dataOut.Err_Spec_W_T2)
1826 1826 dataOut.Err_Dop_EEJ_T1 = self.clean_outliers(dataOut.Err_Dop_EEJ_T1)
1827 1827 dataOut.Err_Dop_EEJ_T2 = self.clean_outliers(dataOut.Err_Dop_EEJ_T2)
1828 1828 #print("Before data_snr: ", dataOut.data_snr)
1829 1829 #dataOut.data_snr = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.data_snr)
1830 1830 dataOut.snl = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.snl)
1831 1831
1832 1832 #print("After data_snr: ", dataOut.data_snr)
1833 1833 dataOut.mode = mode
1834 1834         dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.Dop_EEJ_T1)) #If every value is NaN, processing does not continue
1835 1835         ###dataOut.flagNoData = False #Uncomment only for plotting; otherwise keep it commented (for saving)
1836 1836
1837 1837 return dataOut
1838 1838
1839 1839 class Gaussian_Windowed(Operation):
1840 1840 '''
1841 1841 Written by R. Flores
1842 1842 '''
1843 1843 def __init__(self):
1844 1844 Operation.__init__(self)
1845 1845
1846 1846 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1847 1847 from scipy.optimize import curve_fit,fmin
1848 1848
1849 1849 def gaussian(x, a, b, c, d):
1850 1850 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
1851 1851 return val
1852 1852
1853 1853 def R_gaussian(x, a, b, c):
1854 1854 N = int(numpy.shape(x)[0])
1855 1855 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1856 1856 return val
1857 1857
1858 1858 def T(x,N):
1859 1859 T = 1-abs(x)/N
1860 1860 return T
1861 1861
1862 1862 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
1863 1863
1864 1864 N = int(numpy.shape(x)[0])
1865 1865
1866 1866 x_max = x[-1]
1867 1867
1868 1868 x_pos = x[nFFTPoints:]
1869 1869 x_neg = x[:nFFTPoints]
1870 1870 #print([int(nFFTPoints/2))
1871 1871 #print("x: ", x)
1872 1872 #print("x_neg: ", x_neg)
1873 1873 #print("x_pos: ", x_pos)
1874 1874
1875 1875
1876 1876 R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0])
1877 1877 R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1])
1878 1878 #print(T(x_pos,x[-1]),x_pos,x[-1])
1879 1879 #print(R_T_neg_1.shape,R_T_pos_1.shape)
1880 1880 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1881 1881 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1882 1882 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1883 1883 max_val_1 = numpy.max(R_T_spc_1)
1884 1884 R_T_spc_1 = R_T_spc_1*a/max_val_1
1885 1885
1886 1886 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1887 1887 R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0])
1888 1888 R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1])
1889 1889 R_T_d_sum = R_T_d_pos + R_T_d_neg
1890 1890 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1891 1891 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1892 1892
1893 1893 R_T_final = R_T_spc_1 + R_T_spc_3
1894 1894
1895 1895 return R_T_final
1896 1896
1897 1897 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1898 1898
1899 1899 from scipy.stats import norm
1900 1900 mean,std=norm.fit(spc)
1901 1901
1902 1902 # estimate starting values from the data
1903 1903 a = A
1904 1904 b = B
1905 1905 c = C#numpy.std(spc)
1906 1906 d = D
1907 1907 #'''
1908 1908 #ippSeconds = 250*20*1.e-6/3
1909 1909
1910 1910 #x_t = ippSeconds * (numpy.arange(nFFTPoints) - nFFTPoints / 2.)
1911 1911
1912 1912 #x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1913 1913 #print("x_t: ", x_t)
1914 1914 #print("nFFTPoints: ", nFFTPoints)
1915 1915 x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints))
1916 1916 #print("x_vel: ", x_vel)
1917 1917 #x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1918 1918 #x_freq = numpy.fft.fftshift(x_freq)
1919 1919 #'''
1920 1920 # define a least squares function to optimize
1921 1921 def minfunc(params):
1922 1922 #print("y.shape: ", numpy.shape(y))
1923 1923 return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
1924 1924
1925 1925 # fit
1926 1926
1927 1927 popt_full = fmin(minfunc,[a,b,c,d], disp=False)
1928 1928 #print("nIter", popt_full[2])
1929 1929 popt = popt_full#[0]
1930 1930
1931 1931 fun = gaussian(x, popt[0], popt[1], popt[2], popt[3])
1932 1932
1933 1933 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1934 1934 return fun, popt[0], popt[1], popt[2], popt[3]
1935 1935
1936 1936 def run(self, dataOut):
1937 1937
1938 1938 from scipy.signal import medfilt
1939 1939 import matplotlib.pyplot as plt
1940 1940 dataOut.moments = numpy.ones((dataOut.nChannels,4,dataOut.nHeights))*numpy.NAN
1941 1941 dataOut.VelRange = dataOut.getVelRange(0)
1942 1942 for nChannel in range(dataOut.nChannels):
1943 1943 for hei in range(dataOut.heightList.shape[0]):
1944 1944 #print("ipp: ", dataOut.ippSeconds)
1945 1945 spc = numpy.copy(dataOut.data_spc[nChannel,:,hei])
1946 1946
1947 1947 #print(VelRange)
1948 1948 #print(dataOut.getFreqRange(64))
1949 1949 spcm = medfilt(spc,11)
1950 1950 spc_max = numpy.max(spcm)
1951 1951 dop1_x0 = dataOut.VelRange[numpy.argmax(spcm)]
1952 1952 D = numpy.min(spcm)
1953 1953
1954 1954 fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints)
1955 1955 dataOut.moments[nChannel,0,hei] = A
1956 1956 dataOut.moments[nChannel,1,hei] = B
1957 1957 dataOut.moments[nChannel,2,hei] = C
1958 1958 dataOut.moments[nChannel,3,hei] = D
1959 1959 '''
1960 1960 plt.figure()
1961 1961 plt.plot(VelRange,spc,marker='*',linestyle='')
1962 1962 plt.plot(VelRange,fun)
1963 1963 plt.title(dataOut.heightList[hei])
1964 1964 plt.show()
1965 1965 '''
1966 1966
1967 1967 return dataOut
1968 1968
1969 1969 class PrecipitationProc(Operation):
1970 1970
1971 1971 '''
1972 1972     Operator that estimates the reflectivity factor (Z) and the rainfall rate (R)
1973 1973
1974 1974 Input:
1975 1975 self.dataOut.data_pre : SelfSpectra
1976 1976
1977 1977 Output:
1978 1978
1979 1979         self.dataOut.data_output : rainfall rate; self.dataOut.data_param : reflectivity factor (dBZe), Doppler (-W), rainfall rate (RR)
1980 1980
1981 1981
1982 1982 Parameters affected:
1983 1983 '''
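    # A hedged configuration sketch (illustrative only; the controller method names
    # addOperation/addParameter follow typical schainpy experiment scripts and the
    # exact wiring may differ). Parameter names are those of run() below:
    #
    #   opObj = procUnitConfObj.addOperation(name='PrecipitationProc')
    #   opObj.addParameter(name='SNRdBlimit', value='-30', format='float')  # censoring threshold
    #   opObj.addParameter(name='Altitude', value='3350', format='float')   # site altitude [m]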
1984 1984
1985 1985 def __init__(self):
1986 1986 Operation.__init__(self)
1987 1987 self.i=0
1988 1988
1989 1989 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
1990 1990 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350, SNRdBlimit=-30,
1991 1991 channel=None):
1992 1992
1993 1993         # print ('Entering PrecipitationProc ... ')
1994 1994
1995 1995 if radar == "MIRA35C" :
1996 1996
1997 1997 self.spc = dataOut.data_pre[0].copy()
1998 1998 self.Num_Hei = self.spc.shape[2]
1999 1999 self.Num_Bin = self.spc.shape[1]
2000 2000 self.Num_Chn = self.spc.shape[0]
2001 2001 Ze = self.dBZeMODE2(dataOut)
2002 2002
2003 2003 else:
2004 2004
2005 2005 self.spc = dataOut.data_pre[0].copy()
2006 2006
2007 2007             #NOTE: the TX pulse range must be removed
2008 2008 self.spc[:,:,0:7]= numpy.NaN
2009 2009
2010 2010 self.Num_Hei = self.spc.shape[2]
2011 2011 self.Num_Bin = self.spc.shape[1]
2012 2012 self.Num_Chn = self.spc.shape[0]
2013 2013
2014 2014 VelRange = dataOut.spc_range[2]
2015 2015
2016 2016             ''' The radar constant is obtained '''
2017 2017
2018 2018 self.Pt = Pt
2019 2019 self.Gt = Gt
2020 2020 self.Gr = Gr
2021 2021 self.Lambda = Lambda
2022 2022 self.aL = aL
2023 2023 self.tauW = tauW
2024 2024 self.ThetaT = ThetaT
2025 2025 self.ThetaR = ThetaR
2026 2026             self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
2027 2027             self.lt = 10**(1.67/10) # Tx cable losses 1.67 dB
2028 2028             self.lr = 10**(5.73/10) # Rx cable losses 5.73 dB
2029 2029
2030 2030 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
2031 2031 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
2032 2032 RadarConstant = 10e-26 * Numerator / Denominator #
2033 2033             ExpConstant = 10**(40/10) #Experimental constant
2034 2034
2035 2035 SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
2036 2036 for i in range(self.Num_Chn):
2037 2037 SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
2038 2038 SignalPower[numpy.where(SignalPower < 0)] = 1e-20
2039 2039
2040 2040 if channel is None:
2041 2041 SPCmean = numpy.mean(SignalPower, 0)
2042 2042 else:
2043 2043 SPCmean = SignalPower[channel]
2044 2044 Pr = SPCmean[:,:]/dataOut.normFactor
2045 2045
2046 2046 # Declaring auxiliary variables
2047 2047 Range = dataOut.heightList*1000. #Range in m
2048 2048 # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
2049 2049 rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
2050 2050 zMtrx = rMtrx+Altitude
2051 2051 # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
2052 2052 VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))
2053 2053
2054 2054 # height dependence to air density Foote and Du Toit (1969)
2055 2055 delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
2056 2056 VMtrx = VelMtrx / delv_z #Normalized velocity
2057 2057 VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
2058 2058 # Diameter is related to the fall speed of falling drops
2059 2059 D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
2060 2060 # Only valid for D>= 0.16 mm
2061 2061 D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN
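            # Quick numeric check of the relation above (illustrative only): for a
            # density-normalized fall speed of 5 m/s,
            #   D = -1.667 * ln(0.9369 - 0.097087*5) ~ 1.33 mm,
            # which lies within the validity range D >= 0.16 mm.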
2062 2062
2063 2063 #Calculate Radar Reflectivity ETAn
2064 2064 ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
2065 2065 ETAd = ETAn * 6.18 * exp( -0.6 * D_Vz ) * delv_z
2066 2066 # Radar Cross Section
2067 2067 sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
2068 2068 # Drop Size Distribution
2069 2069 DSD = ETAn / sigmaD
2070 2070             # Equivalent reflectivity
2071 2071 Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
2072 2072 Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
2073 2073 # RainFall Rate
2074 2074 RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr
2075 2075
2076 2076 # Censoring the data
2077 2077             # Removing data below the SNR threshold; the SNR should be considered per channel
2078 2078 SNRth = 10**(SNRdBlimit/10) #-30dB
2079 2079 novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition. Maybe OR condition better
2080 2080 W = numpy.nanmean(dataOut.data_dop,0)
2081 2081 W[novalid] = numpy.NaN
2082 2082 Ze_org[novalid] = numpy.NaN
2083 2083 RR[novalid] = numpy.NaN
2084 2084
2085 2085 dataOut.data_output = RR[8]
2086 2086 dataOut.data_param = numpy.ones([3,self.Num_Hei])
2087 2087 dataOut.channelList = [0,1,2]
2088 2088
2089 2089 dataOut.data_param[0]=10*numpy.log10(Ze_org)
2090 2090 dataOut.data_param[1]=-W
2091 2091 dataOut.data_param[2]=RR
2092 2092
2093 2093         # print ('Leaving PrecipitationProc ... ')
2094 2094 return dataOut
2095 2095
2096 2096 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
2097 2097
2098 2098 NPW = dataOut.NPW
2099 2099 COFA = dataOut.COFA
2100 2100
2101 2101 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
2102 2102 RadarConst = dataOut.RadarConst
2103 2103 #frequency = 34.85*10**9
2104 2104
2105 2105 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
2106 2106 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
2107 2107
2108 2108 ETA = numpy.sum(SNR,1)
2109 2109
2110 2110 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
2111 2111
2112 2112 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
2113 2113
2114 2114 for r in range(self.Num_Hei):
2115 2115
2116 2116 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
2117 2117 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
2118 2118
2119 2119 return Ze
2120 2120
2121 2121 # def GetRadarConstant(self):
2122 2122 #
2123 2123 # """
2124 2124 # Constants:
2125 2125 #
2126 2126 # Pt: Transmission Power dB 5kW 5000
2127 2127 # Gt: Transmission Gain dB 24.7 dB 295.1209
2128 2128 # Gr: Reception Gain dB 18.5 dB 70.7945
2129 2129     # Lambda: Wavelength                            m       0.6741 m            0.6741
2130 2130 # aL: Attenuation loses dB 4dB 2.5118
2131 2131 # tauW: Width of transmission pulse s 4us 4e-6
2132 2132     # ThetaT: Transmission antenna beam angle       rad     0.1656317 rad       0.1656317
2133 2133 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
2134 2134 #
2135 2135 # """
2136 2136 #
2137 2137 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
2138 2138     # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * TauW * numpy.pi * ThetaT * ThetaR)
2139 2139 # RadarConstant = Numerator / Denominator
2140 2140 #
2141 2141 # return RadarConstant
2142 2142
2143 2143
2144 2144 class FullSpectralAnalysis(Operation):
2145 2145
2146 2146 """
2147 2147 Function that implements Full Spectral Analysis technique.
2148 2148
2149 2149 Input:
2150 2150 self.dataOut.data_pre : SelfSpectra and CrossSpectra data
2151 2151 self.dataOut.groupList : Pairlist of channels
2152 2152 self.dataOut.ChanDist : Physical distance between receivers
2153 2153
2154 2154
2155 2155 Output:
2156 2156
2157 2157 self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind
2158 2158
2159 2159
2160 2160 Parameters affected: Winds, height range, SNR
2161 2161
2162 2162 """
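    # A hedged configuration sketch (illustrative only; the controller method names
    # follow typical schainpy experiment scripts and may differ). Xi/Eta are the
    # receiver baseline components used when dataOut.ChanDist is not provided; the
    # numeric values here are hypothetical:
    #
    #   opObj = procUnitConfObj.addOperation(name='FullSpectralAnalysis')
    #   opObj.addParameter(name='SNRdBlimit', value='-16', format='float')
    #   opObj.addParameter(name='Xi01', value='1.5', format='float')
    #   opObj.addParameter(name='Eta01', value='-2.5', format='float')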
2163 2163 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,
2164 2164 minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
2165 2165
2166 2166 spc = dataOut.data_pre[0].copy()
2167 2167 cspc = dataOut.data_pre[1]
2168 2168 nHeights = spc.shape[2]
2169 2169
2170 2170 # first_height = 0.75 #km (ref: data header 20170822)
2171 2171 # resolution_height = 0.075 #km
2172 2172 '''
2173 2173 finding height range. check this when radar parameters are changed!
2174 2174 '''
2175 2175 if maxheight is not None:
2176 2176 # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
2177 2177 range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
2178 2178 else:
2179 2179 range_max = nHeights
2180 2180 if minheight is not None:
2181 2181 # range_min = int((minheight - first_height) / resolution_height) # theoretical
2182 2182 range_min = int(13.26 * minheight - 5) # empirical, works better
2183 2183 if range_min < 0:
2184 2184 range_min = 0
2185 2185 else:
2186 2186 range_min = 0
2187 2187
2188 2188 pairsList = dataOut.groupList
2189 2189 if dataOut.ChanDist is not None :
2190 2190 ChanDist = dataOut.ChanDist
2191 2191 else:
2192 2192 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
2193 2193
2194 2194 # 4 variables: zonal, meridional, vertical, and average SNR
2195 2195 data_param = numpy.zeros([4,nHeights]) * numpy.NaN
2196 2196 velocityX = numpy.zeros([nHeights]) * numpy.NaN
2197 2197 velocityY = numpy.zeros([nHeights]) * numpy.NaN
2198 2198 velocityZ = numpy.zeros([nHeights]) * numpy.NaN
2199 2199
2200 2200 dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))
2201 2201
2202 2202 '''***********************************************WIND ESTIMATION**************************************'''
2203 2203 for Height in range(nHeights):
2204 2204
2205 2205 if Height >= range_min and Height < range_max:
2206 2206 # error_code will be useful in future analysis
2207 2207 [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
2208 2208 ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)
2209 2209
2210 2210 if abs(Vzon) < 100. and abs(Vmer) < 100.:
2211 2211 velocityX[Height] = Vzon
2212 2212 velocityY[Height] = -Vmer
2213 2213 velocityZ[Height] = Vver
2214 2214
2215 2215 # Censoring data with SNR threshold
2216 2216 dbSNR [dbSNR < SNRdBlimit] = numpy.NaN
2217 2217
2218 2218 data_param[0] = velocityX
2219 2219 data_param[1] = velocityY
2220 2220 data_param[2] = velocityZ
2221 2221 data_param[3] = dbSNR
2222 2222 dataOut.data_param = data_param
2223 2223 return dataOut
2224 2224
2225 2225 def moving_average(self,x, N=2):
2226 2226         """ convolution for smoothing data; note that the last N-1 values are a convolution with zeroes """
2227 2227 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
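        # Illustrative check of the zero-padded tail mentioned above:
        #   self.moving_average(numpy.array([1., 2., 3., 4.]), N=2) -> [1.5, 2.5, 3.5, 2.0]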
2228 2228
2229 2229 def gaus(self,xSamples,Amp,Mu,Sigma):
2230 2230 return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)
2231 2231
2232 2232 def Moments(self, ySamples, xSamples):
2233 2233 Power = numpy.nanmean(ySamples) # Power, 0th Moment
2234 2234 yNorm = ySamples / numpy.nansum(ySamples)
2235 2235 RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
2236 2236 Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
2237 2237         StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Standard deviation, spectral width
2238 2238 return numpy.array([Power,RadVel,StdDev])
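        # Illustrative use of the moment estimates above (not part of the chain): for a
        # synthetic Gaussian spectrum the 1st moment recovers the Doppler shift and the
        # 2nd moment its width, e.g.
        #   vel = numpy.linspace(-10, 10, 128)
        #   spec = numpy.exp(-0.5*((vel - 2.0)/1.5)**2)
        #   self.Moments(spec, vel)   # ~ [mean power, 2.0, 1.5]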
2239 2239
2240 2240 def StopWindEstimation(self, error_code):
2241 2241 Vzon = numpy.NaN
2242 2242 Vmer = numpy.NaN
2243 2243 Vver = numpy.NaN
2244 2244 return Vzon, Vmer, Vver, error_code
2245 2245
2246 2246 def AntiAliasing(self, interval, maxstep):
2247 2247 """
2248 2248 function to prevent errors from aliased values when computing phaseslope
2249 2249 """
2250 2250 antialiased = numpy.zeros(len(interval))
2251 2251 copyinterval = interval.copy()
2252 2252
2253 2253 antialiased[0] = copyinterval[0]
2254 2254
2255 2255 for i in range(1,len(antialiased)):
2256 2256 step = interval[i] - interval[i-1]
2257 2257 if step > maxstep:
2258 2258 copyinterval -= 2*numpy.pi
2259 2259 antialiased[i] = copyinterval[i]
2260 2260 elif step < maxstep*(-1):
2261 2261 copyinterval += 2*numpy.pi
2262 2262 antialiased[i] = copyinterval[i]
2263 2263 else:
2264 2264 antialiased[i] = copyinterval[i].copy()
2265 2265
2266 2266 return antialiased
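        # Illustrative example: a wrapped phase jump of ~2*pi is lifted so that the
        # sequence stays continuous (maxstep = 4.5 as used in WindEstimation):
        #   self.AntiAliasing(numpy.array([2.9, 3.1, -3.0, -2.8]), 4.5)
        #   -> approximately [2.9, 3.1, 3.28, 3.48]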
2267 2267
2268 2268 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
2269 2269 """
2270 2270 Function that Calculates Zonal, Meridional and Vertical wind velocities.
2271 2271 Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.
2272 2272
2273 2273 Input:
2274 2274 spc, cspc : self spectra and cross spectra data. In Briggs notation something like S_i*(S_i)_conj, (S_j)_conj respectively.
2275 2275 pairsList : Pairlist of channels
2276 2276 ChanDist : array of xi_ij and eta_ij
2277 2277 Height : height at which data is processed
2278 2278 noise : noise in [channels] format for specific height
2279 2279             AbbsisaRange : range of the frequencies or velocities
2280 2280 dbSNR, SNRlimit : signal to noise ratio in db, lower limit
2281 2281
2282 2282 Output:
2283 2283 Vzon, Vmer, Vver : wind velocities
2284 2284 error_code : int that states where code is terminated
2285 2285
2286 2286 0 : no error detected
2287 2287 1 : Gaussian of mean spc exceeds widthlimit
2288 2288 2 : no Gaussian of mean spc found
2289 2289             3 : SNR too low or velocity too high -> e.g. precipitation
2290 2290 4 : at least one Gaussian of cspc exceeds widthlimit
2291 2291 5 : zero out of three cspc Gaussian fits converged
2292 2292 6 : phase slope fit could not be found
2293 2293 7 : arrays used to fit phase have different length
2294 2294 8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
2295 2295
2296 2296 """
2297 2297
2298 2298 error_code = 0
2299 2299
2300 2300 nChan = spc.shape[0]
2301 2301 nProf = spc.shape[1]
2302 2302 nPair = cspc.shape[0]
2303 2303
2304 2304 SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
2305 2305 CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
2306 2306 phase = numpy.zeros([nPair, nProf]) # phase between channels
2307 2307 PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
2308 2308 PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
2309 2309 xFrec = AbbsisaRange[0][:-1] # frequency range
2310 2310 xVel = AbbsisaRange[2][:-1] # velocity range
2311 2311 xSamples = xFrec # the frequency range is taken
2312 2312 delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x
2313 2313
2314 2314         # only consider velocities within NegativeLimit and PositiveLimit
2315 2315 if (NegativeLimit is None):
2316 2316 NegativeLimit = numpy.min(xVel)
2317 2317 if (PositiveLimit is None):
2318 2318 PositiveLimit = numpy.max(xVel)
2319 2319 xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
2320 2320 xSamples_zoom = xSamples[xvalid]
2321 2321
2322 2322 '''Getting Eij and Nij'''
2323 2323 Xi01, Xi02, Xi12 = ChanDist[:,0]
2324 2324 Eta01, Eta02, Eta12 = ChanDist[:,1]
2325 2325
2326 2326         # spwd limit - updated by D. Scipión 30.03.2021
2327 2327 widthlimit = 10
2328 2328 '''************************* SPC is normalized ********************************'''
2329 2329 spc_norm = spc.copy()
2330 2330 # For each channel
2331 2331 for i in range(nChan):
2332 2332 spc_sub = spc_norm[i,:] - noise[i] # only the signal power
2333 2333 SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)
2334 2334
2335 2335 '''********************** FITTING MEAN SPC GAUSSIAN **********************'''
2336 2336
2337 2337 """ the gaussian of the mean: first subtract noise, then normalize. this is legal because
2338 2338 you only fit the curve and don't need the absolute value of height for calculation,
2339 2339 only for estimation of width. for normalization of cross spectra, you need initial,
2340 2340 unnormalized self-spectra With noise.
2341 2341
2342 2342 Technically, you don't even need to normalize the self-spectra, as you only need the
2343 2343 width of the peak. However, it was left this way. Note that the normalization has a flaw:
2344 2344 due to subtraction of the noise, some values are below zero. Raw "spc" values should be
2345 2345         >= 0, as it is the modulus squared of the signals (complex times its conjugate)
2346 2346 """
2347 2347 # initial conditions
2348 2348 popt = [1e-10,0,1e-10]
2349 2349 # Spectra average
2350 2350 SPCMean = numpy.average(SPC_Samples,0)
2351 2351 # Moments in frequency
2352 2352 SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)
2353 2353
2354 2354 # Gauss Fit SPC in frequency domain
2355 2355 if dbSNR > SNRlimit: # only if SNR > SNRth
2356 2356 try:
2357 2357 popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
2358 2358 if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
2359 2359 return self.StopWindEstimation(error_code = 1)
2360 2360 FitGauss = self.gaus(xSamples_zoom,*popt)
2361 2361 except :#RuntimeError:
2362 2362 return self.StopWindEstimation(error_code = 2)
2363 2363 else:
2364 2364 return self.StopWindEstimation(error_code = 3)
2365 2365
2366 2366 '''***************************** CSPC Normalization *************************
2367 2367 The Spc spectra are used to normalize the crossspectra. Peaks from precipitation
2368 2368 influence the norm which is not desired. First, a range is identified where the
2369 2369 wind peak is estimated -> sum_wind is sum of those frequencies. Next, the area
2370 2370 around it gets cut off and values replaced by mean determined by the boundary
2371 2371         data -> sum_noise (spc is not normalized here, that's why the noise is important)
2372 2372
2373 2373 The sums are then added and multiplied by range/datapoints, because you need
2374 2374 an integral and not a sum for normalization.
2375 2375
2376 2376 A norm is found according to Briggs 92.
2377 2377 '''
2378 2378 # for each pair
2379 2379 for i in range(nPair):
2380 2380 cspc_norm = cspc[i,:].copy()
2381 2381 chan_index0 = pairsList[i][0]
2382 2382 chan_index1 = pairsList[i][1]
2383 2383 CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
2384 2384 phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
2385 2385
2386 2386 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
2387 2387 self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
2388 2388 self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])
2389 2389
2390 2390 popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
2391 2391 FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))
2392 2392
2393 2393 '''*******************************FIT GAUSS CSPC************************************'''
2394 2394 try:
2395 2395 popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
2396 2396 if popt01[2] > widthlimit: # CONDITION
2397 2397 return self.StopWindEstimation(error_code = 4)
2398 2398 popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
2399 2399 if popt02[2] > widthlimit: # CONDITION
2400 2400 return self.StopWindEstimation(error_code = 4)
2401 2401 popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
2402 2402 if popt12[2] > widthlimit: # CONDITION
2403 2403 return self.StopWindEstimation(error_code = 4)
2404 2404
2405 2405 FitGauss01 = self.gaus(xSamples_zoom, *popt01)
2406 2406 FitGauss02 = self.gaus(xSamples_zoom, *popt02)
2407 2407 FitGauss12 = self.gaus(xSamples_zoom, *popt12)
2408 2408 except:
2409 2409 return self.StopWindEstimation(error_code = 5)
2410 2410
2411 2411
2412 2412 '''************* Getting Fij ***************'''
2413 2413 # x-axis point of the gaussian where the center is located from GaussFit of spectra
2414 2414 GaussCenter = popt[1]
2415 2415 ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
2416 2416 PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]
2417 2417
2418 2418 # Point where e^-1 is located in the gaussian
2419 2419 PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
2420 2420 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to"Peminus1" in "FitGauss"
2421 2421 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
2422 2422 Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])
2423 2423
2424 2424 '''********** Taking frequency ranges from mean SPCs **********'''
2425 2425 GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
2426 2426 Range = numpy.empty(2)
2427 2427 Range[0] = GaussCenter - GauWidth
2428 2428 Range[1] = GaussCenter + GauWidth
2429 2429 # Point in x-axis where the bandwidth is located (min:max)
2430 2430 ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
2431 2431 ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
2432 2432 PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
2433 2433 PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
2434 2434 Range = numpy.array([ PointRangeMin, PointRangeMax ])
2435 2435 FrecRange = xSamples_zoom[ Range[0] : Range[1] ]
2436 2436
2437 2437 '''************************** Getting Phase Slope ***************************'''
2438 2438 for i in range(nPair):
2439 2439 if len(FrecRange) > 5:
2440 2440 PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
2441 2441 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
2442 2442 if len(FrecRange) == len(PhaseRange):
2443 2443 try:
2444 2444 slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
2445 2445 PhaseSlope[i] = slope
2446 2446 PhaseInter[i] = intercept
2447 2447 except:
2448 2448 return self.StopWindEstimation(error_code = 6)
2449 2449 else:
2450 2450 return self.StopWindEstimation(error_code = 7)
2451 2451 else:
2452 2452 return self.StopWindEstimation(error_code = 8)
2453 2453
2454 2454 '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
2455 2455
2456 2456 '''Getting constant C'''
2457 2457 cC=(Fij*numpy.pi)**2
2458 2458
2459 2459 '''****** Getting constants F and G ******'''
2460 2460 MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
2461 2461 # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
2462 2462 # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
2463 2463 MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
2464 2464 MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
2465 2465 # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
2466 2466 MijResults = numpy.array([MijResult1, MijResult2])
2467 2467 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
2468 2468
2469 2469 '''****** Getting constants A, B and H ******'''
2470 2470 W01 = numpy.nanmax( FitGauss01 )
2471 2471 W02 = numpy.nanmax( FitGauss02 )
2472 2472 W12 = numpy.nanmax( FitGauss12 )
2473 2473
2474 2474 WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
2475 2475 WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
2476 2476 WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
2477 2477 WijResults = numpy.array([WijResult01, WijResult02, WijResult12])
2478 2478
2479 2479 WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
2480 2480 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
2481 2481
2482 2482 VxVy = numpy.array([[cA,cH],[cH,cB]])
2483 2483 VxVyResults = numpy.array([-cF,-cG])
2484 2484 (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
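        # Reading of the solve above (Briggs & Vincent 1992 convention used in this
        # method): [[A, H], [H, B]] . [Vmer, Vzon]^T = [-F, -G]^T. The vertical velocity
        # below converts the mean-spectrum Doppler shift to m/s via
        # v = -f_D * c / (2 * f_radar).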
2485 2485 Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
2486 2486 error_code = 0
2487 2487
2488 2488 return Vzon, Vmer, Vver, error_code
2489 2489
2490 2490 class SpectralMoments(Operation):
2491 2491
2492 2492 '''
2493 2493 Function SpectralMoments()
2494 2494
2495 2495 Calculates moments (power, mean, standard deviation) and SNR of the signal
2496 2496
2497 2497 Type of dataIn: Spectra
2498 2498
2499 2499 Configuration Parameters:
2500 2500
2501 2501 proc_type : (0) First spectral moments routine (Default),
2502 2502 (1) Spectral moment routine similar to JULIA.
2503 2503 mode_fit : (0) No gaussian fit
2504 2504 (1) One gaussian fit for 150Km processing.
2505 2505
2506 2506 exp : '150EEJ' To select 128 points window
2507 2507 'ESF_EW' To select full window.
2508 2508
2509 2509 Input:
2510 2510 channelList : simple channel list to select e.g. [2,3,7]
2511 2511 self.dataOut.data_pre : Spectral data
2512 2512 self.dataOut.abscissaList : List of frequencies
2513 2513 self.dataOut.noise : Noise level per channel
2514 2514
2515 2515 Affected:
2516 2516 self.dataOut.moments : Parameters per channel
2517 2517 self.dataOut.data_snr : SNR per channel
2518 2518
2519 2519 '''
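    # A hedged configuration sketch (illustrative only; the controller method names
    # follow typical schainpy experiment scripts and may differ). Parameter names are
    # those documented above:
    #
    #   opObj = procUnitConfObj.addOperation(name='SpectralMoments')
    #   opObj.addParameter(name='proc_type', value='1', format='int')  # JULIA-like routine
    #   opObj.addParameter(name='mode_fit', value='1', format='int')   # one-Gaussian fit (150 km)
    #   opObj.addParameter(name='exp', value='150EEJ', format='str')   # 128-point window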
2520 2520
2521 2521 def __calculateMoments(self, oldspec, oldfreq, n0,
2522 2522 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, \
2523 2523 snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None, \
2524 2524 vers= None, Hei= None, debug=False, dbg_hei=None, ymax=0.1, curr_ch=0, sel_ch=[0,1]):
2525 2525
2526 2526 def __GAUSSWINFIT1(A, flagPDER=0):
2527 2527 nonlocal truex, xvalid
2528 2528 nparams = 4
2529 2529 M=truex.size
2530 2530 mm=numpy.arange(M,dtype='f4')
2531 2531 delta = numpy.zeros(M,dtype='f4')
2532 2532 delta[0] = 1.0
2533 2533 Ts = numpy.array([1.0/(2*truex[0])],dtype='f4')[0]
2534 2534 jj = -1j
2535 2535 #if self.winauto is None: self.winauto = (1.0 - mm/M)
2536 2536 winauto = (1.0 - mm/M)
2537 2537 winauto = winauto/winauto.max() # Normalized to 1
2538 2538 #ON_ERROR,2 # IDL sentence: Return to caller if an error occurs
2539 2539 A[0] = numpy.abs(A[0])
2540 2540 A[2] = numpy.abs(A[2])
2541 2541 A[3] = numpy.abs(A[3])
2542 2542 pi=numpy.array([numpy.pi],dtype='f4')[0]
2543 2543 if A[2] != 0:
2544 2544 Z = numpy.exp(-2*numpy.power((pi*A[2]*mm*Ts),2,dtype='f4')+jj*2*pi*A[1]*mm*Ts, dtype='c8') # Get Z
2545 2545 else:
2546 2546 Z = mm*0.0
2547 2547 A[0] = 0.0
2548 2548 junkF = numpy.roll(2*fft(winauto*(A[0]*Z+A[3]*delta)).real - \
2549 2549 winauto[0]*(A[0]+A[3]), M//2) # *M scale for fft not needed in python
2550 2550 F = junkF[xvalid]
2551 2551 if flagPDER == 0: #NEED PARTIAL?
2552 2552 return F
2553 2553 PDER = numpy.zeros((M,nparams)) #YES, MAKE ARRAY.
2554 2554             PDER[:,0] = numpy.roll(2*(fft(winauto*Z)*M) - winauto[0], M//2)
2555 2555             PDER[:,1] = numpy.roll(2*(fft(winauto*jj*2*numpy.pi*mm*Ts*A[0]*Z)*M), M//2)
2556 2556             PDER[:,2] = numpy.roll(2*(fft(winauto*(-4*numpy.power(numpy.pi*mm*Ts,2)*A[2]*A[0]*Z))*M), M//2)
2557 2557             PDER[:,3] = numpy.roll(2*(fft(winauto*delta)*M) - winauto[0], M//2)
2558 2558 PDER = PDER[xvalid,:]
2559 2559 return F, PDER
2560 2560
2561 2561 def __curvefit_koki(y, a, Weights, FlagNoDerivative=1,
2562 2562 itmax=20, tol=None):
2563 2563 #ON_ERROR,2 IDL SENTENCE: RETURN TO THE CALLER IF ERROR
2564 2564 if tol == None:
2565 2565 tol = numpy.array([1.e-3],dtype='f4')[0]
2566 2566 typ=a.dtype
2567 2567 double = 1 if typ == numpy.float64 else 0
2568 2568 if typ != numpy.float32:
2569 2569 a=a.astype(numpy.float32) #Make params floating
2570 2570             # if we will be estimating partial derivatives then compute machine precision
2571 2571 if FlagNoDerivative == 1:
2572 2572 res=numpy.MachAr(float_conv=numpy.float32)
2573 2573 eps=numpy.sqrt(res.eps)
2574 2574
2575 2575 nterms = a.size # Number of parameters
2576 2576 nfree=numpy.array([numpy.size(y) - nterms],dtype='f4')[0] # Degrees of freedom
2577 2577 if nfree <= 0: print('Curvefit - not enough data points.')
2578 2578 flambda= numpy.array([0.001],dtype='f4')[0] # Initial lambda
2579 2579             #diag=numpy.arange(nterms)*(nterms+1) # Subscripts of diagonal elements
2580 2580 # Use diag method in python
2581 2581 converge=1
2582 2582
2583 2583 #Define the partial derivative array
2584 2584 PDER = numpy.zeros((nterms,numpy.size(y)),dtype='f8') if double == 1 else numpy.zeros((nterms,numpy.size(y)),dtype='f4')
2585 2585
2586 2586 for Niter in range(itmax): #Iteration loop
2587 2587
2588 2588 if FlagNoDerivative == 1:
2589 2589 #Evaluate function and estimate partial derivatives
2590 2590 yfit = __GAUSSWINFIT1(a)
2591 2591 for term in range(nterms):
2592 2592 p=a.copy() # Copy current parameters
2593 2593 #Increment size for forward difference derivative
2594 2594 inc = eps * abs(p[term])
2595 2595 if inc == 0: inc = eps
2596 2596 p[term] = p[term] + inc
2597 2597 yfit1 = __GAUSSWINFIT1(p)
2598 2598 PDER[term,:] = (yfit1-yfit)/inc
2599 2599 else:
2600 2600 #The user's procedure will return partial derivatives
2601 2601 yfit,PDER=__GAUSSWINFIT1(a, flagPDER=1)
2602 2602
2603 2603 beta = numpy.dot(PDER,(y-yfit)*Weights)
2604 2604 alpha = numpy.dot(PDER * numpy.tile(Weights,(nterms,1)), numpy.transpose(PDER))
2605 2605 # save current values of return parameters
2606 2606 sigma1 = numpy.sqrt( 1.0 / numpy.diag(alpha) ) # Current sigma.
2607 2607 sigma = sigma1
2608 2608
2609 2609 chisq1 = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # Current chi squared.
2610 2610 chisq = chisq1
2611 2611 yfit1 = yfit
2612 2612 elev7=numpy.array([1.0e7],dtype='f4')[0]
2613 2613 compara =numpy.sum(abs(y))/elev7/nfree
2614 2614 done_early = chisq1 < compara
2615 2615
2616 2616 if done_early:
2617 2617 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2618 2618 if done_early: Niter -= 1
2619 2619 #save_tp(chisq,Niter,yfit)
2620 2620 return yfit, a, converge, sigma, chisq # return result
2621 2621 #c = numpy.dot(c, c) # this operator implemented at the next lines
2622 2622 c_tmp = numpy.sqrt(numpy.diag(alpha))
2623 2623 siz=len(c_tmp)
2624 2624 c=numpy.dot(c_tmp.reshape(siz,1),c_tmp.reshape(1,siz))
2625 2625 lambdaCount = 0
2626 2626 while True:
2627 2627 lambdaCount += 1
2628 2628 # Normalize alpha to have unit diagonal.
2629 2629 array = alpha / c
2630 2630 # Augment the diagonal.
2631 2631 one=numpy.array([1.],dtype='f4')[0]
2632 2632 numpy.fill_diagonal(array,numpy.diag(array)*(one+flambda))
2633 2633 # Invert modified curvature matrix to find new parameters.
2634 2634
2635 2635 try:
2636 2636 array = (1.0/array) if array.size == 1 else numpy.linalg.inv(array)
2637 2637 except Exception as e:
2638 2638 print(e)
2639 2639 array[:]=numpy.NaN
2640 2640
2641 2641 b = a + numpy.dot(numpy.transpose(beta),array/c) # New params
2642 2642 yfit = __GAUSSWINFIT1(b) # Evaluate function
2643 2643 chisq = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # New chisq
2644 2644 sigma = numpy.sqrt(numpy.diag(array)/numpy.diag(alpha)) # New sigma
2645 2645 if (numpy.isfinite(chisq) == 0) or \
2646 2646 (lambdaCount > 30 and chisq >= chisq1):
2647 2647 # Reject changes made this iteration, use old values.
2648 2648 yfit = yfit1
2649 2649 sigma = sigma1
2650 2650 chisq = chisq1
2651 2651 converge = 0
2652 2652 #print('Failed to converge.')
2653 2653 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2654 2654 if done_early: Niter -= 1
2655 2655 return yfit, a, converge, sigma, chisq, chi2 # return result
2656 2656 ten=numpy.array([10.0],dtype='f4')[0]
2657 2657 flambda *= ten # Assume fit got worse
2658 2658 if chisq <= chisq1:
2659 2659 break
2660 2660 hundred=numpy.array([100.0],dtype='f4')[0]
2661 2661 flambda /= hundred
2662 2662
2663 2663 a=b # Save new parameter estimate.
2664 2664 if ((chisq1-chisq)/chisq1) <= tol: # Finished?
2665 2665 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2666 2666 if done_early: Niter -= 1
2667 2667 return yfit, a, converge, sigma, chisq, chi2 # return result
2668 2668 converge = 0
2669 2669 chi2 = chisq
2670 2670 #print('Failed to converge.')
2671 2671 return yfit, a, converge, sigma, chisq, chi2
2672 2672
2673 2673
2674 2674 def spectral_cut(Hei, ind, dbg_hei, freq, fd, snr, n1, w, ymax, spec, spec2, n0, max_spec, ss1, m, bb0, curr_ch, sel_ch):
2675 2675 if Hei[ind] > dbg_hei[0] and Hei[ind] < dbg_hei[1] and (curr_ch in sel_ch):
2676 2676 nsa=len(freq)
2677 2677 aux='H=%iKm, dop: %4.1f, snr: %4.1f, noise: %4.1f, sw: %4.1f'%(Hei[ind],fd, 10*numpy.log10(snr),10*numpy.log10(n1), w)
2678 2678 plt.subplots()
2679 2679 plt.ylim(0,ymax)
2680 2680 plt.plot(freq,spec,'b-',freq,spec2,'b--', freq,numpy.repeat(n1, nsa),'k--', freq,numpy.repeat(n0, nsa),'k-', freq,numpy.repeat(max_spec, nsa),'y.-', numpy.repeat(fd, nsa),numpy.linspace(0,ymax,nsa),'r--', numpy.repeat(freq[ss1], nsa),numpy.linspace(0,ymax,nsa),'g-.', numpy.repeat(freq[m + bb0], nsa),numpy.linspace(0,ymax,nsa),'g-.')
2681 2681 plt.title(aux)
2682 2682 plt.show()
2683 2683
2684 2684
2685 2685 if (nicoh is None): nicoh = 1
2686 2686 if (smooth is None): smooth = 0
2687 2687 if (type1 is None): type1 = 0
2688 2688 if (vers is None): vers = 0
2689 2689 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
2690 2690 if (snrth is None): snrth = -20.0
2691 2691 if (dc is None): dc = 0
2692 2692 if (aliasing is None): aliasing = 0
2693 2693 if (oldfd is None): oldfd = 0
2694 2694 if (wwauto is None): wwauto = 0
2695 2695
2696 2696 if (n0 < 1.e-20): n0 = 1.e-20
2697 2697
2698 2698 xvalid = numpy.where(fwindow == 1)[0]
2699 2699 freq = oldfreq
2700 2700 truex = oldfreq
2701 2701 vec_power = numpy.zeros(oldspec.shape[1])
2702 2702 vec_fd = numpy.zeros(oldspec.shape[1])
2703 2703 vec_w = numpy.zeros(oldspec.shape[1])
2704 2704 vec_snr = numpy.zeros(oldspec.shape[1])
2705 2705 vec_n1 = numpy.empty(oldspec.shape[1])
2706 2706 vec_fp = numpy.empty(oldspec.shape[1])
2707 2707 vec_sigma_fd = numpy.empty(oldspec.shape[1])
2708 2708
2709 2709 for ind in range(oldspec.shape[1]):
2710 2710 spec = oldspec[:,ind]
2711 2711 if (smooth == 0):
2712 2712 spec2 = spec
2713 2713 else:
2714 2714 spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
2715 2715
2716 2716 aux = spec2*fwindow
2717 2717 max_spec = aux.max()
2718 2718 m = aux.tolist().index(max_spec)
2719 2719
2720 2720 if m > 2 and m < oldfreq.size - 3:
2721 2721 newindex = m + numpy.array([-2,-1,0,1,2])
2722 2722 newfreq = numpy.arange(20)/20.0*(numpy.max(freq[newindex])-numpy.min(freq[newindex]))+numpy.min(freq[newindex])
2723 2723 tck = interpolate.splrep(freq[newindex], spec2[newindex])
2724 2724 peakspec = interpolate.splev(newfreq, tck)
2725 2725 max_spec = numpy.max(peakspec)
2726 2726 mnew = numpy.argmax(peakspec)
2727 2727 fp = newfreq[mnew]
2728 2728 else:
2729 2729 fp = freq[m]
2730 2730
2731 2731 if vers ==0:
2732 2732
2733 2733 # Moments Estimation
2734 2734 bb = spec2[numpy.arange(m,spec2.size)]
2735 2735 bb = (bb<n0).nonzero()
2736 2736 bb = bb[0]
2737 2737
2738 2738 ss = spec2[numpy.arange(0,m + 1)]
2739 2739 ss = (ss<n0).nonzero()
2740 2740 ss = ss[0]
2741 2741
2742 2742 if (bb.size == 0):
2743 2743 bb0 = spec.size - 1 - m
2744 2744 else:
2745 2745 bb0 = bb[0] - 1
2746 2746 if (bb0 < 0):
2747 2747 bb0 = 0
2748 2748
2749 2749 if (ss.size == 0):
2750 2750 ss1 = 1
2751 2751 else:
2752 2752 ss1 = max(ss) + 1
2753 2753
2754 2754 if (ss1 > m):
2755 2755 ss1 = m
2756 2756
2757 2757 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
2758 2758
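# Spectral moments over the valid window (weighted by fwindow):
#   power (zeroth moment) = sum(S - n0)
#   fd    (first moment)  = sum((S - n0) * f) / power                    -> Doppler shift
#   w     (second moment) = sqrt(sum((S - n0) * (f - fd)**2) / power)    -> spectral width
#   snr   = (mean(S) - n0) / n0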
2759 2759 signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipión added with correct definition
2760 2760 total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipión added with correct definition
2761 2761 power = ((spec2[valid] - n0) * fwindow[valid]).sum()
2762 2762 fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
2763 2763 w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
2764 2764 snr = (spec2.mean()-n0)/n0
2765 2765 if (snr < 1.e-20): snr = 1.e-20
2766 2766
2767 2767 vec_power[ind] = total_power
2768 2768 vec_fd[ind] = fd
2769 2769 vec_w[ind] = w
2770 2770 vec_snr[ind] = snr
2771 2771 else:
2772 2772 # Noise by heights
2773 2773 n1, stdv = self.__get_noise2(spec, nicoh)
2774 2774 # Moments Estimation
2775 2775 bb = spec2[numpy.arange(m,spec2.size)]
2776 2776 bb = (bb<n1).nonzero()
2777 2777 bb = bb[0]
2778 2778
2779 2779 ss = spec2[numpy.arange(0,m + 1)]
2780 2780 ss = (ss<n1).nonzero()
2781 2781 ss = ss[0]
2782 2782
2783 2783 if (bb.size == 0):
2784 2784 bb0 = spec.size - 1 - m
2785 2785 else:
2786 2786 bb0 = bb[0] - 1
2787 2787 if (bb0 < 0):
2788 2788 bb0 = 0
2789 2789
2790 2790 if (ss.size == 0):
2791 2791 ss1 = 1
2792 2792 else:
2793 2793 ss1 = max(ss) + 1
2794 2794
2795 2795 if (ss1 > m):
2796 2796 ss1 = m
2797 2797
2798 2798 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
2799 2799
2800 2800 power = ((spec[valid] - n1)*fwindow[valid]).sum()
2801 2801 fd = ((spec[valid]- n1)*freq[valid]*fwindow[valid]).sum()/power
2802 2802 try:
2803 2803 w = numpy.sqrt(((spec[valid] - n1)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
2804 2804 except:
2805 2805 w = float("NaN")
2806 2806 snr = power/(n0*fwindow.sum())
2807 2807
2808 2808 if debug:
2809 2809 spectral_cut(Hei, ind, dbg_hei, freq, fd, snr, n1, w, ymax, spec, spec2, n0, max_spec, ss1, m, bb0, curr_ch, sel_ch)
2810 2810
2811 2811 if snr < 1.e-20: snr = 1.e-20
2812 2812
2813 2813 # Here starts the Gaussian fit adjustment
2814 2814
2815 2815 if type1 == 1 and snr > numpy.power(10,0.1*snrth):
2816 2816
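# Initial guess for the 4-parameter Gaussian model fitted by __curvefit_koki / __GAUSSWINFIT1:
#   a[0] = signal amplitude (snr * n0), a[1] = Doppler shift fd,
#   a[2] = spectral width w,            a[3] = noise floor n0.
# The fit is run twice; the second pass re-weights the data with the spectrum reconstructed
# from the first pass.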
2817 2817 a = numpy.zeros(4,dtype='f4')
2818 2818 a[0] = snr * n0
2819 2819 a[1] = fd
2820 2820 a[2] = w
2821 2821 a[3] = n0
2822 2822
2823 2823 np = spec.size
2824 2824 aold = a.copy()
2825 2825 spec2 = spec.copy()
2826 2826 oldxvalid = xvalid.copy()
2827 2827
2828 2828 for i in range(2):
2829 2829
2830 2830 ww = 1.0/(numpy.power(spec2,2)/nicoh)
2831 2831 ww[np//2] = 0.0
2832 2832
2833 2833 a = aold.copy()
2834 2834 xvalid = oldxvalid.copy()
2835 2835 #self.show_var(xvalid)
2836 2836
2837 2837 gaussfn = __curvefit_koki(spec[xvalid], a, ww[xvalid])
2838 2838 a = gaussfn[1]
2839 2839 converge = gaussfn[2]
2840 2840
2841 2841 xvalid = numpy.arange(np)
2842 2842 spec2 = __GAUSSWINFIT1(a)
2843 2843
2844 2844 xvalid = oldxvalid.copy()
2845 2845 power = a[0] * np
2846 2846 fd = a[1]
2847 2847 sigma_fd = gaussfn[3][1]
2848 2848 snr = max(power/ (max(a[3],n0) * len(oldxvalid)) * converge, 1e-20)
2849 2849 w = numpy.abs(a[2])
2850 2850 n1 = max(a[3], n0)
2851 2851
2852 2852 #gauss_adj=[fd,w,snr,n1,fp,sigma_fd]
2853 2853 else:
2854 2854 sigma_fd=numpy.nan # to avoid UnboundLocalError: local variable 'sigma_fd' referenced before assignment
2855 2855
2856 2856 vec_fd[ind] = fd
2857 2857 vec_w[ind] = w
2858 2858 vec_snr[ind] = snr
2859 2859 vec_n1[ind] = n1
2860 2860 vec_fp[ind] = fp
2861 2861 vec_sigma_fd[ind] = sigma_fd
2862 2862 vec_power[ind] = power # to compare with type 0 processing
2863 2863
2864 2864 if vers==1:
2865 2865 return numpy.vstack((vec_snr, vec_w, vec_fd, vec_n1, vec_fp, vec_sigma_fd, vec_power))
2866 2866 else:
2867 2867 return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
2868 2868
2869 2869 def __get_noise2(self,POWER, fft_avg, TALK=0):
2870 2870 '''
2871 2871 Routine to compute the noise per height (n1). Similar to the IDL implementation.
2872 2872 '''
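# The spectral points are sorted and accumulated; RNOISE keeps the last running average for
# which SUMSQ*J <= RTEST*SUM*SUM held, with RTEST = 1 + 1/fft_avg, a criterion in the spirit
# of Hildebrand & Sekhon (interpretation inferred from the code, not stated by the author).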
2873 2873 SPECT_PTS = len(POWER)
2874 2874 fft_avg = fft_avg*1.0
2875 2875 NOMIT = 0
2876 2876 NN = SPECT_PTS - NOMIT
2877 2877 N = NN//2
2878 2878 ARR = numpy.concatenate((POWER[0:N+1],POWER[N+NOMIT+1:SPECT_PTS]))
2879 2879 ARR = numpy.sort(ARR)
2880 2880 NUMS_MIN = (SPECT_PTS+7)//8
2881 2881 RTEST = (1.0+1.0/fft_avg)
2882 2882 SUM = 0.0
2883 2883 SUMSQ = 0.0
2884 2884 J = 0
2885 2885 for I in range(NN):
2886 2886 J = J + 1
2887 2887 SUM = SUM + ARR[I]
2888 2888 SUMSQ = SUMSQ + ARR[I]*ARR[I]
2889 2889 AVE = SUM*1.0/J
2890 2890 if J > NUMS_MIN:
2891 2891 if (SUMSQ*J <= RTEST*SUM*SUM): RNOISE = AVE
2892 2892 else:
2893 2893 if J == NUMS_MIN: RNOISE = AVE
2894 2894 if TALK == 1: print('Noise Power (2):%4.4f' %RNOISE)
2895 2895 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
2896 2896 return RNOISE, stdv
2897 2897
2898 2898 def __get_noise1(self, power, fft_avg, TALK=0):
2899 2899 '''
2900 2900 Routine to compute the noise per height (n0). Similar to the IDL implementation.
2901 2901 '''
2902 2902 num_pts = numpy.size(power)
2903 2903 fft_avg = fft_avg*1.0
2904 2904 ind = numpy.argsort(power, axis=None, kind='stable')
2905 2905 ARR = numpy.reshape(power,-1)[ind]
2906 2906 NUMS_MIN = num_pts//10
2907 2907 RTEST = (1.0+1.0/fft_avg)
2908 2908 SUM = 0.0
2909 2909 SUMSQ = 0.0
2910 2910 J = 0
2911 2911 cont = 1
2912 2912 while cont == 1 and J < num_pts:
2913 2913
2914 2914 SUM = SUM + ARR[J]
2915 2915 SUMSQ = SUMSQ + ARR[J]*ARR[J]
2916 2916 J = J + 1
2917 2917
2918 2918 if J > NUMS_MIN:
2919 2919 if (SUMSQ*J <= RTEST*SUM*SUM):
2920 2920 LNOISE = SUM*1.0/J
2921 2921 else:
2922 2922 J = J - 1
2923 2923 SUM = SUM - ARR[J]
2924 2924 SUMSQ = SUMSQ - ARR[J]*ARR[J]
2925 2925 cont = 0
2926 2926 else:
2927 2927 if J == NUMS_MIN: LNOISE = SUM*1.0/J
2928 2928 if TALK == 1: print('Noise Power (1):%8.8f' %LNOISE)
2929 2929 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
2930 2930 return LNOISE, stdv
2931 2931
2932 2932 def __NoiseByChannel(self, num_prof, num_incoh, spectra,talk=0):
2933 2933
2934 2934 val_frq = numpy.arange(num_prof-2)+1
2935 2935 val_frq[(num_prof-2)//2:] = val_frq[(num_prof-2)//2:] + 1
2936 2936 junkspc = numpy.sum(spectra[val_frq,:], axis=1)
2937 2937 junkid = numpy.argsort(junkspc)
2938 2938 noisezone = val_frq[junkid[0:num_prof//2]]
2939 2939 specnoise = spectra[noisezone,:]
2940 2940 noise, stdvnoise = self.__get_noise1(specnoise,num_incoh)
2941 2941
2942 2942 if talk:
2943 2943 print('noise =', noise)
2944 2944 return noise, stdvnoise
2945 2945
2946 2946 def run(self, dataOut, proc_type=0, mode_fit=0, exp='150EEJ', debug=False, dbg_hei=None, ymax=1, sel_ch=[0,1]):
2947 2947
2948 2948 absc = dataOut.abscissaList[:-1]
2949 2949 nChannel = dataOut.data_pre[0].shape[0]
2950 2950 nHei = dataOut.data_pre[0].shape[2]
2951 2951 Hei=dataOut.heightList
2952 2952 data_param = numpy.zeros((nChannel, 4 + proc_type*3, nHei))
2953 2953 nProfiles = dataOut.nProfiles
2954 2954 nCohInt = dataOut.nCohInt
2955 2955 nIncohInt = dataOut.nIncohInt
2956 2956 M = numpy.power(numpy.array(1/(nProfiles * nCohInt) ,dtype='float32'),2)
2957 2957 N = numpy.array(M / nIncohInt,dtype='float32')
2958 2958
2959 2959 if proc_type == 1:
2960 2960 type1 = mode_fit
2961 2961 fwindow = numpy.zeros(absc.size) + 1
2962 2962 if exp == '150EEJ':
2963 2963 b=64
2964 2964 fwindow[0:absc.size//2 - b] = 0
2965 2965 fwindow[absc.size//2 + b:] = 0
2966 2966 vers = 1 # new
2967 2967
2968 2968 data = dataOut.data_pre[0] * N
2969 2969
2970 2970 noise = numpy.zeros(nChannel)
2971 2971 stdvnoise = numpy.zeros(nChannel)
2972 2972 for ind in range(nChannel):
2973 2973 noise[ind], stdvnoise[ind] = self.__NoiseByChannel(nProfiles, nIncohInt, data[ind,:,:])
2974 2974 smooth=3
2975 2975 else:
2976 2976 data = dataOut.data_pre[0]
2977 2977 noise = dataOut.noise
2978 2978 fwindow = None
2979 2979 type1 = None
2980 2980 vers = 0 # old
2981 2981 nIncohInt = None
2982 2982 smooth=None
2983 2983
2984 2984 for ind in range(nChannel):
2985 2985 data_param[ind,:,:] = self.__calculateMoments(data[ind,:,:] , absc , noise[ind], nicoh=nIncohInt, smooth=smooth, type1=type1, fwindow=fwindow, vers=vers, Hei=Hei, debug=debug, dbg_hei=dbg_hei, ymax=ymax, curr_ch=ind, sel_ch=sel_ch)
2986 2986 #data_param[ind,:,:] = self.__calculateMoments(data[ind,:,:] , absc , noise[ind], nicoh=nIncohInt, smooth=smooth, type1=type1, fwindow=fwindow, vers=vers, Hei=Hei, debug=debug)
2987 2987 if exp == 'ESF_EW':
2988 2988 data_param[ind,0,:]*=(noise[ind]/stdvnoise[ind])
2989 2989 data_param[ind,3,:]*=(1.0/M)
2990 2990
2991 2991 if proc_type == 1:
2992 2992 dataOut.moments = data_param[:,1:,:]
2993 2993 dataOut.data_dop = data_param[:,2]
2994 2994 dataOut.data_width = data_param[:,1]
2995 2995 dataOut.data_snr = data_param[:,0]
2996 2996 dataOut.data_pow = data_param[:,6] # to compare with type 0 processing
2997 2997 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, data_param[:,3], data_param[:,4],data_param[:,5]),axis=2)
2998 2998
2999 2999 if exp == 'ESF_EW':
3000 3000 spc=dataOut.data_pre[0]* N
3001 3001 cspc=dataOut.data_pre[1]* N
3002 3002 nHei=dataOut.data_pre[1].shape[2]
3003 3003 cross_pairs=dataOut.pairsList
3004 3004 nDiffIncohInt = dataOut.nDiffIncohInt
3005 3005 N2=numpy.array(1 / nDiffIncohInt,dtype='float32')
3006 3006 diffcspectra=dataOut.data_diffcspc.copy()* N2 * M * M
3007 3007 num_pairs=len(dataOut.pairsList)
3008 3008
3009 3009 if num_pairs >= 0:
3010 3010 fbinv=numpy.where(absc != 0)[0]
3011 3011 ccf=numpy.sum(cspc[:,fbinv,:], axis=1)
3012 3012 jvpower=numpy.sum(spc[:,fbinv,:], axis=1)
3013 3013 coh=ccf/numpy.sqrt(jvpower[cross_pairs[0][0],:]*jvpower[cross_pairs[0][1],:])
3014 3014 dccf=numpy.sum(diffcspectra[:,fbinv,:], axis=1)
3015 3015 dataOut.ccfpar = numpy.zeros((num_pairs,nHei,3))
3016 3016 dataOut.ccfpar[:,:,0]=numpy.abs(coh)
3017 3017 dataOut.ccfpar[:,:,1]=numpy.arctan(numpy.imag(coh)/numpy.real(coh))
3018 3018 dataOut.ccfpar[:,:,2]=numpy.arctan(numpy.imag(dccf)/numpy.real(dccf))
3019 3019 else:
3020 3020 dataOut.moments = data_param[:,1:,:]
3021 3021 dataOut.data_snr = data_param[:,0]
3022 3022 dataOut.data_pow = data_param[:,1]
3023 3023 dataOut.data_dop = data_param[:,2]
3024 3024 dataOut.data_width = data_param[:,3]
3025 3025 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, dataOut.data_pow),axis=2)
3026 3026
3027 3027 return dataOut
3028 3028
3029 3029
3030 3030 class JULIA_DayVelocities(Operation):
3031 3031 '''
3032 3032 Operation JULIA_DayVelocities()
3033 3033
3034 3034 From spectral parameters it calculates:
3035 3035
3036 3036 1. Signal to noise level (SNL)
3037 3037 2. Vertical velocity
3038 3038 3. Zonal velocity
3039 3039 4. Vertical velocity error
3040 3040 5. Zonal velocity error.
3041 3041
3042 3042 Type of dataIn: SpectralMoments
3043 3043
3044 3044 Configuration Parameters:
3045 3045
3046 3046 zenith : Pair of angles, one per beam, referred to the perpendicular to B from the center of the antenna.
3047 3047 zenithCorrection : Adjustment angle for the zenith. Default 0.
3048 3048 heights : Height range [km] used to select 150 km echoes. By default [125,185].
3049 3049 nchan : Number of channels to process (1 or 2). 2 by default.
3050 3050 chan : If nchan = 1, chan indicates which of the 2 channels to process.
3051 3051 clean : Second (graphical) cleaning stage. Default False.
3052 3052 driftstdv_th : Maximum allowed difference between consecutive averaged vertical velocities.
3053 3053 zonalstdv_th : Maximum allowed difference between consecutive averaged zonal velocities.
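amountdata : Minimum number of velocity samples that statistics150km requires to recompute the profile average. Default 3.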
3054 3054
3055 3055 Input:
3056 3056
3057 3057 Affected:
3058 3058
3059 3059 '''
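# Hedged usage sketch (illustrative only; the zenith angles below are example values, and the
# operation is normally wired up through a schainpy controller script):
#
#   op = JULIA_DayVelocities()
#   dataOut = op.run(dataOut, zenith=[-3.80, 3.10], zenithCorrection=0.0,
#                    heights=[125, 185], nchan=2, amountdata=3)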
3060 3060
3061 3061 def __init__(self):
3062 3062 Operation.__init__(self)
3063 3063 self.old_drift=None
3064 3064 self.old_zonal=None
3065 3065 self.count_drift=0
3066 3066 self.count_zonal=0
3067 3067 self.oldTime_drift=None
3068 3068 self.oldTime_zonal=None
3069 3069
3070 3070 def newtotal(self, data):
3071 3071 return numpy.nansum(data)
3072 3072
3073 3073 def data_filter(self, parm, snrth=-20, swth=20, wErrth=500):
3074 3074
3075 3075 Sz0 = parm.shape # Sz0: h,p
3076 3076 drift = parm[:,0]
3077 3077 sw = 2*parm[:,1]
3078 3078 snr = 10*numpy.log10(parm[:,2])
3079 3079 Sz = drift.shape # Sz: h
3080 3080 mask = numpy.ones((Sz[0]))
3081 3081 th=0
3082 3082 valid=numpy.where(numpy.isfinite(snr))
3083 3083 cvalid = len(valid[0])
3084 3084 if cvalid >= 1:
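# Adaptive threshold: the histogram mode of the SNR (in dB) is taken as the background level;
# the threshold is set 3 dB above its mean, but capped at the median SNR when that background
# estimate is already more than 3 dB above the median.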
3085 3085 # Average SNR noise level for the i-th group of heights
3086 3086 nbins = int(numpy.max(snr)-numpy.min(snr))+1 # bin size = 1, similar to IDL
3087 3087 h = numpy.histogram(snr,bins=nbins)
3088 3088 hist = h[0]
3089 3089 values = numpy.round_(h[1])
3090 3090 moda = values[numpy.where(hist == numpy.max(hist))]
3091 3091 indNoise = numpy.where(numpy.abs(snr - numpy.min(moda)) < 3)[0]
3092 3092
3093 3093 noise = snr[indNoise]
3094 3094 noise_mean = numpy.sum(noise)/len(noise)
3095 3095 # Median of the SNR
3096 3096 med = numpy.median(snr)
3097 3097 # Set the SNR threshold
3098 3098 if noise_mean > med + 3:
3099 3099 th = med
3100 3100 else:
3101 3101 th = noise_mean + 3
3102 3102 # Build the mask
3103 3103 novalid = numpy.where(snr <= th)[0]
3104 3104 mask[novalid] = numpy.nan
3105 3105 # Discard data that do not exceed the threshold (parameter snrth)
3106 3106 novalid = numpy.where(snr <= snrth)
3107 3107 cnovalid = len(novalid[0])
3108 3108 if cnovalid > 0:
3109 3109 mask[novalid] = numpy.nan
3110 3110 novalid = numpy.where(numpy.isnan(snr))
3111 3111 cnovalid = len(novalid[0])
3112 3112 if cnovalid > 0:
3113 3113 mask[novalid] = numpy.nan
3114 3114
3115 3115 new_parm = numpy.zeros((Sz0[0],Sz0[1]))
3116 3116 for i in range(Sz0[1]):
3117 3117 new_parm[:,i] = parm[:,i] * mask
3118 3118
3119 3119 return new_parm, th
3120 3120
3121 3121
3122 3122 def statistics150km(self, veloc , sigma , threshold , old_veloc=None, count=0, \
3123 currTime=None, oldTime=None, amountdata=2, clearAll = None, timeFactor=1800, debug = False):
3123 currTime=None, oldTime=None, amountdata=3, clearAll = None, timeFactor=1800, debug = False):
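# statistics150km: robust profile-average of the velocities for one time step.
#   - First guess: the variance-weighted mean if it agrees with the median (within
#     threshold/factor), otherwise the median; if they differ by more than avg_threshold
#     the estimate is discarded.
#   - The weighted mean is then recomputed using only samples within threshold/factor of
#     that guess, provided at least `amountdata` samples survive (default 3).
#   - `step`, the maximum allowed jump with respect to the previous accepted value, grows
#     with the time gap since `oldTime` in units of `timeFactor` seconds.
# Returns the accepted sample indices, the updated running value/count/time, the averaged
# velocity and its error, and the clearAll flag.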
3124 3124
3125 3125 if oldTime == None:
3126 3126 oldTime = currTime
3127 3127
3128 3128 step = (threshold/2)*(numpy.abs(currTime - oldTime)//timeFactor + 1)
3129 3129 factor = 2
3130 3130 avg_threshold = 100
3131 3131 # Compute the median over all heights for this time
3132 3132 val1=numpy.nanmedian(veloc)
3133 3133
3134 3134 # Compute the variance-weighted mean over all heights for this time
3135 3135 val2 = self.newtotal(veloc/numpy.power(sigma,2))/self.newtotal(1/numpy.power(sigma,2))
3136 3136
3137 3137 # Check how close the computed median and weighted mean are; if they are close, the weighted mean is chosen
3138 3138 op1=numpy.abs(val2-val1)
3139 3139 op2=threshold/factor
3140 3140 cond = op1 < op2
3141 3141
3142 3142 veloc_prof = val2 if cond else val1
3143 3143 sigma_prof = numpy.nan
3144 3144 sets=numpy.array([-1])
3145 3145
3146 3146 if op1 > avg_threshold: # If they differ too much, these data are not taken into account
3147 3147 veloc_prof = numpy.nan
3148 3148
3149 3149 # The weighted mean is recomputed from the initial estimate of the mean,
3150 3150 # in order to discard values that lie too far from it
3151 3151
3152 3152 if debug:
3153 3153 print('veloc_prof:', veloc_prof)
3154 3154 print('veloc:',veloc)
3155 3155 print('threshold:',threshold)
3156 3156 print('factor:',factor)
3157 3157 print('threshold/factor:',threshold/factor)
3158 3158 print('numpy.abs(veloc-veloc_prof):', numpy.abs(veloc-veloc_prof))
3159 3159 print('numpy.where(numpy.abs(veloc-veloc_prof) < threshold/factor)[0]:', numpy.where(numpy.abs(veloc-veloc_prof) < threshold/factor)[0])
3160 3160
3161 3161 junk = numpy.where(numpy.abs(veloc-veloc_prof) < threshold/factor)[0]
3162 if junk.size > 2:
3162 if junk.size >= amountdata:
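# At least `amountdata` samples lie within threshold/factor of the first-guess profile value:
# recompute the variance-weighted mean over them. sigma_prof1 is the formal error of the
# weighted mean, sigma_prof2 reflects the scatter of the accepted samples, and both are
# combined in quadrature.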
3163 3163 veloc_prof = self.newtotal(veloc[junk]/numpy.power(sigma[junk],2))/self.newtotal(1/numpy.power(sigma[junk],2))
3164 3164 sigma_prof1 = numpy.sqrt(1/self.newtotal(1/numpy.power(sigma[junk],2)))
3165 3165 sigma_prof2 = numpy.sqrt(self.newtotal(numpy.power(veloc[junk]-veloc_prof,2)/numpy.power(sigma[junk],2)))*sigma_prof1
3166 3166 sigma_prof = numpy.sqrt(numpy.power(sigma_prof1,2)+numpy.power(sigma_prof2,2))
3167 3167 sets = junk
3168 3168
3169 3169 # Compare with the previous value to avoid outliers
3170 3170 if debug:
3171 3171 print('old_veloc:',old_veloc)
3172 3172 print('step:', step)
3173 3173
3174 3174 if old_veloc == None:
3175 3175 valid=numpy.isfinite(veloc_prof)
3176 3176 else:
3177 3177 valid=numpy.abs(veloc_prof-old_veloc) < step
3178 3178
3179 3179 if debug:
3180 3180 print('valid:', valid)
3181 3181
3182 3182 if not valid:
3183 3183 aver_veloc=numpy.nan
3184 3184 aver_sigma=numpy.nan
3185 3185 sets=numpy.array([-1])
3186 3186 else:
3187 3187 aver_veloc=veloc_prof
3188 3188 aver_sigma=sigma_prof
3189 3189 clearAll=0
3190 3190 if old_veloc != None and count < 5:
3191 3191 if numpy.abs(veloc_prof-old_veloc) > step:
3192 3192 clearAll=1
3193 3193 count=0
3194 3194 old_veloc=None
3195 3195 if numpy.isfinite(aver_veloc):
3196 3196
3197 3197 count+=1
3198 3198 if old_veloc != None:
3199 3199 old_veloc = (old_veloc + aver_veloc) * 0.5
3200 3200 else:
3201 3201 old_veloc=aver_veloc
3202 3202 oldTime=currTime
3203 3203 if debug:
3204 3204 print('count:',count)
3205 3205 print('sets:',sets)
3206 3206 return sets, old_veloc, count, oldTime, aver_veloc, aver_sigma, clearAll
3207 3207
3208 3208
3209 def run(self, dataOut, zenith, zenithCorrection=0.0, heights=[125, 185], nchan=2, chan=0, clean=False, driftstdv_th=100, zonalstdv_th=200):
3209 def run(self, dataOut, zenith, zenithCorrection=0.0, heights=[125, 185], nchan=2, chan=0, clean=False, driftstdv_th=100, zonalstdv_th=200, amountdata=3):
3210 3210
3211 3211 dataOut.lat=-11.95
3212 3212 dataOut.lon=-76.87
3213 3213
3214 3214 nCh=dataOut.spcpar.shape[0]
3215 3215 nHei=dataOut.spcpar.shape[1]
3216 3216 nParam=dataOut.spcpar.shape[2]
3217 3217
3218 3218 # Height selection
3219 3219 hei=dataOut.heightList
3220 3220 hvalid=numpy.where([hei >= heights[0]][0] & [hei <= heights[1]][0])[0]
3221 3221 nhvalid=len(hvalid)
3222 3222 dataOut.heightList = hei[hvalid]
3223 3223 parm=numpy.empty((nCh,nhvalid,nParam)); parm[:]=numpy.nan
3224 3224 parm[:] = dataOut.spcpar[:,hvalid,:]
3225 3225 # First filtering: SNR threshold
3226 3226 for i in range(nCh):
3227 3227 parm[i,:,:] = self.data_filter(parm[i,:,:])[0]
3228 3228
3229 3229 zenith = numpy.array(zenith)
3230 3230 zenith -= zenithCorrection
3231 3231 zenith *= numpy.pi/180
3232 3232 alpha = zenith[0]
3233 3233 beta = zenith[1]
3234 3234 dopplerCH0 = parm[0,:,0]
3235 3235 dopplerCH1 = parm[1,:,0]
3236 3236 swCH0 = parm[0,:,1]
3237 3237 swCH1 = parm[1,:,1]
3238 3238 snrCH0 = 10*numpy.log10(parm[0,:,2])
3239 3239 snrCH1 = 10*numpy.log10(parm[1,:,2])
3240 3240 noiseCH0 = parm[0,:,3]
3241 3241 noiseCH1 = parm[1,:,3]
3242 3242 wErrCH0 = parm[0,:,5]
3243 3243 wErrCH1 = parm[1,:,5]
3244 3244
3245 3245 # Vertical and zonal calculation: nchan=2 by default
3246 3246 # Only vertical calculation, for offline processing with only one channel with good signal
3247 3247 if nchan == 1:
3248 3248 if chan == 1:
3249 3249 drift = - dopplerCH1
3250 3250 snr = snrCH1
3251 3251 noise = noiseCH1
3252 3252 sw = swCH1
3253 3253 w_w_err = wErrCH1
3254 3254 elif chan == 0:
3255 3255 drift = - dopplerCH0
3256 3256 snr = snrCH0
3257 3257 noise = noiseCH0
3258 3258 sw = swCH0
3259 3259 w_w_err = wErrCH0
3260 3260
3261 3261 elif nchan == 2:
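# Two-beam DBS geometry: with beams at zenith angles alpha and beta (after zenithCorrection),
# the radial Dopplers are projected into vertical (drift) and zonal components;
# sinB_A = sin(beta)*cos(alpha) - sin(alpha)*cos(beta) = sin(beta - alpha).
# The velocity errors propagate with the same projection factors.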
3262 3262 sinB_A = numpy.sin(beta)*numpy.cos(alpha) - numpy.sin(alpha)* numpy.cos(beta)
3263 3263 drift = -(dopplerCH0 * numpy.sin(beta) - dopplerCH1 * numpy.sin(alpha))/ sinB_A
3264 3264 zonal = (dopplerCH0 * numpy.cos(beta) - dopplerCH1 * numpy.cos(alpha))/ sinB_A
3265 3265 snr = (snrCH0 + snrCH1)/2
3266 3266 noise = (noiseCH0 + noiseCH1)/2
3267 3267 sw = (swCH0 + swCH1)/2
3268 3268 w_w_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.sin(beta)/numpy.abs(sinB_A),2) + numpy.power(wErrCH1 * numpy.sin(alpha)/numpy.abs(sinB_A),2))
3269 3269 w_e_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.cos(beta)/numpy.abs(-1*sinB_A),2) + numpy.power(wErrCH1 * numpy.cos(alpha)/numpy.abs(-1*sinB_A),2))
3270 3270
3271 3271 # 150Km statistics to clean data
3272 3272 clean_drift = drift.copy()
3273 3273 clean_drift[:] = numpy.nan
3274 3274 if nchan == 2:
3275 3275 clean_zonal = zonal.copy()
3276 3276 clean_zonal[:] = numpy.nan
3277 3277
3278 3278 # Vertical
3279 3279 sets1, self.old_drift, self.count_drift, self.oldTime_drift, aver_veloc, aver_sigma, clearAll = self.statistics150km(drift, w_w_err, driftstdv_th, \
3280 3280 old_veloc=self.old_drift, count=self.count_drift, currTime=dataOut.utctime, \
3281 oldTime=self.oldTime_drift, timeFactor=120)
3281 oldTime=self.oldTime_drift, amountdata = amountdata, timeFactor=120, debug = False)
3282 3282 # Initialize the zonal averages here so they are always defined, even when nchan == 1
3283 3283 mean_zonal = numpy.nan
3284 3284 sigma_zonal = numpy.nan
3285 3285 mean_drift = aver_veloc
3286 3286 sigma_drift = aver_sigma
3287 3287
3288 3288 if sets1.size != 1:
3289 3289 clean_drift[sets1] = drift[sets1]
3290 3290
3291 3291 novalid=numpy.where(numpy.isnan(clean_drift))[0]; cnovalid=novalid.size
3292 3292 if cnovalid > 0: drift[novalid] = numpy.nan
3293 3293 if cnovalid > 0: snr[novalid] = numpy.nan
3294 3294
3295 3295 # Zonal
3296 3296 if nchan == 2:
3297 3297 sets2, self.old_zonal, self.count_zonal, self.oldTime_zonal, aver_veloc, aver_sigma, clearAll = self.statistics150km(zonal, w_e_err, zonalstdv_th, \
3298 3298 old_veloc=self.old_zonal, count=self.count_zonal, currTime=dataOut.utctime, \
3299 oldTime=self.oldTime_zonal, timeFactor=600)
3299 oldTime=self.oldTime_zonal, amountdata = amountdata, timeFactor=600, debug = False)
3300 3300 if clearAll == 1:
3301 3301 mean_zonal = numpy.nan
3302 3302 sigma_zonal = numpy.nan
3303 3303 mean_zonal = aver_veloc
3304 3304 sigma_zonal = aver_sigma
3305 3305 if sets2.size != 1:
3306 3306 clean_zonal[sets2] = zonal[sets2]
3307 3307
3308 3308 novalid=numpy.where(numpy.isnan(clean_zonal))[0]; cnovalid=novalid.size
3309 3309 if cnovalid > 0: zonal[novalid] = numpy.nan
3310 3310 if cnovalid > 0: snr[novalid] = numpy.nan
3311 3311
3312 3312 n_avg_par=4
3313 3313 avg_par=numpy.empty((n_avg_par,)); avg_par[:] = numpy.nan
3314 3314 avg_par[0,]=mean_drift
3315 3315 avg_par[1,]=mean_zonal
3316 3316 avg_par[2,]=sigma_drift
3317 3317 avg_par[3,]=sigma_zonal
3318 3318
3319 3319 set1 = 1.0
3320 3320 navg = set1
3321 3321 nci = dataOut.nCohInt
3322 3322 # ----------------------------------
3323 3323 ipp = 252.0
3324 3324 nincoh = dataOut.nIncohInt
3325 3325 nptsfft = dataOut.nProfiles
3326 3326 hardcoded=False # if True, similar to IDL processing
3327 3327 if hardcoded:
3328 3328 ipp=200.1
3329 3329 nincoh=22
3330 3330 nptsfft=128
3331 3331 # ----------------------------------
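# The fitted spectral parameters are mapped back to power, ACF and differential-CCF estimates
# (pow0, acf0/acf1, dccf) so that SNR, vertical drift (vzo) and zonal drift (vxo), together
# with their errors, can be derived below with the same expressions used by the standard
# JULIA 150 km processing.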
3332 3332 nipp = ipp * nci
3333 3333 height = dataOut.heightList
3334 3334 nHei = len(height)
3335 3335 kd = 213.6
3336 3336 nint = nptsfft * nincoh
3337 3337 drift1D = drift.copy()
3338 3338 if nchan == 2:
3339 3339 zonal1D=zonal.copy()
3340 3340 snr1D = snr.copy()
3341 3341 snr1D = 10*numpy.power(10, 0.1*snr1D)
3342 3342 noise1D = noise.copy()
3343 3343 noise0 = numpy.nanmedian(noise1D)
3344 3344 noise = noise0 + noise0
3345 3345 sw1D = sw.copy()
3346 3346 pow0 = snr1D * noise0 + noise0
3347 3347 acf0 = snr1D * noise0 * numpy.exp((-drift1D*nipp*numpy.pi/(1.5e5*1.5))*1j) * (1-0.5*numpy.power(sw1D*nipp*numpy.pi/(1.5e5*1.5),2))
3348 3348 acf0 /= pow0
3349 3349 acf1 = acf0
3350 3350 dt= nint * nipp /1.5e5
3351 3351
3352 3352 if nchan == 2:
3353 3353 dccf = pow0 * pow0 * numpy.exp((zonal1D*kd*dt/(height*1e3))*(1j))
3354 3354 else:
3355 3355 dccf = numpy.empty(nHei); dccf[:]=numpy.nan # complex?
3356 3356 dccf /= pow0 * pow0
3357 3357 sno=(pow0+pow0-noise)/noise
3358 3358
3359 3359 # First parameter: Signal to noise ratio and its error
3360 3360 sno = numpy.log10(sno)
3361 3361 sno10 = 10 * sno
3362 3362 dsno = 1.0/numpy.sqrt(nint*navg)*(1+1/sno10)
3363 3363
3364 3364 # Second parameter: Vertical Drifts
3365 3365 s=numpy.sqrt(numpy.abs(acf0)*numpy.abs(acf1))
3366 3366 sp = s*(1.0 + 1.0/sno10)
3367 3367 vzo = -numpy.arctan2(numpy.imag(acf0+acf1),numpy.real(acf0+acf1))* \
3368 3368 1.5e5*1.5/(nipp*numpy.pi)
3369 3369 dvzo = numpy.sqrt(1-sp*sp)*0.338*1.5e5/(numpy.sqrt(nint*navg)*sp*nipp)
3370 3370
3371 3371 # Third parameter: Zonal Drifts
3372 3372 dt = nint*nipp/1.5e5
3373 3373 ss = numpy.sqrt(numpy.abs(dccf))
3374 3374 vxo = numpy.arctan2(numpy.imag(dccf),numpy.real(dccf))*height*1e3/(kd*dt)
3375 3375 dvxo = numpy.sqrt(1.0-ss*ss)*height*1e3/(numpy.sqrt(nint*navg)*ss*kd*dt)
3376 3376
3377 3377 npar = 5
3378 3378 par = numpy.empty((npar, nHei)); par[:] = numpy.nan
3379 3379
3380 3380 par[0,:] = sno
3381 3381 par[1,:] = vzo
3382 3382 par[2,:] = vxo
3383 3383 par[3,:] = dvzo
3384 3384 par[4,:] = dvxo
3385 3385
3386 3386 # Second filtering:
3387 3387 # Removal by height: fewer than two consecutive finite samples are not considered a 150 km echo.
3388 3388 clean_par=numpy.empty((npar,nHei)); clean_par[:]=numpy.nan
3389 3389 if clean:
3390 3390
3391 3391 for p in range(npar):
3392 3392 ih=0
3393 3393 while ih < nHei-1:
3394 3394 j=ih
3395 3395 if numpy.isfinite(snr1D[ih]):
3396 3396 while numpy.isfinite(snr1D[j]):
3397 3397 j+=1
3398 3398 if j >= nHei:
3399 3399 break
3400 3400 if j > ih + 1:
3401 3401 for k in range(ih,j):
3402 3402 clean_par[p][k] = par[p][k]
3403 3403 ih = j - 1
3404 3404 ih+=1
3405 3405 else:
3406 3406 clean_par[:] = par[:]
3407 3407
3408 3408 mad_output = numpy.vstack((clean_par[0,:], clean_par[1,:], clean_par[2,:], clean_par[3,:], clean_par[4,:]))
3409 3409 graph = numpy.vstack((clean_par[0,:], clean_par[1,:], clean_par[2,:]))
3410 3410 dataOut.data_output = mad_output
3411 3411 dataOut.data_graph = graph
3412 3412 dataOut.avg_output = avg_par
3413 3413 dataOut.utctimeInit = dataOut.utctime
3414 3414 dataOut.outputInterval = dataOut.timeInterval
3415 3415
3416 3416 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.data_output[0])) # NAN vectors are not written
3417 3417
3418 3418 return dataOut
3419 3419
3420 3420
3421 3421 class JULIA_NightVelocities(Operation):
3422 3422 '''
3423 3423 Function SpreadFVelocities()
3424 3424
3425 3425 Calculates SNL and drifts
3426 3426
3427 3427 Type of dataIn: Parameters
3428 3428
3429 3429 Configuration Parameters:
3430 3430
3431 3431 mymode : (0) Interferometry,
3432 3432 (1) Doppler beam swinging.
3433 3433 myproc : (0) JULIA_V,
3434 3434 (1) JULIA_EW.
3435 3435 myantenna : (0) 1/4 antenna,
3436 3436 (1) 1/2 antenna.
3437 3437 jset : Number of Incoherent integrations.
3438 3438
3439 3439
3440 3440 Input:
3441 3441 channelList : simple channel list to select e.g. [2,3,7]
3442 3442 self.dataOut.data_pre : Spectral data
3443 3443 self.dataOut.abscissaList : List of frequencies
3444 3444 self.dataOut.noise : Noise level per channel
3445 3445
3446 3446 Affected:
3447 3447 self.dataOut.moments : Parameters per channel
3448 3448 self.dataOut.data_snr : SNR per channel
3449 3449
3450 3450 '''
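# Hedged usage sketch (illustrative only; the zenith angles below are example values, and the
# operation is normally wired up through a schainpy controller script):
#
#   op = JULIA_NightVelocities()
#   dataOut = op.run(dataOut, zenith=[-3.80, 3.10], zenithCorrection=0.0, mymode=1)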
3451 3451 def __init__(self):
3452 3452 Operation.__init__(self)
3453 3453
3454 3454 def newtotal(self, data):
3455 3455 return numpy.nansum(data)
3456 3456
3457 3457 def data_filter(self, parm, snrth=-17, swth=20, dopth=500.0, debug=False):
3458 3458
3459 3459 Sz0 = parm.shape # Sz0: h,p
3460 3460 drift = parm[:,0]
3461 3461 sw = 2*parm[:,1]
3462 3462 snr = 10*numpy.log10(parm[:,2])
3463 3463 Sz = drift.shape # Sz: h
3464 3464 mask = numpy.ones((Sz[0]))
3465 3465 th=0
3466 3466 valid=numpy.where(numpy.isfinite(snr))
3467 3467 cvalid = len(valid[0])
3468 3468 if cvalid >= 1:
3469 3469 # Average SNR noise level for the i-th group of heights
3470 3470 nbins = int(numpy.max(snr)-numpy.min(snr))+1 # bin size = 1, similar to IDL
3471 3471 h = numpy.histogram(snr,bins=nbins)
3472 3472 hist = h[0]
3473 3473 values = numpy.round_(h[1])
3474 3474 moda = values[numpy.where(hist == numpy.max(hist))]
3475 3475 indNoise = numpy.where(numpy.abs(snr - numpy.min(moda)) < 3)[0]
3476 3476
3477 3477 noise = snr[indNoise]
3478 3478 noise_mean = numpy.sum(noise)/len(noise)
3479 3479 # Median of the SNR
3480 3480 med = numpy.median(snr)
3481 3481 # Set the SNR threshold
3482 3482 if noise_mean > med + 3:
3483 3483 th = med
3484 3484 else:
3485 3485 th = noise_mean + 3
3486 3486 # Build the mask
3487 3487 novalid = numpy.where(snr <= th)[0]
3488 3488 mask[novalid] = numpy.nan
3489 3489 # Discard data that do not exceed the threshold (parameter snrth)
3490 3490 novalid = numpy.where(snr <= snrth)
3491 3491 cnovalid = len(novalid[0])
3492 3492 if cnovalid > 0:
3493 3493 mask[novalid] = numpy.nan
3494 3494 novalid = numpy.where(numpy.isnan(snr))
3495 3495 cnovalid = len(novalid[0])
3496 3496 if cnovalid > 0:
3497 3497 mask[novalid] = numpy.nan
3498 3498 # Velocity threshold
3499 3499 if dopth != None:
3500 3500 novalid = numpy.where(numpy.logical_or(drift< dopth*(-1), drift > dopth))
3501 3501 cnovalid = len(novalid[0])
3502 3502 if cnovalid > 0:
3503 3503 mask[novalid] = numpy.nan
3504 3504 if debug:
3505 3505 print('Discarded: %i of %i' %(cnovalid, len(drift)))
3506 3506 print('Percentage: %3.1f' %(100.0*cnovalid/len(drift)))
3507 3507
3508 3508 new_parm = numpy.zeros((Sz0[0],Sz0[1]))
3509 3509 for i in range(Sz0[1]):
3510 3510 new_parm[:,i] = parm[:,i] * mask
3511 3511
3512 3512 return new_parm, mask
3513 3513
3514 3514
3515 3515 def run(self, dataOut, zenith, zenithCorrection, mymode=1, dbs_sel=0, myproc=0, myantenna=0, jset=None, clean=False):
3516 3516
3517 3517
3518 3518 dataOut.lat=-11.95
3519 3519 dataOut.lon=-76.87
3520 3520 mode=mymode
3521 3521 proc=myproc
3522 3522 antenna=myantenna
3523 3523 nci=dataOut.nCohInt
3524 3524 nptsfft=dataOut.nProfiles
3525 3525 navg= 3 if jset is None else jset
3526 3526 nint=dataOut.nIncohInt//navg
3527 3527 navg1=dataOut.nProfiles * nint * navg
3528 3528 tau1=dataOut.ippSeconds
3529 3529 nipp=dataOut.radarControllerHeaderObj.ipp
3530 3530 jlambda=6
3531 3531 kd=213.6
3532 3532 hei=dataOut.heightList.copy()
3533 3533
3534 3534 nCh=dataOut.spcpar.shape[0]
3535 3535 nHei=dataOut.spcpar.shape[1]
3536 3536 nParam=dataOut.spcpar.shape[2]
3537 3537
3538 3538 parm = numpy.zeros((nCh,nHei,nParam))
3539 3539 parm[:] = dataOut.spcpar[:]
3540 3540 mask=numpy.ones(nHei)
3541 3541 mask0=mask.copy()
3542 3542 # First filtering: SNR threshold
3543 3543 for i in range(nCh):
3544 3544 parm[i,:,:], mask = self.data_filter(parm[i,:,:], snrth = 0.1) # a 0.1 threshold filters out signal that does not correspond to ESF; for interferometry use -17 dB
3545 3545 mask0 *= mask
3546 3546
3547 3547 ccf_results=numpy.transpose(dataOut.ccfpar,(2,1,0))
3548 3548
3549 3549 for i in range(3):
3550 3550 ccf_results[i,:,0] *= mask0
3551 3551
3552 3552 zenith = numpy.array(zenith)
3553 3553 zenith -= zenithCorrection
3554 3554 zenith *= numpy.pi/180
3555 3555 alpha = zenith[0]
3556 3556 beta = zenith[1]
3557 3557
3558 3558 w_w = parm[0,:,0]
3559 3559 w_e = parm[1,:,0]
3560 3560
3561 3561 if mode==1:
3562 3562 # Vertical and zonal calculation
3563 3563 sinB_A = numpy.sin(beta)*numpy.cos(alpha) - numpy.sin(alpha)* numpy.cos(beta)
3564 3564 w = -(w_w * numpy.sin(beta) - w_e * numpy.sin(alpha))/ sinB_A
3565 3565 u = (w_w * numpy.cos(beta) - w_e * numpy.cos(alpha))/ sinB_A
3566 3566
3567 3567 #Noise
3568 3568 n0 = parm[0,:,3]
3569 3569 n1 = parm[1,:,3]
3570 3570 jn0_1 = numpy.nanmedian(n0)
3571 3571 jn0_2 = numpy.nanmean(n0)
3572 3572 jn1_1 = numpy.nanmedian(n1)
3573 3573 jn1_2 = numpy.nanmean(n1)
3574 3574 noise0 = jn0_2 if numpy.abs(jn0_1-jn0_2)/(jn0_1+jn0_2) <= 0.1 else jn0_1
3575 3575 noise1 = jn1_2 if numpy.abs(jn1_1-jn1_2)/(jn1_1+jn1_2) <= 0.1 else jn1_1
3576 3576
3577 3577 noise = noise0 + noise0 if mode == 1 else noise0 + noise1
3578 3578
3579 3579 #Power
3580 3580 apow1 = (parm[0,:,2]/numpy.sqrt(nint))*noise0 + n0
3581 3581 apow2 = (parm[1,:,2]/numpy.sqrt(nint))*noise1 + n1
3582 3582
3583 3583 #SNR SNR=Detectability/ SQRT(nint) or (Pow-Noise)/Noise
3584 3584 s_n0 = (apow1 - noise0)/noise0
3585 3585 s_n1 = (apow2 - noise1)/noise1
3586 3586
3587 3587 swCH0 = parm[0,:,1]
3588 3588 swCH1 = parm[1,:,1]
3589 3589
3590 3590 if mode == 1:
3591 3591 aacf1=(1-numpy.square(tau1)*numpy.square(4*numpy.pi/jlambda*swCH0)/2)* \
3592 3592 numpy.exp(-4*numpy.pi/jlambda*w*tau1*1j)* \
3593 3593 apow1
3594 3594 aacf2=(1-numpy.square(tau1)*numpy.square(4*numpy.pi/jlambda*swCH1)/2)* \
3595 3595 numpy.exp(-4*numpy.pi/jlambda*w*tau1*1j)* \
3596 3596 apow2
3597 3597 dccf_0=numpy.zeros(nHei, dtype=complex)
3598 3598
3599 3599 else:
3600 3600 aacf1=(1-numpy.square(tau1)*numpy.square(4*numpy.pi/jlambda*swCH0)/2)* \
3601 3601 numpy.exp(4*numpy.pi/jlambda*w_w*tau1*1j)* \
3602 3602 apow1
3603 3603 aacf2=(1-numpy.square(tau1)*numpy.square(4*numpy.pi/jlambda*swCH1)/2)* \
3604 3604 numpy.exp(4*numpy.pi/jlambda*w_e*tau1*1j)* \
3605 3605 apow2
3606 3606 dccf_0=numpy.power(ccf_results[0,:,0],2)*apow1*apow2* \
3607 3607 numpy.exp( \
3608 3608 ( \
3609 3609 (1+1*(antenna==1))* \
3610 3610 (-1+2*(proc == 1))* \
3611 3611 ccf_results[2,:,0] \
3612 3612 )*1j)
3613 3613
3614 3614 nsamp=len(hei)
3615 3615 pow0 = numpy.empty(nsamp); pow0[:] = numpy.nan
3616 3616 pow1 = numpy.empty(nsamp); pow1[:] = numpy.nan
3617 3617 acf0 = numpy.empty(nsamp, dtype=complex); acf0[:] = numpy.nan
3618 3618 acf1 = numpy.empty(nsamp, dtype=complex); acf1[:] = numpy.nan
3619 3619 dccf = numpy.empty(nsamp, dtype=complex); dccf[:] = numpy.nan
3620 3620 dop0 = numpy.empty(nsamp); dop0[:] = numpy.nan
3621 3621 dop1 = numpy.empty(nsamp); dop1[:] = numpy.nan
3622 3622 p_w = numpy.empty(nsamp); p_w[:] = numpy.nan
3623 3623 p_u = numpy.empty(nsamp); p_u[:] = numpy.nan
3624 3624
3625 3625 if mode == 0 or (mode == 1 and dbs_sel == 0):
3626 3626 ih=0
3627 3627 while ih < nsamp-10:
3628 3628 j=ih
3629 3629 if numpy.isfinite(s_n0[ih]) and numpy.isfinite(s_n1[ih]):
3630 3630 while numpy.isfinite(s_n0[j]) and numpy.isfinite(s_n1[j]):
3631 3631 j+=1
3632 3632 if j > ih + 2:
3633 3633 for k in range(ih,j):
3634 3634 pow0[k] = apow1[k]
3635 3635 pow1[k] = apow2[k]
3636 3636 acf0[k] = aacf1[k]
3637 3637 acf1[k] = aacf2[k]
3638 3638 dccf[k] = dccf_0[k]
3639 3639 ih = j - 1
3640 3640 ih+=1
3641 3641 else:
3642 3642 ih=0
3643 3643 while ih < nsamp-10:
3644 3644 j=ih
3645 3645 if numpy.isfinite(s_n0[ih]):
3646 3646 while numpy.isfinite(s_n0[j]) and j < nsamp-10:
3647 3647 j+=1
3648 3648 #if j > ih + 6:
3649 3649 if j > ih + 2:
3650 3650 #if j > ih + 3:
3651 3651 for k in range(ih,j):
3652 3652 pow0[k] = apow1[k]
3653 3653 #acf0[k] = aacf1[k]
3654 3654 #dccf[k] = dccf_0[k]
3655 3655 p_w[k] = w[k]
3656 3656 dop0[k] = w_w[k]
3657 3657 ih = j - 1
3658 3658 ih+=1
3659 3659 ih=0
3660 3660 while ih < nsamp-10:
3661 3661 j=ih
3662 3662 if numpy.isfinite(s_n1[ih]):
3663 3663 while numpy.isfinite(s_n1[j]) and j < nsamp-10:
3664 3664 j+=1
3665 3665 #if j > ih + 6:
3666 3666 if j > ih + 2:
3667 3667 #if j > ih + 3:
3668 3668 for k in range(ih,j):
3669 3669 pow1[k] = apow2[k]
3670 3670 #acf1[k] = aacf2[k]
3671 3671 p_u[k] = u[k]
3672 3672 dop1[k] = w_e[k]
3673 3673 ih = j - 1
3674 3674 ih+=1
3675 3675
3676 3676 acf0 = numpy.zeros(nsamp, dtype=complex)
3677 3677 acf1 = numpy.zeros(nsamp, dtype=complex)
3678 3678 dccf = numpy.zeros(nsamp, dtype=complex)
3679 3679
3680 3680 acf0 /= pow0
3681 3681 acf1 /= pow1
3682 3682 dccf /= pow0 * pow1
3683 3683
3684 3684 if mode == 0 or (mode == 1 and dbs_sel == 0):
3685 3685 sno=(pow0+pow1-noise)/noise
3686 3686 # First parameter: Signal to noise ratio and its error
3687 3687 sno=numpy.log10(sno)
3688 3688 dsno=1.0/numpy.sqrt(nint*navg)*(1+1/sno)
3689 3689 # Second parameter: Vertical Drifts
3690 3690 s=numpy.sqrt(numpy.abs(acf0)*numpy.abs(acf1))
3691 3691 ind=numpy.where(numpy.abs(s)>=1.0)
3692 3692 if numpy.size(ind)>0:
3693 3693 s[ind]=numpy.sqrt(0.9999)
3694 3694 sp=s*(1.0 + 1.0/sno)
3695 3695 vzo=-numpy.arctan2(numpy.imag(acf0+acf1),numpy.real(acf0+acf1))* \
3696 3696 1.5e5*1.5/(nipp*numpy.pi)
3697 3697 dvzo=numpy.sqrt(1-sp*sp)*0.338*1.5e5/(numpy.sqrt(nint*navg)*sp*nipp)
3698 3698 ind=numpy.where(dvzo<=0.1)
3699 3699 if numpy.size(ind)>0:
3700 3700 dvzo[ind]=0.1
3701 3701 # Third parameter: Zonal Drifts
3702 3702 dt=nint*nipp/1.5e5
3703 3703 ss=numpy.sqrt(numpy.abs(dccf))
3704 3704 ind=numpy.where(ss>=1.0)
3705 3705 if numpy.size(ind)>0:
3706 3706 ss[ind]=numpy.sqrt(0.99999)
3707 3707 ind=numpy.where(ss<=0.1)
3708 3708 if numpy.size(ind)>0:
3709 3709 ss[ind]=numpy.sqrt(0.1)
3710 3710 vxo=numpy.arctan2(numpy.imag(dccf),numpy.real(dccf))*hei*1e3/(kd*dt)
3711 3711 dvxo=numpy.sqrt(1.0-ss*ss)*hei*1e3/(numpy.sqrt(nint*navg)*ss*kd*dt)
3712 3712 ind=numpy.where(dvxo<=0.1)
3713 3713 if numpy.size(ind)>0:
3714 3714 dvxo[ind]=0.1
3715 3715 else:
3716 3716 sno0=(pow0-noise0)/noise0
3717 3717 sno1=(pow1-noise1)/noise1
3718 3718
3719 3719 # First parameter: Signal to noise ratio and its error
3720 3720 sno0=numpy.log10(sno0)
3721 3721 dsno0=1.0/numpy.sqrt(nint*navg)*(1+1/sno0)
3722 3722 sno1=numpy.log10(sno1)
3723 3723 dsno1=1.0/numpy.sqrt(nint*navg)*(1+1/sno1)
3724 3724
3725 3725 npar=6
3726 3726 par = numpy.empty((npar, nHei)); par[:] = numpy.nan
3727 3727
3728 3728 if mode == 0:
3729 3729 par[0,:] = sno
3730 3730 par[1,:] = vxo
3731 3731 par[2,:] = dvxo
3732 3732 par[3,:] = vzo
3733 3733 par[4,:] = dvzo
3734 3734
3735 3735 elif mode == 1 and dbs_sel == 0:
3736 3736 par[0,:] = sno
3737 3737 par[1,:] = vzo
3738 3738 else:
3739 3739 par[0,:] = sno0
3740 3740 par[1,:] = sno1
3741 3741 par[2,:] = dop0
3742 3742 par[3,:] = dop1
3743 3743 #par[4,:] = p_w
3744 3744 #par[5,:] = p_u
3745 3745
3746 3746 if mode == 0:
3747 3747 winds = numpy.vstack((par[0,:], par[1,:], par[2,:], par[3,:], par[4,:]))
3748 3748 elif mode == 1 and dbs_sel == 0:
3749 3749 winds = numpy.vstack((par[0,:], par[1,:]))
3750 3750 else:
3751 3751 winds = numpy.vstack((par[0,:], par[1,:], par[2,:], par[3,:]))
3752 3752
3753 3753 dataOut.data_output = winds
3754 3754 dataOut.data_snr = par[0,:]
3755 3755
3756 3756 dataOut.utctimeInit = dataOut.utctime
3757 3757 dataOut.outputInterval = dataOut.timeInterval
3758 3758
3759 3759 aux1= numpy.all(numpy.isnan(dataOut.data_output[0])) # NAN vectors are not written
3760 3760 aux2= numpy.all(numpy.isnan(dataOut.data_output[1])) # NAN vectors are not written
3761 3761 dataOut.flagNoData = aux1 or aux2
3762 3762
3763 3763 return dataOut
3764 3764
3765 3765 class SALags(Operation):
3766 3766 '''
3767 3767 Function GetMoments()
3768 3768
3769 3769 Input:
3770 3770 self.dataOut.data_pre
3771 3771 self.dataOut.abscissaList
3772 3772 self.dataOut.noise
3773 3773 self.dataOut.normFactor
3774 3774 self.dataOut.data_snr
3775 3775 self.dataOut.groupList
3776 3776 self.dataOut.nChannels
3777 3777
3778 3778 Affected:
3779 3779 self.dataOut.data_param
3780 3780
3781 3781 '''
3782 3782 def run(self, dataOut):
3783 3783 data_acf = dataOut.data_pre[0]
3784 3784 data_ccf = dataOut.data_pre[1]
3785 3785 normFactor_acf = dataOut.normFactor[0]
3786 3786 normFactor_ccf = dataOut.normFactor[1]
3787 3787 pairs_acf = dataOut.groupList[0]
3788 3788 pairs_ccf = dataOut.groupList[1]
3789 3789
3790 3790 nHeights = dataOut.nHeights
3791 3791 absc = dataOut.abscissaList
3792 3792 noise = dataOut.noise
3793 3793 SNR = dataOut.data_snr
3794 3794 nChannels = dataOut.nChannels
3795 3795 # pairsList = dataOut.groupList
3796 3796 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)
3797 3797
3798 3798 for l in range(len(pairs_acf)):
3799 3799 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
3800 3800
3801 3801 for l in range(len(pairs_ccf)):
3802 3802 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
3803 3803
3804 3804 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
3805 3805 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
3806 3806 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
3807 3807 return
3808 3808
3809 3809 # def __getPairsAutoCorr(self, pairsList, nChannels):
3810 3810 #
3811 3811 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
3812 3812 #
3813 3813 # for l in range(len(pairsList)):
3814 3814 # firstChannel = pairsList[l][0]
3815 3815 # secondChannel = pairsList[l][1]
3816 3816 #
3817 3817 # # Getting autocorrelation pairs
3818 3818 # if firstChannel == secondChannel:
3819 3819 # pairsAutoCorr[firstChannel] = int(l)
3820 3820 #
3821 3821 # pairsAutoCorr = pairsAutoCorr.astype(int)
3822 3822 #
3823 3823 # pairsCrossCorr = range(len(pairsList))
3824 3824 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
3825 3825 #
3826 3826 # return pairsAutoCorr, pairsCrossCorr
3827 3827
3828 3828 def __calculateTaus(self, data_acf, data_ccf, lagRange):
3829 3829
3830 3830 lag0 = data_acf.shape[1]/2
3831 3831 # Autocorrelation function
3832 3832 mean_acf = stats.nanmean(data_acf, axis = 0)
3833 3833
3834 3834 # Get the TauCross index
3835 3835 ind_ccf = data_ccf.argmax(axis = 1)
3836 3836 # Get the TauAuto index
3837 3837 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
3838 3838 ccf_lag0 = data_ccf[:,lag0,:]
3839 3839
3840 3840 for i in range(ccf_lag0.shape[0]):
3841 3841 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
3842 3842
3843 3843 # Get TauCross and TauAuto
3844 3844 tau_ccf = lagRange[ind_ccf]
3845 3845 tau_acf = lagRange[ind_acf]
3846 3846
3847 3847 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
3848 3848
3849 3849 tau_ccf[Nan1,Nan2] = numpy.nan
3850 3850 tau_acf[Nan1,Nan2] = numpy.nan
3851 3851 tau = numpy.vstack((tau_ccf,tau_acf))
3852 3852
3853 3853 return tau
3854 3854
3855 3855 def __calculateLag1Phase(self, data, lagTRange):
3856 3856 data1 = stats.nanmean(data, axis = 0)
3857 3857 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
3858 3858
3859 3859 phase = numpy.angle(data1[lag1,:])
3860 3860
3861 3861 return phase
3862 3862
3863 3863 def fit_func( x, a0, a1, a2): #, a3, a4, a5):
3864 3864 z = (x - a1) / a2
3865 3865 y = a0 * numpy.exp(-z**2 / a2) #+ a3 + a4 * x + a5 * x**2
3866 3866 return y
3867 3867
3868 3868
3869 3869 class SpectralFitting(Operation):
3870 3870 '''
3871 3871 Function GetMoments()
3872 3872
3873 3873 Input:
3874 3874 Output:
3875 3875 Variables modified:
3876 3876 '''
3877 3877 def __calculateMoments(self, oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
3878 3878
3879 3879 if (nicoh is None): nicoh = 1
3880 3880 if (graph is None): graph = 0
3881 3881 if (smooth is None): smooth = 0
3882 3882 elif (smooth < 3): smooth = 0 # use the local smooth argument
3883 3883
3884 3884 if (type1 is None): type1 = 0
3885 3885 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
3886 3886 if (snrth is None): snrth = -3
3887 3887 if (dc is None): dc = 0
3888 3888 if (aliasing is None): aliasing = 0
3889 3889 if (oldfd is None): oldfd = 0
3890 3890 if (wwauto is None): wwauto = 0
3891 3891
3892 3892 if (n0 < 1.e-20): n0 = 1.e-20
3893 3893
3894 3894 freq = oldfreq
3895 3895 vec_power = numpy.zeros(oldspec.shape[1])
3896 3896 vec_fd = numpy.zeros(oldspec.shape[1])
3897 3897 vec_w = numpy.zeros(oldspec.shape[1])
3898 3898 vec_snr = numpy.zeros(oldspec.shape[1])
3899 3899
3900 3900 oldspec = numpy.ma.masked_invalid(oldspec)
3901 3901
3902 3902 for ind in range(oldspec.shape[1]):
3903 3903
3904 3904 spec = oldspec[:,ind]
3905 3905 aux = spec*fwindow
3906 3906 max_spec = aux.max()
3907 3907 m = list(aux).index(max_spec)
3908 3908
3909 3909 #Smooth
3910 3910 if (smooth == 0): spec2 = spec
3911 3911 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
3912 3912
3913 3913 # Moments computation
3914 3914 bb = spec2[list(range(m,spec2.size))]
3915 3915 bb = (bb<n0).nonzero()
3916 3916 bb = bb[0]
3917 3917
3918 3918 ss = spec2[list(range(0,m + 1))]
3919 3919 ss = (ss<n0).nonzero()
3920 3920 ss = ss[0]
3921 3921
3922 3922 if (bb.size == 0):
3923 3923 bb0 = spec.size - 1 - m
3924 3924 else:
3925 3925 bb0 = bb[0] - 1
3926 3926 if (bb0 < 0):
3927 3927 bb0 = 0
3928 3928
3929 3929 if (ss.size == 0): ss1 = 1
3930 3930 else: ss1 = max(ss) + 1
3931 3931
3932 3932 if (ss1 > m): ss1 = m
3933 3933
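# Same moment definitions as in ParametersProc.__calculateMoments above: zeroth moment (power),
# first moment (Doppler fd), second moment (spectral width w), and SNR relative to the noise
# level n0.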
3934 3934 valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1
3935 3935 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
3936 3936 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
3937 3937 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
3938 3938 snr = (spec2.mean()-n0)/n0
3939 3939
3940 3940 if (snr < 1.e-20) :
3941 3941 snr = 1.e-20
3942 3942
3943 3943 vec_power[ind] = power
3944 3944 vec_fd[ind] = fd
3945 3945 vec_w[ind] = w
3946 3946 vec_snr[ind] = snr
3947 3947
3948 3948 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
3949 3949 return moments
3950 3950
3951 3951 #def __DiffCoherent(self,snrth, spectra, cspectra, nProf, heights,nChan, nHei, nPairs, channels, noise, crosspairs):
3952 3952 def __DiffCoherent(self, spectra, cspectra, dataOut, noise, snrth, coh_th, hei_th):
3953 3953
3954 3954 import matplotlib.pyplot as plt
3955 3955 nProf = dataOut.nProfiles
3956 3956 heights = dataOut.heightList
3957 3957 nHei = len(heights)
3958 3958 channels = dataOut.channelList
3959 3959 nChan = len(channels)
3960 3960 crosspairs = dataOut.groupList
3961 3961 nPairs = len(crosspairs)
3962 3962 # Separate incoherent from coherent spectra (SNR > 20 dB)
3963 3963 snr_th = 10**(snrth/10.0)
3964 3964 my_incoh_spectra = numpy.zeros([nChan, nProf,nHei], dtype='float')
3965 3965 my_incoh_cspectra = numpy.zeros([nPairs,nProf, nHei], dtype='complex')
3966 3966 my_incoh_aver = numpy.zeros([nChan, nHei])
3967 3967 my_coh_aver = numpy.zeros([nChan, nHei])
3968 3968
3969 3969 coh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
3970 3970 coh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
3971 3971 coh_aver = numpy.zeros([nChan, nHei])
3972 3972
3973 3973 incoh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
3974 3974 incoh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
3975 3975 incoh_aver = numpy.zeros([nChan, nHei])
3976 3976 power = numpy.sum(spectra, axis=1)
3977 3977
3978 3978 if coh_th == None : coh_th = numpy.array([0.75,0.65,0.15]) # 0.65
3979 3979 if hei_th == None : hei_th = numpy.array([60,300,650])
3980 3980 for ic in range(2):
3981 3981 pair = crosspairs[ic]
3982 3982 # if the SNR is above the SNR threshold the data are taken as coherent
3983 3983 s_n0 = power[pair[0],:]/noise[pair[0]]
3984 3984 s_n1 = power[pair[1],:]/noise[pair[1]]
3985 3985
3986 3986 valid1 =(s_n0>=snr_th).nonzero()
3987 3987 valid2 = (s_n1>=snr_th).nonzero()
3988 3988 #valid = valid2 + valid1 #numpy.concatenate((valid1,valid2), axis=None)
3989 3989 valid1 = numpy.array(valid1[0])
3990 3990 valid2 = numpy.array(valid2[0])
3991 3991 valid = valid1
3992 3992 for iv in range(len(valid2)):
3993 3993 #for ivv in range(len(valid1)) :
3994 3994 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3995 3995 if len(indv[0]) == 0 :
3996 3996 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
3997 3997 if len(valid)>0:
3998 3998 my_coh_aver[pair[0],valid]=1
3999 3999 my_coh_aver[pair[1],valid]=1
4000 4000 # if the coherence is above the coherence threshold the data are taken as coherent
4001 4001 #print my_coh_aver[0,:]
4002 4002 coh = numpy.squeeze(numpy.nansum(cspectra[ic,:,:], axis=0)/numpy.sqrt(numpy.nansum(spectra[pair[0],:,:], axis=0)*numpy.nansum(spectra[pair[1],:,:], axis=0)))
4003 4003 #print('coh',numpy.absolute(coh))
4004 4004 for ih in range(len(hei_th)):
4005 4005 hvalid = (heights>hei_th[ih]).nonzero()
4006 4006 hvalid = hvalid[0]
4007 4007 if len(hvalid)>0:
4008 4008 valid = (numpy.absolute(coh[hvalid])>coh_th[ih]).nonzero()
4009 4009 valid = valid[0]
4010 4010 #print('hvalid:',hvalid)
4011 4011 #print('valid', valid)
4012 4012 if len(valid)>0:
4013 4013 my_coh_aver[pair[0],hvalid[valid]] =1
4014 4014 my_coh_aver[pair[1],hvalid[valid]] =1
4015 4015
4016 4016 coh_echoes = (my_coh_aver[pair[0],:] == 1).nonzero()
4017 4017 incoh_echoes = (my_coh_aver[pair[0],:] != 1).nonzero()
4018 4018 incoh_echoes = incoh_echoes[0]
4019 4019 if len(incoh_echoes) > 0:
4020 4020 my_incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
4021 4021 my_incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
4022 4022 my_incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
4023 4023 my_incoh_aver[pair[0],incoh_echoes] = 1
4024 4024 my_incoh_aver[pair[1],incoh_echoes] = 1
4025 4025
4026 4026
4027 4027 for ic in range(2):
4028 4028 pair = crosspairs[ic]
4029 4029
4030 4030 valid1 =(my_coh_aver[pair[0],:]==1 ).nonzero()
4031 4031 valid2 = (my_coh_aver[pair[1],:]==1).nonzero()
4032 4032 valid1 = numpy.array(valid1[0])
4033 4033 valid2 = numpy.array(valid2[0])
4034 4034 valid = valid1
4035 4035 #print valid1 , valid2
4036 4036 for iv in range(len(valid2)):
4037 4037 #for ivv in range(len(valid1)) :
4038 4038 indv = numpy.array((valid1 == valid2[iv]).nonzero())
4039 4039 if len(indv[0]) == 0 :
4040 4040 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
4041 4041 #print valid
4042 4042 #valid = numpy.concatenate((valid1,valid2), axis=None)
4043 4043 valid1 =(my_coh_aver[pair[0],:] !=1 ).nonzero()
4044 4044 valid2 = (my_coh_aver[pair[1],:] !=1).nonzero()
4045 4045 valid1 = numpy.array(valid1[0])
4046 4046 valid2 = numpy.array(valid2[0])
4047 4047 incoh_echoes = valid1
4048 4048 #print valid1, valid2
4049 4049 #incoh_echoes= numpy.concatenate((valid1,valid2), axis=None)
4050 4050 for iv in range(len(valid2)):
4051 4051 #for ivv in range(len(valid1)) :
4052 4052 indv = numpy.array((valid1 == valid2[iv]).nonzero())
4053 4053 if len(indv[0]) == 0 :
4054 4054 incoh_echoes = numpy.concatenate(( incoh_echoes,valid2[iv]), axis=None)
4055 4055 #print incoh_echoes
4056 4056 if len(valid)>0:
4057 4057 #print pair
4058 4058 coh_spectra[pair[0],:,valid] = spectra[pair[0],:,valid]
4059 4059 coh_spectra[pair[1],:,valid] = spectra[pair[1],:,valid]
4060 4060 coh_cspectra[ic,:,valid] = cspectra[ic,:,valid]
4061 4061 coh_aver[pair[0],valid]=1
4062 4062 coh_aver[pair[1],valid]=1
4063 4063 if len(incoh_echoes)>0:
4064 4064 incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
4065 4065 incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
4066 4066 incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
4067 4067 incoh_aver[pair[0],incoh_echoes]=1
4068 4068 incoh_aver[pair[1],incoh_echoes]=1
4069 4069 #plt.imshow(spectra[0,:,:],vmin=20000000)
4070 4070 #plt.show()
4071 4071 #my_incoh_aver = my_incoh_aver+1
4072 4072
4073 4073 #spec = my_incoh_spectra.copy()
4074 4074 #cspec = my_incoh_cspectra.copy()
4075 4075 #print('######################', spec)
4076 4076 #print(self.numpy)
4077 4077 #return spec, cspec,coh_aver
4078 4078 return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver
4079 4079
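# Illustrative sketch (an addition for clarity, not called by the processing
# chain): the coherent/incoherent separation above relies on the magnitude of
# the complex coherence between a channel pair, integrated over the frequency
# axis. The array shapes and the 0.75 threshold below are assumptions made only
# for this example.
#
#   import numpy
#
#   nProf, nHei = 64, 100                              # assumed dimensions
#   spc0 = numpy.random.rand(nProf, nHei) + 1.0        # auto-spectrum, channel 0
#   spc1 = numpy.random.rand(nProf, nHei) + 1.0        # auto-spectrum, channel 1
#   cspc = (numpy.random.rand(nProf, nHei)
#           + 1j*numpy.random.rand(nProf, nHei))       # cross-spectrum (0,1)
#
#   # per-height coherence: integrate over the frequency axis first
#   coh = numpy.abs(numpy.nansum(cspc, axis=0)
#                   / numpy.sqrt(numpy.nansum(spc0, axis=0)
#                                * numpy.nansum(spc1, axis=0)))
#   coherent_heights = (coh > 0.75).nonzero()[0]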
4080 4080 def __CleanCoherent(self,snrth, spectra, cspectra, coh_aver,dataOut, noise,clean_coh_echoes,index):
4081 4081
4082 4082 import matplotlib.pyplot as plt
4083 4083 nProf = dataOut.nProfiles
4084 4084 heights = dataOut.heightList
4085 4085 nHei = len(heights)
4086 4086 channels = dataOut.channelList
4087 4087 nChan = len(channels)
4088 4088 crosspairs = dataOut.groupList
4089 4089 nPairs = len(crosspairs)
4090 4090
4091 4091 #data = dataOut.data_pre[0]
4092 4092 absc = dataOut.abscissaList[:-1]
4093 4093 #noise = dataOut.noise
4094 4094 #nChannel = data.shape[0]
4095 4095 data_param = numpy.zeros((nChan, 4, spectra.shape[2]))
4096 4096
4097 4097
4098 4098 #plt.plot(absc)
4099 4099 #plt.show()
4100 4100 clean_coh_spectra = spectra.copy()
4101 4101 clean_coh_cspectra = cspectra.copy()
4102 4102 clean_coh_aver = coh_aver.copy()
4103 4103
4104 4104 spwd_th=[10,6] #spwd_th[0] --> For satellites ; spwd_th[1] --> For special events like SUN.
4105 4105 coh_th = 0.75
4106 4106
4107 4107 rtime0 = [6,18] # period without ESF
4108 4108 rtime1 = [10.5,13.5] # period with high coherence and high expected spectral width: SUN.
4109 4109
4110 4110 time = index*5./60
4111 4111 if clean_coh_echoes == 1 :
4112 4112 for ind in range(nChan):
4113 4113 data_param[ind,:,:] = self.__calculateMoments( spectra[ind,:,:] , absc , noise[ind] )
4114 4114 #print data_param[:,3]
4115 4115 spwd = data_param[:,3]
4116 4116 #print spwd.shape
4117 4117 # SPECB_JULIA,header=anal_header,jspectra=spectra,vel=velocities,hei=heights, num_aver=1, mode_fit=0,smoothing=smoothing,jvelr=velr,jspwd=spwd,jsnr=snr,jnoise=noise,jstdvnoise=stdvnoise
4118 4118 #spwd1=[ 1.65607, 1.43416, 0.500373, 0.208361, 0.000000, 26.7767, 22.5936, 26.7530, 20.6962, 29.1098, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 28.0300, 27.0511, 27.8810, 26.3126, 27.8445, 24.6181, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000]
4119 4119 #spwd=numpy.array([spwd1,spwd1,spwd1,spwd1])
4120 4120 #print spwd.shape, heights.shape,coh_aver.shape
4121 4121 # to obtain spwd
4122 4122 for ic in range(nPairs):
4123 4123 pair = crosspairs[ic]
4124 4124 coh = numpy.squeeze(numpy.sum(cspectra[ic,:,:], axis=1)/numpy.sqrt(numpy.sum(spectra[pair[0],:,:], axis=1)*numpy.sum(spectra[pair[1],:,:], axis=1)))
4125 4125 for ih in range(nHei) :
4126 4126 # Considering heights higher than 200km in order to avoid removing phenomena like EEJ.
4127 4127 if heights[ih] >= 200 and coh_aver[pair[0],ih] == 1 and coh_aver[pair[1],ih] == 1 :
4128 4128 # Checking coherence
4129 4129 if (numpy.abs(coh[ih]) <= coh_th) or (time >= rtime0[0] and time <= rtime0[1]) :
4130 4130 # Checking spectral widths
4131 4131 if (spwd[pair[0],ih] > spwd_th[0]) or (spwd[pair[1],ih] > spwd_th[0]) :
4132 4132 # satellite
4133 4133 clean_coh_spectra[pair,ih,:] = 0.0
4134 4134 clean_coh_cspectra[ic,ih,:] = 0.0
4135 4135 clean_coh_aver[pair,ih] = 0
4136 4136 else :
4137 4137 if ((spwd[pair[0],ih] < spwd_th[1]) or (spwd[pair[1],ih] < spwd_th[1])) :
4138 4138 # Special events like the sun.
4139 4139 clean_coh_spectra[pair,ih,:] = 0.0
4140 4140 clean_coh_cspectra[ic,ih,:] = 0.0
4141 4141 clean_coh_aver[pair,ih] = 0
4142 4142
4143 4143 return clean_coh_spectra, clean_coh_cspectra, clean_coh_aver
4144 4144
4145 4145 isConfig = False
4146 4146 __dataReady = False
4147 4147 bloques = None
4148 4148 bloque0 = None
4149 4149
4150 4150 def __init__(self):
4151 4151 Operation.__init__(self)
4152 4152 self.i=0
4153 4153 self.isConfig = False
4154 4154
4155 4155
4156 4156 def setup(self,nChan,nProf,nHei,nBlocks):
4157 4157 self.__dataReady = False
4158 4158 self.bloques = numpy.zeros([2, nProf, nHei,nBlocks], dtype= complex)
4159 4159 self.bloque0 = numpy.zeros([nChan, nProf, nHei, nBlocks])
4160 4160
4161 4161 #def CleanRayleigh(self,dataOut,spectra,cspectra,out_spectra,out_cspectra,sat_spectra,sat_cspectra,crosspairs,heights, channels, nProf,nHei,nChan,nPairs,nIncohInt,nBlocks):
4162 4162 def CleanRayleigh(self,dataOut,spectra,cspectra,save_drifts):
4163 4163 #import matplotlib.pyplot as plt
4164 4164 #for k in range(149):
4165 4165
4166 4166 # self.bloque0[:,:,:,k] = spectra[:,:,0:nHei]
4167 4167 # self.bloques[:,:,:,k] = cspectra[:,:,0:nHei]
4168 4168 #if self.i==nBlocks:
4169 4169 # self.i==0
4170 4170 rfunc = cspectra.copy() #self.bloques
4171 4171 n_funct = len(rfunc[0,:,0,0])
4172 4172 val_spc = spectra*0.0 #self.bloque0*0.0
4173 4173 val_cspc = cspectra*0.0 #self.bloques*0.0
4174 4174 in_sat_spectra = spectra.copy() #self.bloque0
4175 4175 in_sat_cspectra = cspectra.copy() #self.bloques
4176 4176
4177 4177 #print( rfunc.shape)
4178 4178 min_hei = 200
4179 4179 nProf = dataOut.nProfiles
4180 4180 heights = dataOut.heightList
4181 4181 nHei = len(heights)
4182 4182 channels = dataOut.channelList
4183 4183 nChan = len(channels)
4184 4184 crosspairs = dataOut.groupList
4185 4185 nPairs = len(crosspairs)
4186 4186 hval=(heights >= min_hei).nonzero()
4187 4187 ih=hval[0]
4188 4188 #print numpy.absolute(rfunc[:,0,0,14])
4189 4189 for ih in range(hval[0][0],nHei):
4190 4190 for ifreq in range(nProf):
4191 4191 for ii in range(n_funct):
4192 4192
4193 4193 func2clean = 10*numpy.log10(numpy.absolute(rfunc[:,ii,ifreq,ih]))
4194 4194 #print numpy.amin(func2clean)
4195 4195 val = (numpy.isfinite(func2clean)==True).nonzero()
4196 4196 if len(val[0])>0:
4197 4197 min_val = numpy.around(numpy.amin(func2clean)-2) #> (-40)
4198 4198 if min_val <= -40 : min_val = -40
4199 4199 max_val = numpy.around(numpy.amax(func2clean)+2) #< 200
4200 4200 if max_val >= 200 : max_val = 200
4201 4201 #print min_val, max_val
4202 4202 step = 1
4203 4203 #Getting bins and the histogram
4204 4204 x_dist = min_val + numpy.arange(1 + ((max_val-(min_val))/step))*step
4205 4205 y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step))
4206 4206 mean = numpy.sum(x_dist * y_dist) / numpy.sum(y_dist)
4207 4207 sigma = numpy.sqrt(numpy.sum(y_dist * (x_dist - mean)**2) / numpy.sum(y_dist))
4208 4208 parg = [numpy.amax(y_dist),mean,sigma]
4209 4209 try :
4210 4210 gauss_fit, covariance = curve_fit(fit_func, x_dist, y_dist,p0=parg)
4211 4211 mode = gauss_fit[1]
4212 4212 stdv = gauss_fit[2]
4213 4213 except:
4214 4214 mode = mean
4215 4215 stdv = sigma
4216 4216 # if ih == 14 and ii == 0 and ifreq ==0 :
4217 4217 # print x_dist.shape, y_dist.shape
4218 4218 # print x_dist, y_dist
4219 4219 # print min_val, max_val, binstep
4220 4220 # print func2clean
4221 4221 # print mean,sigma
4222 4222 # mean1,std = norm.fit(y_dist)
4223 4223 # print mean1, std, gauss_fit
4224 4224 # print fit_func(x_dist,gauss_fit[0],gauss_fit[1],gauss_fit[2])
4225 4225 # 7.84616 53.9307 3.61863
4226 4226 #stdv = 3.61863 # 2.99089
4227 4227 #mode = 53.9307 #7.79008
4228 4228
4229 4229 #Removing echoes greater than mode + 3*stdv
4230 4230 factor_stdv = 2.5
4231 4231 noval = (abs(func2clean - mode)>=(factor_stdv*stdv)).nonzero()
4232 4232
4233 4233 if len(noval[0]) > 0:
4234 4234 novall = ((func2clean - mode) >= (factor_stdv*stdv)).nonzero()
4235 4235 cross_pairs = crosspairs[ii]
4236 4236 #Getting coherent echoes which are removed.
4237 4237 if len(novall[0]) > 0:
4238 4238 #val_spc[(0,1),novall[a],ih] = 1
4239 4239 #val_spc[,(2,3),novall[a],ih] = 1
4240 4240 val_spc[novall[0],cross_pairs[0],ifreq,ih] = 1
4241 4241 val_spc[novall[0],cross_pairs[1],ifreq,ih] = 1
4242 4242 val_cspc[novall[0],ii,ifreq,ih] = 1
4243 4243 #print("OUT NOVALL 1")
4244 4244 #Removing coherent from ISR data
4245 4245 # if ih == 17 and ii == 0 and ifreq ==0 :
4246 4246 # print spectra[:,cross_pairs[0],ifreq,ih]
4247 4247 spectra[noval,cross_pairs[0],ifreq,ih] = numpy.nan
4248 4248 spectra[noval,cross_pairs[1],ifreq,ih] = numpy.nan
4249 4249 cspectra[noval,ii,ifreq,ih] = numpy.nan
4250 4250 # if ih == 17 and ii == 0 and ifreq ==0 :
4251 4251 # print spectra[:,cross_pairs[0],ifreq,ih]
4252 4252 # print noval, len(noval[0])
4253 4253 # print novall, len(novall[0])
4254 4254 # print factor_stdv*stdv
4255 4255 # print func2clean-mode
4256 4256 # print val_spc[:,cross_pairs[0],ifreq,ih]
4257 4257 # print spectra[:,cross_pairs[0],ifreq,ih]
4258 4258 # not reached; only applies when save_drifts > 2
4259 4259 ''' channels = channels
4260 4260 cross_pairs = cross_pairs
4261 4261 #print("OUT NOVALL 2")
4262 4262
4263 4263 vcross0 = (cross_pairs[0] == channels[ii]).nonzero()
4264 4264 vcross1 = (cross_pairs[1] == channels[ii]).nonzero()
4265 4265 vcross = numpy.concatenate((vcross0,vcross1),axis=None)
4266 4266 #print('vcros =', vcross)
4267 4267
4268 4268 #Getting coherent echoes which are removed.
4269 4269 if len(novall) > 0:
4270 4270 #val_spc[novall,ii,ifreq,ih] = 1
4271 4271 val_spc[ii,ifreq,ih,novall] = 1
4272 4272 if len(vcross) > 0:
4273 4273 val_cspc[vcross,ifreq,ih,novall] = 1
4274 4274
4275 4275 #Removing coherent from ISR data.
4276 4276 self.bloque0[ii,ifreq,ih,noval] = numpy.nan
4277 4277 if len(vcross) > 0:
4278 4278 self.bloques[vcross,ifreq,ih,noval] = numpy.nan
4279 4279 '''
4280 4280 #Getting average of the spectra and cross-spectra from incoherent echoes.
4281 4281 out_spectra = numpy.zeros([nChan,nProf,nHei], dtype=float) #+numpy.nan
4282 4282 out_cspectra = numpy.zeros([nPairs,nProf,nHei], dtype=complex) #+numpy.nan
4283 4283 for ih in range(nHei):
4284 4284 for ifreq in range(nProf):
4285 4285 for ich in range(nChan):
4286 4286 tmp = spectra[:,ich,ifreq,ih]
4287 4287 valid = (numpy.isfinite(tmp[:])==True).nonzero()
4288 4288 # if ich == 0 and ifreq == 0 and ih == 17 :
4289 4289 # print tmp
4290 4290 # print valid
4291 4291 # print len(valid[0])
4292 4292 #print('TMP',tmp)
4293 4293 if len(valid[0]) >0 :
4294 4294 out_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
4295 4295 #for icr in range(nPairs):
4296 4296 for icr in range(nPairs):
4297 4297 tmp = numpy.squeeze(cspectra[:,icr,ifreq,ih])
4298 4298 valid = (numpy.isfinite(tmp)==True).nonzero()
4299 4299 if len(valid[0]) > 0:
4300 4300 out_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
4301 4301 # print('##########################################################')
4302 4302 #Removing fake coherent echoes (at least 4 points around the point)
4303 4303
4304 4304 val_spectra = numpy.sum(val_spc,0)
4305 4305 val_cspectra = numpy.sum(val_cspc,0)
4306 4306
4307 4307 val_spectra = self.REM_ISOLATED_POINTS(val_spectra,4)
4308 4308 val_cspectra = self.REM_ISOLATED_POINTS(val_cspectra,4)
4309 4309
4310 4310 for i in range(nChan):
4311 4311 for j in range(nProf):
4312 4312 for k in range(nHei):
4313 4313 if numpy.isfinite(val_spectra[i,j,k]) and val_spectra[i,j,k] < 1 :
4314 4314 val_spc[:,i,j,k] = 0.0
4315 4315 for i in range(nPairs):
4316 4316 for j in range(nProf):
4317 4317 for k in range(nHei):
4318 4318 if numpy.isfinite(val_cspectra[i,j,k]) and val_cspectra[i,j,k] < 1 :
4319 4319 val_cspc[:,i,j,k] = 0.0
4320 4320 # val_spc = numpy.reshape(val_spc, (len(spectra[:,0,0,0]),nProf*nHei*nChan))
4321 4321 # if numpy.isfinite(val_spectra)==str(True):
4322 4322 # noval = (val_spectra<1).nonzero()
4323 4323 # if len(noval) > 0:
4324 4324 # val_spc[:,noval] = 0.0
4325 4325 # val_spc = numpy.reshape(val_spc, (149,nChan,nProf,nHei))
4326 4326
4327 4327 #val_cspc = numpy.reshape(val_spc, (149,nChan*nHei*nProf))
4328 4328 #if numpy.isfinite(val_cspectra)==str(True):
4329 4329 # noval = (val_cspectra<1).nonzero()
4330 4330 # if len(noval) > 0:
4331 4331 # val_cspc[:,noval] = 0.0
4332 4332 # val_cspc = numpy.reshape(val_cspc, (149,nChan,nProf,nHei))
4333 4333
4334 4334 tmp_sat_spectra = spectra.copy()
4335 4335 tmp_sat_spectra = tmp_sat_spectra*numpy.nan
4336 4336 tmp_sat_cspectra = cspectra.copy()
4337 4337 tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan
4338 4338
4339 4339 # fig = plt.figure(figsize=(6,5))
4340 4340 # left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
4341 4341 # ax = fig.add_axes([left, bottom, width, height])
4342 4342 # cp = ax.contour(10*numpy.log10(numpy.absolute(spectra[0,0,:,:])))
4343 4343 # ax.clabel(cp, inline=True,fontsize=10)
4344 4344 # plt.show()
4345 4345
4346 4346 val = (val_spc > 0).nonzero()
4347 4347 if len(val[0]) > 0:
4348 4348 tmp_sat_spectra[val] = in_sat_spectra[val]
4349 4349
4350 4350 val = (val_cspc > 0).nonzero()
4351 4351 if len(val[0]) > 0:
4352 4352 tmp_sat_cspectra[val] = in_sat_cspectra[val]
4353 4353
4354 4354 #Getting average of the spectra and cross-spectra from incoherent echoes.
4355 4355 sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
4356 4356 sat_cspectra = numpy.zeros((nPairs,nProf,nHei), dtype=complex)
4357 4357 for ih in range(nHei):
4358 4358 for ifreq in range(nProf):
4359 4359 for ich in range(nChan):
4360 4360 tmp = numpy.squeeze(tmp_sat_spectra[:,ich,ifreq,ih])
4361 4361 valid = (numpy.isfinite(tmp)).nonzero()
4362 4362 if len(valid[0]) > 0:
4363 4363 sat_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
4364 4364
4365 4365 for icr in range(nPairs):
4366 4366 tmp = numpy.squeeze(tmp_sat_cspectra[:,icr,ifreq,ih])
4367 4367 valid = (numpy.isfinite(tmp)).nonzero()
4368 4368 if len(valid[0]) > 0:
4369 4369 sat_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
4370 4370 #self.__dataReady= True
4371 4371 #sat_spectra, sat_cspectra= sat_spectra, sat_cspectra
4372 4372 #if not self.__dataReady:
4373 4373 #return None, None
4374 4374 return out_spectra, out_cspectra,sat_spectra,sat_cspectra
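# Illustrative sketch of the thresholding idea used in CleanRayleigh above: fit
# a Gaussian to the histogram of log-power samples and reject samples farther
# than factor_stdv standard deviations from the fitted mode. The gauss() helper
# and the synthetic samples are assumptions for this example (the code above
# calls a fit_func helper with curve_fit in the same way).
#
#   import numpy
#   from scipy.optimize import curve_fit
#
#   def gauss(x, a, x0, sigma):
#       return a*numpy.exp(-(x - x0)**2/(2*sigma**2))
#
#   samples = 10*numpy.log10(numpy.random.rayleigh(1.0, 150))   # log-power realizations
#   lo, hi = numpy.floor(samples.min()) - 2, numpy.ceil(samples.max()) + 2
#   y_dist, edges = numpy.histogram(samples, bins=numpy.arange(lo, hi + 1, 1.0))
#   x_dist = 0.5*(edges[:-1] + edges[1:])
#   mean = numpy.sum(x_dist*y_dist)/numpy.sum(y_dist)
#   sigma = numpy.sqrt(numpy.sum(y_dist*(x_dist - mean)**2)/numpy.sum(y_dist))
#   try:
#       (amp, mode, stdv), _ = curve_fit(gauss, x_dist, y_dist,
#                                        p0=[y_dist.max(), mean, sigma])
#   except RuntimeError:
#       mode, stdv = mean, sigma                  # fall back to the raw moments
#   factor_stdv = 2.5
#   outliers = (numpy.abs(samples - mode) >= factor_stdv*abs(stdv)).nonzero()[0]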
4375 4375 def REM_ISOLATED_POINTS(self,array,rth):
4376 4376 # import matplotlib.pyplot as plt
4377 4377 if rth is None : rth = 4
4378 4378
4379 4379 num_prof = len(array[0,:,0])
4380 4380 num_hei = len(array[0,0,:])
4381 4381 n2d = len(array[:,0,0])
4382 4382
4383 4383 for ii in range(n2d) :
4384 4384 #print ii,n2d
4385 4385 tmp = array[ii,:,:]
4386 4386 #print tmp.shape, array[ii,101,:],array[ii,102,:]
4387 4387
4388 4388 # fig = plt.figure(figsize=(6,5))
4389 4389 # left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
4390 4390 # ax = fig.add_axes([left, bottom, width, height])
4391 4391 # x = range(num_prof)
4392 4392 # y = range(num_hei)
4393 4393 # cp = ax.contour(y,x,tmp)
4394 4394 # ax.clabel(cp, inline=True,fontsize=10)
4395 4395 # plt.show()
4396 4396
4397 4397 #indxs = WHERE(FINITE(tmp) AND tmp GT 0,cindxs)
4398 4398 tmp = numpy.reshape(tmp,num_prof*num_hei)
4399 4399 indxs1 = (numpy.isfinite(tmp)==True).nonzero()
4400 4400 indxs2 = (tmp > 0).nonzero()
4401 4401
4402 4402 indxs1 = (indxs1[0])
4403 4403 indxs2 = indxs2[0]
4404 4404 #indxs1 = numpy.array(indxs1[0])
4405 4405 #indxs2 = numpy.array(indxs2[0])
4406 4406 indxs = None
4407 4407 #print indxs1 , indxs2
4408 4408 for iv in range(len(indxs2)):
4409 4409 indv = numpy.array((indxs1 == indxs2[iv]).nonzero())
4410 4410 #print len(indxs2), indv
4411 4411 if len(indv[0]) > 0 :
4412 4412 indxs = numpy.concatenate((indxs,indxs2[iv]), axis=None)
4413 4413 # print indxs
4414 4414 indxs = indxs[1:]
4415 4415 #print indxs, len(indxs)
4416 4416 if len(indxs) < 4 :
4417 4417 array[ii,:,:] = 0.
4418 4418 return array
4419 4419
4420 4420 xpos = numpy.mod(indxs ,num_hei)
4421 4421 ypos = (indxs // num_hei) # integer height index
4422 4422 sx = numpy.argsort(xpos) # Ordering respect to "x" (time)
4423 4423 #print sx
4424 4424 xpos = xpos[sx]
4425 4425 ypos = ypos[sx]
4426 4426
4427 4427 # *********************************** Cleaning isolated points **********************************
4428 4428 ic = 0
4429 4429 while True :
4430 4430 r = numpy.sqrt(list(numpy.power((xpos[ic]-xpos),2)+ numpy.power((ypos[ic]-ypos),2)))
4431 4431 #no_coh = WHERE(FINITE(r) AND (r LE rth),cno_coh)
4432 4432 #plt.plot(r)
4433 4433 #plt.show()
4434 4434 no_coh1 = (numpy.isfinite(r)==True).nonzero()
4435 4435 no_coh2 = (r <= rth).nonzero()
4436 4436 #print r, no_coh1, no_coh2
4437 4437 no_coh1 = numpy.array(no_coh1[0])
4438 4438 no_coh2 = numpy.array(no_coh2[0])
4439 4439 no_coh = None
4440 4440 #print valid1 , valid2
4441 4441 for iv in range(len(no_coh2)):
4442 4442 indv = numpy.array((no_coh1 == no_coh2[iv]).nonzero())
4443 4443 if len(indv[0]) > 0 :
4444 4444 no_coh = numpy.concatenate((no_coh,no_coh2[iv]), axis=None)
4445 4445 no_coh = no_coh[1:]
4446 4446 #print len(no_coh), no_coh
4447 4447 if len(no_coh) < 4 :
4448 4448 #print xpos[ic], ypos[ic], ic
4449 4449 # plt.plot(r)
4450 4450 # plt.show()
4451 4451 xpos[ic] = numpy.nan
4452 4452 ypos[ic] = numpy.nan
4453 4453
4454 4454 ic = ic + 1
4455 4455 if (ic == len(indxs)) :
4456 4456 break
4457 4457 #print( xpos, ypos)
4458 4458
4459 4459 indxs = (numpy.isfinite(list(xpos))==True).nonzero()
4460 4460 #print indxs[0]
4461 4461 if len(indxs[0]) < 4 :
4462 4462 array[ii,:,:] = 0.
4463 4463 return array
4464 4464
4465 4465 xpos = xpos[indxs[0]]
4466 4466 ypos = ypos[indxs[0]]
4467 4467 for i in range(0,len(ypos)):
4468 4468 ypos[i]=int(ypos[i])
4469 4469 junk = tmp
4470 4470 tmp = junk*0.0
4471 4471
4472 4472 tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))]
4473 4473 array[ii,:,:] = numpy.reshape(tmp,(num_prof,num_hei))
4474 4474
4475 4475 #print array.shape
4476 4476 #tmp = numpy.reshape(tmp,(num_prof,num_hei))
4477 4477 #print tmp.shape
4478 4478
4479 4479 # fig = plt.figure(figsize=(6,5))
4480 4480 # left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
4481 4481 # ax = fig.add_axes([left, bottom, width, height])
4482 4482 # x = range(num_prof)
4483 4483 # y = range(num_hei)
4484 4484 # cp = ax.contour(y,x,array[ii,:,:])
4485 4485 # ax.clabel(cp, inline=True,fontsize=10)
4486 4486 # plt.show()
4487 4487 return array
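# Illustrative sketch of the isolated-point test applied in REM_ISOLATED_POINTS
# above: a flagged point survives only if at least four flagged points (itself
# included) lie within a radius rth of it in the (profile, height) plane. The
# coordinates below are invented for the example.
#
#   import numpy
#
#   xpos = numpy.array([10, 11, 12, 11, 50])    # profile indices of flagged points
#   ypos = numpy.array([20, 20, 21, 22, 80])    # height indices of flagged points
#   rth = 4
#   keep = numpy.zeros(len(xpos), dtype=bool)
#   for ic in range(len(xpos)):
#       r = numpy.sqrt((xpos[ic] - xpos)**2 + (ypos[ic] - ypos)**2)
#       keep[ic] = numpy.sum(r <= rth) >= 4
#   # keep -> [True, True, True, True, False]; the last point is isolated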
4488 4488 def moments(self,doppler,yarray,npoints):
4489 4489 ytemp = yarray
4490 4490 #val = WHERE(ytemp GT 0,cval)
4491 4491 #if cval == 0 : val = range(npoints-1)
4492 4492 val = (ytemp > 0).nonzero()
4493 4493 val = val[0]
4494 4494 #print('hvalid:',hvalid)
4495 4495 #print('valid', valid)
4496 4496 if len(val) == 0 : val = range(npoints-1)
4497 4497
4498 4498 ynew = 0.5*(ytemp[val[0]]+ytemp[val[len(val)-1]])
4499 4499 ytemp[len(ytemp):] = [ynew]
4500 4500
4501 4501 index = 0
4502 4502 index = numpy.argmax(ytemp)
4503 4503 ytemp = numpy.roll(ytemp,int(npoints/2)-1-index)
4504 4504 ytemp = ytemp[0:npoints-1]
4505 4505
4506 4506 fmom = numpy.sum(doppler*ytemp)/numpy.sum(ytemp)+(index-(npoints/2-1))*numpy.abs(doppler[1]-doppler[0])
4507 4507 smom = numpy.sum(doppler*doppler*ytemp)/numpy.sum(ytemp)
4508 4508 return [fmom,numpy.sqrt(smom)]
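# Worked example of the spectral moments returned by moments() above: the first
# moment is the power-weighted mean Doppler (radial velocity) and the square
# root of the second moment is used as a spectral-width estimate. The Gaussian
# line below is synthetic; note that moments() keeps the un-centered second
# moment, while the centered form is shown here for clarity.
#
#   import numpy
#
#   doppler = numpy.linspace(-10.0, 10.0, 64)               # velocity axis (m/s)
#   power = numpy.exp(-0.5*((doppler - 2.0)/1.5)**2)        # line centred at +2 m/s
#   fmom = numpy.sum(doppler*power)/numpy.sum(power)        # ~ 2.0 m/s
#   smom = numpy.sum((doppler - fmom)**2*power)/numpy.sum(power)
#   width = numpy.sqrt(smom)                                # ~ 1.5 m/s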
4509 4509 # **********************************************************************************************
4510 4510 index = 0
4511 4511 fint = 0
4512 4512 buffer = 0
4513 4513 buffer2 = 0
4514 4514 buffer3 = 0
4515 4515 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None):
4516 4516 nChannels = dataOut.nChannels
4517 4517 nHeights= dataOut.heightList.size
4518 4518 nProf = dataOut.nProfiles
4519 4519 tini=time.localtime(dataOut.utctime)
4520 4520 if (tini.tm_min % 5) == 0 and (tini.tm_sec < 5 and self.fint==0):
4521 4521 # print tini.tm_min
4522 4522 self.index = 0
4523 4523 jspc = self.buffer
4524 4524 jcspc = self.buffer2
4525 4525 jnoise = self.buffer3
4526 4526 self.buffer = dataOut.data_spc
4527 4527 self.buffer2 = dataOut.data_cspc
4528 4528 self.buffer3 = dataOut.noise
4529 4529 self.fint = 1
4530 4530 if numpy.any(jspc) :
4531 4531 jspc= numpy.reshape(jspc,(int(len(jspc)/4),nChannels,nProf,nHeights))
4532 4532 jcspc= numpy.reshape(jcspc,(int(len(jcspc)/2),2,nProf,nHeights))
4533 4533 jnoise= numpy.reshape(jnoise,(int(len(jnoise)/4),nChannels))
4534 4534 else:
4535 4535 dataOut.flagNoData = True
4536 4536 return dataOut
4537 4537 else :
4538 4538 if (tini.tm_min % 5) == 0 : self.fint = 1
4539 4539 else : self.fint = 0
4540 4540 self.index += 1
4541 4541 if numpy.any(self.buffer):
4542 4542 self.buffer = numpy.concatenate((self.buffer,dataOut.data_spc), axis=0)
4543 4543 self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0)
4544 4544 self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0)
4545 4545 else:
4546 4546 self.buffer = dataOut.data_spc
4547 4547 self.buffer2 = dataOut.data_cspc
4548 4548 self.buffer3 = dataOut.noise
4549 4549 dataOut.flagNoData = True
4550 4550 return dataOut
4551 4551 if path != None:
4552 4552 sys.path.append(path)
4553 4553 self.library = importlib.import_module(file)
4554 4554
4555 4555 #To be inserted as a parameter
4556 4556 groupArray = numpy.array(groupList)
4557 4557 #groupArray = numpy.array([[0,1],[2,3]])
4558 4558 dataOut.groupList = groupArray
4559 4559
4560 4560 nGroups = groupArray.shape[0]
4561 4561 nChannels = dataOut.nChannels
4562 4562 nHeights = dataOut.heightList.size
4563 4563
4564 4564 #Parameters Array
4565 4565 dataOut.data_param = None
4566 4566 dataOut.data_paramC = None
4567 4567
4568 4568 #Set constants
4569 4569 constants = self.library.setConstants(dataOut)
4570 4570 dataOut.constants = constants
4571 4571 M = dataOut.normFactor
4572 4572 N = dataOut.nFFTPoints
4573 4573 ippSeconds = dataOut.ippSeconds
4574 4574 K = dataOut.nIncohInt
4575 4575 pairsArray = numpy.array(dataOut.pairsList)
4576 4576
4577 4577 snrth= 20
4578 4578 spectra = dataOut.data_spc
4579 4579 cspectra = dataOut.data_cspc
4580 4580 nProf = dataOut.nProfiles
4581 4581 heights = dataOut.heightList
4582 4582 nHei = len(heights)
4583 4583 channels = dataOut.channelList
4584 4584 nChan = len(channels)
4585 4585 nIncohInt = dataOut.nIncohInt
4586 4586 crosspairs = dataOut.groupList
4587 4587 noise = dataOut.noise
4588 4588 jnoise = jnoise/N
4589 4589 noise = numpy.nansum(jnoise,axis=0)#/len(jnoise)
4590 4590 power = numpy.sum(spectra, axis=1)
4591 4591 nPairs = len(crosspairs)
4592 4592 absc = dataOut.abscissaList[:-1]
4593 4593
4594 4594 if not self.isConfig:
4595 4595 self.isConfig = True
4596 4596
4597 4597 index = tini.tm_hour*12+tini.tm_min/5
4598 4598 jspc = jspc/N/N
4599 4599 jcspc = jcspc/N/N
4600 4600 tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2)
4601 4601 jspectra = tmp_spectra*len(jspc[:,0,0,0])
4602 4602 jcspectra = tmp_cspectra*len(jspc[:,0,0,0])
4603 4603 my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, snrth, None, None)
4604 4604 clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index)
4605 4605 dataOut.data_spc = incoh_spectra
4606 4606 dataOut.data_cspc = incoh_cspectra
4607 4607
4608 4608 clean_num_aver = incoh_aver*len(jspc[:,0,0,0])
4609 4609 coh_num_aver = clean_coh_aver*len(jspc[:,0,0,0])
4610 4610 #List of possible combinations
4611 4611 listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2))
4612 4612 indCross = numpy.zeros(len(listComb), dtype = 'int')
4613 4613
4614 4614 if getSNR:
4615 4615 listChannels = groupArray.reshape((groupArray.size))
4616 4616 listChannels.sort()
4617 4617 dataOut.data_SNR = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise[listChannels])
4618 4618 if dataOut.data_paramC is None:
4619 4619 dataOut.data_paramC = numpy.zeros((nGroups*4, nHeights,2))*numpy.nan
4620 4620 for i in range(nGroups):
4621 4621 coord = groupArray[i,:]
4622 4622 #Input data array
4623 4623 data = dataOut.data_spc[coord,:,:]/(M*N)
4624 4624 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
4625 4625
4626 4626 #Cross-spectra data array for covariance matrices
4627 4627 ind = 0
4628 4628 for pairs in listComb:
4629 4629 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
4630 4630 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
4631 4631 ind += 1
4632 4632 dataCross = dataOut.data_cspc[indCross,:,:]/(M*N)
4633 4633 dataCross = dataCross**2
4634 4634 nhei = nHeights
4635 4635 poweri = numpy.sum(dataOut.data_spc[:,1:nProf-0,:],axis=1)/clean_num_aver[:,:]
4636 4636 if i == 0 : my_noises = numpy.zeros(4,dtype=float) #FLTARR(4)
4637 4637 n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(nProf-1)
4638 4638 n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(nProf-1)
4639 4639 n0 = n0i
4640 4640 n1= n1i
4641 4641 my_noises[2*i+0] = n0
4642 4642 my_noises[2*i+1] = n1
4643 4643 snrth = -16.0
4644 4644 snrth = 10**(snrth/10.0)
4645 4645
4646 4646 for h in range(nHeights):
4647 4647 d = data[:,h]
4648 4648 smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:]
4649 4649 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
4650 4650 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
4651 4651 signal0 = signalpn0-n0
4652 4652 signal1 = signalpn1-n1
4653 4653 snr0 = numpy.sum(signal0/n0)/(nProf-1)
4654 4654 snr1 = numpy.sum(signal1/n1)/(nProf-1)
4655 4655 if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 :
4656 4656 #Covariance Matrix
4657 4657 D = numpy.diag(d**2)
4658 4658 ind = 0
4659 4659 for pairs in listComb:
4660 4660 #Coordinates in Covariance Matrix
4661 4661 x = pairs[0]
4662 4662 y = pairs[1]
4663 4663 #Channel Index
4664 4664 S12 = dataCross[ind,:,h]
4665 4665 D12 = numpy.diag(S12)
4666 4666 #Completing the covariance matrix with cross-spectra
4667 4667 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
4668 4668 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
4669 4669 ind += 1
4670 4670 diagD = numpy.zeros(256)
4671 4671 if h == 17 :
4672 4672 for ii in range(256): diagD[ii] = D[ii,ii]
4673 4673 #Dinv=numpy.linalg.inv(D)
4674 4674 #L=numpy.linalg.cholesky(Dinv)
4675 4675 try:
4676 4676 Dinv=numpy.linalg.inv(D)
4677 4677 L=numpy.linalg.cholesky(Dinv)
4678 4678 except:
4679 4679 Dinv = D*numpy.nan
4680 4680 L= D*numpy.nan
4681 4681 LT=L.T
4682 4682
4683 4683 dp = numpy.dot(LT,d)
4684 4684
4685 4685 #Initial values
4686 4686 data_spc = dataOut.data_spc[coord,:,h]
4687 4687
4688 4688 if (h>0)and(error1[3]<5):
4689 4689 p0 = dataOut.data_param[i,:,h-1]
4690 4690 else:
4691 4691 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))# sin el i(data_spc, constants, i)
4692 4692 try:
4693 4693 #Least Squares
4694 4694 #print (dp,LT,constants)
4695 4695 #value =self.__residFunction(p0,dp,LT,constants)
4696 4696 #print ("valueREADY",value.shape, type(value))
4697 4697 #optimize.leastsq(value)
4698 4698 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
4699 4699 #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
4700 4700 #Chi square error
4701 4701 #print(minp,covp.infodict,mesg,ier)
4702 4702 #print("REALIZA OPTIMIZ")
4703 4703 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
4704 4704 #Error with Jacobian
4705 4705 error1 = self.library.errorFunction(minp,constants,LT)
4706 4706 # print self.__residFunction(p0,dp,LT, constants)
4707 4707 # print infodict['fvec']
4708 4708 # print self.__residFunction(minp,dp,LT,constants)
4709 4709
4710 4710 except:
4711 4711 minp = p0*numpy.nan
4712 4712 error0 = numpy.nan
4713 4713 error1 = p0*numpy.nan
4714 4714 #print ("EXCEPT 0000000000")
4715 4715 # s_sq = (self.__residFunction(minp,dp,LT,constants)).sum()/(len(dp)-len(p0))
4716 4716 # covp = covp*s_sq
4717 4717 # #print("TRY___________________________________________1")
4718 4718 # error = []
4719 4719 # for ip in range(len(minp)):
4720 4720 # try:
4721 4721 # error.append(numpy.absolute(covp[ip][ip])**0.5)
4722 4722 # except:
4723 4723 # error.append( 0.00 )
4724 4724 else :
4725 4725 data_spc = dataOut.data_spc[coord,:,h]
4726 4726 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))
4727 4727 minp = p0*numpy.nan
4728 4728 error0 = numpy.nan
4729 4729 error1 = p0*numpy.nan
4730 4730 #Save
4731 4731 if dataOut.data_param is None:
4732 4732 dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
4733 4733 dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
4734 4734
4735 4735 dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
4736 4736 dataOut.data_param[i,:,h] = minp
4737 4737
4738 4738 for ht in range(nHeights-1) :
4739 4739 smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam]
4740 4740 dataOut.data_paramC[4*i,ht,1] = smooth
4741 4741 signalpn0 = (coh_spectra[i*2 ,1:(nProf-0),ht])/smooth #coh_spectra
4742 4742 signalpn1 = (coh_spectra[i*2+1,1:(nProf-0),ht])/smooth
4743 4743
4744 4744 #val0 = WHERE(signalpn0 > 0,cval0)
4745 4745 val0 = (signalpn0 > 0).nonzero()
4746 4746 val0 = val0[0]
4747 4747 #print('hvalid:',hvalid)
4748 4748 #print('valid', valid)
4749 4749 if len(val0) == 0 : val0_npoints = nProf
4750 4750 else : val0_npoints = len(val0)
4751 4751
4752 4752 #val1 = WHERE(signalpn1 > 0,cval1)
4753 4753 val1 = (signalpn1 > 0).nonzero()
4754 4754 val1 = val1[0]
4755 4755 if len(val1) == 0 : val1_npoints = nProf
4756 4756 else : val1_npoints = len(val1)
4757 4757
4758 4758 dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0
4759 4759 dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1
4760 4760
4761 4761 signal0 = (signalpn0-n0) # > 0
4762 4762 vali = (signal0 < 0).nonzero()
4763 4763 vali = vali[0]
4764 4764 if len(vali) > 0 : signal0[vali] = 0
4765 4765 signal1 = (signalpn1-n1) #> 0
4766 4766 vali = (signal1 < 0).nonzero()
4767 4767 vali = vali[0]
4768 4768 if len(vali) > 0 : signal1[vali] = 0
4769 4769 snr0 = numpy.sum(signal0/n0)/(nProf-1)
4770 4770 snr1 = numpy.sum(signal1/n1)/(nProf-1)
4771 4771 doppler = absc[1:]
4772 4772 if snr0 >= snrth and snr1 >= snrth and smooth :
4773 4773 signalpn0_n0 = signalpn0
4774 4774 signalpn0_n0[val0] = signalpn0[val0] - n0
4775 4775 mom0 = self.moments(doppler,signalpn0-n0,nProf)
4776 4776 # sigtmp= numpy.transpose(numpy.tile(signalpn0, [4,1]))
4777 4777 # momt= self.__calculateMoments( sigtmp, doppler , n0 )
4778 4778 signalpn1_n1 = signalpn1
4779 4779 signalpn1_n1[val1] = signalpn1[val1] - n1
4780 4780 mom1 = self.moments(doppler,signalpn1_n1,nProf)
4781 4781 dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2.
4782 4782 dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2.
4783 4783 # if graph == 1 :
4784 4784 # window, 13
4785 4785 # plot,doppler,signalpn0
4786 4786 # oplot,doppler,signalpn1,linest=1
4787 4787 # oplot,mom0(0)*doppler/doppler,signalpn0
4788 4788 # oplot,mom1(0)*doppler/doppler,signalpn1
4789 4789 # print,interval/12.,beam,45+ht*15,snr0,snr1,mom0(0),mom1(0),mom0(1),mom1(1)
4790 4790 #ENDIF
4791 4791 #ENDIF
4792 4792 #ENDFOR End height
4793 4793
4794 4794 dataOut.data_spc = jspectra
4795 4795 if getSNR:
4796 4796 listChannels = groupArray.reshape((groupArray.size))
4797 4797 listChannels.sort()
4798 4798
4799 4799 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], my_noises[listChannels])
4800 4800 return dataOut
4801 4801
4802 4802 def __residFunction(self, p, dp, LT, constants):
4803 4803
4804 4804 fm = self.library.modelFunction(p, constants)
4805 4805 fmp=numpy.dot(LT,fm)
4806 4806 return dp-fmp
4807 4807
4808 4808 def __getSNR(self, z, noise):
4809 4809
4810 4810 avg = numpy.average(z, axis=1)
4811 4811 SNR = (avg.T-noise)/noise
4812 4812 SNR = SNR.T
4813 4813 return SNR
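# Illustrative sketch of the SNR estimate in __getSNR above: the spectrum is
# averaged over the frequency axis, the noise level is subtracted and the
# result is normalized by that same noise level. Shapes below are assumptions
# for the example.
#
#   import numpy
#
#   nChan, nProf, nHei = 4, 64, 100
#   z = numpy.random.rand(nChan, nProf, nHei) + 1.0     # power spectra
#   noise = numpy.full(nChan, 1.0)                      # one noise level per channel
#   avg = numpy.average(z, axis=1)                      # -> (nChan, nHei)
#   snr = ((avg.T - noise)/noise).T                     # -> (nChan, nHei)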
4814 4814
4815 4815 def __chisq(self, p, chindex, hindex):
4816 4816 #similar to Resid but calculates CHI**2
4817 4817 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
4818 4818 dp=numpy.dot(LT,d)
4819 4819 fmp=numpy.dot(LT,fm)
4820 4820 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
4821 4821 return chisq
4822 4822
4823 4823 class WindProfiler(Operation):
4824 4824
4825 4825 __isConfig = False
4826 4826
4827 4827 __initime = None
4828 4828 __lastdatatime = None
4829 4829 __integrationtime = None
4830 4830
4831 4831 __buffer = None
4832 4832
4833 4833 __dataReady = False
4834 4834
4835 4835 __firstdata = None
4836 4836
4837 4837 n = None
4838 4838
4839 4839 def __init__(self):
4840 4840 Operation.__init__(self)
4841 4841
4842 4842 def __calculateCosDir(self, elev, azim):
4843 4843 zen = (90 - elev)*numpy.pi/180
4844 4844 azim = azim*numpy.pi/180
4845 4845 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
4846 4846 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
4847 4847
4848 4848 signX = numpy.sign(numpy.cos(azim))
4849 4849 signY = numpy.sign(numpy.sin(azim))
4850 4850
4851 4851 cosDirX = numpy.copysign(cosDirX, signX)
4852 4852 cosDirY = numpy.copysign(cosDirY, signY)
4853 4853 return cosDirX, cosDirY
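# Quick check of __calculateCosDir above: for a beam at elevation elev and
# azimuth azim the expressions reduce to sin(zenith)*cos(azimuth) and
# sin(zenith)*sin(azimuth). The angles below are just an example.
#
#   import numpy
#
#   elev, azim = 80.0, 30.0                                    # degrees
#   zen = numpy.radians(90.0 - elev)
#   cosDirX = numpy.sin(zen)*numpy.cos(numpy.radians(azim))    # ~ 0.1504
#   cosDirY = numpy.sin(zen)*numpy.sin(numpy.radians(azim))    # ~ 0.0868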
4854 4854
4855 4855 def __calculateAngles(self, theta_x, theta_y, azimuth):
4856 4856
4857 4857 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
4858 4858 zenith_arr = numpy.arccos(dir_cosw)
4859 4859 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
4860 4860
4861 4861 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
4862 4862 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
4863 4863
4864 4864 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
4865 4865
4866 4866 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
4867 4867
4868 4868 if horOnly:
4869 4869 A = numpy.c_[dir_cosu,dir_cosv]
4870 4870 else:
4871 4871 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
4872 4872 A = numpy.asmatrix(A)
4873 4873 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
4874 4874
4875 4875 return A1
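# Illustrative sketch of the least-squares step prepared by __calculateMatA and
# applied in __calculateVelUVW below: with one row of direction cosines per
# beam, the radial velocities vr ~ A.[u, v, w] are inverted with the
# pseudo-inverse (A^T A)^-1 A^T. The beam geometry and velocities are invented.
#
#   import numpy
#
#   dir_cosu = numpy.array([ 0.17, -0.17, 0.00])     # one entry per beam
#   dir_cosv = numpy.array([ 0.00,  0.00, 0.17])
#   dir_cosw = numpy.sqrt(1 - dir_cosu**2 - dir_cosv**2)
#   A = numpy.c_[dir_cosu, dir_cosv, dir_cosw]
#   A1 = numpy.dot(numpy.linalg.inv(numpy.dot(A.T, A)), A.T)
#   vr = numpy.array([3.0, -1.0, 2.0])               # radial velocities (m/s)
#   u, v, w = numpy.dot(A1, vr)                      # zonal, meridional, vertical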
4876 4876
4877 4877 def __correctValues(self, heiRang, phi, velRadial, SNR):
4878 4878 listPhi = phi.tolist()
4879 4879 maxid = listPhi.index(max(listPhi))
4880 4880 minid = listPhi.index(min(listPhi))
4881 4881
4882 4882 rango = list(range(len(phi)))
4883 4883 # rango = numpy.delete(rango,maxid)
4884 4884
4885 4885 heiRang1 = heiRang*math.cos(phi[maxid])
4886 4886 heiRangAux = heiRang*math.cos(phi[minid])
4887 4887 indOut = (heiRang1 < heiRangAux[0]).nonzero()
4888 4888 heiRang1 = numpy.delete(heiRang1,indOut)
4889 4889
4890 4890 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
4891 4891 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
4892 4892
4893 4893 for i in rango:
4894 4894 x = heiRang*math.cos(phi[i])
4895 4895 y1 = velRadial[i,:]
4896 4896 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
4897 4897
4898 4898 x1 = heiRang1
4899 4899 y11 = f1(x1)
4900 4900
4901 4901 y2 = SNR[i,:]
4902 4902 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
4903 4903 y21 = f2(x1)
4904 4904
4905 4905 velRadial1[i,:] = y11
4906 4906 SNR1[i,:] = y21
4907 4907
4908 4908 return heiRang1, velRadial1, SNR1
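# Illustrative sketch of the height correction in __correctValues above: slant
# ranges are projected onto vertical heights with cos(zenith) and each beam is
# re-sampled onto the common grid with a cubic interpolator. The grid and the
# radial-velocity profile are assumptions for the example.
#
#   import math
#   import numpy
#   from scipy import interpolate
#
#   heiRang = numpy.arange(100.0, 200.0, 5.0)        # slant ranges (km)
#   phi = math.radians(15.0)                         # beam zenith angle
#   velRadial = numpy.sin(heiRang/30.0)              # made-up radial velocities
#
#   x = heiRang*math.cos(phi)                        # projected (vertical) heights
#   f1 = interpolate.interp1d(x, velRadial, kind='cubic')
#   heiRang1 = x                                     # common grid (single beam here)
#   vel_on_grid = f1(heiRang1)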
4909 4909
4910 4910 def __calculateVelUVW(self, A, velRadial):
4911 4911
4912 4912 #Matrix operation
4913 4913 # velUVW = numpy.zeros((velRadial.shape[1],3))
4914 4914 # for ind in range(velRadial.shape[1]):
4915 4915 # velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
4916 4916 # velUVW = velUVW.transpose()
4917 4917 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
4918 4918 velUVW[:,:] = numpy.dot(A,velRadial)
4919 4919
4920 4920
4921 4921 return velUVW
4922 4922
4923 4923 # def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
4924 4924
4925 4925 def techniqueDBS(self, kwargs):
4926 4926 """
4927 4927 Function that implements the Doppler Beam Swinging (DBS) technique.
4928 4928
4929 4929 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
4930 4930 Direction correction (if necessary), Ranges and SNR
4931 4931
4932 4932 Output: Winds estimation (Zonal, Meridional and Vertical)
4933 4933
4934 4934 Parameters affected: Winds, height range, SNR
4935 4935 """
4936 4936 velRadial0 = kwargs['velRadial']
4937 4937 heiRang = kwargs['heightList']
4938 4938 SNR0 = kwargs['SNR']
4939 4939
4940 4940 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
4941 4941 theta_x = numpy.array(kwargs['dirCosx'])
4942 4942 theta_y = numpy.array(kwargs['dirCosy'])
4943 4943 else:
4944 4944 elev = numpy.array(kwargs['elevation'])
4945 4945 azim = numpy.array(kwargs['azimuth'])
4946 4946 theta_x, theta_y = self.__calculateCosDir(elev, azim)
4947 4947 azimuth = kwargs['correctAzimuth']
4948 4948 if 'horizontalOnly' in kwargs:
4949 4949 horizontalOnly = kwargs['horizontalOnly']
4950 4950 else: horizontalOnly = False
4951 4951 if 'correctFactor' in kwargs:
4952 4952 correctFactor = kwargs['correctFactor']
4953 4953 else: correctFactor = 1
4954 4954 if 'channelList' in kwargs:
4955 4955 channelList = kwargs['channelList']
4956 4956 if len(channelList) == 2:
4957 4957 horizontalOnly = True
4958 4958 arrayChannel = numpy.array(channelList)
4959 4959 velRadial0 = velRadial0[arrayChannel,:]
4960 4960 theta_x = theta_x[arrayChannel]
4961 4961 theta_y = theta_y[arrayChannel]
4962 4962
4963 4963 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4964 4964 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
4965 4965 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
4966 4966
4967 4967 #Compute the velocity components with DBS
4968 4968 winds = self.__calculateVelUVW(A,velRadial1)
4969 4969
4970 4970 return winds, heiRang1, SNR1
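# Hedged usage sketch for techniqueDBS above. In the processing chain run()
# builds these keyword arguments from dataOut; the placeholder arrays and
# pointing angles below are assumptions for the example only.
#
#   import numpy
#
#   nBeams, nHei = 3, 100
#   kwargs = {
#       'velRadial': numpy.random.randn(nBeams, nHei),            # radial velocity per beam
#       'heightList': numpy.arange(100.0, 100.0 + 2.5*nHei, 2.5), # km
#       'SNR': numpy.ones((nBeams, nHei)),
#       'elevation': [80.0, 80.0, 80.0],                          # degrees
#       'azimuth': [0.0, 90.0, 180.0],                            # degrees
#       'correctAzimuth': 0.0,
#       'horizontalOnly': False,
#   }
#   # winds, heights, snr = self.techniqueDBS(kwargs)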
4971 4971
4972 4972 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
4973 4973
4974 4974 nPairs = len(pairs_ccf)
4975 4975 posx = numpy.asarray(posx)
4976 4976 posy = numpy.asarray(posy)
4977 4977
4978 4978 #Inverse rotation to align with the azimuth
4979 4979 if azimuth is not None:
4980 4980 azimuth = azimuth*math.pi/180
4981 4981 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
4982 4982 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
4983 4983 else:
4984 4984 posx1 = posx
4985 4985 posy1 = posy
4986 4986
4987 4987 #Distance calculation
4988 4988 distx = numpy.zeros(nPairs)
4989 4989 disty = numpy.zeros(nPairs)
4990 4990 dist = numpy.zeros(nPairs)
4991 4991 ang = numpy.zeros(nPairs)
4992 4992
4993 4993 for i in range(nPairs):
4994 4994 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
4995 4995 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
4996 4996 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
4997 4997 ang[i] = numpy.arctan2(disty[i],distx[i])
4998 4998
4999 4999 return distx, disty, dist, ang
5000 5000 #Matrix computation
5001 5001 # nPairs = len(pairs)
5002 5002 # ang1 = numpy.zeros((nPairs, 2, 1))
5003 5003 # dist1 = numpy.zeros((nPairs, 2, 1))
5004 5004 #
5005 5005 # for j in range(nPairs):
5006 5006 # dist1[j,0,0] = dist[pairs[j][0]]
5007 5007 # dist1[j,1,0] = dist[pairs[j][1]]
5008 5008 # ang1[j,0,0] = ang[pairs[j][0]]
5009 5009 # ang1[j,1,0] = ang[pairs[j][1]]
5010 5010 #
5011 5011 # return distx,disty, dist1,ang1
5012 5012
5013 5013
5014 5014 def __calculateVelVer(self, phase, lagTRange, _lambda):
5015 5015
5016 5016 Ts = lagTRange[1] - lagTRange[0]
5017 5017 velW = -_lambda*phase/(4*math.pi*Ts)
5018 5018
5019 5019 return velW
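# Worked example of the relation used in __calculateVelVer above,
# w = -lambda*phase/(4*pi*Ts). The numbers are only illustrative.
#
#   import math
#
#   _lambda = 6.0      # radar wavelength (m), e.g. ~50 MHz
#   Ts = 0.01          # lag spacing (s)
#   phase = 0.05       # measured phase at lag Ts (rad)
#   velW = -_lambda*phase/(4*math.pi*Ts)      # ~ -2.39 m/s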
5020 5020
5021 5021 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
5022 5022 nPairs = tau1.shape[0]
5023 5023 nHeights = tau1.shape[1]
5024 5024 vel = numpy.zeros((nPairs,3,nHeights))
5025 5025 dist1 = numpy.reshape(dist, (dist.size,1))
5026 5026
5027 5027 angCos = numpy.cos(ang)
5028 5028 angSin = numpy.sin(ang)
5029 5029
5030 5030 vel0 = dist1*tau1/(2*tau2**2)
5031 5031 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
5032 5032 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
5033 5033
5034 5034 ind = numpy.where(numpy.isinf(vel))
5035 5035 vel[ind] = numpy.nan
5036 5036
5037 5037 return vel
5038 5038
5039 5039 # def __getPairsAutoCorr(self, pairsList, nChannels):
5040 5040 #
5041 5041 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
5042 5042 #
5043 5043 # for l in range(len(pairsList)):
5044 5044 # firstChannel = pairsList[l][0]
5045 5045 # secondChannel = pairsList[l][1]
5046 5046 #
5047 5047 # #Getting autocorrelation pairs
5048 5048 # if firstChannel == secondChannel:
5049 5049 # pairsAutoCorr[firstChannel] = int(l)
5050 5050 #
5051 5051 # pairsAutoCorr = pairsAutoCorr.astype(int)
5052 5052 #
5053 5053 # pairsCrossCorr = range(len(pairsList))
5054 5054 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
5055 5055 #
5056 5056 # return pairsAutoCorr, pairsCrossCorr
5057 5057
5058 5058 # def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
5059 5059 def techniqueSA(self, kwargs):
5060 5060
5061 5061 """
5062 5062 Function that implements the Spaced Antenna (SA) technique.
5063 5063
5064 5064 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
5065 5065 Direction correction (if necessary), Ranges and SNR
5066 5066
5067 5067 Output: Winds estimation (Zonal, Meridional and Vertical)
5068 5068
5069 5069 Parameters affected: Winds
5070 5070 """
5071 5071 position_x = kwargs['positionX']
5072 5072 position_y = kwargs['positionY']
5073 5073 azimuth = kwargs['azimuth']
5074 5074
5075 5075 if 'correctFactor' in kwargs:
5076 5076 correctFactor = kwargs['correctFactor']
5077 5077 else:
5078 5078 correctFactor = 1
5079 5079
5080 5080 groupList = kwargs['groupList']
5081 5081 pairs_ccf = groupList[1]
5082 5082 tau = kwargs['tau']
5083 5083 _lambda = kwargs['_lambda']
5084 5084
5085 5085 #Cross Correlation pairs obtained
5086 5086 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels)
5087 5087 # pairsArray = numpy.array(pairsList)[pairsCrossCorr]
5088 5088 # pairsSelArray = numpy.array(pairsSelected)
5089 5089 # pairs = []
5090 5090 #
5091 5091 # #Wind estimation pairs obtained
5092 5092 # for i in range(pairsSelArray.shape[0]/2):
5093 5093 # ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
5094 5094 # ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
5095 5095 # pairs.append((ind1,ind2))
5096 5096
5097 5097 indtau = tau.shape[0]//2
5098 5098 tau1 = tau[:indtau,:]
5099 5099 tau2 = tau[indtau:-1,:]
5100 5100 # tau1 = tau1[pairs,:]
5101 5101 # tau2 = tau2[pairs,:]
5102 5102 phase1 = tau[-1,:]
5103 5103
5104 5104 #---------------------------------------------------------------------
5105 5105 #Direct method
5106 5106 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
5107 5107 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
5108 5108 winds = numpy.nanmean(winds, axis=0)
5109 5109 #---------------------------------------------------------------------
5110 5110 #General method
5111 5111 # distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
5112 5112 # #Compute correlation function coefficients
5113 5113 # F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
5114 5114 # #Compute velocities
5115 5115 # winds = self.calculateVelUV(F,G,A,B,H)
5116 5116
5117 5117 #---------------------------------------------------------------------
5118 5118 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
5119 5119 winds = correctFactor*winds
5120 5120 return winds
5121 5121
5122 5122 def __checkTime(self, currentTime, paramInterval, outputInterval):
5123 5123
5124 5124 dataTime = currentTime + paramInterval
5125 5125 deltaTime = dataTime - self.__initime
5126 5126
5127 5127 if deltaTime >= outputInterval or deltaTime < 0:
5128 5128 self.__dataReady = True
5129 5129 return
5130 5130
5131 5131 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
5132 5132 '''
5133 5133 Function that implements the wind estimation technique based on detected meteors.
5134 5134
5135 5135 Input: Detected meteors, minimum number of meteors required for the wind estimation
5136 5136
5137 5137 Output: Winds estimation (Zonal and Meridional)
5138 5138
5139 5139 Parameters affected: Winds
5140 5140 '''
5141 5141 #Settings
5142 5142 nInt = (heightMax - heightMin)/2
5143 5143 nInt = int(nInt)
5144 5144 winds = numpy.zeros((2,nInt))*numpy.nan
5145 5145
5146 5146 #Filter errors
5147 5147 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
5148 5148 finalMeteor = arrayMeteor[error,:]
5149 5149
5150 5150 #Meteor Histogram
5151 5151 finalHeights = finalMeteor[:,2]
5152 5152 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
5153 5153 nMeteorsPerI = hist[0]
5154 5154 heightPerI = hist[1]
5155 5155
5156 5156 #Sort of meteors
5157 5157 indSort = finalHeights.argsort()
5158 5158 finalMeteor2 = finalMeteor[indSort,:]
5159 5159
5160 5160 # Calculating winds
5161 5161 ind1 = 0
5162 5162 ind2 = 0
5163 5163
5164 5164 for i in range(nInt):
5165 5165 nMet = nMeteorsPerI[i]
5166 5166 ind1 = ind2
5167 5167 ind2 = ind1 + nMet
5168 5168
5169 5169 meteorAux = finalMeteor2[ind1:ind2,:]
5170 5170
5171 5171 if meteorAux.shape[0] >= meteorThresh:
5172 5172 vel = meteorAux[:, 6]
5173 5173 zen = meteorAux[:, 4]*numpy.pi/180
5174 5174 azim = meteorAux[:, 3]*numpy.pi/180
5175 5175
5176 5176 n = numpy.cos(zen)
5177 5177 # m = (1 - n**2)/(1 - numpy.tan(azim)**2)
5178 5178 # l = m*numpy.tan(azim)
5179 5179 l = numpy.sin(zen)*numpy.sin(azim)
5180 5180 m = numpy.sin(zen)*numpy.cos(azim)
5181 5181
5182 5182 A = numpy.vstack((l, m)).transpose()
5183 5183 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
5184 5184 windsAux = numpy.dot(A1, vel)
5185 5185
5186 5186 winds[0,i] = windsAux[0]
5187 5187 winds[1,i] = windsAux[1]
5188 5188
5189 5189 return winds, heightPerI[:-1]
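# Illustrative sketch of the per-bin wind fit inside techniqueMeteors above:
# each meteor contributes one row (l, m) of horizontal direction cosines and
# one radial velocity, and (u, v) is the least-squares solution (equivalent to
# the (A^T A)^-1 A^T form used above). The meteor data are invented.
#
#   import numpy
#
#   zen = numpy.radians(numpy.array([30.0, 40.0, 35.0, 25.0, 45.0]))
#   azim = numpy.radians(numpy.array([10.0, 100.0, 200.0, 290.0, 50.0]))
#   vel = numpy.array([5.0, -3.0, -6.0, 2.0, 1.0])        # radial velocities (m/s)
#
#   l = numpy.sin(zen)*numpy.sin(azim)
#   m = numpy.sin(zen)*numpy.cos(azim)
#   A = numpy.vstack((l, m)).T
#   u, v = numpy.linalg.lstsq(A, vel, rcond=None)[0]      # zonal, meridional wind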
5190 5190
5191 5191 def techniqueNSM_SA(self, **kwargs):
5192 5192 metArray = kwargs['metArray']
5193 5193 heightList = kwargs['heightList']
5194 5194 timeList = kwargs['timeList']
5195 5195
5196 5196 rx_location = kwargs['rx_location']
5197 5197 groupList = kwargs['groupList']
5198 5198 azimuth = kwargs['azimuth']
5199 5199 dfactor = kwargs['dfactor']
5200 5200 k = kwargs['k']
5201 5201
5202 5202 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
5203 5203 d = dist*dfactor
5204 5204 #Phase calculation
5205 5205 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
5206 5206
5207 5207 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
5208 5208
5209 5209 velEst = numpy.zeros((heightList.size,2))*numpy.nan
5210 5210 azimuth1 = azimuth1*numpy.pi/180
5211 5211
5212 5212 for i in range(heightList.size):
5213 5213 h = heightList[i]
5214 5214 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
5215 5215 metHeight = metArray1[indH,:]
5216 5216 if metHeight.shape[0] >= 2:
5217 5217 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
5218 5218 iazim = metHeight[:,1].astype(int)
5219 5219 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
5220 5220 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
5221 5221 A = numpy.asmatrix(A)
5222 5222 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
5223 5223 velHor = numpy.dot(A1,velAux)
5224 5224
5225 5225 velEst[i,:] = numpy.squeeze(velHor)
5226 5226 return velEst
5227 5227
5228 5228 def __getPhaseSlope(self, metArray, heightList, timeList):
5229 5229 meteorList = []
5230 5230 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
5231 5231 #Putting back together the meteor matrix
5232 5232 utctime = metArray[:,0]
5233 5233 uniqueTime = numpy.unique(utctime)
5234 5234
5235 5235 phaseDerThresh = 0.5
5236 5236 ippSeconds = timeList[1] - timeList[0]
5237 5237 sec = numpy.where(timeList>1)[0][0]
5238 5238 nPairs = metArray.shape[1] - 6
5239 5239 nHeights = len(heightList)
5240 5240
5241 5241 for t in uniqueTime:
5242 5242 metArray1 = metArray[utctime==t,:]
5243 5243 # phaseDerThresh = numpy.pi/4 #reducir Phase thresh
5244 5244 tmet = metArray1[:,1].astype(int)
5245 5245 hmet = metArray1[:,2].astype(int)
5246 5246
5247 5247 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
5248 5248 metPhase[:,:] = numpy.nan
5249 5249 metPhase[:,hmet,tmet] = metArray1[:,6:].T
5250 5250
5251 5251 #Delete short trails
5252 5252 metBool = ~numpy.isnan(metPhase[0,:,:])
5253 5253 heightVect = numpy.sum(metBool, axis = 1)
5254 5254 metBool[heightVect<sec,:] = False
5255 5255 metPhase[:,heightVect<sec,:] = numpy.nan
5256 5256
5257 5257 #Derivative
5258 5258 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
5259 5259 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
5260 5260 metPhase[phDerAux] = numpy.nan
5261 5261
5262 5262 #--------------------------METEOR DETECTION -----------------------------------------
5263 5263 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
5264 5264
5265 5265 for p in numpy.arange(nPairs):
5266 5266 phase = metPhase[p,:,:]
5267 5267 phDer = metDer[p,:,:]
5268 5268
5269 5269 for h in indMet:
5270 5270 height = heightList[h]
5271 5271 phase1 = phase[h,:] #82
5272 5272 phDer1 = phDer[h,:]
5273 5273
5274 5274 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
5275 5275
5276 5276 indValid = numpy.where(~numpy.isnan(phase1))[0]
5277 5277 initMet = indValid[0]
5278 5278 endMet = 0
5279 5279
5280 5280 for i in range(len(indValid)-1):
5281 5281
5282 5282 #Time difference
5283 5283 inow = indValid[i]
5284 5284 inext = indValid[i+1]
5285 5285 idiff = inext - inow
5286 5286 #Phase difference
5287 5287 phDiff = numpy.abs(phase1[inext] - phase1[inow])
5288 5288
5289 5289 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
5290 5290 sizeTrail = inow - initMet + 1
5291 5291 if sizeTrail>3*sec: #discard too-short meteor trails
5292 5292 x = numpy.arange(initMet,inow+1)*ippSeconds
5293 5293 y = phase1[initMet:inow+1]
5294 5294 ynnan = ~numpy.isnan(y)
5295 5295 x = x[ynnan]
5296 5296 y = y[ynnan]
5297 5297 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
5298 5298 ylin = x*slope + intercept
5299 5299 rsq = r_value**2
5300 5300 if rsq > 0.5:
5301 5301 vel = slope#*height*1000/(k*d)
5302 5302 estAux = numpy.array([utctime,p,height, vel, rsq])
5303 5303 meteorList.append(estAux)
5304 5304 initMet = inext
5305 5305 metArray2 = numpy.array(meteorList)
5306 5306
5307 5307 return metArray2
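# Illustrative sketch of the phase-slope estimate in __getPhaseSlope above: the
# unwrapped phase of a meteor trail is regressed against time and the slope is
# kept only when r^2 > 0.5 (the caller later scales it into a velocity). The
# samples below are synthetic.
#
#   import numpy
#   from scipy import stats
#
#   ippSeconds = 0.01
#   x = numpy.arange(30)*ippSeconds                       # time (s)
#   phase = 12.0*x + 0.05*numpy.random.randn(30)          # unwrapped phase (rad)
#   slope, intercept, r_value, p_value, std_err = stats.linregress(x, phase)
#   if r_value**2 > 0.5:
#       phase_rate = slope                                # rad/s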
5308 5308
5309 5309 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
5310 5310
5311 5311 azimuth1 = numpy.zeros(len(pairslist))
5312 5312 dist = numpy.zeros(len(pairslist))
5313 5313
5314 5314 for i in range(len(rx_location)):
5315 5315 ch0 = pairslist[i][0]
5316 5316 ch1 = pairslist[i][1]
5317 5317
5318 5318 diffX = rx_location[ch0][0] - rx_location[ch1][0]
5319 5319 diffY = rx_location[ch0][1] - rx_location[ch1][1]
5320 5320 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
5321 5321 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
5322 5322
5323 5323 azimuth1 -= azimuth0
5324 5324 return azimuth1, dist
5325 5325
5326 5326 def techniqueNSM_DBS(self, **kwargs):
5327 5327 metArray = kwargs['metArray']
5328 5328 heightList = kwargs['heightList']
5329 5329 timeList = kwargs['timeList']
5330 5330 azimuth = kwargs['azimuth']
5331 5331 theta_x = numpy.array(kwargs['theta_x'])
5332 5332 theta_y = numpy.array(kwargs['theta_y'])
5333 5333
5334 5334 utctime = metArray[:,0]
5335 5335 cmet = metArray[:,1].astype(int)
5336 5336 hmet = metArray[:,3].astype(int)
5337 5337 SNRmet = metArray[:,4]
5338 5338 vmet = metArray[:,5]
5339 5339 spcmet = metArray[:,6]
5340 5340
5341 5341 nChan = numpy.max(cmet) + 1
5342 5342 nHeights = len(heightList)
5343 5343
5344 5344 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
5345 5345 hmet = heightList[hmet]
5346 5346 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
5347 5347
5348 5348 velEst = numpy.zeros((heightList.size,2))*numpy.nan
5349 5349
5350 5350 for i in range(nHeights - 1):
5351 5351 hmin = heightList[i]
5352 5352 hmax = heightList[i + 1]
5353 5353
5354 5354 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
5355 5355 indthisH = numpy.where(thisH)
5356 5356
5357 5357 if numpy.size(indthisH) > 3:
5358 5358
5359 5359 vel_aux = vmet[thisH]
5360 5360 chan_aux = cmet[thisH]
5361 5361 cosu_aux = dir_cosu[chan_aux]
5362 5362 cosv_aux = dir_cosv[chan_aux]
5363 5363 cosw_aux = dir_cosw[chan_aux]
5364 5364
5365 5365 nch = numpy.size(numpy.unique(chan_aux))
5366 5366 if nch > 1:
5367 5367 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
5368 5368 velEst[i,:] = numpy.dot(A,vel_aux)
5369 5369
5370 5370 return velEst
5371 5371
5372 5372 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
5373 5373
5374 5374 param = dataOut.data_param
5375 5375 if dataOut.abscissaList is not None:
5376 5376 absc = dataOut.abscissaList[:-1]
5377 5377 # noise = dataOut.noise
5378 5378 heightList = dataOut.heightList
5379 5379 SNR = dataOut.data_snr
5380 5380
5381 5381 if technique == 'DBS':
5382 5382
5383 5383 kwargs['velRadial'] = param[:,1,:] #Radial velocity
5384 5384 kwargs['heightList'] = heightList
5385 5385 kwargs['SNR'] = SNR
5386 5386
5387 5387 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
5388 5388 dataOut.utctimeInit = dataOut.utctime
5389 5389 dataOut.outputInterval = dataOut.paramInterval
5390 5390
5391 5391 elif technique == 'SA':
5392 5392
5393 5393 #Parameters
5394 5394 # position_x = kwargs['positionX']
5395 5395 # position_y = kwargs['positionY']
5396 5396 # azimuth = kwargs['azimuth']
5397 5397 #
5398 5398 # if kwargs.has_key('crosspairsList'):
5399 5399 # pairs = kwargs['crosspairsList']
5400 5400 # else:
5401 5401 # pairs = None
5402 5402 #
5403 5403 # if kwargs.has_key('correctFactor'):
5404 5404 # correctFactor = kwargs['correctFactor']
5405 5405 # else:
5406 5406 # correctFactor = 1
5407 5407
5408 5408 # tau = dataOut.data_param
5409 5409 # _lambda = dataOut.C/dataOut.frequency
5410 5410 # pairsList = dataOut.groupList
5411 5411 # nChannels = dataOut.nChannels
5412 5412
5413 5413 kwargs['groupList'] = dataOut.groupList
5414 5414 kwargs['tau'] = dataOut.data_param
5415 5415 kwargs['_lambda'] = dataOut.C/dataOut.frequency
5416 5416 # dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor)
5417 5417 dataOut.data_output = self.techniqueSA(kwargs)
5418 5418 dataOut.utctimeInit = dataOut.utctime
5419 5419 dataOut.outputInterval = dataOut.timeInterval
5420 5420
5421 5421 elif technique == 'Meteors':
5422 5422 dataOut.flagNoData = True
5423 5423 self.__dataReady = False
5424 5424
5425 5425 if 'nHours' in kwargs:
5426 5426 nHours = kwargs['nHours']
5427 5427 else:
5428 5428 nHours = 1
5429 5429
5430 5430 if 'meteorsPerBin' in kwargs:
5431 5431 meteorThresh = kwargs['meteorsPerBin']
5432 5432 else:
5433 5433 meteorThresh = 6
5434 5434
5435 5435 if 'hmin' in kwargs:
5436 5436 hmin = kwargs['hmin']
5437 5437 else: hmin = 70
5438 5438 if 'hmax' in kwargs:
5439 5439 hmax = kwargs['hmax']
5440 5440 else: hmax = 110
5441 5441
5442 5442 dataOut.outputInterval = nHours*3600
5443 5443
5444 5444 if self.__isConfig == False:
5445 5445 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
5446 5446 #Get Initial LTC time
5447 5447 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5448 5448 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5449 5449
5450 5450 self.__isConfig = True
5451 5451
5452 5452 if self.__buffer is None:
5453 5453 self.__buffer = dataOut.data_param
5454 5454 self.__firstdata = copy.copy(dataOut)
5455 5455
5456 5456 else:
5457 5457 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5458 5458
5459 5459 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5460 5460
5461 5461 if self.__dataReady:
5462 5462 dataOut.utctimeInit = self.__initime
5463 5463
5464 5464 self.__initime += dataOut.outputInterval #to erase time offset
5465 5465
5466 5466 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
5467 5467 dataOut.flagNoData = False
5468 5468 self.__buffer = None
5469 5469
5470 5470 elif technique == 'Meteors1':
5471 5471 dataOut.flagNoData = True
5472 5472 self.__dataReady = False
5473 5473
5474 5474 if 'nMins' in kwargs:
5475 5475 nMins = kwargs['nMins']
5476 5476 else: nMins = 20
5477 5477 if 'rx_location' in kwargs:
5478 5478 rx_location = kwargs['rx_location']
5479 5479 else: rx_location = [(0,1),(1,1),(1,0)]
5480 5480 if 'azimuth' in kwargs:
5481 5481 azimuth = kwargs['azimuth']
5482 5482 else: azimuth = 51.06
5483 5483 if 'dfactor' in kwargs:
5484 5484 dfactor = kwargs['dfactor']
5485 5485             if 'mode' in kwargs:
5486 5486                 mode = kwargs['mode']
5487 5487             else: mode = 'SA'
5488 5488             if 'theta_x' in kwargs:
5489 5489                 theta_x = kwargs['theta_x']
5490 5490             if 'theta_y' in kwargs:
5491 5491                 theta_y = kwargs['theta_y']
5492 5492
5493 5493             #TODO: remove this later
5494 5494 if dataOut.groupList is None:
5495 5495 dataOut.groupList = [(0,1),(0,2),(1,2)]
5496 5496 groupList = dataOut.groupList
5497 5497 C = 3e8
5498 5498 freq = 50e6
5499 5499 lamb = C/freq
5500 5500 k = 2*numpy.pi/lamb
5501 5501
5502 5502 timeList = dataOut.abscissaList
5503 5503 heightList = dataOut.heightList
5504 5504
5505 5505 if self.__isConfig == False:
5506 5506 dataOut.outputInterval = nMins*60
5507 5507 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
5508 5508 #Get Initial LTC time
5509 5509 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5510 5510 minuteAux = initime.minute
5511 5511 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
5512 5512 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5513 5513
5514 5514 self.__isConfig = True
5515 5515
5516 5516 if self.__buffer is None:
5517 5517 self.__buffer = dataOut.data_param
5518 5518 self.__firstdata = copy.copy(dataOut)
5519 5519
5520 5520 else:
5521 5521 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5522 5522
5523 5523 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5524 5524
5525 5525 if self.__dataReady:
5526 5526 dataOut.utctimeInit = self.__initime
5527 5527 self.__initime += dataOut.outputInterval #to erase time offset
5528 5528
5529 5529 metArray = self.__buffer
5530 5530 if mode == 'SA':
5531 5531 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
5532 5532 elif mode == 'DBS':
5533 5533 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
5534 5534 dataOut.data_output = dataOut.data_output.T
5535 5535 dataOut.flagNoData = False
5536 5536 self.__buffer = None
5537 5537
5538 5538 return
5539 5539
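# A minimal, illustrative sketch of the window alignment used by the 'Meteors1'
# branch above (standard library only; align_to_interval is just an example
# name, not part of this module):
#
#     import datetime
#
#     def align_to_interval(utctime, nMins=20):
#         t = datetime.datetime.utcfromtimestamp(utctime)
#         t0 = t.replace(minute=int(t.minute // nMins) * nMins, second=0, microsecond=0)
#         return (t0 - datetime.datetime(1970, 1, 1)).total_seconds()
#
# e.g. 13:47:05 UTC with nMins = 20 maps to 13:40:00 UTC, so every accumulation
# window starts on a round nMins boundary and lasts nMins*60 seconds.
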
5540 5540 class EWDriftsEstimation(Operation):
5541 5541
5542 5542 def __init__(self):
5543 5543 Operation.__init__(self)
5544 5544
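    # __correctValues maps each beam's slant ranges to vertical heights
    # (height*cos(zenith)), builds a common grid from the beam with the largest
    # zenith angle (dropping heights below the lowest height reached by the
    # other beam), and re-interpolates each beam's radial velocity and SNR
    # (cubic interp1d) onto that grid so the beams can be combined height by
    # height in run().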
5545 5545 def __correctValues(self, heiRang, phi, velRadial, SNR):
5546 5546 listPhi = phi.tolist()
5547 5547 maxid = listPhi.index(max(listPhi))
5548 5548 minid = listPhi.index(min(listPhi))
5549 5549
5550 5550 rango = list(range(len(phi)))
5551 5551 # rango = numpy.delete(rango,maxid)
5552 5552
5553 5553 heiRang1 = heiRang*math.cos(phi[maxid])
5554 5554 heiRangAux = heiRang*math.cos(phi[minid])
5555 5555 indOut = (heiRang1 < heiRangAux[0]).nonzero()
5556 5556 heiRang1 = numpy.delete(heiRang1,indOut)
5557 5557
5558 5558 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
5559 5559 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
5560 5560
5561 5561 for i in rango:
5562 5562 x = heiRang*math.cos(phi[i])
5563 5563 y1 = velRadial[i,:]
5564 5564 vali= (numpy.isfinite(y1)==True).nonzero()
5565 5565 y1=y1[vali]
5566 5566 x = x[vali]
5567 5567 f1 = interpolate.interp1d(x,y1,kind = 'cubic',bounds_error=False)
5568 5568
5569 5569 #heiRang1 = x*math.cos(phi[maxid])
5570 5570 x1 = heiRang1
5571 5571 y11 = f1(x1)
5572 5572
5573 5573 y2 = SNR[i,:]
5574 5574 #print 'snr ', y2
5575 5575 x = heiRang*math.cos(phi[i])
5576 5576 vali= (y2 != -1).nonzero()
5577 5577 y2 = y2[vali]
5578 5578 x = x[vali]
5579 5579 #print 'snr ',y2
5580 5580 f2 = interpolate.interp1d(x,y2,kind = 'cubic',bounds_error=False)
5581 5581 y21 = f2(x1)
5582 5582
5583 5583 velRadial1[i,:] = y11
5584 5584 SNR1[i,:] = y21
5585 5585
5586 5586 return heiRang1, velRadial1, SNR1
5587 5587
5588 5588
5589 5589
5590 5590 def run(self, dataOut, zenith, zenithCorrection):
5591 5591
5592 5592 heiRang = dataOut.heightList
5593 5593 velRadial = dataOut.data_param[:,3,:]
5594 5594 velRadialm = dataOut.data_param[:,2:4,:]*-1
5595 5595
5596 5596 rbufc=dataOut.data_paramC[:,:,0]
5597 5597 ebufc=dataOut.data_paramC[:,:,1]
5598 5598 SNR = dataOut.data_snr
5599 5599 velRerr = dataOut.data_error[:,4,:]
5600 5600 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]],[velRadialm[1,:]],[velRadialm[1,:]]))
5601 5601 dataOut.moments=moments
5602 5602 # Coherent
5603 5603 smooth_wC = ebufc[0,:]
5604 5604 p_w0C = rbufc[0,:]
5605 5605 p_w1C = rbufc[1,:]
5606 5606 w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1)
5607 5607 t_wC = rbufc[3,:]
5608 5608 my_nbeams = 2
5609 5609
5610 5610 zenith = numpy.array(zenith)
5611 5611 zenith -= zenithCorrection
5612 5612 zenith *= numpy.pi/180
5613 5613 if zenithCorrection != 0 :
5614 5614 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
5615 5615 else :
5616 5616 heiRang1 = heiRang
5617 5617 velRadial1 = velRadial
5618 5618 SNR1 = SNR
5619 5619
5620 5620 alp = zenith[0]
5621 5621 bet = zenith[1]
5622 5622
5623 5623 w_w = velRadial1[0,:]
5624 5624 w_e = velRadial1[1,:]
5625 5625 w_w_err = velRerr[0,:]
5626 5626 w_e_err = velRerr[1,:]
5627 5627
5628 5628 val = (numpy.isfinite(w_w)==False).nonzero()
5629 5629 val = val[0]
5630 5630 bad = val
5631 5631 if len(bad) > 0 :
5632 5632 w_w[bad] = w_wC[bad]
5633 5633 w_w_err[bad]= numpy.nan
5634 5634 if my_nbeams == 2:
5635 5635 smooth_eC=ebufc[4,:]
5636 5636 p_e0C = rbufc[4,:]
5637 5637 p_e1C = rbufc[5,:]
5638 5638 w_eC = rbufc[6,:]*-1
5639 5639 t_eC = rbufc[7,:]
5640 5640 val = (numpy.isfinite(w_e)==False).nonzero()
5641 5641 val = val[0]
5642 5642 bad = val
5643 5643 if len(bad) > 0 :
5644 5644 w_e[bad] = w_eC[bad]
5645 5645 w_e_err[bad]= numpy.nan
5646 5646
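        # The two line-of-sight velocities are modeled as
        #     w_w = w*cos(alp) + u*sin(alp)
        #     w_e = w*cos(bet) + u*sin(bet)
        # and the expressions below are the Cramer's-rule solution of that 2x2
        # system for w (nominally vertical) and u (nominally zonal, in the beam
        # plane); the error lines propagate the radial-velocity errors assuming
        # they are independent.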
5647 5647 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
5648 5648 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
5649 5649
5650 5650 w_err = numpy.sqrt((w_w_err*numpy.sin(bet))**2.+(w_e_err*numpy.sin(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
5651 5651 u_err = numpy.sqrt((w_w_err*numpy.cos(bet))**2.+(w_e_err*numpy.cos(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
5652 5652
5653 5653 winds = numpy.vstack((w,u))
5654 5654
5655 5655 dataOut.heightList = heiRang1
5656 5656 dataOut.data_output = winds
5657 5657
5658 5658 snr1 = 10*numpy.log10(SNR1[0])
5659 5659 dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0]))
5660 5660 dataOut.utctimeInit = dataOut.utctime
5661 5661 dataOut.outputInterval = dataOut.timeInterval
5662 5662
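        # Height-averaged drifts: inside each averaging bin the profile values
        # are combined with inverse-variance weights,
        #     avg   = sum(v_i / err_i**2) / sum(1 / err_i**2)
        #     sigma = sqrt(1 / sum(1 / err_i**2))
        # starting at hei_aver0 (218 km), using mynhei bins of h_avgs samples
        # each plus one wide bin spanning the full 6*h_avgs range.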
5663 5663 hei_aver0 = 218
5664 5664         jrange = 450 #900 for HA drifts
5665 5665 deltah = 15.0 #dataOut.spacing(0)
5666 5666 h0 = 0.0 #dataOut.first_height(0)
5667 5667 heights = dataOut.heightList
5668 5668 nhei = len(heights)
5669 5669
5670 5670 range1 = numpy.arange(nhei) * deltah + h0
5671 5671
5672 5672 #jhei = WHERE(range1 GE hei_aver0 , jcount)
5673 5673 jhei = (range1 >= hei_aver0).nonzero()
5674 5674 if len(jhei[0]) > 0 :
5675 5675 h0_index = jhei[0][0] # Initial height for getting averages 218km
5676 5676
5677 5677 mynhei = 7
5678 5678 nhei_avg = int(jrange/deltah)
5679 5679 h_avgs = int(nhei_avg/mynhei)
5680 5680 nhei_avg = h_avgs*(mynhei-1)+mynhei
5681 5681
5682 5682 navgs = numpy.zeros(mynhei,dtype='float')
5683 5683 delta_h = numpy.zeros(mynhei,dtype='float')
5684 5684 range_aver = numpy.zeros(mynhei,dtype='float')
5685 5685 for ih in range( mynhei-1 ):
5686 5686 range_aver[ih] = numpy.sum(range1[h0_index+h_avgs*ih:h0_index+h_avgs*(ih+1)-0])/h_avgs
5687 5687 navgs[ih] = h_avgs
5688 5688 delta_h[ih] = deltah*h_avgs
5689 5689
5690 5690 range_aver[mynhei-1] = numpy.sum(range1[h0_index:h0_index+6*h_avgs-0])/(6*h_avgs)
5691 5691 navgs[mynhei-1] = 6*h_avgs
5692 5692 delta_h[mynhei-1] = deltah*6*h_avgs
5693 5693
5694 5694 wA = w[h0_index:h0_index+nhei_avg-0]
5695 5695 wA_err = w_err[h0_index:h0_index+nhei_avg-0]
5696 5696
5697 5697 for i in range(5) :
5698 5698 vals = wA[i*h_avgs:(i+1)*h_avgs-0]
5699 5699 errs = wA_err[i*h_avgs:(i+1)*h_avgs-0]
5700 5700 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
5701 5701 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5702 5702 wA[6*h_avgs+i] = avg
5703 5703 wA_err[6*h_avgs+i] = sigma
5704 5704
5705 5705
5706 5706 vals = wA[0:6*h_avgs-0]
5707 5707 errs=wA_err[0:6*h_avgs-0]
5708 5708 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2)
5709 5709 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5710 5710 wA[nhei_avg-1] = avg
5711 5711 wA_err[nhei_avg-1] = sigma
5712 5712
5713 5713 wA = wA[6*h_avgs:nhei_avg-0]
5714 5714 wA_err=wA_err[6*h_avgs:nhei_avg-0]
5715 5715 if my_nbeams == 2 :
5716 5716
5717 5717 uA = u[h0_index:h0_index+nhei_avg]
5718 5718 uA_err=u_err[h0_index:h0_index+nhei_avg]
5719 5719
5720 5720 for i in range(5) :
5721 5721 vals = uA[i*h_avgs:(i+1)*h_avgs-0]
5722 5722 errs=uA_err[i*h_avgs:(i+1)*h_avgs-0]
5723 5723 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
5724 5724 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5725 5725 uA[6*h_avgs+i] = avg
5726 5726 uA_err[6*h_avgs+i]=sigma
5727 5727
5728 5728 vals = uA[0:6*h_avgs-0]
5729 5729 errs = uA_err[0:6*h_avgs-0]
5730 5730 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
5731 5731 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5732 5732 uA[nhei_avg-1] = avg
5733 5733 uA_err[nhei_avg-1] = sigma
5734 5734 uA = uA[6*h_avgs:nhei_avg-0]
5735 5735 uA_err = uA_err[6*h_avgs:nhei_avg-0]
5736 5736
5737 5737 dataOut.drifts_avg = numpy.vstack((wA,uA))
5738 5738
5739 5739 tini=time.localtime(dataOut.utctime)
5740 5740 datefile= str(tini[0]).zfill(4)+str(tini[1]).zfill(2)+str(tini[2]).zfill(2)
5741 5741 nfile = '/home/pcondor/Database/ewdriftsschain2019/jro'+datefile+'drifts_sch3.txt'
5742 5742
5743 5743 f1 = open(nfile,'a')
5744 5744
5745 5745 datedriftavg=str(tini[0])+' '+str(tini[1])+' '+str(tini[2])+' '+str(tini[3])+' '+str(tini[4])
5746 5746 driftavgstr=str(dataOut.drifts_avg)
5747 5747
5748 5748 numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i')
5749 5749 numpy.savetxt(f1,dataOut.drifts_avg,fmt='%10.2f')
5750 5750 f1.close()
5751 5751
5752 5752 return dataOut
5753 5753
5754 5754 #--------------- Non Specular Meteor ----------------
5755 5755
5756 5756 class NonSpecularMeteorDetection(Operation):
5757 5757
5758 5758 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
5759 5759 data_acf = dataOut.data_pre[0]
5760 5760 data_ccf = dataOut.data_pre[1]
5761 5761 pairsList = dataOut.groupList[1]
5762 5762
5763 5763 lamb = dataOut.C/dataOut.frequency
5764 5764 tSamp = dataOut.ippSeconds*dataOut.nCohInt
5765 5765 paramInterval = dataOut.paramInterval
5766 5766
5767 5767 nChannels = data_acf.shape[0]
5768 5768 nLags = data_acf.shape[1]
5769 5769 nProfiles = data_acf.shape[2]
5770 5770 nHeights = dataOut.nHeights
5771 5771 nCohInt = dataOut.nCohInt
5772 5772 sec = numpy.round(nProfiles/dataOut.paramInterval)
5773 5773 heightList = dataOut.heightList
5774 5774 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
5775 5775 utctime = dataOut.utctime
5776 5776
5777 5777 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
5778 5778
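        # Noise is estimated per channel with the Hildebrand-Sekhon criterion on
        # the zero-lag (power) term of the ACF; SNR is then (P - N)/N per profile
        # and height, SNRm its channel average and SNRdB = 10*log10(SNR).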
5779 5779 #------------------------ SNR --------------------------------------
5780 5780 power = data_acf[:,0,:,:].real
5781 5781 noise = numpy.zeros(nChannels)
5782 5782 SNR = numpy.zeros(power.shape)
5783 5783 for i in range(nChannels):
5784 5784 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
5785 5785 SNR[i] = (power[i]-noise[i])/noise[i]
5786 5786 SNRm = numpy.nanmean(SNR, axis = 0)
5787 5787 SNRdB = 10*numpy.log10(SNR)
5788 5788
5789 5789 if mode == 'SA':
5790 5790 dataOut.groupList = dataOut.groupList[1]
5791 5791 nPairs = data_ccf.shape[0]
5792 5792 #---------------------- Coherence and Phase --------------------------
5793 5793 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
5794 5794 # phase1 = numpy.copy(phase)
5795 5795 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
5796 5796
5797 5797 for p in range(nPairs):
5798 5798 ch0 = pairsList[p][0]
5799 5799 ch1 = pairsList[p][1]
5800 5800 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
5801 5801 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
5802 5802 # phase1[p,:,:] = numpy.angle(ccf) #median filter
5803 5803 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
5804 5804 # coh1[p,:,:] = numpy.abs(ccf) #median filter
5805 5805 coh = numpy.nanmax(coh1, axis = 0)
5806 5806 # struc = numpy.ones((5,1))
5807 5807 # coh = ndimage.morphology.grey_dilation(coh, size=(10,1))
5808 5808 #---------------------- Radial Velocity ----------------------------
5809 5809 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
5810 5810 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
5811 5811
5812 5812 if allData:
5813 5813 boolMetFin = ~numpy.isnan(SNRm)
5814 5814 # coh[:-1,:] = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
5815 5815 else:
5816 5816 #------------------------ Meteor mask ---------------------------------
5817 5817 # #SNR mask
5818 5818 # boolMet = (SNRdB>SNRthresh)#|(~numpy.isnan(SNRdB))
5819 5819 #
5820 5820 # #Erase small objects
5821 5821 # boolMet1 = self.__erase_small(boolMet, 2*sec, 5)
5822 5822 #
5823 5823 # auxEEJ = numpy.sum(boolMet1,axis=0)
5824 5824 # indOver = auxEEJ>nProfiles*0.8 #Use this later
5825 5825 # indEEJ = numpy.where(indOver)[0]
5826 5826 # indNEEJ = numpy.where(~indOver)[0]
5827 5827 #
5828 5828 # boolMetFin = boolMet1
5829 5829 #
5830 5830 # if indEEJ.size > 0:
5831 5831 # boolMet1[:,indEEJ] = False #Erase heights with EEJ
5832 5832 #
5833 5833 # boolMet2 = coh > cohThresh
5834 5834 # boolMet2 = self.__erase_small(boolMet2, 2*sec,5)
5835 5835 #
5836 5836 # #Final Meteor mask
5837 5837 # boolMetFin = boolMet1|boolMet2
5838 5838
5839 5839 #Coherence mask
5840 5840 boolMet1 = coh > 0.75
5841 5841 struc = numpy.ones((30,1))
5842 5842 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
5843 5843
5844 5844 #Derivative mask
5845 5845 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
5846 5846 boolMet2 = derPhase < 0.2
5847 5847 # boolMet2 = ndimage.morphology.binary_opening(boolMet2)
5848 5848 # boolMet2 = ndimage.morphology.binary_closing(boolMet2, structure = numpy.ones((10,1)))
5849 5849 boolMet2 = ndimage.median_filter(boolMet2,size=5)
5850 5850 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
5851 5851 # #Final mask
5852 5852 # boolMetFin = boolMet2
5853 5853 boolMetFin = boolMet1&boolMet2
5854 5854 # boolMetFin = ndimage.morphology.binary_dilation(boolMetFin)
5855 5855 #Creating data_param
5856 5856 coordMet = numpy.where(boolMetFin)
5857 5857
5858 5858 tmet = coordMet[0]
5859 5859 hmet = coordMet[1]
5860 5860
5861 5861 data_param = numpy.zeros((tmet.size, 6 + nPairs))
5862 5862 data_param[:,0] = utctime
5863 5863 data_param[:,1] = tmet
5864 5864 data_param[:,2] = hmet
5865 5865 data_param[:,3] = SNRm[tmet,hmet]
5866 5866 data_param[:,4] = velRad[tmet,hmet]
5867 5867 data_param[:,5] = coh[tmet,hmet]
5868 5868 data_param[:,6:] = phase[:,tmet,hmet].T
5869 5869
5870 5870 elif mode == 'DBS':
5871 5871 dataOut.groupList = numpy.arange(nChannels)
5872 5872
5873 5873 #Radial Velocities
5874 5874 phase = numpy.angle(data_acf[:,1,:,:])
5875 5875 # phase = ndimage.median_filter(numpy.angle(data_acf[:,1,:,:]), size = (1,5,1))
5876 5876 velRad = phase*lamb/(4*numpy.pi*tSamp)
5877 5877
5878 5878 #Spectral width
5879 5879 # acf1 = ndimage.median_filter(numpy.abs(data_acf[:,1,:,:]), size = (1,5,1))
5880 5880 # acf2 = ndimage.median_filter(numpy.abs(data_acf[:,2,:,:]), size = (1,5,1))
5881 5881 acf1 = data_acf[:,1,:,:]
5882 5882 acf2 = data_acf[:,2,:,:]
5883 5883
5884 5884 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
5885 5885 # velRad = ndimage.median_filter(velRad, size = (1,5,1))
5886 5886 if allData:
5887 5887 boolMetFin = ~numpy.isnan(SNRdB)
5888 5888 else:
5889 5889 #SNR
5890 5890 boolMet1 = (SNRdB>SNRthresh) #SNR mask
5891 5891 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
5892 5892
5893 5893 #Radial velocity
5894 5894 boolMet2 = numpy.abs(velRad) < 20
5895 5895 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
5896 5896
5897 5897 #Spectral Width
5898 5898 boolMet3 = spcWidth < 30
5899 5899 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
5900 5900 # boolMetFin = self.__erase_small(boolMet1, 10,5)
5901 5901 boolMetFin = boolMet1&boolMet2&boolMet3
5902 5902
5903 5903 #Creating data_param
5904 5904 coordMet = numpy.where(boolMetFin)
5905 5905
5906 5906 cmet = coordMet[0]
5907 5907 tmet = coordMet[1]
5908 5908 hmet = coordMet[2]
5909 5909
5910 5910 data_param = numpy.zeros((tmet.size, 7))
5911 5911 data_param[:,0] = utctime
5912 5912 data_param[:,1] = cmet
5913 5913 data_param[:,2] = tmet
5914 5914 data_param[:,3] = hmet
5915 5915 data_param[:,4] = SNR[cmet,tmet,hmet].T
5916 5916 data_param[:,5] = velRad[cmet,tmet,hmet].T
5917 5917 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
5918 5918
5919 5919 # self.dataOut.data_param = data_int
5920 5920 if len(data_param) == 0:
5921 5921 dataOut.flagNoData = True
5922 5922 else:
5923 5923 dataOut.data_param = data_param
5924 5924
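    # __erase_small drops connected regions of the boolean mask (labeled with
    # ndimage.measurements.label) that are too small to be a meteor trail: a
    # region is removed if it has fewer than 50 samples, or spans less than
    # threshX profiles in time, or less than threshY samples in height.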
5925 5925 def __erase_small(self, binArray, threshX, threshY):
5926 5926 labarray, numfeat = ndimage.measurements.label(binArray)
5927 5927 binArray1 = numpy.copy(binArray)
5928 5928
5929 5929 for i in range(1,numfeat + 1):
5930 5930 auxBin = (labarray==i)
5931 5931 auxSize = auxBin.sum()
5932 5932
5933 5933 x,y = numpy.where(auxBin)
5934 5934 widthX = x.max() - x.min()
5935 5935 widthY = y.max() - y.min()
5936 5936
5937 5937 #width X: 3 seg -> 12.5*3
5938 5938 #width Y:
5939 5939
5940 5940 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
5941 5941 binArray1[auxBin] = False
5942 5942
5943 5943 return binArray1
5944 5944
5945 5945 #--------------- Specular Meteor ----------------
5946 5946
5947 5947 class SMDetection(Operation):
5948 5948 '''
5949 5949 Function DetectMeteors()
5950 5950     Based on the method described in:
5951 5951 HOLDSWORTH ET AL. 2004
5952 5952
5953 5953 Input:
5954 5954 self.dataOut.data_pre
5955 5955
5956 5956         centerReceiverIndex: Index of the central receiver among the channels
5957 5957
5958 5958 hei_ref: Height reference for the Beacon signal extraction
5959 5959 tauindex:
5960 5960         predefinedPhaseShifts: Predefined phase offset for the voltage signals
5961 5961
5962 5962         cohDetection: Whether to use coherent detection or not
5963 5963 cohDet_timeStep: Coherent Detection calculation time step
5964 5964 cohDet_thresh: Coherent Detection phase threshold to correct phases
5965 5965
5966 5966 noise_timeStep: Noise calculation time step
5967 5967 noise_multiple: Noise multiple to define signal threshold
5968 5968
5969 5969 multDet_timeLimit: Multiple Detection Removal time limit in seconds
5970 5970 multDet_rangeLimit: Multiple Detection Removal range limit in km
5971 5971
5972 5972         phaseThresh: Maximum phase difference between receivers for a signal to be considered a meteor
5973 5973         SNRThresh: Minimum SNR threshold for a signal to be considered a meteor
5974 5974
5975 5975 hmin: Minimum Height of the meteor to use it in the further wind estimations
5976 5976 hmax: Maximum Height of the meteor to use it in the further wind estimations
5977 5977 azimuth: Azimuth angle correction
5978 5978
5979 5979 Affected:
5980 5980 self.dataOut.data_param
5981 5981
5982 5982 Rejection Criteria (Errors):
5983 5983 0: No error; analysis OK
5984 5984 1: SNR < SNR threshold
5985 5985 2: angle of arrival (AOA) ambiguously determined
5986 5986 3: AOA estimate not feasible
5987 5987 4: Large difference in AOAs obtained from different antenna baselines
5988 5988 5: echo at start or end of time series
5989 5989 6: echo less than 5 examples long; too short for analysis
5990 5990 7: echo rise exceeds 0.3s
5991 5991 8: echo decay time less than twice rise time
5992 5992 9: large power level before echo
5993 5993 10: large power level after echo
5994 5994 11: poor fit to amplitude for estimation of decay time
5995 5995 12: poor fit to CCF phase variation for estimation of radial drift velocity
5996 5996         13: height unresolvable echo: no valid height within 70 to 110 km
5997 5997         14: height ambiguous echo: more than one possible height within 70 to 110 km
5998 5998         15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
5999 5999         16: oscillatory echo, indicating the event is most likely not an underdense echo
6000 6000
6001 6001 17: phase difference in meteor Reestimation
6002 6002
6003 6003 Data Storage:
6004 6004 Meteors for Wind Estimation (8):
6005 6005 Utc Time | Range Height
6006 6006 Azimuth Zenith errorCosDir
6007 6007 VelRad errorVelRad
6008 6008 Phase0 Phase1 Phase2 Phase3
6009 6009 TypeError
6010 6010
6011 6011 '''
6012 6012
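    # Processing outline implemented in run() (following Holdsworth et al. 2004):
    #   1. DC removal (hardware phase-shift removal is currently commented out)
    #   2. coherent or non-coherent net power estimation (3.4)
    #   3. noise level and candidate meteor detection (3.5)
    #   4. multiple-detection removal
    #   5. meteor re-estimation and decay-time fit (3.6 - 3.10)
    #   6. radial velocity from the CCF phase slope (3.11 - 3.13)
    # followed by AOA and height estimation through SMOperations.getMeteorParams.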
6013 6013 def run(self, dataOut, hei_ref = None, tauindex = 0,
6014 6014 phaseOffsets = None,
6015 6015 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
6016 6016 noise_timeStep = 4, noise_multiple = 4,
6017 6017 multDet_timeLimit = 1, multDet_rangeLimit = 3,
6018 6018 phaseThresh = 20, SNRThresh = 5,
6019 6019 hmin = 50, hmax=150, azimuth = 0,
6020 6020 channelPositions = None) :
6021 6021
6022 6022
6023 6023 #Getting Pairslist
6024 6024 if channelPositions is None:
6025 6025 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
6026 6026 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
6027 6027 meteorOps = SMOperations()
6028 6028 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
6029 6029 heiRang = dataOut.heightList
6030 6030 #Get Beacon signal - No Beacon signal anymore
6031 6031 # newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex])
6032 6032 #
6033 6033 # if hei_ref != None:
6034 6034 # newheis = numpy.where(self.dataOut.heightList>hei_ref)
6035 6035 #
6036 6036
6037 6037
6038 6038 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
6039 6039 # see if the user put in pre defined phase shifts
6040 6040 voltsPShift = dataOut.data_pre.copy()
6041 6041
6042 6042 # if predefinedPhaseShifts != None:
6043 6043 # hardwarePhaseShifts = numpy.array(predefinedPhaseShifts)*numpy.pi/180
6044 6044 #
6045 6045 # # elif beaconPhaseShifts:
6046 6046 # # #get hardware phase shifts using beacon signal
6047 6047 # # hardwarePhaseShifts = self.__getHardwarePhaseDiff(self.dataOut.data_pre, pairslist, newheis, 10)
6048 6048 # # hardwarePhaseShifts = numpy.insert(hardwarePhaseShifts,centerReceiverIndex,0)
6049 6049 #
6050 6050 # else:
6051 6051 # hardwarePhaseShifts = numpy.zeros(5)
6052 6052 #
6053 6053 # voltsPShift = numpy.zeros((self.dataOut.data_pre.shape[0],self.dataOut.data_pre.shape[1],self.dataOut.data_pre.shape[2]), dtype = 'complex')
6054 6054 # for i in range(self.dataOut.data_pre.shape[0]):
6055 6055 # voltsPShift[i,:,:] = self.__shiftPhase(self.dataOut.data_pre[i,:,:], hardwarePhaseShifts[i])
6056 6056
6057 6057 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
6058 6058
6059 6059 #Remove DC
6060 6060 voltsDC = numpy.mean(voltsPShift,1)
6061 6061 voltsDC = numpy.mean(voltsDC,1)
6062 6062 for i in range(voltsDC.shape[0]):
6063 6063 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
6064 6064
6065 6065         #Don't consider the last heights; they're used to calculate the hardware phase shift
6066 6066 # voltsPShift = voltsPShift[:,:,:newheis[0][0]]
6067 6067
6068 6068 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
6069 6069 #Coherent Detection
6070 6070 if cohDetection:
6071 6071 #use coherent detection to get the net power
6072 6072 cohDet_thresh = cohDet_thresh*numpy.pi/180
6073 6073 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
6074 6074
6075 6075 #Non-coherent detection!
6076 6076 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
6077 6077 #********** END OF COH/NON-COH POWER CALCULATION**********************
6078 6078
6079 6079 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
6080 6080 #Get noise
6081 6081 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
6082 6082 # noise = self.getNoise1(powerNet, noise_timeStep, self.dataOut.timeInterval)
6083 6083 #Get signal threshold
6084 6084 signalThresh = noise_multiple*noise
6085 6085 #Meteor echoes detection
6086 6086 listMeteors = self.__findMeteors(powerNet, signalThresh)
6087 6087         #******* END OF NOISE LEVEL AND POSSIBLE METEORS CALCULATION **********
6088 6088
6089 6089 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
6090 6090 #Parameters
6091 6091 heiRange = dataOut.heightList
6092 6092 rangeInterval = heiRange[1] - heiRange[0]
6093 6093 rangeLimit = multDet_rangeLimit/rangeInterval
6094 6094 timeLimit = multDet_timeLimit/dataOut.timeInterval
6095 6095 #Multiple detection removals
6096 6096 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
6097 6097 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
6098 6098
6099 6099 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
6100 6100 #Parameters
6101 6101 phaseThresh = phaseThresh*numpy.pi/180
6102 6102 thresh = [phaseThresh, noise_multiple, SNRThresh]
6103 6103 #Meteor reestimation (Errors N 1, 6, 12, 17)
6104 6104 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
6105 6105 # listMeteors2, listMeteorsPower, listMeteorsVolts = self.meteorReestimation3(listMeteors2, listMeteorsPower, listMeteorsVolts, voltsPShift, pairslist, thresh, noise)
6106 6106 #Estimation of decay times (Errors N 7, 8, 11)
6107 6107 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
6108 6108 #******************* END OF METEOR REESTIMATION *******************
6109 6109
6110 6110 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
6111 6111 #Calculating Radial Velocity (Error N 15)
6112 6112 radialStdThresh = 10
6113 6113 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
6114 6114
6115 6115 if len(listMeteors4) > 0:
6116 6116 #Setting New Array
6117 6117 date = dataOut.utctime
6118 6118 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
6119 6119
6120 6120 #Correcting phase offset
6121 6121 if phaseOffsets != None:
6122 6122 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
6123 6123 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
6124 6124
6125 6125 #Second Pairslist
6126 6126 pairsList = []
6127 6127 pairx = (0,1)
6128 6128 pairy = (2,3)
6129 6129 pairsList.append(pairx)
6130 6130 pairsList.append(pairy)
6131 6131
6132 6132 jph = numpy.array([0,0,0,0])
6133 6133 h = (hmin,hmax)
6134 6134 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
6135 6135
6136 6136 # #Calculate AOA (Error N 3, 4)
6137 6137 # #JONES ET AL. 1998
6138 6138 # error = arrayParameters[:,-1]
6139 6139 # AOAthresh = numpy.pi/8
6140 6140 # phases = -arrayParameters[:,9:13]
6141 6141 # arrayParameters[:,4:7], arrayParameters[:,-1] = meteorOps.getAOA(phases, pairsList, error, AOAthresh, azimuth)
6142 6142 #
6143 6143 # #Calculate Heights (Error N 13 and 14)
6144 6144 # error = arrayParameters[:,-1]
6145 6145 # Ranges = arrayParameters[:,2]
6146 6146 # zenith = arrayParameters[:,5]
6147 6147 # arrayParameters[:,3], arrayParameters[:,-1] = meteorOps.getHeights(Ranges, zenith, error, hmin, hmax)
6148 6148 # error = arrayParameters[:,-1]
6149 6149 #********************* END OF PARAMETERS CALCULATION **************************
6150 6150
6151 6151 #***************************+ PASS DATA TO NEXT STEP **********************
6152 6152 # arrayFinal = arrayParameters.reshape((1,arrayParameters.shape[0],arrayParameters.shape[1]))
6153 6153 dataOut.data_param = arrayParameters
6154 6154
6155 6155         if arrayParameters is None:
6156 6156             dataOut.flagNoData = True
6157 6157         else:
6158 6158             dataOut.flagNoData = False
6159 6159
6160 6160 return
6161 6161
6162 6162 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
6163 6163
6164 6164 minIndex = min(newheis[0])
6165 6165 maxIndex = max(newheis[0])
6166 6166
6167 6167 voltage = voltage0[:,:,minIndex:maxIndex+1]
6168 6168         nLength = voltage.shape[1]//n #integer block length, so the slice bounds below stay integers
6169 6169 nMin = 0
6170 6170 nMax = 0
6171 6171 phaseOffset = numpy.zeros((len(pairslist),n))
6172 6172
6173 6173 for i in range(n):
6174 6174 nMax += nLength
6175 6175 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
6176 6176 phaseCCF = numpy.mean(phaseCCF, axis = 2)
6177 6177 phaseOffset[:,i] = phaseCCF.transpose()
6178 6178 nMin = nMax
6179 6179 # phaseDiff, phaseArrival = self.estimatePhaseDifference(voltage, pairslist)
6180 6180
6181 6181 #Remove Outliers
6182 6182 factor = 2
6183 6183 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
6184 6184 dw = numpy.std(wt,axis = 1)
6185 6185 dw = dw.reshape((dw.size,1))
6186 6186 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
6187 6187 phaseOffset[ind] = numpy.nan
6188 6188         phaseOffset = numpy.nanmean(phaseOffset, axis=1) #scipy.stats.nanmean was removed in recent SciPy
6189 6189
6190 6190 return phaseOffset
6191 6191
6192 6192 def __shiftPhase(self, data, phaseShift):
6193 6193 #this will shift the phase of a complex number
6194 6194 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
6195 6195 return dataShifted
6196 6196
6197 6197 def __estimatePhaseDifference(self, array, pairslist):
6198 6198 nChannel = array.shape[0]
6199 6199 nHeights = array.shape[2]
6200 6200 numPairs = len(pairslist)
6201 6201 # phaseCCF = numpy.zeros((nChannel, 5, nHeights))
6202 6202 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
6203 6203
6204 6204 #Correct phases
6205 6205 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
6206 6206 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
6207 6207
6208 6208 if indDer[0].shape[0] > 0:
6209 6209 for i in range(indDer[0].shape[0]):
6210 6210 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
6211 6211 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
6212 6212
6213 6213 # for j in range(numSides):
6214 6214 # phaseCCFAux = self.calculateCCF(arrayCenter, arraySides[j,:,:], [-2,1,0,1,2])
6215 6215 # phaseCCF[j,:,:] = numpy.angle(phaseCCFAux)
6216 6216 #
6217 6217 #Linear
6218 6218 phaseInt = numpy.zeros((numPairs,1))
6219 6219 angAllCCF = phaseCCF[:,[0,1,3,4],0]
6220 6220 for j in range(numPairs):
6221 6221 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
6222 6222 phaseInt[j] = fit[1]
6223 6223 #Phase Differences
6224 6224 phaseDiff = phaseInt - phaseCCF[:,2,:]
6225 6225 phaseArrival = phaseInt.reshape(phaseInt.size)
6226 6226
6227 6227 #Dealias
6228 6228 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
6229 6229 # indAlias = numpy.where(phaseArrival > numpy.pi)
6230 6230 # phaseArrival[indAlias] -= 2*numpy.pi
6231 6231 # indAlias = numpy.where(phaseArrival < -numpy.pi)
6232 6232 # phaseArrival[indAlias] += 2*numpy.pi
6233 6233
6234 6234 return phaseDiff, phaseArrival
6235 6235
6236 6236 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
6237 6237         #this function will run the coherent detection used in Holdsworth et al. 2004 and return the net power
6238 6238 #find the phase shifts of each channel over 1 second intervals
6239 6239 #only look at ranges below the beacon signal
6240 6240 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
6241 6241 numBlocks = int(volts.shape[1]/numProfPerBlock)
6242 6242 numHeights = volts.shape[2]
6243 6243 nChannel = volts.shape[0]
6244 6244 voltsCohDet = volts.copy()
6245 6245
6246 6246 pairsarray = numpy.array(pairslist)
6247 6247 indSides = pairsarray[:,1]
6248 6248 # indSides = numpy.array(range(nChannel))
6249 6249 # indSides = numpy.delete(indSides, indCenter)
6250 6250 #
6251 6251 # listCenter = numpy.array_split(volts[indCenter,:,:], numBlocks, 0)
6252 6252 listBlocks = numpy.array_split(volts, numBlocks, 1)
6253 6253
6254 6254 startInd = 0
6255 6255 endInd = 0
6256 6256
6257 6257 for i in range(numBlocks):
6258 6258 startInd = endInd
6259 6259 endInd = endInd + listBlocks[i].shape[1]
6260 6260
6261 6261 arrayBlock = listBlocks[i]
6262 6262 # arrayBlockCenter = listCenter[i]
6263 6263
6264 6264 #Estimate the Phase Difference
6265 6265 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
6266 6266 #Phase Difference RMS
6267 6267 arrayPhaseRMS = numpy.abs(phaseDiff)
6268 6268 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
6269 6269 indPhase = numpy.where(phaseRMSaux==4)
6270 6270 #Shifting
6271 6271 if indPhase[0].shape[0] > 0:
6272 6272 for j in range(indSides.size):
6273 6273 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
6274 6274 voltsCohDet[:,startInd:endInd,:] = arrayBlock
6275 6275
6276 6276 return voltsCohDet
6277 6277
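    # __calculateCCF returns, for every channel pair and every lag l in laglist,
    #     CCF[pair, l, h] = sum_t conj(volts1[t, h]) * volts2[t + l, h]
    # with the shifted series zero-padded at the borders, so no wrap-around
    # samples enter the sum.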
6278 6278 def __calculateCCF(self, volts, pairslist ,laglist):
6279 6279
6280 6280 nHeights = volts.shape[2]
6281 6281 nPoints = volts.shape[1]
6282 6282 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
6283 6283
6284 6284 for i in range(len(pairslist)):
6285 6285 volts1 = volts[pairslist[i][0]]
6286 6286 volts2 = volts[pairslist[i][1]]
6287 6287
6288 6288 for t in range(len(laglist)):
6289 6289 idxT = laglist[t]
6290 6290 if idxT >= 0:
6291 6291 vStacked = numpy.vstack((volts2[idxT:,:],
6292 6292 numpy.zeros((idxT, nHeights),dtype='complex')))
6293 6293 else:
6294 6294 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
6295 6295 volts2[:(nPoints + idxT),:]))
6296 6296 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
6297 6297
6298 6298 vStacked = None
6299 6299 return voltsCCF
6300 6300
6301 6301 def __getNoise(self, power, timeSegment, timeInterval):
6302 6302 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
6303 6303 numBlocks = int(power.shape[0]/numProfPerBlock)
6304 6304 numHeights = power.shape[1]
6305 6305
6306 6306 listPower = numpy.array_split(power, numBlocks, 0)
6307 6307 noise = numpy.zeros((power.shape[0], power.shape[1]))
6308 6308 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
6309 6309
6310 6310 startInd = 0
6311 6311 endInd = 0
6312 6312
6313 6313         for i in range(numBlocks): #loop over time blocks
6314 6314 startInd = endInd
6315 6315 endInd = endInd + listPower[i].shape[0]
6316 6316
6317 6317 arrayBlock = listPower[i]
6318 6318 noiseAux = numpy.mean(arrayBlock, 0)
6319 6319 # noiseAux = numpy.median(noiseAux)
6320 6320 # noiseAux = numpy.mean(arrayBlock)
6321 6321 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
6322 6322
6323 6323 noiseAux1 = numpy.mean(arrayBlock)
6324 6324 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
6325 6325
6326 6326 return noise, noise1
6327 6327
6328 6328 def __findMeteors(self, power, thresh):
6329 6329 nProf = power.shape[0]
6330 6330 nHeights = power.shape[1]
6331 6331 listMeteors = []
6332 6332
6333 6333 for i in range(nHeights):
6334 6334 powerAux = power[:,i]
6335 6335 threshAux = thresh[:,i]
6336 6336
6337 6337 indUPthresh = numpy.where(powerAux > threshAux)[0]
6338 6338 indDNthresh = numpy.where(powerAux <= threshAux)[0]
6339 6339
6340 6340 j = 0
6341 6341
6342 6342 while (j < indUPthresh.size - 2):
6343 6343 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
6344 6344 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
6345 6345 indDNthresh = indDNthresh[indDNAux]
6346 6346
6347 6347 if (indDNthresh.size > 0):
6348 6348 indEnd = indDNthresh[0] - 1
6349 6349 indInit = indUPthresh[j]
6350 6350
6351 6351 meteor = powerAux[indInit:indEnd + 1]
6352 6352 indPeak = meteor.argmax() + indInit
6353 6353 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
6354 6354
6355 6355                         listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #CHECK THIS!!!!!
6356 6356 j = numpy.where(indUPthresh == indEnd)[0] + 1
6357 6357 else: j+=1
6358 6358 else: j+=1
6359 6359
6360 6360 return listMeteors
6361 6361
6362 6362 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
6363 6363
6364 6364 arrayMeteors = numpy.asarray(listMeteors)
6365 6365 listMeteors1 = []
6366 6366
6367 6367 while arrayMeteors.shape[0] > 0:
6368 6368 FLAs = arrayMeteors[:,4]
6369 6369 maxFLA = FLAs.argmax()
6370 6370 listMeteors1.append(arrayMeteors[maxFLA,:])
6371 6371
6372 6372 MeteorInitTime = arrayMeteors[maxFLA,1]
6373 6373 MeteorEndTime = arrayMeteors[maxFLA,3]
6374 6374 MeteorHeight = arrayMeteors[maxFLA,0]
6375 6375
6376 6376 #Check neighborhood
6377 6377 maxHeightIndex = MeteorHeight + rangeLimit
6378 6378 minHeightIndex = MeteorHeight - rangeLimit
6379 6379 minTimeIndex = MeteorInitTime - timeLimit
6380 6380 maxTimeIndex = MeteorEndTime + timeLimit
6381 6381
6382 6382 #Check Heights
6383 6383 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
6384 6384 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
6385 6385 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
6386 6386
6387 6387 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
6388 6388
6389 6389 return listMeteors1
6390 6390
6391 6391 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
6392 6392 numHeights = volts.shape[2]
6393 6393 nChannel = volts.shape[0]
6394 6394
6395 6395 thresholdPhase = thresh[0]
6396 6396 thresholdNoise = thresh[1]
6397 6397 thresholdDB = float(thresh[2])
6398 6398
6399 6399 thresholdDB1 = 10**(thresholdDB/10)
6400 6400 pairsarray = numpy.array(pairslist)
6401 6401 indSides = pairsarray[:,1]
6402 6402
6403 6403 pairslist1 = list(pairslist)
6404 6404 pairslist1.append((0,1))
6405 6405 pairslist1.append((3,4))
6406 6406
6407 6407 listMeteors1 = []
6408 6408 listPowerSeries = []
6409 6409 listVoltageSeries = []
6410 6410         #volts has the raw data
6411 6411
6412 6412 if frequency == 30e6:
6413 6413 timeLag = 45*10**-3
6414 6414 else:
6415 6415 timeLag = 15*10**-3
6416 6416         lag = int(numpy.ceil(timeLag/timeInterval)) #integer number of samples, so it can be used as an array index
6417 6417
6418 6418 for i in range(len(listMeteors)):
6419 6419
6420 6420 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
6421 6421 meteorAux = numpy.zeros(16)
6422 6422
6423 6423 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
6424 6424 mHeight = listMeteors[i][0]
6425 6425 mStart = listMeteors[i][1]
6426 6426 mPeak = listMeteors[i][2]
6427 6427 mEnd = listMeteors[i][3]
6428 6428
6429 6429 #get the volt data between the start and end times of the meteor
6430 6430 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
6431 6431 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
6432 6432
6433 6433 #3.6. Phase Difference estimation
6434 6434 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
6435 6435
6436 6436 #3.7. Phase difference removal & meteor start, peak and end times reestimated
6437 6437 #meteorVolts0.- all Channels, all Profiles
6438 6438 meteorVolts0 = volts[:,:,mHeight]
6439 6439 meteorThresh = noise[:,mHeight]*thresholdNoise
6440 6440 meteorNoise = noise[:,mHeight]
6441 6441 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
6442 6442 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
6443 6443
6444 6444 #Times reestimation
6445 6445 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
6446 6446 if mStart1.size > 0:
6447 6447 mStart1 = mStart1[-1] + 1
6448 6448
6449 6449 else:
6450 6450 mStart1 = mPeak
6451 6451
6452 6452 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
6453 6453 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
6454 6454 if mEndDecayTime1.size == 0:
6455 6455 mEndDecayTime1 = powerNet0.size
6456 6456 else:
6457 6457 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
6458 6458 # mPeak1 = meteorVolts0[mStart1:mEnd1 + 1].argmax()
6459 6459
6460 6460 #meteorVolts1.- all Channels, from start to end
6461 6461 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
6462 6462 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
6463 6463 if meteorVolts2.shape[1] == 0:
6464 6464 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
6465 6465 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
6466 6466 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
6467 6467 ##################### END PARAMETERS REESTIMATION #########################
6468 6468
6469 6469 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
6470 6470 # if mEnd1 - mStart1 > 4: #Error Number 6: echo less than 5 samples long; too short for analysis
6471 6471 if meteorVolts2.shape[1] > 0:
6472 6472 #Phase Difference re-estimation
6473 6473 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
6474 6474 # phaseDiff1, phaseDiffint = self.estimatePhaseDifference(meteorVolts2, pairslist)
6475 6475 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
6476 6476 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
6477 6477 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
6478 6478
6479 6479 #Phase Difference RMS
6480 6480 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
6481 6481 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
6482 6482 #Data from Meteor
6483 6483 mPeak1 = powerNet1.argmax() + mStart1
6484 6484 mPeakPower1 = powerNet1.max()
6485 6485 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
6486 6486 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
6487 6487 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
6488 6488 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
6489 6489 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
6490 6490 #Vectorize
6491 6491 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
6492 6492 meteorAux[7:11] = phaseDiffint[0:4]
6493 6493
6494 6494                 #Rejection Criteria
6495 6495 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
6496 6496 meteorAux[-1] = 17
6497 6497 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
6498 6498 meteorAux[-1] = 1
6499 6499
6500 6500
6501 6501 else:
6502 6502 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
6503 6503 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
6504 6504 PowerSeries = 0
6505 6505
6506 6506 listMeteors1.append(meteorAux)
6507 6507 listPowerSeries.append(PowerSeries)
6508 6508 listVoltageSeries.append(meteorVolts1)
6509 6509
6510 6510 return listMeteors1, listPowerSeries, listVoltageSeries
6511 6511
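    # __estimateDecayTime fits the post-peak power of each accepted meteor to
    #     P(t) = a * exp(-t / tau)
    # starting one lag (45 ms at 30 MHz, 15 ms otherwise) after the peak; tau is
    # stored as the decay time, and the normalized residual
    #     error = sum((y - fit)**2) / (var(y) * (N - 2))
    # together with the rise time drives rejection codes 7, 8 and 11.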
6512 6512 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
6513 6513
6514 6514 threshError = 10
6515 6515 #Depending if it is 30 or 50 MHz
6516 6516 if frequency == 30e6:
6517 6517 timeLag = 45*10**-3
6518 6518 else:
6519 6519 timeLag = 15*10**-3
6520 6520         lag = int(numpy.ceil(timeLag/timeInterval)) #integer number of samples, so it can be used as an array index
6521 6521
6522 6522 listMeteors1 = []
6523 6523
6524 6524 for i in range(len(listMeteors)):
6525 6525 meteorPower = listPower[i]
6526 6526 meteorAux = listMeteors[i]
6527 6527
6528 6528 if meteorAux[-1] == 0:
6529 6529
6530 6530 try:
6531 6531 indmax = meteorPower.argmax()
6532 6532 indlag = indmax + lag
6533 6533
6534 6534 y = meteorPower[indlag:]
6535 6535 x = numpy.arange(0, y.size)*timeLag
6536 6536
6537 6537 #first guess
6538 6538 a = y[0]
6539 6539 tau = timeLag
6540 6540 #exponential fit
6541 6541 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
6542 6542 y1 = self.__exponential_function(x, *popt)
6543 6543 #error estimation
6544 6544 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
6545 6545
6546 6546 decayTime = popt[1]
6547 6547 riseTime = indmax*timeInterval
6548 6548 meteorAux[11:13] = [decayTime, error]
6549 6549
6550 6550 #Table items 7, 8 and 11
6551 6551 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
6552 6552 meteorAux[-1] = 7
6553 6553                     elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than twice rise time
6554 6554 meteorAux[-1] = 8
6555 6555 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
6556 6556 meteorAux[-1] = 11
6557 6557
6558 6558
6559 6559 except:
6560 6560 meteorAux[-1] = 11
6561 6561
6562 6562
6563 6563 listMeteors1.append(meteorAux)
6564 6564
6565 6565 return listMeteors1
6566 6566
6567 6567 #Exponential Function
6568 6568
6569 6569 def __exponential_function(self, x, a, tau):
6570 6570 y = a*numpy.exp(-x/tau)
6571 6571 return y
6572 6572
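    # __getRadialVelocity regresses the CCF phase against lag time for each pair
    # and converts the mean slope into a line-of-sight velocity,
    #     v_r = -(lambda / (4*pi)) * dphi/dt,  with lambda = c/freq
    # (the 0.25/pi * c/freq factor below is exactly lambda/(4*pi)); the spread of
    # the per-pair slopes gives the radial error used for rejection codes 12 and 15.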
6573 6573 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
6574 6574
6575 6575 pairslist1 = list(pairslist)
6576 6576 pairslist1.append((0,1))
6577 6577 pairslist1.append((3,4))
6578 6578 numPairs = len(pairslist1)
6579 6579 #Time Lag
6580 6580 timeLag = 45*10**-3
6581 6581 c = 3e8
6582 6582 lag = numpy.ceil(timeLag/timeInterval)
6583 6583 freq = 30e6
6584 6584
6585 6585 listMeteors1 = []
6586 6586
6587 6587 for i in range(len(listMeteors)):
6588 6588 meteorAux = listMeteors[i]
6589 6589 if meteorAux[-1] == 0:
6590 6590 mStart = listMeteors[i][1]
6591 6591 mPeak = listMeteors[i][2]
6592 6592 mLag = mPeak - mStart + lag
6593 6593
6594 6594 #get the volt data between the start and end times of the meteor
6595 6595 meteorVolts = listVolts[i]
6596 6596 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
6597 6597
6598 6598 #Get CCF
6599 6599 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
6600 6600
6601 6601 #Method 2
6602 6602 slopes = numpy.zeros(numPairs)
6603 6603 time = numpy.array([-2,-1,1,2])*timeInterval
6604 6604 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
6605 6605
6606 6606 #Correct phases
6607 6607 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
6608 6608 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
6609 6609
6610 6610 if indDer[0].shape[0] > 0:
6611 6611 for i in range(indDer[0].shape[0]):
6612 6612 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i]])
6613 6613 angAllCCF[indDer[0][i],indDer[1][i]+1:] += signo*2*numpy.pi
6614 6614
6615 6615 # fit = scipy.stats.linregress(numpy.array([-2,-1,1,2])*timeInterval, numpy.array([phaseLagN2s[i],phaseLagN1s[i],phaseLag1s[i],phaseLag2s[i]]))
6616 6616 for j in range(numPairs):
6617 6617 fit = stats.linregress(time, angAllCCF[j,:])
6618 6618 slopes[j] = fit[0]
6619 6619
6620 6620 #Remove Outlier
6621 6621 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
6622 6622 # slopes = numpy.delete(slopes,indOut)
6623 6623 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
6624 6624 # slopes = numpy.delete(slopes,indOut)
6625 6625
6626 6626 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
6627 6627 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
6628 6628 meteorAux[-2] = radialError
6629 6629 meteorAux[-3] = radialVelocity
6630 6630
6631 6631 #Setting Error
6632 6632 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
6633 6633 if numpy.abs(radialVelocity) > 200:
6634 6634 meteorAux[-1] = 15
6635 6635 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
6636 6636 elif radialError > radialStdThresh:
6637 6637 meteorAux[-1] = 12
6638 6638
6639 6639 listMeteors1.append(meteorAux)
6640 6640 return listMeteors1
6641 6641
6642 6642 def __setNewArrays(self, listMeteors, date, heiRang):
6643 6643
6644 6644 #New arrays
6645 6645 arrayMeteors = numpy.array(listMeteors)
6646 6646 arrayParameters = numpy.zeros((len(listMeteors), 13))
6647 6647
6648 6648 #Date inclusion
6649 6649 # date = re.findall(r'\((.*?)\)', date)
6650 6650 # date = date[0].split(',')
6651 6651 # date = map(int, date)
6652 6652 #
6653 6653 # if len(date)<6:
6654 6654 # date.append(0)
6655 6655 #
6656 6656 # date = [date[0]*10000 + date[1]*100 + date[2], date[3]*10000 + date[4]*100 + date[5]]
6657 6657 # arrayDate = numpy.tile(date, (len(listMeteors), 1))
6658 6658 arrayDate = numpy.tile(date, (len(listMeteors)))
6659 6659
6660 6660 #Meteor array
6661 6661 # arrayMeteors[:,0] = heiRang[arrayMeteors[:,0].astype(int)]
6662 6662 # arrayMeteors = numpy.hstack((arrayDate, arrayMeteors))
6663 6663
6664 6664 #Parameters Array
6665 6665 arrayParameters[:,0] = arrayDate #Date
6666 6666 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
6667 6667 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
6668 6668 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
6669 6669 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
6670 6670
6671 6671
6672 6672 return arrayParameters
6673 6673
6674 6674 class CorrectSMPhases(Operation):
6675 6675
6676 6676 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
6677 6677
6678 6678 arrayParameters = dataOut.data_param
6679 6679 pairsList = []
6680 6680 pairx = (0,1)
6681 6681 pairy = (2,3)
6682 6682 pairsList.append(pairx)
6683 6683 pairsList.append(pairy)
6684 6684 jph = numpy.zeros(4)
6685 6685
6686 6686 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
6687 6687 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
6688 6688 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
6689 6689
6690 6690 meteorOps = SMOperations()
6691 6691 if channelPositions is None:
6692 6692 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
6693 6693 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
6694 6694
6695 6695 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
6696 6696 h = (hmin,hmax)
6697 6697
6698 6698 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
6699 6699
6700 6700 dataOut.data_param = arrayParameters
6701 6701 return
6702 6702
6703 6703 class SMPhaseCalibration(Operation):
6704 6704
6705 6705 __buffer = None
6706 6706
6707 6707 __initime = None
6708 6708
6709 6709 __dataReady = False
6710 6710
6711 6711 __isConfig = False
6712 6712
6713 6713 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
6714 6714
6715 6715 dataTime = currentTime + paramInterval
6716 6716 deltaTime = dataTime - initTime
6717 6717
6718 6718 if deltaTime >= outputInterval or deltaTime < 0:
6719 6719 return True
6720 6720
6721 6721 return False
6722 6722
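    # __getGammas estimates, for each reference pair, the relative phase offset
    # gamma: the combination -phip2*d3/d2 - phip3 should cluster around the true
    # offset for valid meteors, so it is wrapped, replicated at +-pi/2 to soften
    # edge effects, binned into a 64-bin histogram over (-pi/2, pi/2), and a
    # Gaussian is least-squares fitted around the histogram peak; the fitted
    # centre is taken as gamma.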
6723 6723 def __getGammas(self, pairs, d, phases):
6724 6724 gammas = numpy.zeros(2)
6725 6725
6726 6726 for i in range(len(pairs)):
6727 6727
6728 6728 pairi = pairs[i]
6729 6729
6730 6730 phip3 = phases[:,pairi[0]]
6731 6731 d3 = d[pairi[0]]
6732 6732 phip2 = phases[:,pairi[1]]
6733 6733 d2 = d[pairi[1]]
6734 6734 #Calculating gamma
6735 6735 # jdcos = alp1/(k*d1)
6736 6736 # jgamma = numpy.angle(numpy.exp(1j*(d0*alp1/d1 - alp0)))
6737 6737 jgamma = -phip2*d3/d2 - phip3
6738 6738 jgamma = numpy.angle(numpy.exp(1j*jgamma))
6739 6739 # jgamma[jgamma>numpy.pi] -= 2*numpy.pi
6740 6740 # jgamma[jgamma<-numpy.pi] += 2*numpy.pi
6741 6741
6742 6742 #Revised distribution
6743 6743 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
6744 6744
6745 6745 #Histogram
6746 6746 nBins = 64
6747 6747 rmin = -0.5*numpy.pi
6748 6748 rmax = 0.5*numpy.pi
6749 6749 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
6750 6750
6751 6751 meteorsY = phaseHisto[0]
6752 6752 phasesX = phaseHisto[1][:-1]
6753 6753 width = phasesX[1] - phasesX[0]
6754 6754 phasesX += width/2
6755 6755
6756 6756             #Gaussian approximation
6757 6757 bpeak = meteorsY.argmax()
6758 6758 peak = meteorsY.max()
6759 6759 jmin = bpeak - 5
6760 6760 jmax = bpeak + 5 + 1
6761 6761
6762 6762 if jmin<0:
6763 6763 jmin = 0
6764 6764 jmax = 6
6765 6765 elif jmax > meteorsY.size:
6766 6766 jmin = meteorsY.size - 6
6767 6767 jmax = meteorsY.size
6768 6768
6769 6769 x0 = numpy.array([peak,bpeak,50])
6770 6770 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
6771 6771
6772 6772 #Gammas
6773 6773 gammas[i] = coeff[0][1]
6774 6774
6775 6775 return gammas
6776 6776
6777 6777 def __residualFunction(self, coeffs, y, t):
6778 6778
6779 6779 return y - self.__gauss_function(t, coeffs)
6780 6780
6781 6781 def __gauss_function(self, t, coeffs):
6782 6782
6783 6783 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
6784 6784
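    # __getPhases searches for the receiver phase offsets with a coarse-to-fine
    # grid: at each of the ntimes iterations a 20x20 grid of candidate (x, y)
    # offsets is generated over the current angular range, every candidate is
    # scored by how many meteors come out of getMeteorParams with error == 0,
    # and the best candidate becomes the centre of the next, narrower range.
    # The winning offsets are returned in degrees.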
6785 6785 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
6786 6786 meteorOps = SMOperations()
6787 6787 nchan = 4
6788 6788         pairx = pairsList[0] #x is 0
6789 6789         pairy = pairsList[1] #y is 1
6790 6790 center_xangle = 0
6791 6791 center_yangle = 0
6792 6792 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
6793 6793 ntimes = len(range_angle)
6794 6794
6795 6795 nstepsx = 20
6796 6796 nstepsy = 20
6797 6797
6798 6798 for iz in range(ntimes):
6799 6799 min_xangle = -range_angle[iz]/2 + center_xangle
6800 6800 max_xangle = range_angle[iz]/2 + center_xangle
6801 6801 min_yangle = -range_angle[iz]/2 + center_yangle
6802 6802 max_yangle = range_angle[iz]/2 + center_yangle
6803 6803
6804 6804 inc_x = (max_xangle-min_xangle)/nstepsx
6805 6805 inc_y = (max_yangle-min_yangle)/nstepsy
6806 6806
6807 6807 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
6808 6808 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
6809 6809 penalty = numpy.zeros((nstepsx,nstepsy))
6810 6810 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
6811 6811 jph = numpy.zeros(nchan)
6812 6812
6813 6813 # Iterations looking for the offset
6814 6814 for iy in range(int(nstepsy)):
6815 6815 for ix in range(int(nstepsx)):
6816 6816 d3 = d[pairsList[1][0]]
6817 6817 d2 = d[pairsList[1][1]]
6818 6818 d5 = d[pairsList[0][0]]
6819 6819 d4 = d[pairsList[0][1]]
6820 6820
6821 6821 alp2 = alpha_y[iy] #gamma 1
6822 6822 alp4 = alpha_x[ix] #gamma 0
6823 6823
6824 6824 alp3 = -alp2*d3/d2 - gammas[1]
6825 6825 alp5 = -alp4*d5/d4 - gammas[0]
6826 6826 # jph[pairy[1]] = alpha_y[iy]
6827 6827 # jph[pairy[0]] = -gammas[1] - alpha_y[iy]*d[pairy[1]]/d[pairy[0]]
6828 6828
6829 6829 # jph[pairx[1]] = alpha_x[ix]
6830 6830 # jph[pairx[0]] = -gammas[0] - alpha_x[ix]*d[pairx[1]]/d[pairx[0]]
6831 6831 jph[pairsList[0][1]] = alp4
6832 6832 jph[pairsList[0][0]] = alp5
6833 6833 jph[pairsList[1][0]] = alp3
6834 6834 jph[pairsList[1][1]] = alp2
6835 6835 jph_array[:,ix,iy] = jph
6836 6836 # d = [2.0,2.5,2.5,2.0]
6837 6837                     #TODO: check that the meteors are read in correctly
6838 6838 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
6839 6839 error = meteorsArray1[:,-1]
6840 6840 ind1 = numpy.where(error==0)[0]
6841 6841 penalty[ix,iy] = ind1.size
6842 6842
6843 6843 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
6844 6844 phOffset = jph_array[:,i,j]
6845 6845
6846 6846 center_xangle = phOffset[pairx[1]]
6847 6847 center_yangle = phOffset[pairy[1]]
6848 6848
6849 6849 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
6850 6850 phOffset = phOffset*180/numpy.pi
6851 6851 return phOffset
6852 6852
6853 6853
6854 6854 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
6855 6855
6856 6856 dataOut.flagNoData = True
6857 6857 self.__dataReady = False
6858 6858 dataOut.outputInterval = nHours*3600
6859 6859
6860 6860 if self.__isConfig == False:
6861 6861 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
6862 6862 #Get Initial LTC time
6863 6863 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
6864 6864 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
6865 6865
6866 6866 self.__isConfig = True
6867 6867
6868 6868 if self.__buffer is None:
6869 6869 self.__buffer = dataOut.data_param.copy()
6870 6870
6871 6871 else:
6872 6872 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
6873 6873
6874 6874 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
6875 6875
6876 6876 if self.__dataReady:
6877 6877 dataOut.utctimeInit = self.__initime
6878 6878 self.__initime += dataOut.outputInterval #to erase time offset
6879 6879
6880 6880 freq = dataOut.frequency
6881 6881 c = dataOut.C #m/s
6882 6882 lamb = c/freq
6883 6883 k = 2*numpy.pi/lamb
6884 6884 azimuth = 0
6885 6885 h = (hmin, hmax)
6886 6886 # pairs = ((0,1),(2,3)) #Star configuration
6887 6887 # pairs = ((1,0),(2,3)) #T
6888 6888
6889 6889 if channelPositions is None:
6890 6890 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
6891 6891 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Star configuration
6892 6892 meteorOps = SMOperations()
6893 6893 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
6894 6894
6895 6895 #Check pair order: put the index of the longer baseline first in each pair
6896 6896 pairs = []
6897 6897 if distances[1] > distances[0]:
6898 6898 pairs.append((1,0))
6899 6899 else:
6900 6900 pairs.append((0,1))
6901 6901
6902 6902 if distances[3] > distances[2]:
6903 6903 pairs.append((3,2))
6904 6904 else:
6905 6905 pairs.append((2,3))
6906 6906 # distances1 = [-distances[0]*lamb, distances[1]*lamb, -distances[2]*lamb, distances[3]*lamb]
6907 6907
6908 6908 meteorsArray = self.__buffer
6909 6909 error = meteorsArray[:,-1]
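# Keep meteors with no error (0) or whose only flagged problems are AOA (3, 4) or
# height (13, 14) errors; those errors are recomputed below once the candidate
# phase offsets are applied.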
6910 6910 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
6911 6911 ind1 = numpy.where(boolError)[0]
6912 6912 meteorsArray = meteorsArray[ind1,:]
6913 6913 meteorsArray[:,-1] = 0
6914 6914 phases = meteorsArray[:,8:12]
6915 6915
6916 6916 #Calculate Gammas
6917 6917 gammas = self.__getGammas(pairs, distances, phases)
6918 6918 # gammas = numpy.array([-21.70409463,45.76935864])*numpy.pi/180
6919 6919 #Calculate Phases
6920 6920 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
6921 6921 phasesOff = phasesOff.reshape((1,phasesOff.size))
6922 6922 dataOut.data_output = -phasesOff
6923 6923 dataOut.flagNoData = False
6924 6924 self.__buffer = None
6925 6925
6926 6926
6927 6927 return
6928 6928
6929 6929 class SMOperations():
6930 6930
6931 6931 def __init__(self):
6932 6932
6933 6933 return
6934 6934
6935 6935 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
6936 6936
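# Column layout of arrayParameters as used below:
#   [:,1] range, [:,2] height, [:,3:6] azimuth / zenith / cos-direction error,
#   [:,8:12] channel phases, [:,-1] error code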
6937 6937 arrayParameters = arrayParameters0.copy()
6938 6938 hmin = h[0]
6939 6939 hmax = h[1]
6940 6940
6941 6941 #Calculate AOA (Error N 3, 4)
6942 6942 #JONES ET AL. 1998
6943 6943 AOAthresh = numpy.pi/8
6944 6944 error = arrayParameters[:,-1]
6945 6945 phases = -arrayParameters[:,8:12] + jph
6946 6946 # phases = numpy.unwrap(phases)
6947 6947 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
6948 6948
6949 6949 #Calculate Heights (Error N 13 and 14)
6950 6950 error = arrayParameters[:,-1]
6951 6951 Ranges = arrayParameters[:,1]
6952 6952 zenith = arrayParameters[:,4]
6953 6953 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
6954 6954
6955 6955 #----------------------- Get Final data ------------------------------------
6956 6956 # error = arrayParameters[:,-1]
6957 6957 # ind1 = numpy.where(error==0)[0]
6958 6958 # arrayParameters = arrayParameters[ind1,:]
6959 6959
6960 6960 return arrayParameters
6961 6961
6962 6962 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
6963 6963
6964 6964 arrayAOA = numpy.zeros((phases.shape[0],3))
6965 6965 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
6966 6966
6967 6967 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
6968 6968 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
6969 6969 arrayAOA[:,2] = cosDirError
6970 6970
6971 6971 azimuthAngle = arrayAOA[:,0]
6972 6972 zenithAngle = arrayAOA[:,1]
6973 6973
6974 6974 #Setting Error
6975 6975 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
6976 6976 error[indError] = 0
6977 6977 #Number 3: AOA not feasible
6978 6978 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
6979 6979 error[indInvalid] = 3
6980 6980 #Number 4: Large difference in AOAs obtained from different antenna baselines
6981 6981 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
6982 6982 error[indInvalid] = 4
6983 6983 return arrayAOA, error
6984 6984
6985 6985 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
6986 6986
6987 6987 #Initializing some variables
6988 6988 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
6989 6989 ang_aux = ang_aux.reshape(1,ang_aux.size)
6990 6990
6991 6991 cosdir = numpy.zeros((arrayPhase.shape[0],2))
6992 6992 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
6993 6993
6994 6994
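# For each axis the direction cosine is estimated twice (Jones et al. 1998 style):
# a coarse, non-ambiguous estimate from the summed phases of the baseline pair,
# cos0 = (ph0 + ph1) / (2*pi*(d0 - d1)), and a finer but 2*pi-ambiguous estimate
# from the phase difference, cos1 = (ph0 - ph1 + 2*pi*n) / (2*pi*(d0 + d1)) with
# n = -8..8 (ang_aux); the candidate closest to the coarse estimate is kept.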
6995 6995 for i in range(2):
6996 6996 ph0 = arrayPhase[:,pairsList[i][0]]
6997 6997 ph1 = arrayPhase[:,pairsList[i][1]]
6998 6998 d0 = distances[pairsList[i][0]]
6999 6999 d1 = distances[pairsList[i][1]]
7000 7000
7001 7001 ph0_aux = ph0 + ph1
7002 7002 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
7003 7003 # ph0_aux[ph0_aux > numpy.pi] -= 2*numpy.pi
7004 7004 # ph0_aux[ph0_aux < -numpy.pi] += 2*numpy.pi
7005 7005 #First Estimation
7006 7006 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
7007 7007
7008 7008 #Most-Accurate Second Estimation
7009 7009 phi1_aux = ph0 - ph1
7010 7010 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
7011 7011 #Direction Cosine 1
7012 7012 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
7013 7013
7014 7014 #Searching the correct Direction Cosine
7015 7015 cosdir0_aux = cosdir0[:,i]
7016 7016 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
7017 7017 #Minimum Distance
7018 7018 cosDiff = (cosdir1 - cosdir0_aux)**2
7019 7019 indcos = cosDiff.argmin(axis = 1)
7020 7020 #Saving Value obtained
7021 7021 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
7022 7022
7023 7023 return cosdir0, cosdir
7024 7024
7025 7025 def __calculateAOA(self, cosdir, azimuth):
7026 7026 cosdirX = cosdir[:,0]
7027 7027 cosdirY = cosdir[:,1]
7028 7028
7029 7029 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
7030 7030 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth#0 deg north, 90 deg east
7031 7031 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
7032 7032
7033 7033 return angles
7034 7034
7035 7035 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
7036 7036
7037 7037 Ramb = 375 #Ramb = c/(2*PRF)
7038 7038 Re = 6371 #Earth Radius
7039 7039 heights = numpy.zeros(Ranges.shape)
7040 7040
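# The measured range is ambiguous in multiples of Ramb = c/(2*PRF) = 375 km, so
# candidate ranges Ri = R + n*Ramb (n = 0, 1, 2) are formed and converted to heights
# above a spherical Earth, hi = sqrt(Re^2 + Ri^2 + 2*Re*Ri*cos(zenith)) - Re; only
# echoes with exactly one candidate inside [minHeight, maxHeight] keep a height.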
7041 7041 R_aux = numpy.array([0,1,2])*Ramb
7042 7042 R_aux = R_aux.reshape(1,R_aux.size)
7043 7043
7044 7044 Ranges = Ranges.reshape(Ranges.size,1)
7045 7045
7046 7046 Ri = Ranges + R_aux
7047 7047 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
7048 7048
7049 7049 #Check if there is a height between 70 and 110 km
7050 7050 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
7051 7051 ind_h = numpy.where(h_bool == 1)[0]
7052 7052
7053 7053 hCorr = hi[ind_h, :]
7054 7054 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
7055 7055
7056 7056 hCorr = hi[ind_hCorr][:len(ind_h)]
7057 7057 heights[ind_h] = hCorr
7058 7058
7059 7059 #Setting Error
7060 7060 #Number 13: Height unresolvable echo: no valid height within minHeight and maxHeight (70 to 110 km)
7061 7061 #Number 14: Height ambiguous echo: more than one possible height within minHeight and maxHeight (70 to 110 km)
7062 7062 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
7063 7063 error[indError] = 0
7064 7064 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
7065 7065 error[indInvalid2] = 14
7066 7066 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
7067 7067 error[indInvalid1] = 13
7068 7068
7069 7069 return heights, error
7070 7070
7071 7071 def getPhasePairs(self, channelPositions):
7072 7072 chanPos = numpy.array(channelPositions)
7073 7073 listOper = list(itertools.combinations(list(range(5)),2))
7074 7074
7075 7075 distances = numpy.zeros(4)
7076 7076 axisX = []
7077 7077 axisY = []
7078 7078 distX = numpy.zeros(3)
7079 7079 distY = numpy.zeros(3)
7080 7080 ix = 0
7081 7081 iy = 0
7082 7082
7083 7083 pairX = numpy.zeros((2,2))
7084 7084 pairY = numpy.zeros((2,2))
7085 7085
7086 7086 for i in range(len(listOper)):
7087 7087 pairi = listOper[i]
7088 7088
7089 7089 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
7090 7090
7091 7091 if posDif[0] == 0:
7092 7092 axisY.append(pairi)
7093 7093 distY[iy] = posDif[1]
7094 7094 iy += 1
7095 7095 elif posDif[1] == 0:
7096 7096 axisX.append(pairi)
7097 7097 distX[ix] = posDif[0]
7098 7098 ix += 1
7099 7099
7100 7100 for i in range(2):
7101 7101 if i==0:
7102 7102 dist0 = distX
7103 7103 axis0 = axisX
7104 7104 else:
7105 7105 dist0 = distY
7106 7106 axis0 = axisY
7107 7107
7108 7108 side = numpy.argsort(dist0)[:-1]
7109 7109 axis0 = numpy.array(axis0)[side,:]
7110 7110 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
7111 7111 axis1 = numpy.unique(numpy.reshape(axis0,4))
7112 7112 side = axis1[axis1 != chanC]
7113 7113 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
7114 7114 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
7115 7115 if diff1<0:
7116 7116 chan2 = side[0]
7117 7117 d2 = numpy.abs(diff1)
7118 7118 chan1 = side[1]
7119 7119 d1 = numpy.abs(diff2)
7120 7120 else:
7121 7121 chan2 = side[1]
7122 7122 d2 = numpy.abs(diff2)
7123 7123 chan1 = side[0]
7124 7124 d1 = numpy.abs(diff1)
7125 7125
7126 7126 if i==0:
7127 7127 chanCX = chanC
7128 7128 chan1X = chan1
7129 7129 chan2X = chan2
7130 7130 distances[0:2] = numpy.array([d1,d2])
7131 7131 else:
7132 7132 chanCY = chanC
7133 7133 chan1Y = chan1
7134 7134 chan2Y = chan2
7135 7135 distances[2:4] = numpy.array([d1,d2])
7136 7136 # axisXsides = numpy.reshape(axisX[ix,:],4)
7137 7137 #
7138 7138 # channelCentX = int(numpy.intersect1d(pairX[0,:], pairX[1,:])[0])
7139 7139 # channelCentY = int(numpy.intersect1d(pairY[0,:], pairY[1,:])[0])
7140 7140 #
7141 7141 # ind25X = numpy.where(pairX[0,:] != channelCentX)[0][0]
7142 7142 # ind20X = numpy.where(pairX[1,:] != channelCentX)[0][0]
7143 7143 # channel25X = int(pairX[0,ind25X])
7144 7144 # channel20X = int(pairX[1,ind20X])
7145 7145 # ind25Y = numpy.where(pairY[0,:] != channelCentY)[0][0]
7146 7146 # ind20Y = numpy.where(pairY[1,:] != channelCentY)[0][0]
7147 7147 # channel25Y = int(pairY[0,ind25Y])
7148 7148 # channel20Y = int(pairY[1,ind20Y])
7149 7149
7150 7150 # pairslist = [(channelCentX, channel25X),(channelCentX, channel20X),(channelCentY,channel25Y),(channelCentY, channel20Y)]
7151 7151 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
7152 7152
7153 7153 return pairslist, distances
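# A minimal usage sketch (illustrative only; the positions below are the "Star"
# layout already used in run() above, in the same units as channelPositions):
#
#     ops = SMOperations()
#     positions = [(4.5, 2), (2, 4.5), (2, 2), (2, 0), (0, 2)]
#     pairslist, distances = ops.getPhasePairs(positions)
#     # pairslist -> four (center, side) channel pairs, two along each axis
#     # distances -> the matching center-to-side baselines: [d1x, d2x, d1y, d2y]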
7154 7154 # def __getAOA(self, phases, pairsList, error, AOAthresh, azimuth):
7155 7155 #
7156 7156 # arrayAOA = numpy.zeros((phases.shape[0],3))
7157 7157 # cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList)
7158 7158 #
7159 7159 # arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
7160 7160 # cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
7161 7161 # arrayAOA[:,2] = cosDirError
7162 7162 #
7163 7163 # azimuthAngle = arrayAOA[:,0]
7164 7164 # zenithAngle = arrayAOA[:,1]
7165 7165 #
7166 7166 # #Setting Error
7167 7167 # #Number 3: AOA not feasible
7168 7168 # indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
7169 7169 # error[indInvalid] = 3
7170 7170 # #Number 4: Large difference in AOAs obtained from different antenna baselines
7171 7171 # indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
7172 7172 # error[indInvalid] = 4
7173 7173 # return arrayAOA, error
7174 7174 #
7175 7175 # def __getDirectionCosines(self, arrayPhase, pairsList):
7176 7176 #
7177 7177 # #Initializing some variables
7178 7178 # ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
7179 7179 # ang_aux = ang_aux.reshape(1,ang_aux.size)
7180 7180 #
7181 7181 # cosdir = numpy.zeros((arrayPhase.shape[0],2))
7182 7182 # cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
7183 7183 #
7184 7184 #
7185 7185 # for i in range(2):
7186 7186 # #First Estimation
7187 7187 # phi0_aux = arrayPhase[:,pairsList[i][0]] + arrayPhase[:,pairsList[i][1]]
7188 7188 # #Dealias
7189 7189 # indcsi = numpy.where(phi0_aux > numpy.pi)
7190 7190 # phi0_aux[indcsi] -= 2*numpy.pi
7191 7191 # indcsi = numpy.where(phi0_aux < -numpy.pi)
7192 7192 # phi0_aux[indcsi] += 2*numpy.pi
7193 7193 # #Direction Cosine 0
7194 7194 # cosdir0[:,i] = -(phi0_aux)/(2*numpy.pi*0.5)
7195 7195 #
7196 7196 # #Most-Accurate Second Estimation
7197 7197 # phi1_aux = arrayPhase[:,pairsList[i][0]] - arrayPhase[:,pairsList[i][1]]
7198 7198 # phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
7199 7199 # #Direction Cosine 1
7200 7200 # cosdir1 = -(phi1_aux + ang_aux)/(2*numpy.pi*4.5)
7201 7201 #
7202 7202 # #Searching the correct Direction Cosine
7203 7203 # cosdir0_aux = cosdir0[:,i]
7204 7204 # cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
7205 7205 # #Minimum Distance
7206 7206 # cosDiff = (cosdir1 - cosdir0_aux)**2
7207 7207 # indcos = cosDiff.argmin(axis = 1)
7208 7208 # #Saving Value obtained
7209 7209 # cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
7210 7210 #
7211 7211 # return cosdir0, cosdir
7212 7212 #
7213 7213 # def __calculateAOA(self, cosdir, azimuth):
7214 7214 # cosdirX = cosdir[:,0]
7215 7215 # cosdirY = cosdir[:,1]
7216 7216 #
7217 7217 # zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
7218 7218 # azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
7219 7219 # angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
7220 7220 #
7221 7221 # return angles
7222 7222 #
7223 7223 # def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
7224 7224 #
7225 7225 # Ramb = 375 #Ramb = c/(2*PRF)
7226 7226 # Re = 6371 #Earth Radius
7227 7227 # heights = numpy.zeros(Ranges.shape)
7228 7228 #
7229 7229 # R_aux = numpy.array([0,1,2])*Ramb
7230 7230 # R_aux = R_aux.reshape(1,R_aux.size)
7231 7231 #
7232 7232 # Ranges = Ranges.reshape(Ranges.size,1)
7233 7233 #
7234 7234 # Ri = Ranges + R_aux
7235 7235 # hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
7236 7236 #
7237 7237 # #Check if there is a height between 70 and 110 km
7238 7238 # h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
7239 7239 # ind_h = numpy.where(h_bool == 1)[0]
7240 7240 #
7241 7241 # hCorr = hi[ind_h, :]
7242 7242 # ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
7243 7243 #
7244 7244 # hCorr = hi[ind_hCorr]
7245 7245 # heights[ind_h] = hCorr
7246 7246 #
7247 7247 # #Setting Error
7248 7248 # #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
7249 7249 # #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
7250 7250 #
7251 7251 # indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
7252 7252 # error[indInvalid2] = 14
7253 7253 # indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
7254 7254 # error[indInvalid1] = 13
7255 7255 #
7256 7256 # return heights, error
7257 7257
7258 7258
7259 7259
7260 7260 class IGRFModel(Operation):
7261 7261 """Operation to calculate geomagnetic parameters (IGRF model).
7262 7262 
7263 7263 Written by R. Flores
7264 7264 
7265 7265
7266 7266 Parameters:
7267 7267 -----------
7268 7268 None
7269 7269
7270 7270 Example
7271 7271 --------
7272 7272
7273 7273 op = proc_unit.addOperation(name='IGRFModel', optype='other')
7274 7274
7275 7275 """
7276 7276
7277 7277 def __init__(self, **kwargs):
7278 7278
7279 7279 Operation.__init__(self, **kwargs)
7280 7280
7281 7281 self.aux=1
7282 7282
7283 7283 def run(self,dataOut):
7284 7284
7285 7285 try:
7286 7286 from schainpy.model.proc import mkfact_short_2020_2
7287 7287 except ImportError:
7288 7288 log.warning('You should install the "mkfact_short_2020" module to process the IGRF model')
7289 7289
7290 7290 if self.aux==1:
7291 7291
7292 7292 #dataOut.TimeBlockSeconds_First_Time=time.mktime(time.strptime(dataOut.TimeBlockDate))
7293 7293 #### we do not use dataOut.datatime.ctime() because it's the time of the second (next) block
7294 7294 dataOut.TimeBlockSeconds_First_Time=dataOut.TimeBlockSeconds
7295 7295 dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_First_Time)
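# Decimal year and UT (hours) for the geomagnetic model, derived from the block time.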
7296 7296 dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0
7297 7297 dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0
7298 7298
7299 7299 self.aux=0
7300 7300 dh = dataOut.heightList[1]-dataOut.heightList[0]
7301 7301 dataOut.h=numpy.arange(0.0,dh*dataOut.MAXNRANGENDT,dh,dtype='float32')
7302 7302 dataOut.bfm=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
7303 7303 dataOut.bfm=numpy.array(dataOut.bfm,order='F')
7304 7304 dataOut.thb=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
7305 7305 dataOut.thb=numpy.array(dataOut.thb,order='F')
7306 7306 dataOut.bki=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
7307 7307 dataOut.bki=numpy.array(dataOut.bki,order='F')
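# mkfact (Fortran extension) is assumed here to fill bfm, thb and bki in place with
# the geomagnetic-field parameters for every range gate at the given decimal year.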
7308 7308 mkfact_short_2020_2.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT)
7309 7309
7310 7310
7311 7311 return dataOut
7312 7312
7313 7313 class MergeProc(ProcessingUnit):
7314 7314
7315 7315 def __init__(self):
7316 7316 ProcessingUnit.__init__(self)
7317 7317
7318 7318 def run(self, attr_data, attr_data_2 = None, attr_data_3 = None, attr_data_4 = None, attr_data_5 = None, mode=0):
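# Merge modes handled below (as implemented in this method):
#   0  - concatenate attr_data from every input
#   1  - hybrid DP/LP: keep lag spectra and cross-spectra from both inputs
#   2  - HAE 2022: sum attr_data over inputs and double nIncohInt
#   4  - hybrid LP-SSheightProfiles: DP lag spectra plus the LP ACF
#   5  - concatenate attr_data and attr_data_2 from every input
#   6  - hybrid spectra-voltage: DP lag spectra plus the integrated LP output
#   11 - MST-ISR: take ph2, dphi and sdp2 from the second input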
7319 7319 #print("*****************************Merge***************")
7320 7320
7321 7321 self.dataOut = getattr(self, self.inputs[0])
7322 7322 data_inputs = [getattr(self, attr) for attr in self.inputs]
7323 7323 #print(data_inputs)
7324 7324 #print("Run: ",self.dataOut.runNextUnit)
7325 7325 #exit(1)
7326 7326 #print(self.dataOut.nHeights)
7327 7327 #exit(1)
7328 7328 #print("a:", [getattr(data, attr_data) for data in data_inputs][1])
7329 7329 #exit(1)
7330 7330 if mode==0:
7331 7331 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
7332 7332 setattr(self.dataOut, attr_data, data)
7333 7333
7334 7334 if mode==1: #Hybrid
7335 7335 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7336 7336 #setattr(self.dataOut, attr_data, data)
7337 7337 setattr(self.dataOut, 'dataLag_spc', [getattr(data, attr_data) for data in data_inputs][0])
7338 7338 setattr(self.dataOut, 'dataLag_spc_LP', [getattr(data, attr_data) for data in data_inputs][1])
7339 7339 setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
7340 7340 setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
7341 7341 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
7342 7342 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
7343 7343 '''
7344 7344 print(self.dataOut.dataLag_spc_LP.shape)
7345 7345 print(self.dataOut.dataLag_cspc_LP.shape)
7346 7346 exit(1)
7347 7347 '''
7348 7348
7349 7349 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
7350 7350 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
7351 7351 '''
7352 7352 print("Merge")
7353 7353 print(numpy.shape(self.dataOut.dataLag_spc))
7354 7354 print(numpy.shape(self.dataOut.dataLag_spc_LP))
7355 7355 print(numpy.shape(self.dataOut.dataLag_cspc))
7356 7356 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
7357 7357 exit(1)
7358 7358 '''
7359 7359 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
7360 7360 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
7361 7361 #exit(1)
7362 7362 #print(self.dataOut.NDP)
7363 7363 #print(self.dataOut.nNoiseProfiles)
7364 7364
7365 7365 #self.dataOut.nIncohInt_LP = 128
7366 7366 self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
7367 7367 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt
7368 7368 self.dataOut.NLAG = 16
7369 7369 self.dataOut.NRANGE = 200
7370 7370 self.dataOut.NSCAN = 128
7371 7371 #print(numpy.shape(self.dataOut.data_spc))
7372 7372
7373 7373 #exit(1)
7374 7374
7375 7375 if mode==2: #HAE 2022
7376 7376 data = numpy.sum([getattr(data, attr_data) for data in data_inputs],axis=0)
7377 7377 setattr(self.dataOut, attr_data, data)
7378 7378
7379 7379 self.dataOut.nIncohInt *= 2
7380 7380 #meta = self.dataOut.getFreqRange(1)/1000.
7381 7381 self.dataOut.freqRange = self.dataOut.getFreqRange(1)/1000.
7382 7382
7383 7383 #exit(1)
7384 7384
7385 7385 if mode==4: #Hybrid LP-SSheightProfiles
7386 7386 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7387 7387 #setattr(self.dataOut, attr_data, data)
7388 7388 setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[0], attr_data)) #DP
7389 7389 setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[0], attr_data_2)) #DP
7390 7390 setattr(self.dataOut, 'dataLag_spc_LP', getattr(data_inputs[1], attr_data_3)) #LP
7391 7391 #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP
7392 7392 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7393 7393 setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7394 7394 #print("Merge data_acf: ",self.dataOut.data_acf.shape)
7395 7395
7396 7396
7397 7397 #self.dataOut.nIncohInt_LP = 128
7398 7398 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
7399 7399 self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP
7400 7400 self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP
7401 7401 self.dataOut.NSCAN = 128
7402 7402 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN
7403 7403 #print("sahpi",self.dataOut.nIncohInt_LP)
7404 7404 #exit(1)
7405 7405 self.dataOut.NLAG = 16
7406 7406 self.dataOut.NLAG = self.dataOut.data_acf.shape[1]
7407 7407 self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1]
7408 7408
7409 7409 #print(numpy.shape(self.dataOut.data_spc))
7410 7410
7411 7411 #exit(1)
7412 7412 if mode==5:
7413 7413 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
7414 7414 setattr(self.dataOut, attr_data, data)
7415 7415 data = numpy.concatenate([getattr(data, attr_data_2) for data in data_inputs])
7416 7416 setattr(self.dataOut, attr_data_2, data)
7417 7417
7418 7418 if mode==6: #Hybrid Spectra-Voltage
7419 7419 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7420 7420 #setattr(self.dataOut, attr_data, data)
7421 7421 setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[1], attr_data)) #DP
7422 7422 setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[1], attr_data_2)) #DP
7423 7423 setattr(self.dataOut, 'output_LP_integrated', getattr(data_inputs[0], attr_data_3)) #LP
7424 7424 #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP
7425 7425 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7426 7426 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7427 7427 #print("Merge data_acf: ",self.dataOut.data_acf.shape)
7428 7428 #print(self.dataOut.NSCAN)
7429 7429 self.dataOut.nIncohInt = int(self.dataOut.NAVG * self.dataOut.nint)
7430 7430 #print(self.dataOut.dataLag_spc.shape)
7431 7431 self.dataOut.nProfiles = self.dataOut.nProfiles_DP = self.dataOut.dataLag_spc.shape[1]
7432 7432 '''
7433 7433 #self.dataOut.nIncohInt_LP = 128
7434 7434 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
7435 7435 self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP
7436 7436 self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP
7437 7437 self.dataOut.NSCAN = 128
7438 7438 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN
7439 7439 #print("sahpi",self.dataOut.nIncohInt_LP)
7440 7440 #exit(1)
7441 7441 self.dataOut.NLAG = 16
7442 7442 self.dataOut.NLAG = self.dataOut.data_acf.shape[1]
7443 7443 self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1]
7444 7444 '''
7445 7445 #print(numpy.shape(self.dataOut.data_spc))
7446 7446 #print("*************************GOOD*************************")
7447 7447 #exit(1)
7448 7448
7449 7449 if mode==11: #MST ISR
7450 7450 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7451 7451 #setattr(self.dataOut, attr_data, data)
7452 7452 #setattr(self.dataOut, 'ph2', [getattr(data, attr_data) for data in data_inputs][1])
7453 7453 #setattr(self.dataOut, 'dphi', [getattr(data, attr_data_2) for data in data_inputs][1])
7454 7454 #setattr(self.dataOut, 'sdp2', [getattr(data, attr_data_3) for data in data_inputs][1])
7455 7455
7456 7456 setattr(self.dataOut, 'ph2', getattr(data_inputs[1], attr_data)) #DP
7457 7457 setattr(self.dataOut, 'dphi', getattr(data_inputs[1], attr_data_2)) #DP
7458 7458 setattr(self.dataOut, 'sdp2', getattr(data_inputs[1], attr_data_3)) #DP
7459 7459
7460 7460 print("MST Density", numpy.shape(self.dataOut.ph2))
7461 7461 print("cf MST: ", self.dataOut.cf)
7462 7462 #exit(1)
7463 7463 #print("MST Density", self.dataOut.ph2[116:283])
7464 7464 print("MST Density", self.dataOut.ph2[80:120])
7465 7465 print("MST dPhi", self.dataOut.dphi[80:120])
7466 7466 self.dataOut.ph2 *= self.dataOut.cf#0.0008136899
7467 7467 #print("MST Density", self.dataOut.ph2[116:283])
7468 7468 self.dataOut.sdp2 *= 0#self.dataOut.cf#0.0008136899
7469 7469 #print("MST Density", self.dataOut.ph2[116:283])
7470 7470 print("MST Density", self.dataOut.ph2[80:120])
7471 7471 self.dataOut.NSHTS = int(numpy.shape(self.dataOut.ph2)[0])
7472 7472 dH = self.dataOut.heightList[1]-self.dataOut.heightList[0]
7473 7473 dH /= self.dataOut.windowOfFilter
7474 7474 self.dataOut.heightList = numpy.arange(0,self.dataOut.NSHTS)*dH + dH
7475 7475 #print("heightList: ", self.dataOut.heightList)
7476 7476 self.dataOut.NDP = self.dataOut.NSHTS
7477 7477 #exit(1)
7478 7478 #print(self.dataOut.heightList)
7479 7479
7480 7480 class MST_Den_Conv(Operation):
7481 7481 """Operation to pack the MST density (ph2) and Faraday (dphi) profiles into the PowDen and FarDen output arrays.
7482 7482 
7483 7483 Written by R. Flores
7484 7484 
7485 7485
7486 7486 Parameters:
7487 7487 -----------
7488 7488 None
7489 7489
7490 7490 Example
7491 7491 --------
7492 7492
7493 7493 op = proc_unit.addOperation(name='MST_Den_Conv', optype='other')
7494 7494
7495 7495 """
7496 7496
7497 7497 def __init__(self, **kwargs):
7498 7498
7499 7499 Operation.__init__(self, **kwargs)
7500 7500
7501 7501 def run(self,dataOut):
7502 7502
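# Pack the 1-D density profiles into (1, NDP) arrays, presumably the shape expected
# by the downstream writers and plotters.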
7503 7503 dataOut.PowDen = numpy.zeros((1,dataOut.NDP))
7504 7504 dataOut.PowDen[0] = numpy.copy(dataOut.ph2[:dataOut.NDP])
7505 7505
7506 7506 dataOut.FarDen = numpy.zeros((1,dataOut.NDP))
7507 7507 dataOut.FarDen[0] = numpy.copy(dataOut.dphi[:dataOut.NDP])
7508 7508 print("pow den shape", numpy.shape(dataOut.PowDen))
7509 7509 print("far den shape", numpy.shape(dataOut.FarDen))
7510 7510 return dataOut