Drifts processing for some heights of interest. Some parameters are needed for Madrigal writing: lat, lon. NaN vectors are not written to avoid errors in the Madrigal view.
imanay -
r1690:571f81d70cd0
@@ -1,5597 +1,5583
1 1 import numpy
2 2 import math
3 3 from scipy import optimize, interpolate, signal, stats, ndimage
4 4 from scipy.fftpack import fft
5 5 import scipy
6 6 import re
7 7 import datetime
8 8 import copy
9 9 import sys
10 10 import importlib
11 11 import itertools
12 12 from multiprocessing import Pool, TimeoutError
13 13 from multiprocessing.pool import ThreadPool
14 14 import time
15 15
16 16 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state parameters
17 17 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
18 18 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
19 19 from scipy import asarray as ar,exp
20 20 from scipy.optimize import fmin, curve_fit
21 21 from schainpy.utils import log
22 22 import warnings
23 23 from numpy import NaN
24 24 from scipy.optimize.optimize import OptimizeWarning
25 25 warnings.filterwarnings('ignore')
26 26
27 27
28 28 SPEED_OF_LIGHT = 299792458
29 29
30 30 '''solving pickling issue'''
31 31
32 32 def _pickle_method(method):
33 33 func_name = method.__func__.__name__
34 34 obj = method.__self__
35 35 cls = method.__self__.__class__
36 36 return _unpickle_method, (func_name, obj, cls)
37 37
38 38 def _unpickle_method(func_name, obj, cls):
39 39 for cls in cls.mro():
40 40 try:
41 41 func = cls.__dict__[func_name]
42 42 except KeyError:
43 43 pass
44 44 else:
45 45 break
46 46 return func.__get__(obj, cls)
47 47
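A minimal sketch (not part of this changeset) of how the two pickling helpers above are typically wired in: registering them with copyreg makes bound methods picklable, which is what allows instance methods such as FitGau to be shipped to multiprocessing workers further down. The registration call itself is an assumption for illustration; it does not appear in this file.

    import copyreg, types
    # Any bound method is reduced to (_unpickle_method, (name, instance, class))
    # and rebuilt on the worker side by walking the MRO, as implemented above.
    copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)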
48 48 # @MPDecorator
49 49 class ParametersProc(ProcessingUnit):
50 50
51 51 METHODS = {}
52 52 nSeconds = None
53 53
54 54 def __init__(self):
55 55 ProcessingUnit.__init__(self)
56 56
57 57 self.buffer = None
58 58 self.firstdatatime = None
59 59 self.profIndex = 0
60 60 self.dataOut = Parameters()
61 61 self.setupReq = False # To be added to all processing units
62 62
63 63 def __updateObjFromInput(self):
64 64
65 65 self.dataOut.inputUnit = self.dataIn.type
66 66
67 67 self.dataOut.timeZone = self.dataIn.timeZone
68 68 self.dataOut.dstFlag = self.dataIn.dstFlag
69 69 self.dataOut.errorCount = self.dataIn.errorCount
70 70 self.dataOut.useLocalTime = self.dataIn.useLocalTime
71 71
72 72 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
73 73 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
74 74 self.dataOut.channelList = self.dataIn.channelList
75 75 self.dataOut.heightList = self.dataIn.heightList
76 76 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
77 77 # self.dataOut.nHeights = self.dataIn.nHeights
78 78 # self.dataOut.nChannels = self.dataIn.nChannels
79 79 # self.dataOut.nBaud = self.dataIn.nBaud
80 80 # self.dataOut.nCode = self.dataIn.nCode
81 81 # self.dataOut.code = self.dataIn.code
82 82 # self.dataOut.nProfiles = self.dataOut.nFFTPoints
83 83 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
84 84 # self.dataOut.utctime = self.firstdatatime
85 85 self.dataOut.utctime = self.dataIn.utctime
86 86 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData # assume the data is already decoded
87 87 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData # assume the data has not been flipped
88 88 self.dataOut.nCohInt = self.dataIn.nCohInt
89 89 # self.dataOut.nIncohInt = 1
90 90 # self.dataOut.ippSeconds = self.dataIn.ippSeconds
91 91 # self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
92 92 self.dataOut.timeInterval1 = self.dataIn.timeInterval
93 93 self.dataOut.heightList = self.dataIn.heightList
94 94 self.dataOut.frequency = self.dataIn.frequency
95 95 #self.dataOut.noise = self.dataIn.noise
96 96
97 97 def run(self):
98 98
99 99 #---------------------- Voltage Data ---------------------------
100 100
101 101 if self.dataIn.type == "Voltage":
102 102
103 103 self.__updateObjFromInput()
104 104 self.dataOut.data_pre = self.dataIn.data.copy()
105 105 self.dataOut.flagNoData = False
106 106 self.dataOut.utctimeInit = self.dataIn.utctime
107 107 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
108 108 if hasattr(self.dataIn, 'dataPP_POW'):
109 109 self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
110 110
111 111 if hasattr(self.dataIn, 'dataPP_POWER'):
112 112 self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
113 113
114 114 if hasattr(self.dataIn, 'dataPP_DOP'):
115 115 self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
116 116
117 117 if hasattr(self.dataIn, 'dataPP_SNR'):
118 118 self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
119 119
120 120 if hasattr(self.dataIn, 'dataPP_WIDTH'):
121 121 self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
122 122 return
123 123
124 124 #---------------------- Spectra Data ---------------------------
125 125
126 126 if self.dataIn.type == "Spectra":
127 127
128 128 self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
129 129 self.dataOut.data_spc = self.dataIn.data_spc
130 130 self.dataOut.data_cspc = self.dataIn.data_cspc
131 131 self.dataOut.nProfiles = self.dataIn.nProfiles
132 132 self.dataOut.nIncohInt = self.dataIn.nIncohInt
133 133 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
134 134 self.dataOut.ippFactor = self.dataIn.ippFactor
135 135 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
136 136 self.dataOut.spc_noise = self.dataIn.getNoise()
137 137 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
138 138 # self.dataOut.normFactor = self.dataIn.normFactor
139 139 self.dataOut.pairsList = self.dataIn.pairsList
140 140 self.dataOut.groupList = self.dataIn.pairsList
141 141 self.dataOut.flagNoData = False
142 142
143 143 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
144 144 self.dataOut.ChanDist = self.dataIn.ChanDist
145 145 else: self.dataOut.ChanDist = None
146 146
147 147 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
148 148 # self.dataOut.VelRange = self.dataIn.VelRange
149 149 #else: self.dataOut.VelRange = None
150 150
151 151 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
152 152 self.dataOut.RadarConst = self.dataIn.RadarConst
153 153
154 154 if hasattr(self.dataIn, 'NPW'): #NPW
155 155 self.dataOut.NPW = self.dataIn.NPW
156 156
157 157 if hasattr(self.dataIn, 'COFA'): #COFA
158 158 self.dataOut.COFA = self.dataIn.COFA
159 159
160 160
161 161
162 162 #---------------------- Correlation Data ---------------------------
163 163
164 164 if self.dataIn.type == "Correlation":
165 165 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
166 166
167 167 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
168 168 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
169 169 self.dataOut.groupList = (acf_pairs, ccf_pairs)
170 170
171 171 self.dataOut.abscissaList = self.dataIn.lagRange
172 172 self.dataOut.noise = self.dataIn.noise
173 173 self.dataOut.data_snr = self.dataIn.SNR
174 174 self.dataOut.flagNoData = False
175 175 self.dataOut.nAvg = self.dataIn.nAvg
176 176
177 177 #---------------------- Parameters Data ---------------------------
178 178
179 179 if self.dataIn.type == "Parameters":
180 180 self.dataOut.copy(self.dataIn)
181 181 self.dataOut.flagNoData = False
182 182
183 183 return True
184 184
185 185 self.__updateObjFromInput()
186 186 self.dataOut.utctimeInit = self.dataIn.utctime
187 187 self.dataOut.paramInterval = self.dataIn.timeInterval
188 188
189 189 return
190 190
191 191
192 192 def target(tups):
193 193
194 194 obj, args = tups
195 195
196 196 return obj.FitGau(args)
197 197
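The target() helper above is the function handed to multiprocessing.Pool.map: each work item is an (object, args) tuple and the pool calls obj.FitGau(args) in a worker process (see GaussianFit.run below, which builds one tuple per channel). A toy, self-contained sketch of that dispatch pattern, with a hypothetical _ToyFitter standing in for the real processing object:

    from multiprocessing import Pool

    class _ToyFitter:
        def FitGau(self, args):            # stand-in for the per-channel fit
            ch, offset = args
            return ch + offset

    if __name__ == '__main__':
        objs = [_ToyFitter() for _ in range(3)]
        args = [(ch, 10) for ch in range(3)]
        with Pool(processes=3) as pool:
            results = pool.map(target, list(zip(objs, args)))
        # results -> [10, 11, 12], one entry per "channel"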
198 198 class RemoveWideGC(Operation):
199 199 ''' This class removes the wide clutter and replaces it with simple interpolated points
200 200 This mainly applies to CLAIRE radar
201 201
202 202 ClutterWidth : Width to look for the clutter peak
203 203
204 204 Input:
205 205
206 206 self.dataOut.data_pre : SPC and CSPC
207 207 self.dataOut.spc_range : To select wind and rainfall velocities
208 208
209 209 Affected:
210 210
211 211 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
212 212
213 213 Written by D. Scipión 25.02.2021
214 214 '''
215 215 def __init__(self):
216 216 Operation.__init__(self)
217 217 self.i = 0
218 218 self.ich = 0
219 219 self.ir = 0
220 220
221 221 def run(self, dataOut, ClutterWidth=2.5):
222 222
223 223 self.spc = dataOut.data_pre[0].copy()
224 224 self.spc_out = dataOut.data_pre[0].copy()
225 225 self.Num_Chn = self.spc.shape[0]
226 226 self.Num_Hei = self.spc.shape[2]
227 227 VelRange = dataOut.spc_range[2][:-1]
228 228 dv = VelRange[1]-VelRange[0]
229 229
230 230 # Find the velocities that correspond to zero
231 231 gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))
232 232
233 233 # Removing novalid data from the spectra
234 234 for ich in range(self.Num_Chn) :
235 235 for ir in range(self.Num_Hei) :
236 236 # Estimate the noise at each range
237 237 HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)
238 238
239 239 # Removing the noise floor at each range
240 240 novalid = numpy.where(self.spc[ich,:,ir] < HSn)
241 241 self.spc[ich,novalid,ir] = HSn
242 242
243 243 junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
244 244 j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
245 245 j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
246 246 if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
247 247 continue
248 248 junk3 = numpy.squeeze(numpy.diff(j1index))
249 249 junk4 = numpy.squeeze(numpy.diff(j2index))
250 250
251 251 valleyindex = j2index[numpy.where(junk4>1)]
252 252 peakindex = j1index[numpy.where(junk3>1)]
253 253
254 254 isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
255 255 if numpy.size(isvalid) == 0 :
256 256 continue
257 257 if numpy.size(isvalid) >1 :
258 258 vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
259 259 isvalid = isvalid[vindex]
260 260
261 261 # clutter peak
262 262 gcpeak = peakindex[isvalid]
263 263 vl = numpy.where(valleyindex < gcpeak)
264 264 if numpy.size(vl) == 0:
265 265 continue
266 266 gcvl = valleyindex[vl[0][-1]]
267 267 vr = numpy.where(valleyindex > gcpeak)
268 268 if numpy.size(vr) == 0:
269 269 continue
270 270 gcvr = valleyindex[vr[0][0]]
271 271
272 272 # Removing the clutter
273 273 interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
274 274 gcindex = gc_values[gcvl+1:gcvr-1]
275 275 self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])
276 276
277 277 dataOut.data_pre[0] = self.spc_out
278 278
279 279 return dataOut
280 280
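The core of the clutter removal above is a one-line interpolation: the bins flagged as ground clutter are overwritten with a straight line drawn between the valley bins found on either side of the clutter peak. A tiny numerical sketch of that step with made-up values:

    import numpy
    vel     = numpy.linspace(-5, 5, 11)                                  # velocity axis [m/s]
    spec    = numpy.array([2, 2, 3, 9, 30, 80, 28, 8, 3, 2, 2], float)   # one range gate
    gcindex = numpy.array([4, 5, 6])                                     # clutter-dominated bins
    edges   = numpy.array([3, 7])                                        # valleys left/right of the peak
    spec[gcindex] = numpy.interp(vel[gcindex], vel[edges], spec[edges])
    # spec[4:7] now lies on the straight line joining spec[3] and spec[7]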
281 281 class SpectralFilters(Operation):
282 282 ''' This class replaces the novalid values with noise for each channel
283 283 This applies to CLAIRE RADAR
284 284
285 285 PositiveLimit : RightLimit of novalid data
286 286 NegativeLimit : LeftLimit of novalid data
287 287
288 288 Input:
289 289
290 290 self.dataOut.data_pre : SPC and CSPC
291 291 self.dataOut.spc_range : To select wind and rainfall velocities
292 292
293 293 Affected:
294 294
295 295 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
296 296
297 297 Written by D. Scipión 29.01.2021
298 298 '''
299 299 def __init__(self):
300 300 Operation.__init__(self)
301 301 self.i = 0
302 302
303 303 def run(self, dataOut, NegativeLimit, PositiveLimit):
304 304
305 305 self.spc = dataOut.data_pre[0].copy()
306 306 self.Num_Chn = self.spc.shape[0]
307 307 VelRange = dataOut.spc_range[2]
308 308
309 309 # novalid: bins whose velocity lies between NegativeLimit and PositiveLimit
310 310 vel = VelRange[:-1] # one velocity value per spectral bin
311 311 novalid = numpy.where((vel >= NegativeLimit) & (vel <= PositiveLimit))[0]
312 312 # Removing novalid data from the spectra
313 313 for i in range(self.Num_Chn):
314 314 self.spc[i,novalid,:] = dataOut.noise[i]
315 315 dataOut.data_pre[0] = self.spc
316 316 return dataOut
317 317
318 318
319 319
320 320 class GaussianFit(Operation):
321 321
322 322 '''
323 323 Function that fits one and two generalized Gaussians (GG) based
324 324 on the PSD shape across a "power band" identified from a cumsum of
325 325 the measured spectrum minus noise.
326 326
327 327 Input:
328 328 self.dataOut.data_pre : SelfSpectra
329 329
330 330 Output:
331 331 self.dataOut.SPCparam : SPC_ch1, SPC_ch2
332 332
333 333 '''
334 334 def __init__(self):
335 335 Operation.__init__(self)
336 336 self.i=0
337 337
338 338
339 339 # def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points
340 340 def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
341 341 """This routine will find a couple of generalized Gaussians to a power spectrum
342 342 methods: generalized, squared
343 343 input: spc
344 344 output:
345 345 noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1
346 346 """
347 347 print ('Entering ',method,' double Gaussian fit')
348 348 self.spc = dataOut.data_pre[0].copy()
349 349 self.Num_Hei = self.spc.shape[2]
350 350 self.Num_Bin = self.spc.shape[1]
351 351 self.Num_Chn = self.spc.shape[0]
352 352
353 353 start_time = time.time()
354 354
355 355 pool = Pool(processes=self.Num_Chn)
356 356 args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
357 357 objs = [self for __ in range(self.Num_Chn)]
358 358 attrs = list(zip(objs, args))
359 359 DGauFitParam = pool.map(target, attrs)
360 360 # Parameters:
361 361 # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
362 362 dataOut.DGauFitParams = numpy.asarray(DGauFitParam)
363 363
364 364 # Double Gaussian Curves
365 365 gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
366 366 gau0[:] = numpy.NaN
367 367 gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
368 368 gau1[:] = numpy.NaN
369 369 x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
370 370 for iCh in range(self.Num_Chn):
371 371 N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
372 372 N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
373 373 A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
374 374 A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
375 375 v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
376 376 v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
377 377 s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
378 378 s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
379 379 if method == 'generalized':
380 380 p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
381 381 p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
382 382 elif method == 'squared':
383 383 p0 = 2.
384 384 p1 = 2.
385 385 gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
386 386 gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
387 387 dataOut.GaussFit0 = gau0
388 388 dataOut.GaussFit1 = gau1
389 389
390 390 print('Leaving ',method ,' double Gaussian fit')
391 391 return dataOut
392 392
393 393 def FitGau(self, X):
394 394 # print('Entering FitGau')
395 395 # Assigning the variables
396 396 Vrange, ch, wnoise, num_intg, SNRlimit = X
397 397 # Noise Limits
398 398 noisebl = wnoise * 0.9
399 399 noisebh = wnoise * 1.1
400 400 # Radar Velocity
401 401 Va = max(Vrange)
402 402 deltav = Vrange[1] - Vrange[0]
403 403 x = numpy.arange(self.Num_Bin)
404 404
405 405 # print ('stop 0')
406 406
407 407 # 5 parameters, 2 Gaussians
408 408 DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
409 409 DGauFitParam[:] = numpy.NaN
410 410
411 411 # SPCparam = []
412 412 # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei])
413 413 # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei])
414 414 # SPC_ch1[:] = 0 #numpy.NaN
415 415 # SPC_ch2[:] = 0 #numpy.NaN
416 416 # print ('stop 1')
417 417 for ht in range(self.Num_Hei):
418 418 # print (ht)
419 419 # print ('stop 2')
420 420 # Spectra at each range
421 421 spc = numpy.asarray(self.spc)[ch,:,ht]
422 422 snr = ( spc.mean() - wnoise ) / wnoise
423 423 snrdB = 10.*numpy.log10(snr)
424 424
425 425 #print ('stop 3')
426 426 if snrdB < SNRlimit :
427 427 # snr = numpy.NaN
428 428 # SPC_ch1[:,ht] = 0#numpy.NaN
429 429 # SPC_ch1[:,ht] = 0#numpy.NaN
430 430 # SPCparam = (SPC_ch1,SPC_ch2)
431 431 # print ('SNR less than SNRth')
432 432 continue
433 433 # wnoise = hildebrand_sekhon(spc,num_intg)
434 434 # print ('stop 2.01')
435 435 #############################################
436 436 # normalizing spc and noise
437 437 # This part differs from gg1
438 438 # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
439 439 #spc = spc / spc_norm_max
440 440 # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021
441 441 #############################################
442 442
443 443 # print ('stop 2.1')
444 444 fatspectra=1.0
445 445 # noise per channel.... we might want to use the noise at each range
446 446
447 447 # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021
448 448 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
449 449 #if wnoise>1.1*pnoise: # to be tested later
450 450 # wnoise=pnoise
451 451 # noisebl = wnoise*0.9
452 452 # noisebh = wnoise*1.1
453 453 spc = spc - wnoise # signal
454 454
455 455 # print ('stop 2.2')
456 456 minx = numpy.argmin(spc)
457 457 #spcs=spc.copy()
458 458 spcs = numpy.roll(spc,-minx)
459 459 cum = numpy.cumsum(spcs)
460 460 # tot_noise = wnoise * self.Num_Bin #64;
461 461
462 462 # print ('stop 2.3')
463 463 # snr = sum(spcs) / tot_noise
464 464 # snrdB = 10.*numpy.log10(snr)
465 465 #print ('stop 3')
466 466 # if snrdB < SNRlimit :
467 467 # snr = numpy.NaN
468 468 # SPC_ch1[:,ht] = 0#numpy.NaN
469 469 # SPC_ch1[:,ht] = 0#numpy.NaN
470 470 # SPCparam = (SPC_ch1,SPC_ch2)
471 471 # print ('SNR less than SNRth')
472 472 # continue
473 473
474 474
475 475 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
476 476 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
477 477 # print ('stop 4')
478 478 cummax = max(cum)
479 479 epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
480 480 cumlo = cummax * epsi
481 481 cumhi = cummax * (1-epsi)
482 482 powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
483 483
484 484 # print ('stop 5')
485 485 if len(powerindex) < 1:# case for powerindex 0
486 486 # print ('powerindex < 1')
487 487 continue
488 488 powerlo = powerindex[0]
489 489 powerhi = powerindex[-1]
490 490 powerwidth = powerhi-powerlo
491 491 if powerwidth <= 1:
492 492 # print('powerwidth <= 1')
493 493 continue
494 494
495 495 # print ('stop 6')
496 496 firstpeak = powerlo + powerwidth/10.# first gaussian energy location
497 497 secondpeak = powerhi - powerwidth/10. #second gaussian energy location
498 498 midpeak = (firstpeak + secondpeak)/2.
499 499 firstamp = spcs[int(firstpeak)]
500 500 secondamp = spcs[int(secondpeak)]
501 501 midamp = spcs[int(midpeak)]
502 502
503 503 y_data = spc + wnoise
504 504
505 505 ''' single Gaussian '''
506 506 shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
507 507 width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
508 508 power0 = 2.
509 509 amplitude0 = midamp
510 510 state0 = [shift0,width0,amplitude0,power0,wnoise]
511 511 bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
512 512 lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)
513 513 # print ('stop 7.1')
514 514 # print (bnds)
515 515
516 516 chiSq1=lsq1[1]
517 517
518 518 # print ('stop 8')
519 519 if fatspectra<1.0 and powerwidth<4:
520 520 choice=0
521 521 Amplitude0=lsq1[0][2]
522 522 shift0=lsq1[0][0]
523 523 width0=lsq1[0][1]
524 524 p0=lsq1[0][3]
525 525 Amplitude1=0.
526 526 shift1=0.
527 527 width1=0.
528 528 p1=0.
529 529 noise=lsq1[0][4]
530 530 #return (numpy.array([shift0,width0,Amplitude0,p0]),
531 531 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
532 532 # print ('stop 9')
533 533 ''' two Gaussians '''
534 534 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
535 535 shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
536 536 shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
537 537 width0 = powerwidth/6.
538 538 width1 = width0
539 539 power0 = 2.
540 540 power1 = power0
541 541 amplitude0 = firstamp
542 542 amplitude1 = secondamp
543 543 state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
544 544 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
545 545 bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
546 546 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
547 547
548 548 # print ('stop 10')
549 549 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
550 550
551 551 # print ('stop 11')
552 552 chiSq2 = lsq2[1]
553 553
554 554 # print ('stop 12')
555 555
556 556 oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
557 557
558 558 # print ('stop 13')
559 559 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
560 560 if oneG:
561 561 choice = 0
562 562 else:
563 563 w1 = lsq2[0][1]; w2 = lsq2[0][5]
564 564 a1 = lsq2[0][2]; a2 = lsq2[0][6]
565 565 p1 = lsq2[0][3]; p2 = lsq2[0][7]
566 566 s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
567 567 s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
568 568 gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each generalized gaussian with proper p scaling
569 569
570 570 if gp1>gp2:
571 571 if a1>0.7*a2:
572 572 choice = 1
573 573 else:
574 574 choice = 2
575 575 elif gp2>gp1:
576 576 if a2>0.7*a1:
577 577 choice = 2
578 578 else:
579 579 choice = 1
580 580 else:
581 581 choice = numpy.argmax([a1,a2])+1
582 582 #else:
583 583 #choice=argmin([std2a,std2b])+1
584 584
585 585 else: # with low SNR go to the most energetic peak
586 586 choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
587 587
588 588 # print ('stop 14')
589 589 shift0 = lsq2[0][0]
590 590 vel0 = Vrange[0] + shift0 * deltav
591 591 shift1 = lsq2[0][4]
592 592 # vel1=Vrange[0] + shift1 * deltav
593 593
594 594 # max_vel = 1.0
595 595 # Va = max(Vrange)
596 596 # deltav = Vrange[1]-Vrange[0]
597 597 # print ('stop 15')
598 598 #first peak will be 0, second peak will be 1
599 599 # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D. Scipión 19.03.2021
600 600 if vel0 > -Va and vel0 < Va : #first peak is in the correct range
601 601 shift0 = lsq2[0][0]
602 602 width0 = lsq2[0][1]
603 603 Amplitude0 = lsq2[0][2]
604 604 p0 = lsq2[0][3]
605 605
606 606 shift1 = lsq2[0][4]
607 607 width1 = lsq2[0][5]
608 608 Amplitude1 = lsq2[0][6]
609 609 p1 = lsq2[0][7]
610 610 noise = lsq2[0][8]
611 611 else:
612 612 shift1 = lsq2[0][0]
613 613 width1 = lsq2[0][1]
614 614 Amplitude1 = lsq2[0][2]
615 615 p1 = lsq2[0][3]
616 616
617 617 shift0 = lsq2[0][4]
618 618 width0 = lsq2[0][5]
619 619 Amplitude0 = lsq2[0][6]
620 620 p0 = lsq2[0][7]
621 621 noise = lsq2[0][8]
622 622
623 623 if Amplitude0<0.05: # in case the peak is noise
624 624 shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
625 625 if Amplitude1<0.05:
626 626 shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]
627 627
628 628 # print ('stop 16 ')
629 629 # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0)
630 630 # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1)
631 631 # SPCparam = (SPC_ch1,SPC_ch2)
632 632
633 633 DGauFitParam[0,ht,0] = noise
634 634 DGauFitParam[0,ht,1] = noise
635 635 DGauFitParam[1,ht,0] = Amplitude0
636 636 DGauFitParam[1,ht,1] = Amplitude1
637 637 DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
638 638 DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
639 639 DGauFitParam[3,ht,0] = width0 * deltav
640 640 DGauFitParam[3,ht,1] = width1 * deltav
641 641 DGauFitParam[4,ht,0] = p0
642 642 DGauFitParam[4,ht,1] = p1
643 643
644 644 return DGauFitParam
645 645
646 646 def y_model1(self,x,state):
647 647 shift0, width0, amplitude0, power0, noise = state
648 648 model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
649 649 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
650 650 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
651 651 return model0 + model0u + model0d + noise
652 652
653 653 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
654 654 shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
655 655 model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
656 656 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
657 657 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
658 658
659 659 model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
660 660 model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
661 661 model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
662 662 return model0 + model0u + model0d + model1 + model1u + model1d + noise
663 663
664 664 def misfit1(self,state,y_data,x,num_intg): # This function measures how close the real data is to the model data; the closer it is, the better.
665 665
666 666 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
667 667
668 668 def misfit2(self,state,y_data,x,num_intg):
669 669 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
670 670
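For reference, the model that GaussianFit stores in DGauFitParams (rows: noise, amplitude, shift, width, power) and rebuilds in run() is the generalized Gaussian A*exp(-0.5*|(x - v)/s|**p) + N. A short sketch evaluating it with made-up parameter values:

    import numpy
    vel = numpy.linspace(-15, 15, 128)            # velocity axis [m/s]
    N, A, v, s, p = 0.05, 1.0, 2.0, 1.5, 2.0      # noise, amplitude, shift, width, power
    gau = A * numpy.exp(-0.5 * numpy.abs((vel - v) / s)**p) + N
    # p == 2 reduces to an ordinary Gaussian (the 'squared' method); the
    # 'generalized' method lets the fit sharpen (p < 2) or flatten (p > 2) the peak.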
671 671 class Oblique_Gauss_Fit(Operation):
672 672
673 673 def __init__(self):
674 674 Operation.__init__(self)
675 675
676 676 def Gauss_fit(self,spc,x,nGauss):
677 677
678 678
679 679 def gaussian(x, a, b, c, d):
680 680 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
681 681 return val
682 682
683 683 if nGauss == 'first':
684 684 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
685 685 spc_2_aux = numpy.flip(spc_1_aux)
686 686 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
687 687
688 688 len_dif = len(x)-len(spc_3_aux)
689 689
690 690 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
691 691
692 692 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
693 693
694 694 y = spc_new
695 695
696 696 elif nGauss == 'second':
697 697 y = spc
698 698
699 699
700 700 # estimate starting values from the data
701 701 a = y.max()
702 702 b = x[numpy.argmax(y)]
703 703 if nGauss == 'first':
704 704 c = 1.#b#b#numpy.std(spc)
705 705 elif nGauss == 'second':
706 706 c = b
707 707 else:
708 708 print("ERROR")
709 709
710 710 d = numpy.mean(y[-100:])
711 711
712 712 # define a least squares function to optimize
713 713 def minfunc(params):
714 714 return sum((y-gaussian(x,params[0],params[1],params[2],params[3]))**2)
715 715
716 716 # fit
717 717 popt = fmin(minfunc,[a,b,c,d],disp=False)
718 718 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
719 719
720 720
721 721 return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
722 722
723 723
724 724 def Gauss_fit_2(self,spc,x,nGauss):
725 725
726 726
727 727 def gaussian(x, a, b, c, d):
728 728 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
729 729 return val
730 730
731 731 if nGauss == 'first':
732 732 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
733 733 spc_2_aux = numpy.flip(spc_1_aux)
734 734 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
735 735
736 736 len_dif = len(x)-len(spc_3_aux)
737 737
738 738 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
739 739
740 740 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
741 741
742 742 y = spc_new
743 743
744 744 elif nGauss == 'second':
745 745 y = spc
746 746
747 747
748 748 # estimate starting values from the data
749 749 a = y.max()
750 750 b = x[numpy.argmax(y)]
751 751 if nGauss == 'first':
752 752 c = 1.#b#b#numpy.std(spc)
753 753 elif nGauss == 'second':
754 754 c = b
755 755 else:
756 756 print("ERROR")
757 757
758 758 d = numpy.mean(y[-100:])
759 759
760 760 # define a least squares function to optimize
761 761 popt,pcov = curve_fit(gaussian,x,y,p0=[a,b,c,d])
762 762 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
763 763
764 764
765 765 #return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
766 766 return gaussian(x, popt[0], popt[1], popt[2], popt[3]),popt[0], popt[1], popt[2], popt[3]
767 767
768 768 def Double_Gauss_fit(self,spc,x,A1,B1,C1,A2,B2,C2,D):
769 769
770 770 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
771 771 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
772 772 return val
773 773
774 774
775 775 y = spc
776 776
777 777 # estimate starting values from the data
778 778 a1 = A1
779 779 b1 = B1
780 780 c1 = C1#numpy.std(spc)
781 781
782 782 a2 = A2#y.max()
783 783 b2 = B2#x[numpy.argmax(y)]
784 784 c2 = C2#numpy.std(spc)
785 785 d = D
786 786
787 787 # define a least squares function to optimize
788 788 def minfunc(params):
789 789 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2)
790 790
791 791 # fit
792 792 popt = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],disp=False)
793 793
794 794 return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
795 795
796 796 def Double_Gauss_fit_2(self,spc,x,A1,B1,C1,A2,B2,C2,D):
797 797
798 798 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
799 799 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
800 800 return val
801 801
802 802
803 803 y = spc
804 804
805 805 # estimate starting values from the data
806 806 a1 = A1
807 807 b1 = B1
808 808 c1 = C1#numpy.std(spc)
809 809
810 810 a2 = A2#y.max()
811 811 b2 = B2#x[numpy.argmax(y)]
812 812 c2 = C2#numpy.std(spc)
813 813 d = D
814 814
815 815 # fit
816 816
817 817 popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
818 818
819 819 error = numpy.sqrt(numpy.diag(pcov))
820 820
821 821 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
822 822
823 823 def run(self, dataOut):
824 824
825 825 pwcode = 1
826 826
827 827 if dataOut.flagDecodeData:
828 828 pwcode = numpy.sum(dataOut.code[0]**2)
829 829 #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
830 830 normFactor = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * pwcode * dataOut.windowOfFilter
831 831 factor = normFactor
832 832 z = dataOut.data_spc / factor
833 833 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
834 834 dataOut.power = numpy.average(z, axis=1)
835 835 dataOut.powerdB = 10 * numpy.log10(dataOut.power)
836 836
837 837
838 838 x = dataOut.getVelRange(0)
839 839
840 840 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
841 841 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
842 842
843 843 dataOut.VelRange = x
844 844
845 845
846 846 l1=range(22,36)
847 847 l2=range(58,99)
848 848
849 849 for hei in itertools.chain(l1, l2):
850 850
851 851 try:
852 852 spc = dataOut.data_spc[0,:,hei]
853 853
854 854 spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first')
855 855
856 856 spc_diff = spc - spc_fit
857 857 spc_diff[spc_diff < 0] = 0
858 858
859 859 spc_fit_diff, A2, B2, C2, D2 = self.Gauss_fit_2(spc_diff,x,'second')
860 860
861 861 D = (D1+D2)
862 862
863 863 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,0,hei],dataOut.Oblique_param_errors[0,1,hei],dataOut.Oblique_param_errors[0,2,hei],dataOut.Oblique_param_errors[0,3,hei],dataOut.Oblique_param_errors[0,4,hei],dataOut.Oblique_param_errors[0,5,hei],dataOut.Oblique_param_errors[0,6,hei] = self.Double_Gauss_fit_2(spc,x,A1,B1,C1,A2,B2,C2,D)
864 864 #spc_double_fit,dataOut.Oblique_params = self.Double_Gauss_fit(spc,x,A1,B1,C1,A2,B2,C2,D)
865 865
866 866 except:
867 867 ###dataOut.Oblique_params[0,:,hei] = dataOut.Oblique_params[0,:,hei]*numpy.NAN
868 868 pass
869 869
870 870 return dataOut
871 871
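Oblique_Gauss_Fit.run seeds its joint fit in two passes: fit one Gaussian (Gauss_fit_2 with 'first', on a spectrum mirrored around its peak), subtract it, fit a second Gaussian to the clipped residual, then use both parameter sets as the initial guess for Double_Gauss_fit_2. A simplified, self-contained sketch of that sequence on synthetic data (the mirroring step is omitted here):

    import numpy
    from scipy.optimize import curve_fit

    def gaussian(x, a, b, c, d):
        return a * numpy.exp(-(x - b)**2 / (2 * c**2)) + d

    def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
        return gaussian(x, a1, b1, c1, 0.) + gaussian(x, a2, b2, c2, 0.) + d

    x   = numpy.linspace(-300, 300, 600)
    spc = double_gaussian(x, 5., -40., 15., 2., 120., 40., 0.3) + 0.05 * numpy.random.rand(x.size)

    # first pass: single Gaussian on the raw spectrum
    p1, _ = curve_fit(gaussian, x, spc, p0=[spc.max(), x[spc.argmax()], 10., spc[-100:].mean()])
    # second pass: single Gaussian on the non-negative residual
    residual = numpy.clip(spc - gaussian(x, *p1), 0., None)
    p2, _ = curve_fit(gaussian, x, residual, p0=[residual.max(), x[residual.argmax()], 30., 0.])
    # joint fit seeded with both single fits, as Double_Gauss_fit_2 does
    popt, pcov = curve_fit(double_gaussian, x, spc,
                           p0=[p1[0], p1[1], p1[2], p2[0], p2[1], p2[2], p1[3] + p2[3]])
    errors = numpy.sqrt(numpy.diag(pcov))    # 1-sigma parameter errors, as returned by Double_Gauss_fit_2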
872 872 class PrecipitationProc(Operation):
873 873
874 874 '''
875 875 Operator that estimates the reflectivity factor (Z) and the rainfall rate (R)
876 876
877 877 Input:
878 878 self.dataOut.data_pre : SelfSpectra
879 879
880 880 Output:
881 881
882 882 self.dataOut.data_output : Reflectivity factor, rainfall Rate
883 883
884 884
885 885 Parameters affected:
886 886 '''
887 887
888 888 def __init__(self):
889 889 Operation.__init__(self)
890 890 self.i=0
891 891
892 892 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
893 893 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350, SNRdBlimit=-30,
894 894 channel=None):
895 895
896 896 # print ('Entering PrecepitationProc ... ')
897 897
898 898 if radar == "MIRA35C" :
899 899
900 900 self.spc = dataOut.data_pre[0].copy()
901 901 self.Num_Hei = self.spc.shape[2]
902 902 self.Num_Bin = self.spc.shape[1]
903 903 self.Num_Chn = self.spc.shape[0]
904 904 Ze = self.dBZeMODE2(dataOut)
905 905
906 906 else:
907 907
908 908 self.spc = dataOut.data_pre[0].copy()
909 909
910 910 # NOTE: the TX pulse range must be removed
911 911 self.spc[:,:,0:7]= numpy.NaN
912 912
913 913 self.Num_Hei = self.spc.shape[2]
914 914 self.Num_Bin = self.spc.shape[1]
915 915 self.Num_Chn = self.spc.shape[0]
916 916
917 917 VelRange = dataOut.spc_range[2]
918 918
919 919 ''' Obtain the RADAR constant '''
920 920
921 921 self.Pt = Pt
922 922 self.Gt = Gt
923 923 self.Gr = Gr
924 924 self.Lambda = Lambda
925 925 self.aL = aL
926 926 self.tauW = tauW
927 927 self.ThetaT = ThetaT
928 928 self.ThetaR = ThetaR
929 929 self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
930 930 self.lt = 10**(1.67/10) # Tx cable loss 1.67 dB
931 931 self.lr = 10**(5.73/10) # Rx cable loss 5.73 dB
932 932
933 933 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
934 934 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
935 935 RadarConstant = 10e-26 * Numerator / Denominator #
936 936 ExpConstant = 10**(40/10) # Experimental constant
937 937
938 938 SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
939 939 for i in range(self.Num_Chn):
940 940 SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
941 941 SignalPower[numpy.where(SignalPower < 0)] = 1e-20
942 942
943 943 if channel is None:
944 944 SPCmean = numpy.mean(SignalPower, 0)
945 945 else:
946 946 SPCmean = SignalPower[channel]
947 947 Pr = SPCmean[:,:]/dataOut.normFactor
948 948
949 949 # Declaring auxiliary variables
950 950 Range = dataOut.heightList*1000. #Range in m
951 951 # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
952 952 rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
953 953 zMtrx = rMtrx+Altitude
954 954 # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
955 955 VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))
956 956
957 957 # height dependence to air density Foote and Du Toit (1969)
958 958 delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
959 959 VMtrx = VelMtrx / delv_z #Normalized velocity
960 960 VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
961 961 # Diameter is related to the fall speed of falling drops
962 962 D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
963 963 # Only valid for D>= 0.16 mm
964 964 D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN
965 965
966 966 #Calculate Radar Reflectivity ETAn
967 967 ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
968 968 ETAd = ETAn * 6.18 * exp( -0.6 * D_Vz ) * delv_z
969 969 # Radar Cross Section
970 970 sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
971 971 # Drop Size Distribution
972 972 DSD = ETAn / sigmaD
973 973 # Equivalent reflectivity
974 974 Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
975 975 Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
976 976 # RainFall Rate
977 977 RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr
978 978
979 979 # Censoring the data
980 980 # Removing data below the SNR threshold; the SNR should be evaluated per channel
981 981 SNRth = 10**(SNRdBlimit/10) #-30dB
982 982 novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition. Maybe OR condition better
983 983 W = numpy.nanmean(dataOut.data_dop,0)
984 984 W[novalid] = numpy.NaN
985 985 Ze_org[novalid] = numpy.NaN
986 986 RR[novalid] = numpy.NaN
987 987
988 988 dataOut.data_output = RR[8]
989 989 dataOut.data_param = numpy.ones([3,self.Num_Hei])
990 990 dataOut.channelList = [0,1,2]
991 991
992 992 dataOut.data_param[0]=10*numpy.log10(Ze_org)
993 993 dataOut.data_param[1]=-W
994 994 dataOut.data_param[2]=RR
995 995
996 996 # print ('Leaving PrecepitationProc ... ')
997 997 return dataOut
998 998
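The microphysics in run() above hinges on the fall-speed/diameter relation D = -1.667*ln(0.9369 - 0.097087*V), which is (to the quoted precision) the inverse of a terminal-velocity law of the form V = 9.65 - 10.3*exp(-0.6*D); the 6.18*exp(-0.6*D_Vz) factor in ETAd is that law's derivative. A small numeric sketch of the mapping, with illustrative heights and speeds:

    import numpy
    z      = 3350. + 2000.                                  # height above sea level [m]
    delv_z = 1 + 3.68e-5 * z + 1.71e-9 * z**2               # Foote and du Toit (1969) density factor
    Vobs   = numpy.array([2.0, 4.0, 6.0, 8.0])              # measured fall speeds [m/s]
    Vnorm  = Vobs / delv_z                                  # speeds referred to sea level
    D_mm   = -1.667 * numpy.log(0.9369 - 0.097087 * Vnorm)  # drop diameter [mm]
    D_mm[D_mm < 0.16] = numpy.nan                           # relation only valid for D >= 0.16 mm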
999 999 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
1000 1000
1001 1001 NPW = dataOut.NPW
1002 1002 COFA = dataOut.COFA
1003 1003
1004 1004 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
1005 1005 RadarConst = dataOut.RadarConst
1006 1006 #frequency = 34.85*10**9
1007 1007
1008 1008 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
1009 1009 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
1010 1010
1011 1011 ETA = numpy.sum(SNR,1)
1012 1012
1013 1013 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
1014 1014
1015 1015 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
1016 1016
1017 1017 for r in range(self.Num_Hei):
1018 1018
1019 1019 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
1020 1020 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
1021 1021
1022 1022 return Ze
1023 1023
1024 1024 # def GetRadarConstant(self):
1025 1025 #
1026 1026 # """
1027 1027 # Constants:
1028 1028 #
1029 1029 # Pt: Transmission Power dB 5kW 5000
1030 1030 # Gt: Transmission Gain dB 24.7 dB 295.1209
1031 1031 # Gr: Reception Gain dB 18.5 dB 70.7945
1032 1032 # Lambda: Wavelength m 0.6741 m 0.6741
1033 1033 # aL: Attenuation loses dB 4dB 2.5118
1034 1034 # tauW: Width of transmission pulse s 4us 4e-6
1035 1035 # ThetaT: Transmission antenna beam angle rad 0.1656317 rad 0.1656317
1036 1036 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
1037 1037 #
1038 1038 # """
1039 1039 #
1040 1040 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
1041 1041 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
1042 1042 # RadarConstant = Numerator / Denominator
1043 1043 #
1044 1044 # return RadarConstant
1045 1045
1046 1046
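For orientation, the radar-constant expression used in PrecipitationProc.run (and documented in the commented GetRadarConstant block above) can be evaluated directly with the default arguments of run(); this is a numeric sketch only, not new functionality:

    import numpy
    Pt, Gt, Gr     = 5000, 295.1209, 70.7945      # W; linear gains (24.7 dB, 18.5 dB)
    Lambda, aL     = 0.6741, 2.5118               # m; attenuation losses (4 dB)
    tauW           = 4e-06                        # s
    ThetaT, ThetaR = 0.1656317, 0.36774087        # rad
    SPEED_OF_LIGHT = 299792458
    Numerator   = (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2)
    Denominator = Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR
    RadarConstant = 10e-26 * Numerator / Denominator
    # on the order of 1e-30 with these defaults, before the 10**(40/10) experimental constant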
1047 1047 class FullSpectralAnalysis(Operation):
1048 1048
1049 1049 """
1050 1050 Function that implements Full Spectral Analysis technique.
1051 1051
1052 1052 Input:
1053 1053 self.dataOut.data_pre : SelfSpectra and CrossSpectra data
1054 1054 self.dataOut.groupList : Pairlist of channels
1055 1055 self.dataOut.ChanDist : Physical distance between receivers
1056 1056
1057 1057
1058 1058 Output:
1059 1059
1060 1060 self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind
1061 1061
1062 1062
1063 1063 Parameters affected: Winds, height range, SNR
1064 1064
1065 1065 """
1066 1066 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,
1067 1067 minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
1068 1068
1069 1069 spc = dataOut.data_pre[0].copy()
1070 1070 cspc = dataOut.data_pre[1]
1071 1071 nHeights = spc.shape[2]
1072 1072
1073 1073 # first_height = 0.75 #km (ref: data header 20170822)
1074 1074 # resolution_height = 0.075 #km
1075 1075 '''
1076 1076 finding height range. check this when radar parameters are changed!
1077 1077 '''
1078 1078 if maxheight is not None:
1079 1079 # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
1080 1080 range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
1081 1081 else:
1082 1082 range_max = nHeights
1083 1083 if minheight is not None:
1084 1084 # range_min = int((minheight - first_height) / resolution_height) # theoretical
1085 1085 range_min = int(13.26 * minheight - 5) # empirical, works better
1086 1086 if range_min < 0:
1087 1087 range_min = 0
1088 1088 else:
1089 1089 range_min = 0
1090 1090
1091 1091 pairsList = dataOut.groupList
1092 1092 if dataOut.ChanDist is not None :
1093 1093 ChanDist = dataOut.ChanDist
1094 1094 else:
1095 1095 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
1096 1096
1097 1097 # 4 variables: zonal, meridional, vertical, and average SNR
1098 1098 data_param = numpy.zeros([4,nHeights]) * numpy.NaN
1099 1099 velocityX = numpy.zeros([nHeights]) * numpy.NaN
1100 1100 velocityY = numpy.zeros([nHeights]) * numpy.NaN
1101 1101 velocityZ = numpy.zeros([nHeights]) * numpy.NaN
1102 1102
1103 1103 dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))
1104 1104
1105 1105 '''***********************************************WIND ESTIMATION**************************************'''
1106 1106 for Height in range(nHeights):
1107 1107
1108 1108 if Height >= range_min and Height < range_max:
1109 1109 # error_code will be useful in future analysis
1110 1110 [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
1111 1111 ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)
1112 1112
1113 1113 if abs(Vzon) < 100. and abs(Vmer) < 100.:
1114 1114 velocityX[Height] = Vzon
1115 1115 velocityY[Height] = -Vmer
1116 1116 velocityZ[Height] = Vver
1117 1117
1118 1118 # Censoring data with SNR threshold
1119 1119 dbSNR [dbSNR < SNRdBlimit] = numpy.NaN
1120 1120
1121 1121 data_param[0] = velocityX
1122 1122 data_param[1] = velocityY
1123 1123 data_param[2] = velocityZ
1124 1124 data_param[3] = dbSNR
1125 1125 dataOut.data_param = data_param
1126 1126 return dataOut
1127 1127
1128 1128 def moving_average(self,x, N=2):
1129 1129 """ convolution for smoothenig data. note that last N-1 values are convolution with zeroes """
1130 1130 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
1131 1131
1132 1132 def gaus(self,xSamples,Amp,Mu,Sigma):
1133 1133 return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)
1134 1134
1135 1135 def Moments(self, ySamples, xSamples):
1136 1136 Power = numpy.nanmean(ySamples) # Power, 0th Moment
1137 1137 yNorm = ySamples / numpy.nansum(ySamples)
1138 1138 RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
1139 1139 Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
1140 1140 StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Standard deviation, spectral width
1141 1141 return numpy.array([Power,RadVel,StdDev])
1142 1142
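Moments() above implements the standard 0th/1st/2nd spectral moments: power is the mean of the samples, radial velocity is the power-weighted mean of the abscissa, and the width is the square root of the power-weighted variance. A quick sanity check on a synthetic Gaussian line, using the same formulas:

    import numpy
    x      = numpy.linspace(-10, 10, 200)                        # frequency/velocity axis
    spc    = numpy.exp(-0.5 * ((x - 2.0) / 1.5)**2)              # line centered at +2, width 1.5
    ynorm  = spc / numpy.nansum(spc)
    radvel = numpy.nansum(x * ynorm)                             # 1st moment, ~ 2.0
    width  = numpy.sqrt(numpy.nansum(ynorm * (x - radvel)**2))   # 2nd moment, ~ 1.5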
1143 1143 def StopWindEstimation(self, error_code):
1144 1144 Vzon = numpy.NaN
1145 1145 Vmer = numpy.NaN
1146 1146 Vver = numpy.NaN
1147 1147 return Vzon, Vmer, Vver, error_code
1148 1148
1149 1149 def AntiAliasing(self, interval, maxstep):
1150 1150 """
1151 1151 function to prevent errors from aliased values when computing phaseslope
1152 1152 """
1153 1153 antialiased = numpy.zeros(len(interval))
1154 1154 copyinterval = interval.copy()
1155 1155
1156 1156 antialiased[0] = copyinterval[0]
1157 1157
1158 1158 for i in range(1,len(antialiased)):
1159 1159 step = interval[i] - interval[i-1]
1160 1160 if step > maxstep:
1161 1161 copyinterval -= 2*numpy.pi
1162 1162 antialiased[i] = copyinterval[i]
1163 1163 elif step < maxstep*(-1):
1164 1164 copyinterval += 2*numpy.pi
1165 1165 antialiased[i] = copyinterval[i]
1166 1166 else:
1167 1167 antialiased[i] = copyinterval[i].copy()
1168 1168
1169 1169 return antialiased
1170 1170
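AntiAliasing() above undoes 2*pi wrapping before the phase-slope regression: whenever the raw phase jumps by more than maxstep, all remaining samples are shifted by +/-2*pi so the points fall on one continuous line (similar in spirit to numpy.unwrap). A small usage sketch, assuming FullSpectralAnalysis can be instantiated directly:

    import numpy
    true_phase    = numpy.linspace(0, 3*numpy.pi, 15)            # monotonically increasing ramp
    wrapped_phase = numpy.angle(numpy.exp(1j*true_phase))        # folded into (-pi, pi]
    unwrapped     = FullSpectralAnalysis().AntiAliasing(wrapped_phase, 4.5)
    # 'unwrapped' recovers the monotonic ramp, so stats.linregress sees a clean slope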
1171 1171 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
1172 1172 """
1173 1173 Function that Calculates Zonal, Meridional and Vertical wind velocities.
1174 1174 Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.
1175 1175
1176 1176 Input:
1177 1177 spc, cspc : self spectra and cross spectra data. In Briggs notation something like S_i*(S_i)_conj, (S_j)_conj respectively.
1178 1178 pairsList : Pairlist of channels
1179 1179 ChanDist : array of xi_ij and eta_ij
1180 1180 Height : height at which data is processed
1181 1181 noise : noise in [channels] format for specific height
1182 1182 Abbsisarange : range of the frequencies or velocities
1183 1183 dbSNR, SNRlimit : signal to noise ratio in db, lower limit
1184 1184
1185 1185 Output:
1186 1186 Vzon, Vmer, Vver : wind velocities
1187 1187 error_code : int that states where code is terminated
1188 1188
1189 1189 0 : no error detected
1190 1190 1 : Gaussian of mean spc exceeds widthlimit
1191 1191 2 : no Gaussian of mean spc found
1192 1192 3 : SNR too low or velocity too high -> e.g. precipitation
1193 1193 4 : at least one Gaussian of cspc exceeds widthlimit
1194 1194 5 : zero out of three cspc Gaussian fits converged
1195 1195 6 : phase slope fit could not be found
1196 1196 7 : arrays used to fit phase have different length
1197 1197 8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
1198 1198
1199 1199 """
1200 1200
1201 1201 error_code = 0
1202 1202
1203 1203 nChan = spc.shape[0]
1204 1204 nProf = spc.shape[1]
1205 1205 nPair = cspc.shape[0]
1206 1206
1207 1207 SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
1208 1208 CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
1209 1209 phase = numpy.zeros([nPair, nProf]) # phase between channels
1210 1210 PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
1211 1211 PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
1212 1212 xFrec = AbbsisaRange[0][:-1] # frequency range
1213 1213 xVel = AbbsisaRange[2][:-1] # velocity range
1214 1214 xSamples = xFrec # the frequency range is taken
1215 1215 delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x
1216 1216
1217 1217 # only consider velocities with in NegativeLimit and PositiveLimit
1218 1218 if (NegativeLimit is None):
1219 1219 NegativeLimit = numpy.min(xVel)
1220 1220 if (PositiveLimit is None):
1221 1221 PositiveLimit = numpy.max(xVel)
1222 1222 xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
1223 1223 xSamples_zoom = xSamples[xvalid]
1224 1224
1225 1225 '''Getting Eij and Nij'''
1226 1226 Xi01, Xi02, Xi12 = ChanDist[:,0]
1227 1227 Eta01, Eta02, Eta12 = ChanDist[:,1]
1228 1228
1229 1229 # spwd limit - updated by D. Scipión 30.03.2021
1230 1230 widthlimit = 10
1231 1231 '''************************* SPC is normalized ********************************'''
1232 1232 spc_norm = spc.copy()
1233 1233 # For each channel
1234 1234 for i in range(nChan):
1235 1235 spc_sub = spc_norm[i,:] - noise[i] # only the signal power
1236 1236 SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)
1237 1237
1238 1238 '''********************** FITTING MEAN SPC GAUSSIAN **********************'''
1239 1239
1240 1240 """ the gaussian of the mean: first subtract noise, then normalize. this is legal because
1241 1241 you only fit the curve and don't need the absolute value of height for calculation,
1242 1242 only for estimation of width. for normalization of cross spectra, you need initial,
1243 1243 unnormalized self-spectra With noise.
1244 1244
1245 1245 Technically, you don't even need to normalize the self-spectra, as you only need the
1246 1246 width of the peak. However, it was left this way. Note that the normalization has a flaw:
1247 1247 due to subtraction of the noise, some values are below zero. Raw "spc" values should be
1248 1248 >= 0, as it is the modulus squared of the signals (complex * its conjugate)
1249 1249 """
1250 1250 # initial conditions
1251 1251 popt = [1e-10,0,1e-10]
1252 1252 # Spectra average
1253 1253 SPCMean = numpy.average(SPC_Samples,0)
1254 1254 # Moments in frequency
1255 1255 SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)
1256 1256
1257 1257 # Gauss Fit SPC in frequency domain
1258 1258 if dbSNR > SNRlimit: # only if SNR > SNRth
1259 1259 try:
1260 1260 popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
1261 1261 if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
1262 1262 return self.StopWindEstimation(error_code = 1)
1263 1263 FitGauss = self.gaus(xSamples_zoom,*popt)
1264 1264 except :#RuntimeError:
1265 1265 return self.StopWindEstimation(error_code = 2)
1266 1266 else:
1267 1267 return self.StopWindEstimation(error_code = 3)
1268 1268
1269 1269 '''***************************** CSPC Normalization *************************
1270 1270 The Spc spectra are used to normalize the crossspectra. Peaks from precipitation
1271 1271 influence the norm which is not desired. First, a range is identified where the
1272 1272 wind peak is estimated -> sum_wind is sum of those frequencies. Next, the area
1273 1273 around it gets cut off and values replaced by mean determined by the boundary
1274 1274 data -> sum_noise (spc is not normalized here, thats why the noise is important)
1275 1275
1276 1276 The sums are then added and multiplied by range/datapoints, because you need
1277 1277 an integral and not a sum for normalization.
1278 1278
1279 1279 A norm is found according to Briggs 92.
1280 1280 '''
1281 1281 # for each pair
1282 1282 for i in range(nPair):
1283 1283 cspc_norm = cspc[i,:].copy()
1284 1284 chan_index0 = pairsList[i][0]
1285 1285 chan_index1 = pairsList[i][1]
1286 1286 CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
1287 1287 phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
1288 1288
1289 1289 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
1290 1290 self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
1291 1291 self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])
1292 1292
1293 1293 popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
1294 1294 FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))
1295 1295
1296 1296 '''*******************************FIT GAUSS CSPC************************************'''
1297 1297 try:
1298 1298 popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
1299 1299 if popt01[2] > widthlimit: # CONDITION
1300 1300 return self.StopWindEstimation(error_code = 4)
1301 1301 popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
1302 1302 if popt02[2] > widthlimit: # CONDITION
1303 1303 return self.StopWindEstimation(error_code = 4)
1304 1304 popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
1305 1305 if popt12[2] > widthlimit: # CONDITION
1306 1306 return self.StopWindEstimation(error_code = 4)
1307 1307
1308 1308 FitGauss01 = self.gaus(xSamples_zoom, *popt01)
1309 1309 FitGauss02 = self.gaus(xSamples_zoom, *popt02)
1310 1310 FitGauss12 = self.gaus(xSamples_zoom, *popt12)
1311 1311 except:
1312 1312 return self.StopWindEstimation(error_code = 5)
1313 1313
1314 1314
1315 1315 '''************* Getting Fij ***************'''
1316 1316 # x-axis point of the gaussian where the center is located from GaussFit of spectra
1317 1317 GaussCenter = popt[1]
1318 1318 ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
1319 1319 PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]
1320 1320
1321 1321 # Point where e^-1 is located in the gaussian
1322 1322 PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
1323 1323 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to"Peminus1" in "FitGauss"
1324 1324 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
1325 1325 Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])
1326 1326
1327 1327 '''********** Taking frequency ranges from mean SPCs **********'''
1328 1328 GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
1329 1329 Range = numpy.empty(2)
1330 1330 Range[0] = GaussCenter - GauWidth
1331 1331 Range[1] = GaussCenter + GauWidth
1332 1332 # Point in x-axis where the bandwidth is located (min:max)
1333 1333 ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
1334 1334 ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
1335 1335 PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
1336 1336 PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
1337 1337 Range = numpy.array([ PointRangeMin, PointRangeMax ])
1338 1338 FrecRange = xSamples_zoom[ Range[0] : Range[1] ]
1339 1339
1340 1340 '''************************** Getting Phase Slope ***************************'''
1341 1341 for i in range(nPair):
1342 1342 if len(FrecRange) > 5:
1343 1343 PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
1344 1344 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
1345 1345 if len(FrecRange) == len(PhaseRange):
1346 1346 try:
1347 1347 slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
1348 1348 PhaseSlope[i] = slope
1349 1349 PhaseInter[i] = intercept
1350 1350 except:
1351 1351 return self.StopWindEstimation(error_code = 6)
1352 1352 else:
1353 1353 return self.StopWindEstimation(error_code = 7)
1354 1354 else:
1355 1355 return self.StopWindEstimation(error_code = 8)
1356 1356
1357 1357 '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
1358 1358
1359 1359 '''Getting constant C'''
1360 1360 cC=(Fij*numpy.pi)**2
1361 1361
1362 1362 '''****** Getting constants F and G ******'''
1363 1363 MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
1364 1364 # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
1365 1365 # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
1366 1366 MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
1367 1367 MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
1368 1368 # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
1369 1369 MijResults = numpy.array([MijResult1, MijResult2])
1370 1370 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
1371 1371
1372 1372 '''****** Getting constants A, B and H ******'''
1373 1373 W01 = numpy.nanmax( FitGauss01 )
1374 1374 W02 = numpy.nanmax( FitGauss02 )
1375 1375 W12 = numpy.nanmax( FitGauss12 )
1376 1376
1377 1377 WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
1378 1378 WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
1379 1379 WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
1380 1380 WijResults = numpy.array([WijResult01, WijResult02, WijResult12])
1381 1381
1382 1382 WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
1383 1383 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
1384 1384
1385 1385 VxVy = numpy.array([[cA,cH],[cH,cB]])
1386 1386 VxVyResults = numpy.array([-cF,-cG])
1387 1387 (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
1388 1388 Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
1389 1389 error_code = 0
1390 1390
1391 1391 return Vzon, Vmer, Vver, error_code
1392 1392
1393 1393 class SpectralMoments(Operation):
1394 1394
1395 1395 '''
1396 1396 Function SpectralMoments()
1397 1397
1398 1398 Calculates moments (power, mean, standard deviation) and SNR of the signal
1399 1399
1400 1400 Type of dataIn: Spectra
1401 1401
1402 1402 Configuration Parameters:
1403 1403
1404 1404 dirCosx : Direction cosine in X axis
1405 1405 dirCosy : Direction cosine in Y axis
1406 1406
1407 1407 elevation :
1408 1408 azimuth :
1409 1409
1410 1410 Input:
1411 1411 channelList : simple channel list to select e.g. [2,3,7]
1412 1412 self.dataOut.data_pre : Spectral data
1413 1413 self.dataOut.abscissaList : List of frequencies
1414 1414 self.dataOut.noise : Noise level per channel
1415 1415
1416 1416 Affected:
1417 1417 self.dataOut.moments : Parameters per channel
1418 1418 self.dataOut.data_snr : SNR per channel
1419 1419
1420 1420 '''
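# Typical use from a schainpy controller script (sketch only; unit and parameter
# names follow the usual convention and may differ for a given experiment):
#
#   procUnit = project.addProcUnit(datatype='ParametersProc', inputId=specUnit.getId())
#   op = procUnit.addOperation(name='SpectralMoments', optype='other')
#   op.addParameter(name='proc_type', value='1', format='int')  # 1: Gaussian-fit branch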
1421 1421
1422 1422 def run(self, dataOut, proc_type=0):
1423 1423
1424 1424 absc = dataOut.abscissaList[:-1]
1425 1425 #noise = dataOut.noise
1426 1426 nChannel = dataOut.data_pre[0].shape[0]
1427 1427 nHei = dataOut.data_pre[0].shape[2]
1428 1428 data_param = numpy.zeros((nChannel, 4 + proc_type*3, nHei))
1429 1429
1430 1430 if proc_type == 1:
1431 1431 fwindow = numpy.zeros(absc.size) + 1
1432 1432 b=64
1433 1433 #b=16
1434 1434 fwindow[0:absc.size//2 - b] = 0
1435 1435 fwindow[absc.size//2 + b:] = 0
1436 1436 type1 = 1 # moments calculation
1437 1437 nProfiles = dataOut.nProfiles
1438 1438 nCohInt = dataOut.nCohInt
1439 1439 nIncohInt = dataOut.nIncohInt
1440 1440 M = numpy.power(numpy.array(1/(nProfiles * nCohInt) ,dtype='float32'),2)
1441 1441 N = numpy.array(M / nIncohInt,dtype='float32')
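# Scaling applied to the accumulated spectra before moment estimation:
# M = (1/(nProfiles*nCohInt))^2 undoes the coherent accumulation/FFT gain and
# N = M/nIncohInt additionally divides by the number of incoherent integrations,
# so 'data' and the per-channel noise are expressed on a per-sample power scale
# (interpretation inferred from the normalization factors above).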
1442 1442 data = dataOut.data_pre[0] * N
1443 1443 #noise = dataOut.noise * N
1444 1444 noise = numpy.zeros(nChannel)
1445 1445 for ind in range(nChannel):
1446 1446 noise[ind] = self.__NoiseByChannel(nProfiles, nIncohInt, data[ind,:,:])
1447 1447 smooth=3
1448 1448 else:
1449 1449 data = dataOut.data_pre[0]
1450 1450 noise = dataOut.noise
1451 1451 fwindow = None
1452 1452 type1 = 0
1453 1453 nIncohInt = None
1454 1454 smooth=None
1455 1455
1456 1456 for ind in range(nChannel):
1457 1457 data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind], nicoh=nIncohInt, smooth=smooth, type1=type1, fwindow=fwindow)
1458 #print('snr:',data_param[:,0])
1458 1459
1459 1460 if proc_type == 1:
1460 1461 dataOut.moments = data_param[:,1:,:]
1461 1462 #dataOut.data_dop = data_param[:,0]
1462 1463 dataOut.data_dop = data_param[:,2]
1463 1464 dataOut.data_width = data_param[:,1]
1464 1465 # dataOut.data_snr = data_param[:,2]
1465 1466 dataOut.data_snr = data_param[:,0]
1466 1467 dataOut.data_pow = data_param[:,6] # to compare with type0 processing
1467 1468 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, data_param[:,3], data_param[:,4],data_param[:,5]),axis=2)
1468 1469
1469 1470 else:
1470 1471 dataOut.moments = data_param[:,1:,:]
1471 1472 dataOut.data_snr = data_param[:,0]
1472 1473 dataOut.data_pow = data_param[:,1]
1473 1474 dataOut.data_dop = data_param[:,2]
1474 1475 dataOut.data_width = data_param[:,3]
1475 1476 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, dataOut.data_pow),axis=2)
1476 1477
1477 1478 return dataOut
1478 1479
1479 1480 def __calculateMoments(self, oldspec, oldfreq, n0,
1480 1481 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
1481 1482
1482 1483 def __GAUSSWINFIT1(A, flagPDER=0):
1483 1484 nonlocal truex, xvalid
1484 1485 nparams = 4
1485 1486 M=truex.size
1486 1487 mm=numpy.arange(M,dtype='f4')
1487 1488 delta = numpy.zeros(M,dtype='f4')
1488 1489 delta[0] = 1.0
1489 1490 Ts = numpy.array([1.0/(2*truex[0])],dtype='f4')[0]
1490 1491 jj = -1j
1491 1492 #if self.winauto is None: self.winauto = (1.0 - mm/M)
1492 1493 winauto = (1.0 - mm/M)
1493 1494 winauto = winauto/winauto.max() # Normalized to 1
1494 1495 #ON_ERROR,2 # IDL sentence: Return to caller if an error occurs
1495 1496 A[0] = numpy.abs(A[0])
1496 1497 A[2] = numpy.abs(A[2])
1497 1498 A[3] = numpy.abs(A[3])
1498 1499 pi=numpy.array([numpy.pi],dtype='f4')[0]
1499 1500 if A[2] != 0:
1500 1501 Z = numpy.exp(-2*numpy.power((pi*A[2]*mm*Ts),2,dtype='f4')+jj*2*pi*A[1]*mm*Ts, dtype='c8') # Get Z
1501 1502 else:
1502 1503 Z = mm*0.0
1503 1504 A[0] = 0.0
1504 1505 junkF = numpy.roll(2*fft(winauto*(A[0]*Z+A[3]*delta)).real - \
1505 1506 winauto[0]*(A[0]+A[3]), M//2) # *M scale for fft not needed in python
1506 1507 F = junkF[xvalid]
1507 1508 if flagPDER == 0: #NEED PARTIAL?
1508 1509 return F
1509 1510 PDER = numpy.zeros((M,nparams)) #YES, MAKE ARRAY.
1510 1511 PDER[:,0] = numpy.roll(2*(fft(winauto*Z)*M).real - winauto[0], M//2)
1511 1512 PDER[:,1] = numpy.roll(2*(fft(winauto*jj*2*numpy.pi*mm*Ts*A[0]*Z)*M).real, M//2)
1512 1513 PDER[:,2] = numpy.roll(2*(fft(winauto*(-4*numpy.power(numpy.pi*mm*Ts,2)*A[2]*A[0]*Z))*M).real, M//2)
1513 1514 PDER[:,3] = numpy.roll(2*(fft(winauto*delta)*M).real - winauto[0], M//2)
1514 1515 PDER = PDER[xvalid,:]
1515 1516 return F, PDER
1516 1517
1517 1518 def __curvefit_koki(y, a, Weights, FlagNoDerivative=1,
1518 1519 itmax=20, tol=None):
1519 1520 #ON_ERROR,2 IDL SENTENCE: RETURN TO THE CALLER IF ERROR
1520 1521 if tol == None:
1521 1522 tol = numpy.array([1.e-3],dtype='f4')[0]
1522 1523 typ=a.dtype
1523 1524 double = 1 if typ == numpy.float64 else 0
1524 1525 if typ != numpy.float32:
1525 1526 a=a.astype(numpy.float32) #Make params floating
1526 1527 # if we will be estimating partial derivatives then compute machine precision
1527 1528 if FlagNoDerivative == 1:
1528 1529 res=numpy.MachAr(float_conv=numpy.float32)
1529 1530 eps=numpy.sqrt(res.eps)
1530 1531
1531 1532 nterms = a.size # Number of parameters
1532 1533 nfree=numpy.array([numpy.size(y) - nterms],dtype='f4')[0] # Degrees of freedom
1533 1534 if nfree <= 0: print('Curvefit - not enough data points.')
1534 1535 flambda= numpy.array([0.001],dtype='f4')[0] # Initial lambda
1535 1536 #diag=numpy.arange(nterms)*(nterms+1) # Subscripts of diagonal elements
1536 1537 # Use diag method in python
1537 1538 converge=1
1538 1539
1539 1540 #Define the partial derivative array
1540 1541 PDER = numpy.zeros((nterms,numpy.size(y)),dtype='f8') if double == 1 else numpy.zeros((nterms,numpy.size(y)),dtype='f4')
1541 1542
1542 1543 for Niter in range(itmax): #Iteration loop
1543 1544
1544 1545 if FlagNoDerivative == 1:
1545 1546 #Evaluate function and estimate partial derivatives
1546 1547 yfit = __GAUSSWINFIT1(a)
1547 1548 for term in range(nterms):
1548 1549 p=a.copy() # Copy current parameters
1549 1550 #Increment size for forward difference derivative
1550 1551 inc = eps * abs(p[term])
1551 1552 if inc == 0: inc = eps
1552 1553 p[term] = p[term] + inc
1553 1554 yfit1 = __GAUSSWINFIT1(p)
1554 1555 PDER[term,:] = (yfit1-yfit)/inc
1555 1556 else:
1556 1557 #The user's procedure will return partial derivatives
1557 1558 yfit,PDER=__GAUSSWINFIT1(a, flagPDER=1)
1558 1559
1559 1560 beta = numpy.dot(PDER,(y-yfit)*Weights)
1560 1561 alpha = numpy.dot(PDER * numpy.tile(Weights,(nterms,1)), numpy.transpose(PDER))
1561 1562 # save current values of return parameters
1562 1563 sigma1 = numpy.sqrt( 1.0 / numpy.diag(alpha) ) # Current sigma.
1563 1564 sigma = sigma1
1564 1565
1565 1566 chisq1 = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # Current chi squared.
1566 1567 chisq = chisq1
1567 1568 yfit1 = yfit
1568 1569 elev7=numpy.array([1.0e7],dtype='f4')[0]
1569 1570 compara =numpy.sum(abs(y))/elev7/nfree
1570 1571 done_early = chisq1 < compara
1571 1572
1572 1573 if done_early:
1573 1574 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
1574 1575 if done_early: Niter -= 1
1575 1576 #save_tp(chisq,Niter,yfit)
1576 1577 return yfit, a, converge, sigma, chisq, chi2 # return result
1577 1578 #c = numpy.dot(c, c) # this operator implemented at the next lines
1578 1579 c_tmp = numpy.sqrt(numpy.diag(alpha))
1579 1580 siz=len(c_tmp)
1580 1581 c=numpy.dot(c_tmp.reshape(siz,1),c_tmp.reshape(1,siz))
1581 1582 lambdaCount = 0
1582 1583 while True:
1583 1584 lambdaCount += 1
1584 1585 # Normalize alpha to have unit diagonal.
1585 1586 array = alpha / c
1586 1587 # Augment the diagonal.
1587 1588 one=numpy.array([1.],dtype='f4')[0]
1588 1589 numpy.fill_diagonal(array,numpy.diag(array)*(one+flambda))
1589 1590 # Invert modified curvature matrix to find new parameters.
1590 1591
1591 1592 try:
1592 1593 array = (1.0/array) if array.size == 1 else numpy.linalg.inv(array)
1593 1594 except Exception as e:
1594 1595 print(e)
1595 1596 array[:]=numpy.NaN
1596 1597
1597 1598 b = a + numpy.dot(numpy.transpose(beta),array/c) # New params
1598 1599 yfit = __GAUSSWINFIT1(b) # Evaluate function
1599 1600 chisq = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # New chisq
1600 1601 sigma = numpy.sqrt(numpy.diag(array)/numpy.diag(alpha)) # New sigma
1601 1602 if (numpy.isfinite(chisq) == 0) or \
1602 1603 (lambdaCount > 30 and chisq >= chisq1):
1603 1604 # Reject changes made this iteration, use old values.
1604 1605 yfit = yfit1
1605 1606 sigma = sigma1
1606 1607 chisq = chisq1
1607 1608 converge = 0
1608 1609 #print('Failed to converge.')
1609 1610 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
1610 1611 if done_early: Niter -= 1
1611 1612 #save_tp(chisq,Niter,yfit)
1612 1613 return yfit, a, converge, sigma, chisq, chi2 # return result
1613 1614 ten=numpy.array([10.0],dtype='f4')[0]
1614 1615 flambda *= ten # Assume fit got worse
1615 1616 if chisq <= chisq1:
1616 1617 break
1617 1618 hundred=numpy.array([100.0],dtype='f4')[0]
1618 1619 flambda /= hundred
1619 1620
1620 1621 a=b # Save new parameter estimate.
1621 1622 if ((chisq1-chisq)/chisq1) <= tol: # Finished?
1622 1623 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
1623 1624 if done_early: Niter -= 1
1624 1625 #save_tp(chisq,Niter,yfit)
1625 1626 return yfit, a, converge, sigma, chisq, chi2 # return result
1626 1627 converge = 0
1627 1628 chi2 = chisq
1628 1629 #print('Failed to converge.')
1629 1630 #save_tp(chisq,Niter,yfit)
1630 1631 return yfit, a, converge, sigma, chisq, chi2
1631 1632
1632 1633 if (nicoh is None): nicoh = 1
1633 1634 if (smooth is None): smooth = 0
1634 1635 if (type1 is None): type1 = 0
1635 1636 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
1636 1637 if (snrth is None): snrth = -20.0
1637 1638 if (dc is None): dc = 0
1638 1639 if (aliasing is None): aliasing = 0
1639 1640 if (oldfd is None): oldfd = 0
1640 1641 if (wwauto is None): wwauto = 0
1641 1642
1642 1643 if (n0 < 1.e-20): n0 = 1.e-20
1643 1644
1644 1645 xvalid = numpy.where(fwindow == 1)[0]
1645 1646 freq = oldfreq
1646 1647 truex = oldfreq
1647 1648 vec_power = numpy.zeros(oldspec.shape[1])
1648 1649 vec_fd = numpy.zeros(oldspec.shape[1])
1649 1650 vec_w = numpy.zeros(oldspec.shape[1])
1650 1651 vec_snr = numpy.zeros(oldspec.shape[1])
1651 1652 vec_n1 = numpy.empty(oldspec.shape[1])
1652 1653 vec_fp = numpy.empty(oldspec.shape[1])
1653 1654 vec_sigma_fd = numpy.empty(oldspec.shape[1])
1654 1655
1655 1656 for ind in range(oldspec.shape[1]):
1656 1657
1657 1658 spec = oldspec[:,ind]
1658 1659 if (smooth == 0):
1659 1660 spec2 = spec
1660 1661 else:
1661 1662 spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
1662 1663
1663 1664 aux = spec2*fwindow
1664 1665 max_spec = aux.max()
1665 1666 m = aux.tolist().index(max_spec)
1666 1667
1667 1668 if m > 2 and m < oldfreq.size - 3:
1668 1669 newindex = m + numpy.array([-2,-1,0,1,2])
1669 1670 newfreq = numpy.arange(20)/20.0*(numpy.max(freq[newindex])-numpy.min(freq[newindex]))+numpy.min(freq[newindex])
1670 1671 #peakspec = SPLINE(,)
1671 1672 tck = interpolate.splrep(freq[newindex], spec2[newindex])
1672 1673 peakspec = interpolate.splev(newfreq, tck)
1673 1674 # max_spec = MAX(peakspec,)
1674 1675 max_spec = numpy.max(peakspec)
1675 1676 mnew = numpy.argmax(peakspec)
1676 1677 #fp = newfreq(mnew)
1677 1678 fp = newfreq[mnew]
1678 1679 else:
1679 1680 fp = freq[m]
1680 1681
1681 1682 if type1==0:
1682 1683
1683 1684 # Moments Estimation
1684 1685 bb = spec2[numpy.arange(m,spec2.size)]
1685 1686 bb = (bb<n0).nonzero()
1686 1687 bb = bb[0]
1687 1688
1688 1689 ss = spec2[numpy.arange(0,m + 1)]
1689 1690 ss = (ss<n0).nonzero()
1690 1691 ss = ss[0]
1691 1692
1692 1693 if (bb.size == 0):
1693 1694 bb0 = spec.size - 1 - m
1694 1695 else:
1695 1696 bb0 = bb[0] - 1
1696 1697 if (bb0 < 0):
1697 1698 bb0 = 0
1698 1699
1699 1700 if (ss.size == 0):
1700 1701 ss1 = 1
1701 1702 else:
1702 1703 ss1 = max(ss) + 1
1703 1704
1704 1705 if (ss1 > m):
1705 1706 ss1 = m
1706 1707
1707 1708 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
1708 1709
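# Spectral moments over the valid window, with S = (spec2 - n0) weighted by fwindow:
#   power = sum(S)                              (zeroth moment; signal/total power use the mean)
#   fd    = sum(S*f)/sum(S)                     (first moment: Doppler shift)
#   w     = sqrt(sum(S*(f - fd)^2)/sum(S))      (sqrt of second central moment: spectral width)
#   snr   = (mean(spec2) - n0)/n0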
1709 1710 signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipión added with correct definition
1710 1711 total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipión added with correct definition
1711 1712 power = ((spec2[valid] - n0) * fwindow[valid]).sum()
1712 1713 fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
1713 1714 w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
1714 1715 snr = (spec2.mean()-n0)/n0
1715 1716 if (snr < 1.e-20): snr = 1.e-20
1716 1717
1717 1718 vec_power[ind] = total_power
1718 1719 vec_fd[ind] = fd
1719 1720 vec_w[ind] = w
1720 1721 vec_snr[ind] = snr
1721 1722 else:
1722 1723 # Noise by heights
1723 1724 n1, stdv = self.__get_noise2(spec, nicoh)
1724 1725 # Moments Estimation
1725 1726 bb = spec2[numpy.arange(m,spec2.size)]
1726 1727 bb = (bb<n1).nonzero()
1727 1728 bb = bb[0]
1728 1729
1729 1730 ss = spec2[numpy.arange(0,m + 1)]
1730 1731 ss = (ss<n1).nonzero()
1731 1732 ss = ss[0]
1732 1733
1733 1734 if (bb.size == 0):
1734 1735 bb0 = spec.size - 1 - m
1735 1736 else:
1736 1737 bb0 = bb[0] - 1
1737 1738 if (bb0 < 0):
1738 1739 bb0 = 0
1739 1740
1740 1741 if (ss.size == 0):
1741 1742 ss1 = 1
1742 1743 else:
1743 1744 ss1 = max(ss) + 1
1744 1745
1745 1746 if (ss1 > m):
1746 1747 ss1 = m
1747 1748
1748 1749 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
1749 1750
1750 1751 power = ((spec[valid] - n1)*fwindow[valid]).sum()
1751 1752 fd = ((spec[valid]- n1)*freq[valid]*fwindow[valid]).sum()/power
1752 1753 try:
1753 1754 w = numpy.sqrt(((spec[valid] - n1)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
1754 1755 except:
1755 1756 w = float("NaN")
1756 1757 snr = power/(n0*fwindow.sum())
1757 1758 if snr < 1.e-20: snr = 1.e-20
1758 1759
1759 1760 # Gaussian fitting starts here
1760 1761
1761 1762 if snr > numpy.power(10,0.1*snrth):
1762 1763
1763 1764 a = numpy.zeros(4,dtype='f4')
1764 1765 a[0] = snr * n0
1765 1766 a[1] = fd
1766 1767 a[2] = w
1767 1768 a[3] = n0
1768 1769
1769 1770 np = spec.size
1770 1771 aold = a.copy()
1771 1772 spec2 = spec.copy()
1772 1773 oldxvalid = xvalid.copy()
1773 1774
1774 1775 for i in range(2):
1775 1776
1776 1777 ww = 1.0/(numpy.power(spec2,2)/nicoh)
1777 1778 ww[np//2] = 0.0
1778 1779
1779 1780 a = aold.copy()
1780 1781 xvalid = oldxvalid.copy()
1781 1782 #self.show_var(xvalid)
1782 1783
1783 1784 gaussfn = __curvefit_koki(spec[xvalid], a, ww[xvalid])
1784 1785 a = gaussfn[1]
1785 1786 converge = gaussfn[2]
1786 1787
1787 1788 xvalid = numpy.arange(np)
1788 1789 spec2 = __GAUSSWINFIT1(a)
1789 1790
1790 1791 xvalid = oldxvalid.copy()
1791 1792 power = a[0] * np
1792 1793 fd = a[1]
1793 1794 sigma_fd = gaussfn[3][1]
1794 1795 snr = max(power/ (max(a[3],n0) * len(oldxvalid)) * converge, 1e-20)
1795 1796 w = numpy.abs(a[2])
1796 1797 n1 = max(a[3], n0)
1797 1798
1798 1799 #gauss_adj=[fd,w,snr,n1,fp,sigma_fd]
1799 1800 else:
1800 1801 sigma_fd=numpy.nan # to avoid UnboundLocalError: local variable 'sigma_fd' referenced before assignment
1801 1802
1802 1803 vec_fd[ind] = fd
1803 1804 vec_w[ind] = w
1804 1805 vec_snr[ind] = snr
1805 1806 vec_n1[ind] = n1
1806 1807 vec_fp[ind] = fp
1807 1808 vec_sigma_fd[ind] = sigma_fd
1808 1809 vec_power[ind] = power # to compare with type 0 processing
1809 1810
1810 1811 if type1==1:
1811 1812 #return numpy.vstack((vec_fd, vec_w, vec_snr, vec_n1, vec_fp, vec_sigma_fd, vec_power))
1812 1813 return numpy.vstack((vec_snr, vec_w, vec_fd, vec_n1, vec_fp, vec_sigma_fd, vec_power)) # snr and fd exchanged to compare doppler of both types
1813 1814 else:
1814 1815 return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
1815 1816
1816 1817 def __get_noise2(self,POWER, fft_avg, TALK=0):
1817 1818 '''
1818 1819 Routine to compute the noise by heights (n1). Similar to the IDL version.
1819 1820 '''
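# Sorted-power noise estimate: samples are accumulated in increasing order while
#   SUMSQ*J <= (1 + 1/fft_avg) * SUM^2
# holds, i.e. while the running variance is still consistent with pure noise for
# fft_avg incoherent averages (a Hildebrand and Sekhon (1974) type criterion).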
1820 1821 SPECT_PTS = len(POWER)
1821 1822 fft_avg = fft_avg*1.0
1822 1823 NOMIT = 0
1823 1824 NN = SPECT_PTS - NOMIT
1824 1825 N = NN//2
1825 1826 ARR = numpy.concatenate((POWER[0:N+1],POWER[N+NOMIT+1:SPECT_PTS]))
1826 1827 ARR = numpy.sort(ARR)
1827 1828 NUMS_MIN = (SPECT_PTS+7)//8
1828 1829 RTEST = (1.0+1.0/fft_avg)
1829 1830 SUM = 0.0
1830 1831 SUMSQ = 0.0
1831 1832 J = 0
1832 1833 for I in range(NN):
1833 1834 J = J + 1
1834 1835 SUM = SUM + ARR[I]
1835 1836 SUMSQ = SUMSQ + ARR[I]*ARR[I]
1836 1837 AVE = SUM*1.0/J
1837 1838 if J > NUMS_MIN:
1838 1839 if (SUMSQ*J <= RTEST*SUM*SUM): RNOISE = AVE
1839 1840 else:
1840 1841 if J == NUMS_MIN: RNOISE = AVE
1841 1842 if TALK == 1: print('Noise Power (2):%4.4f' %RNOISE)
1842 1843 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
1843 1844 return RNOISE, stdv
1844 1845
1845 1846 def __get_noise1(self, power, fft_avg, TALK=0):
1846 1847 '''
1847 1848 Routine to compute the noise by heights (n0). Similar to the IDL version.
1848 1849 '''
1849 1850 num_pts = numpy.size(power)
1850 1851 #print('num_pts',num_pts)
1851 1852 #print('power',power.shape)
1852 1853 #print(power[256:267,0:2])
1853 1854 fft_avg = fft_avg*1.0
1854 1855
1855 1856 ind = numpy.argsort(power, axis=None, kind='stable')
1856 1857 #ind = numpy.argsort(numpy.reshape(power,-1))
1857 1858 #print(ind.shape)
1858 1859 #print(ind[0:11])
1859 1860 #print(numpy.reshape(power,-1)[ind[0:11]])
1860 1861 ARR = numpy.reshape(power,-1)[ind]
1861 1862 #print('ARR',len(ARR))
1862 1863 #print('ARR',ARR.shape)
1863 1864 NUMS_MIN = num_pts//10
1864 1865 RTEST = (1.0+1.0/fft_avg)
1865 1866 SUM = 0.0
1866 1867 SUMSQ = 0.0
1867 1868 J = 0
1868 1869 cont = 1
1869 1870 while cont == 1 and J < num_pts:
1870 1871
1871 1872 SUM = SUM + ARR[J]
1872 1873 SUMSQ = SUMSQ + ARR[J]*ARR[J]
1873 1874 J = J + 1
1874 1875
1875 1876 if J > NUMS_MIN:
1876 1877 if (SUMSQ*J <= RTEST*SUM*SUM):
1877 1878 LNOISE = SUM*1.0/J
1878 1879 else:
1879 1880 J = J - 1
1880 1881 SUM = SUM - ARR[J]
1881 1882 SUMSQ = SUMSQ - ARR[J]*ARR[J]
1882 1883 cont = 0
1883 1884 else:
1884 1885 if J == NUMS_MIN: LNOISE = SUM*1.0/J
1885 1886 if TALK == 1: print('Noise Power (1):%8.8f' %LNOISE)
1886 1887 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
1887 1888 return LNOISE, stdv
1888 1889
1889 1890 def __NoiseByChannel(self, num_prof, num_incoh, spectra,talk=0):
1890 1891
1891 1892 val_frq = numpy.arange(num_prof-2)+1
1892 1893 val_frq[(num_prof-2)//2:] = val_frq[(num_prof-2)//2:] + 1
1893 1894 junkspc = numpy.sum(spectra[val_frq,:], axis=1)
1894 1895 junkid = numpy.argsort(junkspc)
1895 1896 noisezone = val_frq[junkid[0:num_prof//2]]
1896 1897 specnoise = spectra[noisezone,:]
1897 1898 noise, stdvnoise = self.__get_noise1(specnoise,num_incoh)
1898 1899
1899 1900 if talk:
1900 1901 print('noise =', noise)
1901 1902 return noise
1902 1903
1903 1904 class JULIADriftsEstimation(Operation):
1904 1905
1905 1906 def __init__(self):
1906 1907 Operation.__init__(self)
1907 1908
1908 1909
1909 1910 def newtotal(self, data):
1910 1911 return numpy.nansum(data)
1911 1912
1912 1913 #def data_filter(self, parm, snrth=-19.5, swth=20, wErrth=500):
1913 1914 def data_filter(self, parm, snrth=-20, swth=20, wErrth=500):
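# Filters a (heights x parameters) block by SNR: the noise floor is taken from the
# mode of the SNR histogram; an adaptive threshold is applied (the median SNR when
# the modal noise level is more than 3 dB above it, otherwise modal noise + 3 dB);
# heights below that threshold, below snrth, or with non-finite SNR are set to NaN,
# and the NaN mask is propagated to every parameter of those heights.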
1914 1915
1915 1916 Sz0 = parm.shape # Sz0: h,p
1916 1917 drift = parm[:,0]
1917 1918 sw = 2*parm[:,1]
1918 1919 snr = 10*numpy.log10(parm[:,2])
1919 1920 Sz = drift.shape # Sz: h
1920 1921 mask = numpy.ones((Sz[0]))
1921 1922 th=0
1922 1923 valid=numpy.where(numpy.isfinite(snr))
1923 1924 cvalid = len(valid[0])
1924 1925 if cvalid >= 1:
1925 1926 # Compute the average SNR noise level for the i-th group of heights
1926 1927 nbins = int(numpy.max(snr)-numpy.min(snr))+1 # bin size = 1, similar to IDL
1927 1928 h = numpy.histogram(snr,bins=nbins)
1928 1929 hist = h[0]
1929 1930 values = numpy.round_(h[1])
1930 1931 moda = values[numpy.where(hist == numpy.max(hist))]
1931 1932 indNoise = numpy.where(numpy.abs(snr - numpy.min(moda)) < 3)[0]
1932 1933
1933 1934 noise = snr[indNoise]
1934 1935 noise_mean = numpy.sum(noise)/len(noise)
1935 1936 # Compute the median SNR
1936 1937 med = numpy.median(snr)
1937 1938 # Set the SNR threshold
1938 1939 if noise_mean > med + 3:
1939 1940 th = med
1940 1941 else:
1941 1942 th = noise_mean + 3
1942 1943 # Build the mask
1943 1944 novalid = numpy.where(snr <= th)[0]
1944 1945 mask[novalid] = numpy.nan
1945 1946 # Remove data that do not exceed the threshold parameter (snrth)
1946 1947 novalid = numpy.where(snr <= snrth)
1947 1948 cnovalid = len(novalid[0])
1948 1949 if cnovalid > 0:
1949 1950 mask[novalid] = numpy.nan
1950 1951 novalid = numpy.where(numpy.isnan(snr))
1951 1952 cnovalid = len(novalid[0])
1952 1953 if cnovalid > 0:
1953 1954 mask[novalid] = numpy.nan
1954 1955 new_parm = numpy.zeros((Sz0[0],Sz0[1]))
1955 1956 for h in range(Sz0[0]):
1956 1957 for p in range(Sz0[1]):
1957 1958 if numpy.isnan(mask[h]):
1958 1959 new_parm[h,p]=numpy.nan
1959 1960 else:
1960 1961 new_parm[h,p]=parm[h,p]
1961 1962
1962 1963 return new_parm, th
1963 1964
1964 1965 def run(self, dataOut, zenith, zenithCorrection,heights=None, statistics=0, otype=0):
1965 1966
1966 nCh=dataOut.spcpar.shape[0]
1967
1968 dataOut.lat=-11.95
1969 dataOut.lon=-76.87
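# Station geographic coordinates (approximately the Jicamarca site) attached to the output object.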
1967 1970
1971 nCh=dataOut.spcpar.shape[0]
1968 1972 nHei=dataOut.spcpar.shape[1]
1969 1973 nParam=dataOut.spcpar.shape[2]
1970 # Only the heights of interest
1971 hei=dataOut.heightList
1972 hvalid=numpy.where([hei >= heights[0]][0] & [hei <= heights[1]][0])[0]
1973 nhvalid=len(hvalid)
1974 parm = numpy.zeros((nCh,nhvalid,nParam))
1975 parm = dataOut.spcpar[:,hvalid,:]
1974 # Height selection
1975
1976 if not heights:
1977 parm = numpy.zeros((nCh,nHei,nParam))
1978 parm[:] = dataOut.spcpar[:]
1979 else:
1980 hei=dataOut.heightList
1981 hvalid=numpy.where([hei >= heights[0]][0] & [hei <= heights[1]][0])[0]
1982 nhvalid=len(hvalid)
1983 dataOut.heightList = hei[hvalid]
1984 parm = numpy.zeros((nCh,nhvalid,nParam))
1985 parm[:] = dataOut.spcpar[:,hvalid,:]
1986
1976 1987
1977 1988 # First filtering: SNR threshold
1978 #snrth=-19
1979 1989 for i in range(nCh):
1980 #print('snr:',parm[i,:,2])
1981 #dataOut.spcpar[i,hvalid,:] = self.data_filter(parm[i,:,:],snrth)[0]
1982 dataOut.spcpar[i,hvalid,:] = self.data_filter(parm[i,:,:])[0]
1983 #print('dataOut.spcpar[0,:,2]',dataOut.spcpar[0,:,2])
1984 #print('dataOut.spcpar[1,:,2]',dataOut.spcpar[1,:,2])
1990 parm[i,:,:] = self.data_filter(parm[i,:,:])[0]
1991
1985 1992 zenith = numpy.array(zenith)
1986 1993 zenith -= zenithCorrection
1987 1994 zenith *= numpy.pi/180
1988 1995 alpha = zenith[0]
1989 1996 beta = zenith[1]
1990
1991 dopplerCH0 = dataOut.spcpar[0,:,0]
1992 dopplerCH1 = dataOut.spcpar[1,:,0]
1993 swCH0 = dataOut.spcpar[0,:,1]
1994 swCH1 = dataOut.spcpar[1,:,1]
1995 snrCH0 = 10*numpy.log10(dataOut.spcpar[0,:,2])
1996 snrCH1 = 10*numpy.log10(dataOut.spcpar[1,:,2])
1997 noiseCH0 = dataOut.spcpar[0,:,3]
1998 noiseCH1 = dataOut.spcpar[1,:,3]
1999 wErrCH0 = dataOut.spcpar[0,:,5]
2000 wErrCH1 = dataOut.spcpar[1,:,5]
1997 dopplerCH0 = parm[0,:,0]
1998 dopplerCH1 = parm[1,:,0]
1999 swCH0 = parm[0,:,1]
2000 swCH1 = parm[1,:,1]
2001 snrCH0 = 10*numpy.log10(parm[0,:,2])
2002 snrCH1 = 10*numpy.log10(parm[1,:,2])
2003 noiseCH0 = parm[0,:,3]
2004 noiseCH1 = parm[1,:,3]
2005 wErrCH0 = parm[0,:,5]
2006 wErrCH1 = parm[1,:,5]
2001 2007
2002 2008 # Vertical and zonal calculation according to geometry
2003 2009 sinB_A = numpy.sin(beta)*numpy.cos(alpha) - numpy.sin(alpha)* numpy.cos(beta)
2004 2010 drift = -(dopplerCH0 * numpy.sin(beta) - dopplerCH1 * numpy.sin(alpha))/ sinB_A
2005 '''
2006 print('drift.shape:',drift.shape)
2007 print('drift min:', numpy.nanmin(drift))
2008 print('drift max:', numpy.nanmax(drift))
2009 '''
2010
2011 '''
2012 print('shape:', dopplerCH0[hvalid].shape)
2013 print('dopplerCH0:', dopplerCH0[hvalid])
2014 print('dopplerCH1:', dopplerCH1[hvalid])
2015 print('drift:', drift[hvalid])
2016 '''
2017 2011 zonal = (dopplerCH0 * numpy.cos(beta) - dopplerCH1 * numpy.cos(alpha))/ sinB_A
2018 '''
2019 print('zonal min:', numpy.nanmin(zonal))
2020 print('zonal max:', numpy.nanmax(zonal))
2021 '''
2022 #print('zonal:', zonal[hvalid])
2023 2012 snr = (snrCH0 + snrCH1)/2
2024 '''
2025 print('snr min:', 10*numpy.log10(numpy.nanmin(snr)))
2026 print('snr max:', 10*numpy.log10(numpy.nanmax(snr)))
2027 '''
2028 2013 noise = (noiseCH0 + noiseCH1)/2
2029 2014 sw = (swCH0 + swCH1)/2
2030 2015 w_w_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.sin(beta)/numpy.abs(sinB_A),2) + numpy.power(wErrCH1 * numpy.sin(alpha)/numpy.abs(sinB_A),2))
2031 2016 w_e_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.cos(beta)/numpy.abs(-1*sinB_A),2) + numpy.power(wErrCH1 * numpy.cos(alpha)/numpy.abs(-1*sinB_A),2))
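# Beam geometry: sinB_A = sin(beta - alpha); the line-of-sight Dopplers from the
# two oblique beams are projected into vertical and zonal components via
#   drift = -(d0*sin(beta) - d1*sin(alpha)) / sin(beta - alpha)
#   zonal =  (d0*cos(beta) - d1*cos(alpha)) / sin(beta - alpha)
# and w_w_err / w_e_err propagate the per-channel fit errors (wErrCH0, wErrCH1)
# through the same projection.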
2032 2017
2033 2018 # for statistics150km
2034 2019 if statistics:
2035 2020 print('Implemented offline.')
2036
2021
2037 2022 if otype == 0:
2038 2023 winds = numpy.vstack((snr, drift, zonal, noise, sw, w_w_err, w_e_err)) # to process statistics drifts
2039 2024 elif otype == 3:
2040 2025 winds = numpy.vstack((snr, drift, zonal)) # to generic plot: 3 RTI's
2041 2026 elif otype == 4:
2042 2027 winds = numpy.vstack((snrCH0, drift, snrCH1, zonal)) # to generic plot: 4 RTI's
2043 2028
2044 2029 snr1 = numpy.vstack((snrCH0, snrCH1))
2045
2046 2030 dataOut.data_output = winds
2047 2031 dataOut.data_snr = snr1
2048 2032
2049 2033 dataOut.utctimeInit = dataOut.utctime
2050 2034 dataOut.outputInterval = dataOut.timeInterval
2051
2035
2036 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.data_output[0])) # NAN vectors are not written
2037
2052 2038 return dataOut
2053 2039
2054 2040 class SALags(Operation):
2055 2041 '''
2056 2042 Function SALags()
2057 2043
2058 2044 Input:
2059 2045 self.dataOut.data_pre
2060 2046 self.dataOut.abscissaList
2061 2047 self.dataOut.noise
2062 2048 self.dataOut.normFactor
2063 2049 self.dataOut.data_snr
2064 2050 self.dataOut.groupList
2065 2051 self.dataOut.nChannels
2066 2052
2067 2053 Affected:
2068 2054 self.dataOut.data_param
2069 2055
2070 2056 '''
2071 2057 def run(self, dataOut):
2072 2058 data_acf = dataOut.data_pre[0]
2073 2059 data_ccf = dataOut.data_pre[1]
2074 2060 normFactor_acf = dataOut.normFactor[0]
2075 2061 normFactor_ccf = dataOut.normFactor[1]
2076 2062 pairs_acf = dataOut.groupList[0]
2077 2063 pairs_ccf = dataOut.groupList[1]
2078 2064
2079 2065 nHeights = dataOut.nHeights
2080 2066 absc = dataOut.abscissaList
2081 2067 noise = dataOut.noise
2082 2068 SNR = dataOut.data_snr
2083 2069 nChannels = dataOut.nChannels
2084 2070 # pairsList = dataOut.groupList
2085 2071 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)
2086 2072
2087 2073 for l in range(len(pairs_acf)):
2088 2074 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
2089 2075
2090 2076 for l in range(len(pairs_ccf)):
2091 2077 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
2092 2078
2093 2079 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
2094 2080 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
2095 2081 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
2096 2082 return
2097 2083
2098 2084 # def __getPairsAutoCorr(self, pairsList, nChannels):
2099 2085 #
2100 2086 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
2101 2087 #
2102 2088 # for l in range(len(pairsList)):
2103 2089 # firstChannel = pairsList[l][0]
2104 2090 # secondChannel = pairsList[l][1]
2105 2091 #
2106 2092 # #Getting autocorrelation pairs
2107 2093 # if firstChannel == secondChannel:
2108 2094 # pairsAutoCorr[firstChannel] = int(l)
2109 2095 #
2110 2096 # pairsAutoCorr = pairsAutoCorr.astype(int)
2111 2097 #
2112 2098 # pairsCrossCorr = range(len(pairsList))
2113 2099 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
2114 2100 #
2115 2101 # return pairsAutoCorr, pairsCrossCorr
2116 2102
2117 2103 def __calculateTaus(self, data_acf, data_ccf, lagRange):
2118 2104
2119 2105 lag0 = data_acf.shape[1]//2
2120 2106 #Autocorrelation function
2121 2107 mean_acf = numpy.nanmean(data_acf, axis = 0)
2122 2108
2123 2109 #Get the TauCross index
2124 2110 ind_ccf = data_ccf.argmax(axis = 1)
2125 2111 #Get the TauAuto index
2126 2112 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
2127 2113 ccf_lag0 = data_ccf[:,lag0,:]
2128 2114
2129 2115 for i in range(ccf_lag0.shape[0]):
2130 2116 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
2131 2117
2132 2118 #Get TauCross and TauAuto
2133 2119 tau_ccf = lagRange[ind_ccf]
2134 2120 tau_acf = lagRange[ind_acf]
2135 2121
2136 2122 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
2137 2123
2138 2124 tau_ccf[Nan1,Nan2] = numpy.nan
2139 2125 tau_acf[Nan1,Nan2] = numpy.nan
2140 2126 tau = numpy.vstack((tau_ccf,tau_acf))
2141 2127
2142 2128 return tau
2143 2129
2144 2130 def __calculateLag1Phase(self, data, lagTRange):
2145 2131 data1 = numpy.nanmean(data, axis = 0)
2146 2132 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
2147 2133
2148 2134 phase = numpy.angle(data1[lag1,:])
2149 2135
2150 2136 return phase
2151 2137
2152 2138 def fit_func( x, a0, a1, a2): #, a3, a4, a5):
2153 2139 z = (x - a1) / a2
2154 2140 y = a0 * numpy.exp(-z**2 / a2) #+ a3 + a4 * x + a5 * x**2
2155 2141 return y
2156 2142
2157 2143
2158 2144 class SpectralFitting(Operation):
2159 2145 '''
2160 2146 Function SpectralFitting()
2161 2147
2162 2148 Input:
2163 2149 Output:
2164 2150 Variables modified:
2165 2151 '''
2166 2152 def __calculateMoments(self,oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
2167 2153
2168 2154 if (nicoh is None): nicoh = 1
2169 2155 if (graph is None): graph = 0
2170 2156 if (smooth is None): smooth = 0
2171 2157 elif (smooth < 3): smooth = 0
2172 2158
2173 2159 if (type1 is None): type1 = 0
2174 2160 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
2175 2161 if (snrth is None): snrth = -3
2176 2162 if (dc is None): dc = 0
2177 2163 if (aliasing is None): aliasing = 0
2178 2164 if (oldfd is None): oldfd = 0
2179 2165 if (wwauto is None): wwauto = 0
2180 2166
2181 2167 if (n0 < 1.e-20): n0 = 1.e-20
2182 2168
2183 2169 freq = oldfreq
2184 2170 vec_power = numpy.zeros(oldspec.shape[1])
2185 2171 vec_fd = numpy.zeros(oldspec.shape[1])
2186 2172 vec_w = numpy.zeros(oldspec.shape[1])
2187 2173 vec_snr = numpy.zeros(oldspec.shape[1])
2188 2174
2189 2175 oldspec = numpy.ma.masked_invalid(oldspec)
2190 2176
2191 2177 for ind in range(oldspec.shape[1]):
2192 2178
2193 2179 spec = oldspec[:,ind]
2194 2180 aux = spec*fwindow
2195 2181 max_spec = aux.max()
2196 2182 m = list(aux).index(max_spec)
2197 2183
2198 2184 #Smooth
2199 2185 if (smooth == 0): spec2 = spec
2200 2186 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
2201 2187
2202 2188 # Moments calculation
2203 2189 bb = spec2[list(range(m,spec2.size))]
2204 2190 bb = (bb<n0).nonzero()
2205 2191 bb = bb[0]
2206 2192
2207 2193 ss = spec2[list(range(0,m + 1))]
2208 2194 ss = (ss<n0).nonzero()
2209 2195 ss = ss[0]
2210 2196
2211 2197 if (bb.size == 0):
2212 2198 bb0 = spec.size - 1 - m
2213 2199 else:
2214 2200 bb0 = bb[0] - 1
2215 2201 if (bb0 < 0):
2216 2202 bb0 = 0
2217 2203
2218 2204 if (ss.size == 0): ss1 = 1
2219 2205 else: ss1 = max(ss) + 1
2220 2206
2221 2207 if (ss1 > m): ss1 = m
2222 2208
2223 2209 valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1
2224 2210 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
2225 2211 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
2226 2212 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
2227 2213 snr = (spec2.mean()-n0)/n0
2228 2214
2229 2215 if (snr < 1.e-20) :
2230 2216 snr = 1.e-20
2231 2217
2232 2218 vec_power[ind] = power
2233 2219 vec_fd[ind] = fd
2234 2220 vec_w[ind] = w
2235 2221 vec_snr[ind] = snr
2236 2222
2237 2223 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
2238 2224 return moments
2239 2225
2240 2226 #def __DiffCoherent(self,snrth, spectra, cspectra, nProf, heights,nChan, nHei, nPairs, channels, noise, crosspairs):
2241 2227 def __DiffCoherent(self, spectra, cspectra, dataOut, noise, snrth, coh_th, hei_th):
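# Separates coherent from incoherent echoes per cross-pair: heights where either
# channel exceeds the SNR threshold, or where the pair coherence exceeds the
# height-dependent coh_th levels, are flagged as coherent; the remaining heights
# are accumulated as incoherent spectra/cross-spectra, and both populations
# (plus the per-channel averaging flags) are returned.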
2242 2228
2243 2229 import matplotlib.pyplot as plt
2244 2230 nProf = dataOut.nProfiles
2245 2231 heights = dataOut.heightList
2246 2232 nHei = len(heights)
2247 2233 channels = dataOut.channelList
2248 2234 nChan = len(channels)
2249 2235 crosspairs = dataOut.groupList
2250 2236 nPairs = len(crosspairs)
2251 2237 #Separate incoherent from coherent spectra (SNR > 20 dB)
2252 2238 snr_th = 10**(snrth/10.0)
2253 2239 my_incoh_spectra = numpy.zeros([nChan, nProf,nHei], dtype='float')
2254 2240 my_incoh_cspectra = numpy.zeros([nPairs,nProf, nHei], dtype='complex')
2255 2241 my_incoh_aver = numpy.zeros([nChan, nHei])
2256 2242 my_coh_aver = numpy.zeros([nChan, nHei])
2257 2243
2258 2244 coh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
2259 2245 coh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
2260 2246 coh_aver = numpy.zeros([nChan, nHei])
2261 2247
2262 2248 incoh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
2263 2249 incoh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
2264 2250 incoh_aver = numpy.zeros([nChan, nHei])
2265 2251 power = numpy.sum(spectra, axis=1)
2266 2252
2267 2253 if coh_th == None : coh_th = numpy.array([0.75,0.65,0.15]) # 0.65
2268 2254 if hei_th == None : hei_th = numpy.array([60,300,650])
2269 2255 for ic in range(2):
2270 2256 pair = crosspairs[ic]
2271 2257 #If the SNR is greater than the SNR threshold the data are taken as coherent
2272 2258 s_n0 = power[pair[0],:]/noise[pair[0]]
2273 2259 s_n1 = power[pair[1],:]/noise[pair[1]]
2274 2260
2275 2261 valid1 =(s_n0>=snr_th).nonzero()
2276 2262 valid2 = (s_n1>=snr_th).nonzero()
2277 2263 #valid = valid2 + valid1 #numpy.concatenate((valid1,valid2), axis=None)
2278 2264 valid1 = numpy.array(valid1[0])
2279 2265 valid2 = numpy.array(valid2[0])
2280 2266 valid = valid1
2281 2267 for iv in range(len(valid2)):
2282 2268 #for ivv in range(len(valid1)) :
2283 2269 indv = numpy.array((valid1 == valid2[iv]).nonzero())
2284 2270 if len(indv[0]) == 0 :
2285 2271 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
2286 2272 if len(valid)>0:
2287 2273 my_coh_aver[pair[0],valid]=1
2288 2274 my_coh_aver[pair[1],valid]=1
2289 2275 # if the coherence is greater than the coherence threshold the data are taken as coherent
2290 2276 #print my_coh_aver[0,:]
2291 2277 coh = numpy.squeeze(numpy.nansum(cspectra[ic,:,:], axis=0)/numpy.sqrt(numpy.nansum(spectra[pair[0],:,:], axis=0)*numpy.nansum(spectra[pair[1],:,:], axis=0)))
2292 2278 #print('coh',numpy.absolute(coh))
2293 2279 for ih in range(len(hei_th)):
2294 2280 hvalid = (heights>hei_th[ih]).nonzero()
2295 2281 hvalid = hvalid[0]
2296 2282 if len(hvalid)>0:
2297 2283 valid = (numpy.absolute(coh[hvalid])>coh_th[ih]).nonzero()
2298 2284 valid = valid[0]
2299 2285 #print('hvalid:',hvalid)
2300 2286 #print('valid', valid)
2301 2287 if len(valid)>0:
2302 2288 my_coh_aver[pair[0],hvalid[valid]] =1
2303 2289 my_coh_aver[pair[1],hvalid[valid]] =1
2304 2290
2305 2291 coh_echoes = (my_coh_aver[pair[0],:] == 1).nonzero()
2306 2292 incoh_echoes = (my_coh_aver[pair[0],:] != 1).nonzero()
2307 2293 incoh_echoes = incoh_echoes[0]
2308 2294 if len(incoh_echoes) > 0:
2309 2295 my_incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
2310 2296 my_incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
2311 2297 my_incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
2312 2298 my_incoh_aver[pair[0],incoh_echoes] = 1
2313 2299 my_incoh_aver[pair[1],incoh_echoes] = 1
2314 2300
2315 2301
2316 2302 for ic in range(2):
2317 2303 pair = crosspairs[ic]
2318 2304
2319 2305 valid1 =(my_coh_aver[pair[0],:]==1 ).nonzero()
2320 2306 valid2 = (my_coh_aver[pair[1],:]==1).nonzero()
2321 2307 valid1 = numpy.array(valid1[0])
2322 2308 valid2 = numpy.array(valid2[0])
2323 2309 valid = valid1
2324 2310 #print valid1 , valid2
2325 2311 for iv in range(len(valid2)):
2326 2312 #for ivv in range(len(valid1)) :
2327 2313 indv = numpy.array((valid1 == valid2[iv]).nonzero())
2328 2314 if len(indv[0]) == 0 :
2329 2315 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
2330 2316 #print valid
2331 2317 #valid = numpy.concatenate((valid1,valid2), axis=None)
2332 2318 valid1 =(my_coh_aver[pair[0],:] !=1 ).nonzero()
2333 2319 valid2 = (my_coh_aver[pair[1],:] !=1).nonzero()
2334 2320 valid1 = numpy.array(valid1[0])
2335 2321 valid2 = numpy.array(valid2[0])
2336 2322 incoh_echoes = valid1
2337 2323 #print valid1, valid2
2338 2324 #incoh_echoes= numpy.concatenate((valid1,valid2), axis=None)
2339 2325 for iv in range(len(valid2)):
2340 2326 #for ivv in range(len(valid1)) :
2341 2327 indv = numpy.array((valid1 == valid2[iv]).nonzero())
2342 2328 if len(indv[0]) == 0 :
2343 2329 incoh_echoes = numpy.concatenate(( incoh_echoes,valid2[iv]), axis=None)
2344 2330 #print incoh_echoes
2345 2331 if len(valid)>0:
2346 2332 #print pair
2347 2333 coh_spectra[pair[0],:,valid] = spectra[pair[0],:,valid]
2348 2334 coh_spectra[pair[1],:,valid] = spectra[pair[1],:,valid]
2349 2335 coh_cspectra[ic,:,valid] = cspectra[ic,:,valid]
2350 2336 coh_aver[pair[0],valid]=1
2351 2337 coh_aver[pair[1],valid]=1
2352 2338 if len(incoh_echoes)>0:
2353 2339 incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
2354 2340 incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
2355 2341 incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
2356 2342 incoh_aver[pair[0],incoh_echoes]=1
2357 2343 incoh_aver[pair[1],incoh_echoes]=1
2358 2344 #plt.imshow(spectra[0,:,:],vmin=20000000)
2359 2345 #plt.show()
2360 2346 #my_incoh_aver = my_incoh_aver+1
2361 2347
2362 2348 #spec = my_incoh_spectra.copy()
2363 2349 #cspec = my_incoh_cspectra.copy()
2364 2350 #print('######################', spec)
2365 2351 #print(self.numpy)
2366 2352 #return spec, cspec,coh_aver
2367 2353 return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver
2368 2354
2369 2355 def __CleanCoherent(self,snrth, spectra, cspectra, coh_aver,dataOut, noise,clean_coh_echoes,index):
2370 2356
2371 2357 import matplotlib.pyplot as plt
2372 2358 nProf = dataOut.nProfiles
2373 2359 heights = dataOut.heightList
2374 2360 nHei = len(heights)
2375 2361 channels = dataOut.channelList
2376 2362 nChan = len(channels)
2377 2363 crosspairs = dataOut.groupList
2378 2364 nPairs = len(crosspairs)
2379 2365
2380 2366 #data = dataOut.data_pre[0]
2381 2367 absc = dataOut.abscissaList[:-1]
2382 2368 #noise = dataOut.noise
2383 2369 #nChannel = data.shape[0]
2384 2370 data_param = numpy.zeros((nChan, 4, spectra.shape[2]))
2385 2371
2386 2372
2387 2373 #plt.plot(absc)
2388 2374 #plt.show()
2389 2375 clean_coh_spectra = spectra.copy()
2390 2376 clean_coh_cspectra = cspectra.copy()
2391 2377 clean_coh_aver = coh_aver.copy()
2392 2378
2393 2379 spwd_th=[10,6] #spwd_th[0] --> For satellites ; spwd_th[1] --> For special events like SUN.
2394 2380 coh_th = 0.75
2395 2381
2396 2382 rtime0 = [6,18] # period without ESF
2397 2383 rtime1 = [10.5,13.5] # period with expected high coherence and wide spectral width: SUN.
2398 2384
2399 2385 time = index*5./60
2400 2386 if clean_coh_echoes == 1 :
2401 2387 for ind in range(nChan):
2402 2388 data_param[ind,:,:] = self.__calculateMoments( spectra[ind,:,:] , absc , noise[ind] )
2403 2389 #print data_param[:,3]
2404 2390 spwd = data_param[:,3]
2405 2391 #print spwd.shape
2406 2392 # SPECB_JULIA,header=anal_header,jspectra=spectra,vel=velocities,hei=heights, num_aver=1, mode_fit=0,smoothing=smoothing,jvelr=velr,jspwd=spwd,jsnr=snr,jnoise=noise,jstdvnoise=stdvnoise
2407 2393 #spwd1=[ 1.65607, 1.43416, 0.500373, 0.208361, 0.000000, 26.7767, 22.5936, 26.7530, 20.6962, 29.1098, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 28.0300, 27.0511, 27.8810, 26.3126, 27.8445, 24.6181, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000]
2408 2394 #spwd=numpy.array([spwd1,spwd1,spwd1,spwd1])
2409 2395 #print spwd.shape, heights.shape,coh_aver.shape
2410 2396 # to obtain spwd
2411 2397 for ic in range(nPairs):
2412 2398 pair = crosspairs[ic]
2413 2399 coh = numpy.squeeze(numpy.sum(cspectra[ic,:,:], axis=1)/numpy.sqrt(numpy.sum(spectra[pair[0],:,:], axis=1)*numpy.sum(spectra[pair[1],:,:], axis=1)))
2414 2400 for ih in range(nHei) :
2415 2401 # Considering heights higher than 200km in order to avoid removing phenomena like EEJ.
2416 2402 if heights[ih] >= 200 and coh_aver[pair[0],ih] == 1 and coh_aver[pair[1],ih] == 1 :
2417 2403 # Checking coherence
2418 2404 if (numpy.abs(coh[ih]) <= coh_th) or (time >= rtime0[0] and time <= rtime0[1]) :
2419 2405 # Checking spectral widths
2420 2406 if (spwd[pair[0],ih] > spwd_th[0]) or (spwd[pair[1],ih] > spwd_th[0]) :
2421 2407 # satellite
2422 2408 clean_coh_spectra[pair,ih,:] = 0.0
2423 2409 clean_coh_cspectra[ic,ih,:] = 0.0
2424 2410 clean_coh_aver[pair,ih] = 0
2425 2411 else :
2426 2412 if ((spwd[pair[0],ih] < spwd_th[1]) or (spwd[pair[1],ih] < spwd_th[1])) :
2427 2413 # Special event such as the sun.
2428 2414 clean_coh_spectra[pair,ih,:] = 0.0
2429 2415 clean_coh_cspectra[ic,ih,:] = 0.0
2430 2416 clean_coh_aver[pair,ih] = 0
2431 2417
2432 2418 return clean_coh_spectra, clean_coh_cspectra, clean_coh_aver
2433 2419
2434 2420 isConfig = False
2435 2421 __dataReady = False
2436 2422 bloques = None
2437 2423 bloque0 = None
2438 2424
2439 2425 def __init__(self):
2440 2426 Operation.__init__(self)
2441 2427 self.i=0
2442 2428 self.isConfig = False
2443 2429
2444 2430
2445 2431 def setup(self,nChan,nProf,nHei,nBlocks):
2446 2432 self.__dataReady = False
2447 2433 self.bloques = numpy.zeros([2, nProf, nHei,nBlocks], dtype= complex)
2448 2434 self.bloque0 = numpy.zeros([nChan, nProf, nHei, nBlocks])
2449 2435
2450 2436 #def CleanRayleigh(self,dataOut,spectra,cspectra,out_spectra,out_cspectra,sat_spectra,sat_cspectra,crosspairs,heights, channels, nProf,nHei,nChan,nPairs,nIncohInt,nBlocks):
2451 2437 def CleanRayleigh(self,dataOut,spectra,cspectra,save_drifts):
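# Rayleigh-based cleaning: for every (pair, frequency, height) the distribution of
# 10*log10(|cross-spectra|) over the buffered blocks is fitted with a Gaussian
# (fit_func); blocks deviating more than factor_stdv standard deviations from the
# fitted mode are treated as coherent contamination (e.g. satellites), blanked with
# NaN, and the remaining blocks are averaged into out_spectra / out_cspectra.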
2452 2438 #import matplotlib.pyplot as plt
2453 2439 #for k in range(149):
2454 2440
2455 2441 # self.bloque0[:,:,:,k] = spectra[:,:,0:nHei]
2456 2442 # self.bloques[:,:,:,k] = cspectra[:,:,0:nHei]
2457 2443 #if self.i==nBlocks:
2458 2444 # self.i==0
2459 2445 rfunc = cspectra.copy() #self.bloques
2460 2446 n_funct = len(rfunc[0,:,0,0])
2461 2447 val_spc = spectra*0.0 #self.bloque0*0.0
2462 2448 val_cspc = cspectra*0.0 #self.bloques*0.0
2463 2449 in_sat_spectra = spectra.copy() #self.bloque0
2464 2450 in_sat_cspectra = cspectra.copy() #self.bloques
2465 2451
2466 2452 #print( rfunc.shape)
2467 2453 min_hei = 200
2468 2454 nProf = dataOut.nProfiles
2469 2455 heights = dataOut.heightList
2470 2456 nHei = len(heights)
2471 2457 channels = dataOut.channelList
2472 2458 nChan = len(channels)
2473 2459 crosspairs = dataOut.groupList
2474 2460 nPairs = len(crosspairs)
2475 2461 hval=(heights >= min_hei).nonzero()
2476 2462 ih=hval[0]
2477 2463 #print numpy.absolute(rfunc[:,0,0,14])
2478 2464 for ih in range(hval[0][0],nHei):
2479 2465 for ifreq in range(nProf):
2480 2466 for ii in range(n_funct):
2481 2467
2482 2468 func2clean = 10*numpy.log10(numpy.absolute(rfunc[:,ii,ifreq,ih]))
2483 2469 #print numpy.amin(func2clean)
2484 2470 val = (numpy.isfinite(func2clean)==True).nonzero()
2485 2471 if len(val)>0:
2486 2472 min_val = numpy.around(numpy.amin(func2clean)-2) #> (-40)
2487 2473 if min_val <= -40 : min_val = -40
2488 2474 max_val = numpy.around(numpy.amax(func2clean)+2) #< 200
2489 2475 if max_val >= 200 : max_val = 200
2490 2476 #print min_val, max_val
2491 2477 step = 1
2492 2478 #Getting bins and the histogram
2493 2479 x_dist = min_val + numpy.arange(1 + ((max_val-(min_val))/step))*step
2494 2480 y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step))
2495 2481 mean = numpy.sum(x_dist * y_dist) / numpy.sum(y_dist)
2496 2482 sigma = numpy.sqrt(numpy.sum(y_dist * (x_dist - mean)**2) / numpy.sum(y_dist))
2497 2483 parg = [numpy.amax(y_dist),mean,sigma]
2498 2484 try :
2499 2485 gauss_fit, covariance = curve_fit(fit_func, x_dist, y_dist,p0=parg)
2500 2486 mode = gauss_fit[1]
2501 2487 stdv = gauss_fit[2]
2502 2488 except:
2503 2489 mode = mean
2504 2490 stdv = sigma
2505 2491 # if ih == 14 and ii == 0 and ifreq ==0 :
2506 2492 # print x_dist.shape, y_dist.shape
2507 2493 # print x_dist, y_dist
2508 2494 # print min_val, max_val, binstep
2509 2495 # print func2clean
2510 2496 # print mean,sigma
2511 2497 # mean1,std = norm.fit(y_dist)
2512 2498 # print mean1, std, gauss_fit
2513 2499 # print fit_func(x_dist,gauss_fit[0],gauss_fit[1],gauss_fit[2])
2514 2500 # 7.84616 53.9307 3.61863
2515 2501 #stdv = 3.61863 # 2.99089
2516 2502 #mode = 53.9307 #7.79008
2517 2503
2518 2504 #Removing echoes greater than mode + 3*stdv
2519 2505 factor_stdv = 2.5
2520 2506 noval = (abs(func2clean - mode)>=(factor_stdv*stdv)).nonzero()
2521 2507
2522 2508 if len(noval[0]) > 0:
2523 2509 novall = ((func2clean - mode) >= (factor_stdv*stdv)).nonzero()
2524 2510 cross_pairs = crosspairs[ii]
2525 2511 #Getting coherent echoes which are removed.
2526 2512 if len(novall[0]) > 0:
2527 2513 #val_spc[(0,1),novall[a],ih] = 1
2528 2514 #val_spc[,(2,3),novall[a],ih] = 1
2529 2515 val_spc[novall[0],cross_pairs[0],ifreq,ih] = 1
2530 2516 val_spc[novall[0],cross_pairs[1],ifreq,ih] = 1
2531 2517 val_cspc[novall[0],ii,ifreq,ih] = 1
2532 2518 #print("OUT NOVALL 1")
2533 2519 #Removing coherent from ISR data
2534 2520 # if ih == 17 and ii == 0 and ifreq ==0 :
2535 2521 # print spectra[:,cross_pairs[0],ifreq,ih]
2536 2522 spectra[noval,cross_pairs[0],ifreq,ih] = numpy.nan
2537 2523 spectra[noval,cross_pairs[1],ifreq,ih] = numpy.nan
2538 2524 cspectra[noval,ii,ifreq,ih] = numpy.nan
2539 2525 # if ih == 17 and ii == 0 and ifreq ==0 :
2540 2526 # print spectra[:,cross_pairs[0],ifreq,ih]
2541 2527 # print noval, len(noval[0])
2542 2528 # print novall, len(novall[0])
2543 2529 # print factor_stdv*stdv
2544 2530 # print func2clean-mode
2545 2531 # print val_spc[:,cross_pairs[0],ifreq,ih]
2546 2532 # print spectra[:,cross_pairs[0],ifreq,ih]
2547 2533 #not executed here; only applies for savedrifts > 2
2548 2534 ''' channels = channels
2549 2535 cross_pairs = cross_pairs
2550 2536 #print("OUT NOVALL 2")
2551 2537
2552 2538 vcross0 = (cross_pairs[0] == channels[ii]).nonzero()
2553 2539 vcross1 = (cross_pairs[1] == channels[ii]).nonzero()
2554 2540 vcross = numpy.concatenate((vcross0,vcross1),axis=None)
2555 2541 #print('vcros =', vcross)
2556 2542
2557 2543 #Getting coherent echoes which are removed.
2558 2544 if len(novall) > 0:
2559 2545 #val_spc[novall,ii,ifreq,ih] = 1
2560 2546 val_spc[ii,ifreq,ih,novall] = 1
2561 2547 if len(vcross) > 0:
2562 2548 val_cspc[vcross,ifreq,ih,novall] = 1
2563 2549
2564 2550 #Removing coherent from ISR data.
2565 2551 self.bloque0[ii,ifreq,ih,noval] = numpy.nan
2566 2552 if len(vcross) > 0:
2567 2553 self.bloques[vcross,ifreq,ih,noval] = numpy.nan
2568 2554 '''
2569 2555 #Getting average of the spectra and cross-spectra from incoherent echoes.
2570 2556 out_spectra = numpy.zeros([nChan,nProf,nHei], dtype=float) #+numpy.nan
2571 2557 out_cspectra = numpy.zeros([nPairs,nProf,nHei], dtype=complex) #+numpy.nan
2572 2558 for ih in range(nHei):
2573 2559 for ifreq in range(nProf):
2574 2560 for ich in range(nChan):
2575 2561 tmp = spectra[:,ich,ifreq,ih]
2576 2562 valid = (numpy.isfinite(tmp[:])==True).nonzero()
2577 2563 # if ich == 0 and ifreq == 0 and ih == 17 :
2578 2564 # print tmp
2579 2565 # print valid
2580 2566 # print len(valid[0])
2581 2567 #print('TMP',tmp)
2582 2568 if len(valid[0]) >0 :
2583 2569 out_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
2584 2570 #for icr in range(nPairs):
2585 2571 for icr in range(nPairs):
2586 2572 tmp = numpy.squeeze(cspectra[:,icr,ifreq,ih])
2587 2573 valid = (numpy.isfinite(tmp)==True).nonzero()
2588 2574 if len(valid[0]) > 0:
2589 2575 out_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
2590 2576 # print('##########################################################')
2591 2577 #Removing fake coherent echoes (at least 4 points around the point)
2592 2578
2593 2579 val_spectra = numpy.sum(val_spc,0)
2594 2580 val_cspectra = numpy.sum(val_cspc,0)
2595 2581
2596 2582 val_spectra = self.REM_ISOLATED_POINTS(val_spectra,4)
2597 2583 val_cspectra = self.REM_ISOLATED_POINTS(val_cspectra,4)
2598 2584
2599 2585 for i in range(nChan):
2600 2586 for j in range(nProf):
2601 2587 for k in range(nHei):
2602 2588 if numpy.isfinite(val_spectra[i,j,k]) and val_spectra[i,j,k] < 1 :
2603 2589 val_spc[:,i,j,k] = 0.0
2604 2590 for i in range(nPairs):
2605 2591 for j in range(nProf):
2606 2592 for k in range(nHei):
2607 2593 if numpy.isfinite(val_cspectra[i,j,k]) and val_cspectra[i,j,k] < 1 :
2608 2594 val_cspc[:,i,j,k] = 0.0
2609 2595 # val_spc = numpy.reshape(val_spc, (len(spectra[:,0,0,0]),nProf*nHei*nChan))
2610 2596 # if numpy.isfinite(val_spectra)==str(True):
2611 2597 # noval = (val_spectra<1).nonzero()
2612 2598 # if len(noval) > 0:
2613 2599 # val_spc[:,noval] = 0.0
2614 2600 # val_spc = numpy.reshape(val_spc, (149,nChan,nProf,nHei))
2615 2601
2616 2602 #val_cspc = numpy.reshape(val_spc, (149,nChan*nHei*nProf))
2617 2603 #if numpy.isfinite(val_cspectra)==str(True):
2618 2604 # noval = (val_cspectra<1).nonzero()
2619 2605 # if len(noval) > 0:
2620 2606 # val_cspc[:,noval] = 0.0
2621 2607 # val_cspc = numpy.reshape(val_cspc, (149,nChan,nProf,nHei))
2622 2608
2623 2609 tmp_sat_spectra = spectra.copy()
2624 2610 tmp_sat_spectra = tmp_sat_spectra*numpy.nan
2625 2611 tmp_sat_cspectra = cspectra.copy()
2626 2612 tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan
2627 2613
2628 2614 # fig = plt.figure(figsize=(6,5))
2629 2615 # left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
2630 2616 # ax = fig.add_axes([left, bottom, width, height])
2631 2617 # cp = ax.contour(10*numpy.log10(numpy.absolute(spectra[0,0,:,:])))
2632 2618 # ax.clabel(cp, inline=True,fontsize=10)
2633 2619 # plt.show()
2634 2620
2635 2621 val = (val_spc > 0).nonzero()
2636 2622 if len(val[0]) > 0:
2637 2623 tmp_sat_spectra[val] = in_sat_spectra[val]
2638 2624
2639 2625 val = (val_cspc > 0).nonzero()
2640 2626 if len(val[0]) > 0:
2641 2627 tmp_sat_cspectra[val] = in_sat_cspectra[val]
2642 2628
2643 2629 #Getting average of the spectra and cross-spectra from incoherent echoes.
2644 2630 sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
2645 2631 sat_cspectra = numpy.zeros((nPairs,nProf,nHei), dtype=complex)
2646 2632 for ih in range(nHei):
2647 2633 for ifreq in range(nProf):
2648 2634 for ich in range(nChan):
2649 2635 tmp = numpy.squeeze(tmp_sat_spectra[:,ich,ifreq,ih])
2650 2636 valid = (numpy.isfinite(tmp)).nonzero()
2651 2637 if len(valid[0]) > 0:
2652 2638 sat_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
2653 2639
2654 2640 for icr in range(nPairs):
2655 2641 tmp = numpy.squeeze(tmp_sat_cspectra[:,icr,ifreq,ih])
2656 2642 valid = (numpy.isfinite(tmp)).nonzero()
2657 2643 if len(valid[0]) > 0:
2658 2644 sat_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
2659 2645 #self.__dataReady= True
2660 2646 #sat_spectra, sat_cspectra= sat_spectra, sat_cspectra
2661 2647 #if not self.__dataReady:
2662 2648 #return None, None
2663 2649 return out_spectra, out_cspectra,sat_spectra,sat_cspectra
2664 2650 def REM_ISOLATED_POINTS(self,array,rth):
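# Removes isolated flagged points in each (profile x height) plane: points with
# fewer than 4 flagged points (itself included) within a radius rth are discarded,
# so that lone detections are dropped and only clustered coherent echoes survive.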
2665 2651 # import matplotlib.pyplot as plt
2666 2652 if rth == None : rth = 4
2667 2653
2668 2654 num_prof = len(array[0,:,0])
2669 2655 num_hei = len(array[0,0,:])
2670 2656 n2d = len(array[:,0,0])
2671 2657
2672 2658 for ii in range(n2d) :
2673 2659 #print ii,n2d
2674 2660 tmp = array[ii,:,:]
2675 2661 #print tmp.shape, array[ii,101,:],array[ii,102,:]
2676 2662
2677 2663 # fig = plt.figure(figsize=(6,5))
2678 2664 # left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
2679 2665 # ax = fig.add_axes([left, bottom, width, height])
2680 2666 # x = range(num_prof)
2681 2667 # y = range(num_hei)
2682 2668 # cp = ax.contour(y,x,tmp)
2683 2669 # ax.clabel(cp, inline=True,fontsize=10)
2684 2670 # plt.show()
2685 2671
2686 2672 #indxs = WHERE(FINITE(tmp) AND tmp GT 0,cindxs)
2687 2673 tmp = numpy.reshape(tmp,num_prof*num_hei)
2688 2674 indxs1 = (numpy.isfinite(tmp)==True).nonzero()
2689 2675 indxs2 = (tmp > 0).nonzero()
2690 2676
2691 2677 indxs1 = (indxs1[0])
2692 2678 indxs2 = indxs2[0]
2693 2679 #indxs1 = numpy.array(indxs1[0])
2694 2680 #indxs2 = numpy.array(indxs2[0])
2695 2681 indxs = None
2696 2682 #print indxs1 , indxs2
2697 2683 for iv in range(len(indxs2)):
2698 2684 indv = numpy.array((indxs1 == indxs2[iv]).nonzero())
2699 2685 #print len(indxs2), indv
2700 2686 if len(indv[0]) > 0 :
2701 2687 indxs = numpy.concatenate((indxs,indxs2[iv]), axis=None)
2702 2688 # print indxs
2703 2689 indxs = indxs[1:]
2704 2690 #print indxs, len(indxs)
2705 2691 if len(indxs) < 4 :
2706 2692 array[ii,:,:] = 0.
2707 2693 return array
2708 2694
2709 2695 xpos = numpy.mod(indxs ,num_hei)
2710 2696 ypos = (indxs / num_hei)
2711 2697 sx = numpy.argsort(xpos) # Ordering with respect to "x" (time)
2712 2698 #print sx
2713 2699 xpos = xpos[sx]
2714 2700 ypos = ypos[sx]
2715 2701
2716 2702 # *********************************** Cleaning isolated points **********************************
2717 2703 ic = 0
2718 2704 while True :
2719 2705 r = numpy.sqrt(list(numpy.power((xpos[ic]-xpos),2)+ numpy.power((ypos[ic]-ypos),2)))
2720 2706 #no_coh = WHERE(FINITE(r) AND (r LE rth),cno_coh)
2721 2707 #plt.plot(r)
2722 2708 #plt.show()
2723 2709 no_coh1 = (numpy.isfinite(r)==True).nonzero()
2724 2710 no_coh2 = (r <= rth).nonzero()
2725 2711 #print r, no_coh1, no_coh2
2726 2712 no_coh1 = numpy.array(no_coh1[0])
2727 2713 no_coh2 = numpy.array(no_coh2[0])
2728 2714 no_coh = None
2729 2715 #print valid1 , valid2
2730 2716 for iv in range(len(no_coh2)):
2731 2717 indv = numpy.array((no_coh1 == no_coh2[iv]).nonzero())
2732 2718 if len(indv[0]) > 0 :
2733 2719 no_coh = numpy.concatenate((no_coh,no_coh2[iv]), axis=None)
2734 2720 no_coh = no_coh[1:]
2735 2721 #print len(no_coh), no_coh
2736 2722 if len(no_coh) < 4 :
2737 2723 #print xpos[ic], ypos[ic], ic
2738 2724 # plt.plot(r)
2739 2725 # plt.show()
2740 2726 xpos[ic] = numpy.nan
2741 2727 ypos[ic] = numpy.nan
2742 2728
2743 2729 ic = ic + 1
2744 2730 if (ic == len(indxs)) :
2745 2731 break
2746 2732 #print( xpos, ypos)
2747 2733
2748 2734 indxs = (numpy.isfinite(list(xpos))==True).nonzero()
2749 2735 #print indxs[0]
2750 2736 if len(indxs[0]) < 4 :
2751 2737 array[ii,:,:] = 0.
2752 2738 return array
2753 2739
2754 2740 xpos = xpos[indxs[0]]
2755 2741 ypos = ypos[indxs[0]]
2756 2742 for i in range(0,len(ypos)):
2757 2743 ypos[i]=int(ypos[i])
2758 2744 junk = tmp
2759 2745 tmp = junk*0.0
2760 2746
2761 2747 tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))]
2762 2748 array[ii,:,:] = numpy.reshape(tmp,(num_prof,num_hei))
2763 2749
2764 2750 #print array.shape
2765 2751 #tmp = numpy.reshape(tmp,(num_prof,num_hei))
2766 2752 #print tmp.shape
2767 2753
2768 2754 # fig = plt.figure(figsize=(6,5))
2769 2755 # left, bottom, width, height = 0.1, 0.1, 0.8, 0.8
2770 2756 # ax = fig.add_axes([left, bottom, width, height])
2771 2757 # x = range(num_prof)
2772 2758 # y = range(num_hei)
2773 2759 # cp = ax.contour(y,x,array[ii,:,:])
2774 2760 # ax.clabel(cp, inline=True,fontsize=10)
2775 2761 # plt.show()
2776 2762 return array
2777 2763 def moments(self,doppler,yarray,npoints):
2778 2764 ytemp = yarray
2779 2765 #val = WHERE(ytemp GT 0,cval)
2780 2766 #if cval == 0 : val = range(npoints-1)
2781 2767 val = (ytemp > 0).nonzero()
2782 2768 val = val[0]
2783 2769 #print('hvalid:',hvalid)
2784 2770 #print('valid', valid)
2785 2771 if len(val) == 0 : val = range(npoints-1)
2786 2772
2787 2773 ynew = 0.5*(ytemp[val[0]]+ytemp[val[len(val)-1]])
2788 2774 ytemp[len(ytemp):] = [ynew]
2789 2775
2790 2776 index = 0
2791 2777 index = numpy.argmax(ytemp)
2792 2778 ytemp = numpy.roll(ytemp,int(npoints/2)-1-index)
2793 2779 ytemp = ytemp[0:npoints-1]
2794 2780
2795 2781 fmom = numpy.sum(doppler*ytemp)/numpy.sum(ytemp)+(index-(npoints/2-1))*numpy.abs(doppler[1]-doppler[0])
2796 2782 smom = numpy.sum(doppler*doppler*ytemp)/numpy.sum(ytemp)
2797 2783 return [fmom,numpy.sqrt(smom)]
2798 2784 # **********************************************************************************************
2799 2785 index = 0
2800 2786 fint = 0
2801 2787 buffer = 0
2802 2788 buffer2 = 0
2803 2789 buffer3 = 0
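# run() accumulates the incoming spectra, cross-spectra and noise in these buffers and processes the
# accumulated block every 5 minutes: fint marks that a 5-minute boundary was reached and index counts
# the blocks gathered since the last processing.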
2804 2790 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None):
2805 2791 nChannels = dataOut.nChannels
2806 2792 nHeights= dataOut.heightList.size
2807 2793 nProf = dataOut.nProfiles
2808 2794 tini=time.localtime(dataOut.utctime)
2809 2795 if (tini.tm_min % 5) == 0 and (tini.tm_sec < 5 and self.fint==0):
2810 2796 # print tini.tm_min
2811 2797 self.index = 0
2812 2798 jspc = self.buffer
2813 2799 jcspc = self.buffer2
2814 2800 jnoise = self.buffer3
2815 2801 self.buffer = dataOut.data_spc
2816 2802 self.buffer2 = dataOut.data_cspc
2817 2803 self.buffer3 = dataOut.noise
2818 2804 self.fint = 1
2819 2805 if numpy.any(jspc) :
2820 2806 jspc= numpy.reshape(jspc,(int(len(jspc)/4),nChannels,nProf,nHeights))
2821 2807 jcspc= numpy.reshape(jcspc,(int(len(jcspc)/2),2,nProf,nHeights))
2822 2808 jnoise= numpy.reshape(jnoise,(int(len(jnoise)/4),nChannels))
2823 2809 else:
2824 2810 dataOut.flagNoData = True
2825 2811 return dataOut
2826 2812 else :
2827 2813 if (tini.tm_min % 5) == 0 : self.fint = 1
2828 2814 else : self.fint = 0
2829 2815 self.index += 1
2830 2816 if numpy.any(self.buffer):
2831 2817 self.buffer = numpy.concatenate((self.buffer,dataOut.data_spc), axis=0)
2832 2818 self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0)
2833 2819 self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0)
2834 2820 else:
2835 2821 self.buffer = dataOut.data_spc
2836 2822 self.buffer2 = dataOut.data_cspc
2837 2823 self.buffer3 = dataOut.noise
2838 2824 dataOut.flagNoData = True
2839 2825 return dataOut
2840 2826 if path != None:
2841 2827 sys.path.append(path)
2842 2828 self.library = importlib.import_module(file)
2843 2829
2844 2830 #To be inserted as a parameter
2845 2831 groupArray = numpy.array(groupList)
2846 2832 #groupArray = numpy.array([[0,1],[2,3]])
2847 2833 dataOut.groupList = groupArray
2848 2834
2849 2835 nGroups = groupArray.shape[0]
2850 2836 nChannels = dataOut.nChannels
2851 2837 nHeights = dataOut.heightList.size
2852 2838
2853 2839 #Parameters Array
2854 2840 dataOut.data_param = None
2855 2841 dataOut.data_paramC = None
2856 2842
2857 2843 #Set constants
2858 2844 constants = self.library.setConstants(dataOut)
2859 2845 dataOut.constants = constants
2860 2846 M = dataOut.normFactor
2861 2847 N = dataOut.nFFTPoints
2862 2848 ippSeconds = dataOut.ippSeconds
2863 2849 K = dataOut.nIncohInt
2864 2850 pairsArray = numpy.array(dataOut.pairsList)
2865 2851
2866 2852 snrth= 20
2867 2853 spectra = dataOut.data_spc
2868 2854 cspectra = dataOut.data_cspc
2869 2855 nProf = dataOut.nProfiles
2870 2856 heights = dataOut.heightList
2871 2857 nHei = len(heights)
2872 2858 channels = dataOut.channelList
2873 2859 nChan = len(channels)
2874 2860 nIncohInt = dataOut.nIncohInt
2875 2861 crosspairs = dataOut.groupList
2876 2862 noise = dataOut.noise
2877 2863 jnoise = jnoise/N
2878 2864 noise = numpy.nansum(jnoise,axis=0)#/len(jnoise)
2879 2865 power = numpy.sum(spectra, axis=1)
2880 2866 nPairs = len(crosspairs)
2881 2867 absc = dataOut.abscissaList[:-1]
2882 2868
2883 2869 if not self.isConfig:
2884 2870 self.isConfig = True
2885 2871
2886 2872 index = tini.tm_hour*12+tini.tm_min/5
2887 2873 jspc = jspc/N/N
2888 2874 jcspc = jcspc/N/N
2889 2875 tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2)
2890 2876 jspectra = tmp_spectra*len(jspc[:,0,0,0])
2891 2877 jcspectra = tmp_cspectra*len(jspc[:,0,0,0])
2892 2878 my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, snrth, None, None)
2893 2879 clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index)
2894 2880 dataOut.data_spc = incoh_spectra
2895 2881 dataOut.data_cspc = incoh_cspectra
2896 2882
2897 2883 clean_num_aver = incoh_aver*len(jspc[:,0,0,0])
2898 2884 coh_num_aver = clean_coh_aver*len(jspc[:,0,0,0])
2899 2885 #List of possible combinations
2900 2886 listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2)) #materialize so the combinations can be iterated more than once
2901 2887 indCross = numpy.zeros(len(listComb), dtype = 'int')
2902 2888
2903 2889 if getSNR:
2904 2890 listChannels = groupArray.reshape((groupArray.size))
2905 2891 listChannels.sort()
2906 2892 dataOut.data_SNR = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise[listChannels])
2907 2893 if dataOut.data_paramC is None:
2908 2894 dataOut.data_paramC = numpy.zeros((nGroups*4, nHeights,2))*numpy.nan
2909 2895 for i in range(nGroups):
2910 2896 coord = groupArray[i,:]
2911 2897 #Input data array
2912 2898 data = dataOut.data_spc[coord,:,:]/(M*N)
2913 2899 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
2914 2900
2915 2901 #Cross Spectra data array for Covariance Matrices
2916 2902 ind = 0
2917 2903 for pairs in listComb:
2918 2904 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
2919 2905 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
2920 2906 ind += 1
2921 2907 dataCross = dataOut.data_cspc[indCross,:,:]/(M*N)
2922 2908 dataCross = dataCross**2
2923 2909 nhei = nHeights
2924 2910 poweri = numpy.sum(dataOut.data_spc[:,1:nProf-0,:],axis=1)/clean_num_aver[:,:]
2925 2911 if i == 0 : my_noises = numpy.zeros(4,dtype=float) #FLTARR(4)
2926 2912 n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(nProf-1)
2927 2913 n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(nProf-1)
2928 2914 n0 = n0i
2929 2915 n1= n1i
2930 2916 my_noises[2*i+0] = n0
2931 2917 my_noises[2*i+1] = n1
2932 2918 snrth = -16.0
2933 2919 snrth = 10**(snrth/10.0)
2934 2920
2935 2921 for h in range(nHeights):
2936 2922 d = data[:,h]
2937 2923 smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:]
2938 2924 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
2939 2925 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
2940 2926 signal0 = signalpn0-n0
2941 2927 signal1 = signalpn1-n1
2942 2928 snr0 = numpy.sum(signal0/n0)/(nProf-1)
2943 2929 snr1 = numpy.sum(signal1/n1)/(nProf-1)
2944 2930 if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 :
2945 2931 #Covariance Matrix
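# The covariance matrix D carries the squared auto-spectra on its diagonal and the squared
# cross-spectra in the corresponding off-diagonal blocks; the Cholesky factor L of D^-1 whitens
# the data (dp = L.T d) before the least-squares fit of the model spectrum below.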
2946 2932 D = numpy.diag(d**2)
2947 2933 ind = 0
2948 2934 for pairs in listComb:
2949 2935 #Coordinates in Covariance Matrix
2950 2936 x = pairs[0]
2951 2937 y = pairs[1]
2952 2938 #Channel Index
2953 2939 S12 = dataCross[ind,:,h]
2954 2940 D12 = numpy.diag(S12)
2955 2941 #Completing Covariance Matrix with Cross Spectras
2956 2942 #Completing Covariance Matrix with Cross Spectra
2957 2943 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
2958 2944 ind += 1
2959 2945 diagD = numpy.zeros(256)
2960 2946 if h == 17 :
2961 2947 for ii in range(256): diagD[ii] = D[ii,ii]
2962 2948 #Dinv=numpy.linalg.inv(D)
2963 2949 #L=numpy.linalg.cholesky(Dinv)
2964 2950 try:
2965 2951 Dinv=numpy.linalg.inv(D)
2966 2952 L=numpy.linalg.cholesky(Dinv)
2967 2953 except:
2968 2954 Dinv = D*numpy.nan
2969 2955 L= D*numpy.nan
2970 2956 LT=L.T
2971 2957
2972 2958 dp = numpy.dot(LT,d)
2973 2959
2974 2960 #Initial values
2975 2961 data_spc = dataOut.data_spc[coord,:,h]
2976 2962
2977 2963 if (h>0)and(error1[3]<5):
2978 2964 p0 = dataOut.data_param[i,:,h-1]
2979 2965 else:
2980 2966 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))# sin el i(data_spc, constants, i)
2981 2967 try:
2982 2968 #Least Squares
2983 2969 #print (dp,LT,constants)
2984 2970 #value =self.__residFunction(p0,dp,LT,constants)
2985 2971 #print ("valueREADY",value.shape, type(value))
2986 2972 #optimize.leastsq(value)
2987 2973 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
2988 2974 #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
2989 2975 #Chi square error
2990 2976 #print(minp,covp.infodict,mesg,ier)
2991 2977 #print("REALIZA OPTIMIZ")
2992 2978 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
2993 2979 #Error with Jacobian
2994 2980 error1 = self.library.errorFunction(minp,constants,LT)
2995 2981 # print self.__residFunction(p0,dp,LT, constants)
2996 2982 # print infodict['fvec']
2997 2983 # print self.__residFunction(minp,dp,LT,constants)
2998 2984
2999 2985 except:
3000 2986 minp = p0*numpy.nan
3001 2987 error0 = numpy.nan
3002 2988 error1 = p0*numpy.nan
3003 2989 #print ("EXCEPT 0000000000")
3004 2990 # s_sq = (self.__residFunction(minp,dp,LT,constants)).sum()/(len(dp)-len(p0))
3005 2991 # covp = covp*s_sq
3006 2992 # #print("TRY___________________________________________1")
3007 2993 # error = []
3008 2994 # for ip in range(len(minp)):
3009 2995 # try:
3010 2996 # error.append(numpy.absolute(covp[ip][ip])**0.5)
3011 2997 # except:
3012 2998 # error.append( 0.00 )
3013 2999 else :
3014 3000 data_spc = dataOut.data_spc[coord,:,h]
3015 3001 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))
3016 3002 minp = p0*numpy.nan
3017 3003 error0 = numpy.nan
3018 3004 error1 = p0*numpy.nan
3019 3005 #Save
3020 3006 if dataOut.data_param is None:
3021 3007 dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
3022 3008 dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
3023 3009
3024 3010 dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
3025 3011 dataOut.data_param[i,:,h] = minp
3026 3012
3027 3013 for ht in range(nHeights-1) :
3028 3014 smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam]
3029 3015 dataOut.data_paramC[4*i,ht,1] = smooth
3030 3016 signalpn0 = (coh_spectra[i*2 ,1:(nProf-0),ht])/smooth #coh_spectra
3031 3017 signalpn1 = (coh_spectra[i*2+1,1:(nProf-0),ht])/smooth
3032 3018
3033 3019 #val0 = WHERE(signalpn0 > 0,cval0)
3034 3020 val0 = (signalpn0 > 0).nonzero()
3035 3021 val0 = val0[0]
3036 3022 #print('hvalid:',hvalid)
3037 3023 #print('valid', valid)
3038 3024 if len(val0) == 0 : val0_npoints = nProf
3039 3025 else : val0_npoints = len(val0)
3040 3026
3041 3027 #val1 = WHERE(signalpn1 > 0,cval1)
3042 3028 val1 = (signalpn1 > 0).nonzero()
3043 3029 val1 = val1[0]
3044 3030 if len(val1) == 0 : val1_npoints = nProf
3045 3031 else : val1_npoints = len(val1)
3046 3032
3047 3033 dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0
3048 3034 dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1
3049 3035
3050 3036 signal0 = (signalpn0-n0) # > 0
3051 3037 vali = (signal0 < 0).nonzero()
3052 3038 vali = vali[0]
3053 3039 if len(vali) > 0 : signal0[vali] = 0
3054 3040 signal1 = (signalpn1-n1) #> 0
3055 3041 vali = (signal1 < 0).nonzero()
3056 3042 vali = vali[0]
3057 3043 if len(vali) > 0 : signal1[vali] = 0
3058 3044 snr0 = numpy.sum(signal0/n0)/(nProf-1)
3059 3045 snr1 = numpy.sum(signal1/n1)/(nProf-1)
3060 3046 doppler = absc[1:]
3061 3047 if snr0 >= snrth and snr1 >= snrth and smooth :
3062 3048 signalpn0_n0 = signalpn0
3063 3049 signalpn0_n0[val0] = signalpn0[val0] - n0
3064 3050 mom0 = self.moments(doppler,signalpn0_n0,nProf) #use the noise-corrected spectrum, matching the beam-1 computation below
3065 3051 # sigtmp= numpy.transpose(numpy.tile(signalpn0, [4,1]))
3066 3052 # momt= self.__calculateMoments( sigtmp, doppler , n0 )
3067 3053 signalpn1_n1 = signalpn1
3068 3054 signalpn1_n1[val1] = signalpn1[val1] - n1
3069 3055 mom1 = self.moments(doppler,signalpn1_n1,nProf)
3070 3056 dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2.
3071 3057 dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2.
3072 3058 # if graph == 1 :
3073 3059 # window, 13
3074 3060 # plot,doppler,signalpn0
3075 3061 # oplot,doppler,signalpn1,linest=1
3076 3062 # oplot,mom0(0)*doppler/doppler,signalpn0
3077 3063 # oplot,mom1(0)*doppler/doppler,signalpn1
3078 3064 # print,interval/12.,beam,45+ht*15,snr0,snr1,mom0(0),mom1(0),mom0(1),mom1(1)
3079 3065 #ENDIF
3080 3066 #ENDIF
3081 3067 #ENDFOR End height
3082 3068
3083 3069 dataOut.data_spc = jspectra
3084 3070 if getSNR:
3085 3071 listChannels = groupArray.reshape((groupArray.size))
3086 3072 listChannels.sort()
3087 3073
3088 3074 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], my_noises[listChannels])
3089 3075 return dataOut
3090 3076
3091 3077 def __residFunction(self, p, dp, LT, constants):
3092 3078
3093 3079 fm = self.library.modelFunction(p, constants)
3094 3080 fmp=numpy.dot(LT,fm)
3095 3081 return dp-fmp
3096 3082
3097 3083 def __getSNR(self, z, noise):
3098 3084
3099 3085 avg = numpy.average(z, axis=1)
3100 3086 SNR = (avg.T-noise)/noise
3101 3087 SNR = SNR.T
3102 3088 return SNR
3103 3089
3104 3090 def __chisq(self, p, chindex, hindex):
3105 3091 #similar to Resid but calculates CHI**2
3106 3092 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
3107 3093 dp=numpy.dot(LT,d)
3108 3094 fmp=numpy.dot(LT,fm)
3109 3095 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
3110 3096 return chisq
3111 3097
3112 3098 class WindProfiler(Operation):
3113 3099
3114 3100 __isConfig = False
3115 3101
3116 3102 __initime = None
3117 3103 __lastdatatime = None
3118 3104 __integrationtime = None
3119 3105
3120 3106 __buffer = None
3121 3107
3122 3108 __dataReady = False
3123 3109
3124 3110 __firstdata = None
3125 3111
3126 3112 n = None
3127 3113
3128 3114 def __init__(self):
3129 3115 Operation.__init__(self)
3130 3116
3131 3117 def __calculateCosDir(self, elev, azim):
3132 3118 zen = (90 - elev)*numpy.pi/180
3133 3119 azim = azim*numpy.pi/180
3134 3120 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
3135 3121 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
3136 3122
3137 3123 signX = numpy.sign(numpy.cos(azim))
3138 3124 signY = numpy.sign(numpy.sin(azim))
3139 3125
3140 3126 cosDirX = numpy.copysign(cosDirX, signX)
3141 3127 cosDirY = numpy.copysign(cosDirY, signY)
3142 3128 return cosDirX, cosDirY
3143 3129
3144 3130 def __calculateAngles(self, theta_x, theta_y, azimuth):
3145 3131
3146 3132 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
3147 3133 zenith_arr = numpy.arccos(dir_cosw)
3148 3134 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
3149 3135
3150 3136 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
3151 3137 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
3152 3138
3153 3139 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
3154 3140
3155 3141 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
3156 3142
3157 3143 if horOnly:
3158 3144 A = numpy.c_[dir_cosu,dir_cosv]
3159 3145 else:
3160 3146 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
3161 3147 A = numpy.asmatrix(A)
3162 3148 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
3163 3149
3164 3150 return A1
3165 3151
3166 3152 def __correctValues(self, heiRang, phi, velRadial, SNR):
3167 3153 listPhi = phi.tolist()
3168 3154 maxid = listPhi.index(max(listPhi))
3169 3155 minid = listPhi.index(min(listPhi))
3170 3156
3171 3157 rango = list(range(len(phi)))
3172 3158 # rango = numpy.delete(rango,maxid)
3173 3159
3174 3160 heiRang1 = heiRang*math.cos(phi[maxid])
3175 3161 heiRangAux = heiRang*math.cos(phi[minid])
3176 3162 indOut = (heiRang1 < heiRangAux[0]).nonzero()
3177 3163 heiRang1 = numpy.delete(heiRang1,indOut)
3178 3164
3179 3165 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
3180 3166 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
3181 3167
3182 3168 for i in rango:
3183 3169 x = heiRang*math.cos(phi[i])
3184 3170 y1 = velRadial[i,:]
3185 3171 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
3186 3172
3187 3173 x1 = heiRang1
3188 3174 y11 = f1(x1)
3189 3175
3190 3176 y2 = SNR[i,:]
3191 3177 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
3192 3178 y21 = f2(x1)
3193 3179
3194 3180 velRadial1[i,:] = y11
3195 3181 SNR1[i,:] = y21
3196 3182
3197 3183 return heiRang1, velRadial1, SNR1
3198 3184
3199 3185 def __calculateVelUVW(self, A, velRadial):
3200 3186
3201 3187 #Matrix operation
3202 3188 # velUVW = numpy.zeros((velRadial.shape[1],3))
3203 3189 # for ind in range(velRadial.shape[1]):
3204 3190 # velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
3205 3191 # velUVW = velUVW.transpose()
3206 3192 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
3207 3193 velUVW[:,:] = numpy.dot(A,velRadial)
3208 3194
3209 3195
3210 3196 return velUVW
3211 3197
3212 3198 # def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
3213 3199
3214 3200 def techniqueDBS(self, kwargs):
3215 3201 """
3216 3202 Function that implements Doppler Beam Swinging (DBS) technique.
3217 3203
3218 3204 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
3219 3205 Direction correction (if necessary), Ranges and SNR
3220 3206
3221 3207 Output: Winds estimation (Zonal, Meridional and Vertical)
3222 3208
3223 3209 Parameters affected: Winds, height range, SNR
3224 3210 """
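# Each beam measures the projection of the wind (u, v, w) on its pointing direction:
#   vr_i = u*dir_cosu_i + v*dir_cosv_i + w*dir_cosw_i
# __calculateMatA builds the least-squares pseudo-inverse (A.T A)^-1 A.T of the direction-cosine
# matrix and __calculateVelUVW applies it to the height-corrected radial velocities.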
3225 3211 velRadial0 = kwargs['velRadial']
3226 3212 heiRang = kwargs['heightList']
3227 3213 SNR0 = kwargs['SNR']
3228 3214
3229 3215 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
3230 3216 theta_x = numpy.array(kwargs['dirCosx'])
3231 3217 theta_y = numpy.array(kwargs['dirCosy'])
3232 3218 else:
3233 3219 elev = numpy.array(kwargs['elevation'])
3234 3220 azim = numpy.array(kwargs['azimuth'])
3235 3221 theta_x, theta_y = self.__calculateCosDir(elev, azim)
3236 3222 azimuth = kwargs['correctAzimuth']
3237 3223 if 'horizontalOnly' in kwargs:
3238 3224 horizontalOnly = kwargs['horizontalOnly']
3239 3225 else: horizontalOnly = False
3240 3226 if 'correctFactor' in kwargs:
3241 3227 correctFactor = kwargs['correctFactor']
3242 3228 else: correctFactor = 1
3243 3229 if 'channelList' in kwargs:
3244 3230 channelList = kwargs['channelList']
3245 3231 if len(channelList) == 2:
3246 3232 horizontalOnly = True
3247 3233 arrayChannel = numpy.array(channelList)
3248 3234 velRadial0 = velRadial0[arrayChannel,:] #restrict to the selected channels; note: SNR0 may need the same restriction
3249 3235 theta_x = theta_x[arrayChannel]
3250 3236 theta_y = theta_y[arrayChannel]
3251 3237
3252 3238 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
3253 3239 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
3254 3240 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
3255 3241
3256 3242 #Velocity components computed with DBS
3257 3243 winds = self.__calculateVelUVW(A,velRadial1)
3258 3244
3259 3245 return winds, heiRang1, SNR1
3260 3246
3261 3247 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
3262 3248
3263 3249 nPairs = len(pairs_ccf)
3264 3250 posx = numpy.asarray(posx)
3265 3251 posy = numpy.asarray(posy)
3266 3252
3267 3253 #Inverse rotation to align with the azimuth
3268 3254 if azimuth is not None:
3269 3255 azimuth = azimuth*math.pi/180
3270 3256 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
3271 3257 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
3272 3258 else:
3273 3259 posx1 = posx
3274 3260 posy1 = posy
3275 3261
3276 3262 #Distance computation
3277 3263 distx = numpy.zeros(nPairs)
3278 3264 disty = numpy.zeros(nPairs)
3279 3265 dist = numpy.zeros(nPairs)
3280 3266 ang = numpy.zeros(nPairs)
3281 3267
3282 3268 for i in range(nPairs):
3283 3269 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
3284 3270 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
3285 3271 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
3286 3272 ang[i] = numpy.arctan2(disty[i],distx[i])
3287 3273
3288 3274 return distx, disty, dist, ang
3289 3275 #Matrix computation
3290 3276 # nPairs = len(pairs)
3291 3277 # ang1 = numpy.zeros((nPairs, 2, 1))
3292 3278 # dist1 = numpy.zeros((nPairs, 2, 1))
3293 3279 #
3294 3280 # for j in range(nPairs):
3295 3281 # dist1[j,0,0] = dist[pairs[j][0]]
3296 3282 # dist1[j,1,0] = dist[pairs[j][1]]
3297 3283 # ang1[j,0,0] = ang[pairs[j][0]]
3298 3284 # ang1[j,1,0] = ang[pairs[j][1]]
3299 3285 #
3300 3286 # return distx,disty, dist1,ang1
3301 3287
3302 3288
3303 3289 def __calculateVelVer(self, phase, lagTRange, _lambda):
3304 3290
3305 3291 Ts = lagTRange[1] - lagTRange[0]
3306 3292 velW = -_lambda*phase/(4*math.pi*Ts)
3307 3293
3308 3294 return velW
3309 3295
3310 3296 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
3311 3297 nPairs = tau1.shape[0]
3312 3298 nHeights = tau1.shape[1]
3313 3299 vel = numpy.zeros((nPairs,3,nHeights))
3314 3300 dist1 = numpy.reshape(dist, (dist.size,1))
3315 3301
3316 3302 angCos = numpy.cos(ang)
3317 3303 angSin = numpy.sin(ang)
3318 3304
3319 3305 vel0 = dist1*tau1/(2*tau2**2)
3320 3306 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
3321 3307 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
3322 3308
3323 3309 ind = numpy.where(numpy.isinf(vel))
3324 3310 vel[ind] = numpy.nan
3325 3311
3326 3312 return vel
3327 3313
3328 3314 # def __getPairsAutoCorr(self, pairsList, nChannels):
3329 3315 #
3330 3316 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
3331 3317 #
3332 3318 # for l in range(len(pairsList)):
3333 3319 # firstChannel = pairsList[l][0]
3334 3320 # secondChannel = pairsList[l][1]
3335 3321 #
3336 3322 # #Obteniendo pares de Autocorrelacion
3337 3323 # if firstChannel == secondChannel:
3338 3324 # pairsAutoCorr[firstChannel] = int(l)
3339 3325 #
3340 3326 # pairsAutoCorr = pairsAutoCorr.astype(int)
3341 3327 #
3342 3328 # pairsCrossCorr = range(len(pairsList))
3343 3329 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
3344 3330 #
3345 3331 # return pairsAutoCorr, pairsCrossCorr
3346 3332
3347 3333 # def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
3348 3334 def techniqueSA(self, kwargs):
3349 3335
3350 3336 """
3351 3337 Function that implements Spaced Antenna (SA) technique.
3352 3338
3353 3339 Input: Cross-correlation lag parameters (tau) per receiver pair, receiver positions (x, y),
3354 3340 antenna azimuth, radar wavelength and a correction factor (if necessary)
3355 3341
3356 3342 Output: Winds estimation (Zonal, Meridional and Vertical)
3357 3343
3358 3344 Parameters affected: Winds
3359 3345 """
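# The first half of tau (tau1) and the second half (tau2) are combined per receiver pair as
# v0 = d*tau1/(2*tau2^2); its projections on the pair baselines give the horizontal components
# (__calculateVelHorDir). The last row of tau is a phase, converted to vertical velocity as
# w = -lambda*phase/(4*pi*Ts) (__calculateVelVer).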
3360 3346 position_x = kwargs['positionX']
3361 3347 position_y = kwargs['positionY']
3362 3348 azimuth = kwargs['azimuth']
3363 3349
3364 3350 if 'correctFactor' in kwargs:
3365 3351 correctFactor = kwargs['correctFactor']
3366 3352 else:
3367 3353 correctFactor = 1
3368 3354
3369 3355 groupList = kwargs['groupList']
3370 3356 pairs_ccf = groupList[1]
3371 3357 tau = kwargs['tau']
3372 3358 _lambda = kwargs['_lambda']
lagTRange = kwargs['lagTRange'] #lag-time axis, needed below for the vertical velocity
3373 3359
3374 3360 #Cross Correlation pairs obtained
3375 3361 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels)
3376 3362 # pairsArray = numpy.array(pairsList)[pairsCrossCorr]
3377 3363 # pairsSelArray = numpy.array(pairsSelected)
3378 3364 # pairs = []
3379 3365 #
3380 3366 # #Wind estimation pairs obtained
3381 3367 # for i in range(pairsSelArray.shape[0]/2):
3382 3368 # ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
3383 3369 # ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
3384 3370 # pairs.append((ind1,ind2))
3385 3371
3386 3372 indtau = tau.shape[0]//2
3387 3373 tau1 = tau[:indtau,:]
3388 3374 tau2 = tau[indtau:-1,:]
3389 3375 # tau1 = tau1[pairs,:]
3390 3376 # tau2 = tau2[pairs,:]
3391 3377 phase1 = tau[-1,:]
3392 3378
3393 3379 #---------------------------------------------------------------------
3394 3380 #Direct method
3395 3381 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
3396 3382 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
3397 3383 winds = numpy.nanmean(winds, axis=0) #scipy.stats.nanmean is no longer available in current SciPy
3398 3384 #---------------------------------------------------------------------
3399 3385 #General method
3400 3386 # distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
3401 3387 # #Calculo Coeficientes de Funcion de Correlacion
3402 3388 # F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
3403 3389 # #Calculo de Velocidades
3404 3390 # winds = self.calculateVelUV(F,G,A,B,H)
3405 3391
3406 3392 #---------------------------------------------------------------------
3407 3393 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
3408 3394 winds = correctFactor*winds
3409 3395 return winds
3410 3396
3411 3397 def __checkTime(self, currentTime, paramInterval, outputInterval):
3412 3398
3413 3399 dataTime = currentTime + paramInterval
3414 3400 deltaTime = dataTime - self.__initime
3415 3401
3416 3402 if deltaTime >= outputInterval or deltaTime < 0:
3417 3403 self.__dataReady = True
3418 3404 return
3419 3405
3420 3406 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
3421 3407 '''
3422 3408 Function that implements winds estimation technique with detected meteors.
3423 3409
3424 3410 Input: Detected meteors, minimum number of meteors per height bin required for the wind estimation
3425 3411
3426 3412 Output: Winds estimation (Zonal and Meridional)
3427 3413
3428 3414 Parameters affected: Winds
3429 3415 '''
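# Meteors are grouped into 2-km height bins; in every bin with at least meteorThresh detections the
# radial velocities are fitted by least squares to vr = u*l + v*m, with direction cosines
# l = sin(zen)*sin(azim) and m = sin(zen)*cos(azim), giving the zonal and meridional winds.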
3430 3416 #Settings
3431 3417 nInt = (heightMax - heightMin)/2
3432 3418 nInt = int(nInt)
3433 3419 winds = numpy.zeros((2,nInt))*numpy.nan
3434 3420
3435 3421 #Filter errors
3436 3422 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
3437 3423 finalMeteor = arrayMeteor[error,:]
3438 3424
3439 3425 #Meteor Histogram
3440 3426 finalHeights = finalMeteor[:,2]
3441 3427 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
3442 3428 nMeteorsPerI = hist[0]
3443 3429 heightPerI = hist[1]
3444 3430
3445 3431 #Sort of meteors
3446 3432 indSort = finalHeights.argsort()
3447 3433 finalMeteor2 = finalMeteor[indSort,:]
3448 3434
3449 3435 # Calculating winds
3450 3436 ind1 = 0
3451 3437 ind2 = 0
3452 3438
3453 3439 for i in range(nInt):
3454 3440 nMet = nMeteorsPerI[i]
3455 3441 ind1 = ind2
3456 3442 ind2 = ind1 + nMet
3457 3443
3458 3444 meteorAux = finalMeteor2[ind1:ind2,:]
3459 3445
3460 3446 if meteorAux.shape[0] >= meteorThresh:
3461 3447 vel = meteorAux[:, 6]
3462 3448 zen = meteorAux[:, 4]*numpy.pi/180
3463 3449 azim = meteorAux[:, 3]*numpy.pi/180
3464 3450
3465 3451 n = numpy.cos(zen)
3466 3452 # m = (1 - n**2)/(1 - numpy.tan(azim)**2)
3467 3453 # l = m*numpy.tan(azim)
3468 3454 l = numpy.sin(zen)*numpy.sin(azim)
3469 3455 m = numpy.sin(zen)*numpy.cos(azim)
3470 3456
3471 3457 A = numpy.vstack((l, m)).transpose()
3472 3458 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
3473 3459 windsAux = numpy.dot(A1, vel)
3474 3460
3475 3461 winds[0,i] = windsAux[0]
3476 3462 winds[1,i] = windsAux[1]
3477 3463
3478 3464 return winds, heightPerI[:-1]
3479 3465
3480 3466 def techniqueNSM_SA(self, **kwargs):
3481 3467 metArray = kwargs['metArray']
3482 3468 heightList = kwargs['heightList']
3483 3469 timeList = kwargs['timeList']
3484 3470
3485 3471 rx_location = kwargs['rx_location']
3486 3472 groupList = kwargs['groupList']
3487 3473 azimuth = kwargs['azimuth']
3488 3474 dfactor = kwargs['dfactor']
3489 3475 k = kwargs['k']
3490 3476
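# Baseline lengths and azimuths are derived from the receiver locations; the stored phase slopes are
# converted to radial velocities (slope*height*1000/(k*d)) and, for every height with at least two
# detections, the horizontal wind is obtained from a pseudo-inverse fit against the baseline azimuths.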
3491 3477 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
3492 3478 d = dist*dfactor
3493 3479 #Phase calculation
3494 3480 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
3495 3481
3496 3482 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
3497 3483
3498 3484 velEst = numpy.zeros((heightList.size,2))*numpy.nan
3499 3485 azimuth1 = azimuth1*numpy.pi/180
3500 3486
3501 3487 for i in range(heightList.size):
3502 3488 h = heightList[i]
3503 3489 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
3504 3490 metHeight = metArray1[indH,:]
3505 3491 if metHeight.shape[0] >= 2:
3506 3492 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
3507 3493 iazim = metHeight[:,1].astype(int)
3508 3494 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
3509 3495 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
3510 3496 A = numpy.asmatrix(A)
3511 3497 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
3512 3498 velHor = numpy.dot(A1,velAux)
3513 3499
3514 3500 velEst[i,:] = numpy.squeeze(velHor)
3515 3501 return velEst
3516 3502
3517 3503 def __getPhaseSlope(self, metArray, heightList, timeList):
3518 3504 meteorList = []
3519 3505 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
3520 3506 #Putting back together the meteor matrix
3521 3507 utctime = metArray[:,0]
3522 3508 uniqueTime = numpy.unique(utctime)
3523 3509
3524 3510 phaseDerThresh = 0.5
3525 3511 ippSeconds = timeList[1] - timeList[0]
3526 3512 sec = numpy.where(timeList>1)[0][0]
3527 3513 nPairs = metArray.shape[1] - 6
3528 3514 nHeights = len(heightList)
3529 3515
3530 3516 for t in uniqueTime:
3531 3517 metArray1 = metArray[utctime==t,:]
3532 3518 # phaseDerThresh = numpy.pi/4 #reducir Phase thresh
3533 3519 tmet = metArray1[:,1].astype(int)
3534 3520 hmet = metArray1[:,2].astype(int)
3535 3521
3536 3522 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
3537 3523 metPhase[:,:] = numpy.nan
3538 3524 metPhase[:,hmet,tmet] = metArray1[:,6:].T
3539 3525
3540 3526 #Delete short trails
3541 3527 metBool = ~numpy.isnan(metPhase[0,:,:])
3542 3528 heightVect = numpy.sum(metBool, axis = 1)
3543 3529 metBool[heightVect<sec,:] = False
3544 3530 metPhase[:,heightVect<sec,:] = numpy.nan
3545 3531
3546 3532 #Derivative
3547 3533 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
3548 3534 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
3549 3535 metPhase[phDerAux] = numpy.nan
3550 3536
3551 3537 #--------------------------METEOR DETECTION -----------------------------------------
3552 3538 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
3553 3539
3554 3540 for p in numpy.arange(nPairs):
3555 3541 phase = metPhase[p,:,:]
3556 3542 phDer = metDer[p,:,:]
3557 3543
3558 3544 for h in indMet:
3559 3545 height = heightList[h]
3560 3546 phase1 = phase[h,:] #82
3561 3547 phDer1 = phDer[h,:]
3562 3548
3563 3549 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
3564 3550
3565 3551 indValid = numpy.where(~numpy.isnan(phase1))[0]
3566 3552 initMet = indValid[0]
3567 3553 endMet = 0
3568 3554
3569 3555 for i in range(len(indValid)-1):
3570 3556
3571 3557 #Time difference
3572 3558 inow = indValid[i]
3573 3559 inext = indValid[i+1]
3574 3560 idiff = inext - inow
3575 3561 #Phase difference
3576 3562 phDiff = numpy.abs(phase1[inext] - phase1[inow])
3577 3563
3578 3564 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
3579 3565 sizeTrail = inow - initMet + 1
3580 3566 if sizeTrail>3*sec: #Too short meteors
3581 3567 x = numpy.arange(initMet,inow+1)*ippSeconds
3582 3568 y = phase1[initMet:inow+1]
3583 3569 ynnan = ~numpy.isnan(y)
3584 3570 x = x[ynnan]
3585 3571 y = y[ynnan]
3586 3572 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
3587 3573 ylin = x*slope + intercept
3588 3574 rsq = r_value**2
3589 3575 if rsq > 0.5:
3590 3576 vel = slope#*height*1000/(k*d)
3591 3577 estAux = numpy.array([utctime,p,height, vel, rsq])
3592 3578 meteorList.append(estAux)
3593 3579 initMet = inext
3594 3580 metArray2 = numpy.array(meteorList)
3595 3581
3596 3582 return metArray2
3597 3583
3598 3584 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
3599 3585
3600 3586 azimuth1 = numpy.zeros(len(pairslist))
3601 3587 dist = numpy.zeros(len(pairslist))
3602 3588
3603 3589 for i in range(len(rx_location)):
3604 3590 ch0 = pairslist[i][0]
3605 3591 ch1 = pairslist[i][1]
3606 3592
3607 3593 diffX = rx_location[ch0][0] - rx_location[ch1][0]
3608 3594 diffY = rx_location[ch0][1] - rx_location[ch1][1]
3609 3595 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
3610 3596 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
3611 3597
3612 3598 azimuth1 -= azimuth0
3613 3599 return azimuth1, dist
3614 3600
3615 3601 def techniqueNSM_DBS(self, **kwargs):
3616 3602 metArray = kwargs['metArray']
3617 3603 heightList = kwargs['heightList']
3618 3604 timeList = kwargs['timeList']
3619 3605 azimuth = kwargs['azimuth']
3620 3606 theta_x = numpy.array(kwargs['theta_x'])
3621 3607 theta_y = numpy.array(kwargs['theta_y'])
3622 3608
3623 3609 utctime = metArray[:,0]
3624 3610 cmet = metArray[:,1].astype(int)
3625 3611 hmet = metArray[:,3].astype(int)
3626 3612 SNRmet = metArray[:,4]
3627 3613 vmet = metArray[:,5]
3628 3614 spcmet = metArray[:,6]
3629 3615
3630 3616 nChan = numpy.max(cmet) + 1
3631 3617 nHeights = len(heightList)
3632 3618
3633 3619 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
3634 3620 hmet = heightList[hmet]
3635 3621 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
3636 3622
3637 3623 velEst = numpy.zeros((heightList.size,2))*numpy.nan
3638 3624
3639 3625 for i in range(nHeights - 1):
3640 3626 hmin = heightList[i]
3641 3627 hmax = heightList[i + 1]
3642 3628
3643 3629 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
3644 3630 indthisH = numpy.where(thisH)
3645 3631
3646 3632 if numpy.size(indthisH) > 3:
3647 3633
3648 3634 vel_aux = vmet[thisH]
3649 3635 chan_aux = cmet[thisH]
3650 3636 cosu_aux = dir_cosu[chan_aux]
3651 3637 cosv_aux = dir_cosv[chan_aux]
3652 3638 cosw_aux = dir_cosw[chan_aux]
3653 3639
3654 3640 nch = numpy.size(numpy.unique(chan_aux))
3655 3641 if nch > 1:
3656 3642 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
3657 3643 velEst[i,:] = numpy.dot(A,vel_aux)
3658 3644
3659 3645 return velEst
3660 3646
3661 3647 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
3662 3648
3663 3649 if dataOut.abscissaList is not None:
3664 3650 if dataOut.abscissaList != None:
3665 3651 absc = dataOut.abscissaList[:-1]
3666 3652 # noise = dataOut.noise
3667 3653 heightList = dataOut.heightList
3668 3654 SNR = dataOut.data_snr
3669 3655
3670 3656 if technique == 'DBS':
3671 3657
3672 3658 kwargs['velRadial'] = param[:,1,:] #Radial velocity
3673 3659 kwargs['heightList'] = heightList
3674 3660 kwargs['SNR'] = SNR
3675 3661
3676 3662 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
3677 3663 dataOut.utctimeInit = dataOut.utctime
3678 3664 dataOut.outputInterval = dataOut.paramInterval
3679 3665
3680 3666 elif technique == 'SA':
3681 3667
3682 3668 #Parameters
3683 3669 # position_x = kwargs['positionX']
3684 3670 # position_y = kwargs['positionY']
3685 3671 # azimuth = kwargs['azimuth']
3686 3672 #
3687 3673 # if kwargs.has_key('crosspairsList'):
3688 3674 # pairs = kwargs['crosspairsList']
3689 3675 # else:
3690 3676 # pairs = None
3691 3677 #
3692 3678 # if kwargs.has_key('correctFactor'):
3693 3679 # correctFactor = kwargs['correctFactor']
3694 3680 # else:
3695 3681 # correctFactor = 1
3696 3682
3697 3683 # tau = dataOut.data_param
3698 3684 # _lambda = dataOut.C/dataOut.frequency
3699 3685 # pairsList = dataOut.groupList
3700 3686 # nChannels = dataOut.nChannels
3701 3687
3702 3688 kwargs['groupList'] = dataOut.groupList
3703 3689 kwargs['tau'] = dataOut.data_param
3704 3690 kwargs['_lambda'] = dataOut.C/dataOut.frequency
kwargs['lagTRange'] = absc #lag-time axis taken from dataOut.abscissaList, as in the legacy call below
3705 3691 # dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor)
3706 3692 dataOut.data_output = self.techniqueSA(kwargs)
3707 3693 dataOut.utctimeInit = dataOut.utctime
3708 3694 dataOut.outputInterval = dataOut.timeInterval
3709 3695
3710 3696 elif technique == 'Meteors':
3711 3697 dataOut.flagNoData = True
3712 3698 self.__dataReady = False
3713 3699
3714 3700 if 'nHours' in kwargs:
3715 3701 nHours = kwargs['nHours']
3716 3702 else:
3717 3703 nHours = 1
3718 3704
3719 3705 if 'meteorsPerBin' in kwargs:
3720 3706 meteorThresh = kwargs['meteorsPerBin']
3721 3707 else:
3722 3708 meteorThresh = 6
3723 3709
3724 3710 if 'hmin' in kwargs:
3725 3711 hmin = kwargs['hmin']
3726 3712 else: hmin = 70
3727 3713 if 'hmax' in kwargs:
3728 3714 hmax = kwargs['hmax']
3729 3715 else: hmax = 110
3730 3716
3731 3717 dataOut.outputInterval = nHours*3600
3732 3718
3733 3719 if self.__isConfig == False:
3734 3720 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
3735 3721 #Get Initial LTC time
3736 3722 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
3737 3723 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
3738 3724
3739 3725 self.__isConfig = True
3740 3726
3741 3727 if self.__buffer is None:
3742 3728 self.__buffer = dataOut.data_param
3743 3729 self.__firstdata = copy.copy(dataOut)
3744 3730
3745 3731 else:
3746 3732 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
3747 3733
3748 3734 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
3749 3735
3750 3736 if self.__dataReady:
3751 3737 dataOut.utctimeInit = self.__initime
3752 3738
3753 3739 self.__initime += dataOut.outputInterval #to erase time offset
3754 3740
3755 3741 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
3756 3742 dataOut.flagNoData = False
3757 3743 self.__buffer = None
3758 3744
3759 3745 elif technique == 'Meteors1':
3760 3746 dataOut.flagNoData = True
3761 3747 self.__dataReady = False
3762 3748
3763 3749 if 'nMins' in kwargs:
3764 3750 nMins = kwargs['nMins']
3765 3751 else: nMins = 20
3766 3752 if 'rx_location' in kwargs:
3767 3753 rx_location = kwargs['rx_location']
3768 3754 else: rx_location = [(0,1),(1,1),(1,0)]
3769 3755 if 'azimuth' in kwargs:
3770 3756 azimuth = kwargs['azimuth']
3771 3757 else: azimuth = 51.06
3772 3758 if 'dfactor' in kwargs:
3773 3759 dfactor = kwargs['dfactor']
3774 3760 if 'mode' in kwargs:
3775 3761 mode = kwargs['mode']
3776 3762 else: mode = 'SA'
3777 3763 if 'theta_x' in kwargs:
3778 3764 theta_x = kwargs['theta_x']
3779 3765 if 'theta_y' in kwargs:
3780 3766 theta_y = kwargs['theta_y']
3781 3767
3782 3768 #Remove this later
3783 3769 if dataOut.groupList is None:
3784 3770 dataOut.groupList = [(0,1),(0,2),(1,2)]
3785 3771 groupList = dataOut.groupList
3786 3772 C = 3e8
3787 3773 freq = 50e6
3788 3774 lamb = C/freq
3789 3775 k = 2*numpy.pi/lamb
3790 3776
3791 3777 timeList = dataOut.abscissaList
3792 3778 heightList = dataOut.heightList
3793 3779
3794 3780 if self.__isConfig == False:
3795 3781 dataOut.outputInterval = nMins*60
3796 3782 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
3797 3783 #Get Initial LTC time
3798 3784 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
3799 3785 minuteAux = initime.minute
3800 3786 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
3801 3787 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
3802 3788
3803 3789 self.__isConfig = True
3804 3790
3805 3791 if self.__buffer is None:
3806 3792 self.__buffer = dataOut.data_param
3807 3793 self.__firstdata = copy.copy(dataOut)
3808 3794
3809 3795 else:
3810 3796 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
3811 3797
3812 3798 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
3813 3799
3814 3800 if self.__dataReady:
3815 3801 dataOut.utctimeInit = self.__initime
3816 3802 self.__initime += dataOut.outputInterval #to erase time offset
3817 3803
3818 3804 metArray = self.__buffer
3819 3805 if mode == 'SA':
3820 3806 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
3821 3807 elif mode == 'DBS':
3822 3808 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
3823 3809 dataOut.data_output = dataOut.data_output.T
3824 3810 dataOut.flagNoData = False
3825 3811 self.__buffer = None
3826 3812
3827 3813 return
3828 3814
3829 3815 class EWDriftsEstimation(Operation):
3830 3816
3831 3817 def __init__(self):
3832 3818 Operation.__init__(self)
3833 3819
3834 3820 def __correctValues(self, heiRang, phi, velRadial, SNR):
3835 3821 listPhi = phi.tolist()
3836 3822 maxid = listPhi.index(max(listPhi))
3837 3823 minid = listPhi.index(min(listPhi))
3838 3824
3839 3825 rango = list(range(len(phi)))
3840 3826 # rango = numpy.delete(rango,maxid)
3841 3827
3842 3828 heiRang1 = heiRang*math.cos(phi[maxid])
3843 3829 heiRangAux = heiRang*math.cos(phi[minid])
3844 3830 indOut = (heiRang1 < heiRangAux[0]).nonzero()
3845 3831 heiRang1 = numpy.delete(heiRang1,indOut)
3846 3832
3847 3833 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
3848 3834 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
3849 3835
3850 3836 for i in rango:
3851 3837 x = heiRang*math.cos(phi[i])
3852 3838 y1 = velRadial[i,:]
3853 3839 vali= (numpy.isfinite(y1)==True).nonzero()
3854 3840 y1=y1[vali]
3855 3841 x = x[vali]
3856 3842 f1 = interpolate.interp1d(x,y1,kind = 'cubic',bounds_error=False)
3857 3843
3858 3844 #heiRang1 = x*math.cos(phi[maxid])
3859 3845 x1 = heiRang1
3860 3846 y11 = f1(x1)
3861 3847
3862 3848 y2 = SNR[i,:]
3863 3849 #print 'snr ', y2
3864 3850 x = heiRang*math.cos(phi[i])
3865 3851 vali= (y2 != -1).nonzero()
3866 3852 y2 = y2[vali]
3867 3853 x = x[vali]
3868 3854 #print 'snr ',y2
3869 3855 f2 = interpolate.interp1d(x,y2,kind = 'cubic',bounds_error=False)
3870 3856 y21 = f2(x1)
3871 3857
3872 3858 velRadial1[i,:] = y11
3873 3859 SNR1[i,:] = y21
3874 3860
3875 3861 return heiRang1, velRadial1, SNR1
3876 3862
3877 3863
3878 3864
3879 3865 def run(self, dataOut, zenith, zenithCorrection):
3880 3866
3881 3867 heiRang = dataOut.heightList
3882 3868 velRadial = dataOut.data_param[:,3,:]
3883 3869 velRadialm = dataOut.data_param[:,2:4,:]*-1
3884 3870
3885 3871 rbufc=dataOut.data_paramC[:,:,0]
3886 3872 ebufc=dataOut.data_paramC[:,:,1]
3887 3873 SNR = dataOut.data_snr
3888 3874 velRerr = dataOut.data_error[:,4,:]
3889 3875 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]],[velRadialm[1,:]],[velRadialm[1,:]]))
3890 3876 dataOut.moments=moments
3891 3877 # Coherent
3892 3878 smooth_wC = ebufc[0,:]
3893 3879 p_w0C = rbufc[0,:]
3894 3880 p_w1C = rbufc[1,:]
3895 3881 w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1)
3896 3882 t_wC = rbufc[3,:]
3897 3883 my_nbeams = 2
3898 3884
3899 3885 zenith = numpy.array(zenith)
3900 3886 zenith -= zenithCorrection
3901 3887 zenith *= numpy.pi/180
3902 3888 if zenithCorrection != 0 :
3903 3889 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
3904 3890 else :
3905 3891 heiRang1 = heiRang
3906 3892 velRadial1 = velRadial
3907 3893 SNR1 = SNR
3908 3894
3909 3895 alp = zenith[0]
3910 3896 bet = zenith[1]
3911 3897
3912 3898 w_w = velRadial1[0,:]
3913 3899 w_e = velRadial1[1,:]
3914 3900 w_w_err = velRerr[0,:]
3915 3901 w_e_err = velRerr[1,:]
3916 3902
3917 3903 val = (numpy.isfinite(w_w)==False).nonzero()
3918 3904 val = val[0]
3919 3905 bad = val
3920 3906 if len(bad) > 0 :
3921 3907 w_w[bad] = w_wC[bad]
3922 3908 w_w_err[bad]= numpy.nan
3923 3909 if my_nbeams == 2:
3924 3910 smooth_eC=ebufc[4,:]
3925 3911 p_e0C = rbufc[4,:]
3926 3912 p_e1C = rbufc[5,:]
3927 3913 w_eC = rbufc[6,:]*-1
3928 3914 t_eC = rbufc[7,:]
3929 3915 val = (numpy.isfinite(w_e)==False).nonzero()
3930 3916 val = val[0]
3931 3917 bad = val
3932 3918 if len(bad) > 0 :
3933 3919 w_e[bad] = w_eC[bad]
3934 3920 w_e_err[bad]= numpy.nan
3935 3921
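# Each beam measures the projection of the (u, w) drift on its line of sight:
#   w_w = w*cos(alp) + u*sin(alp) ,  w_e = w*cos(bet) + u*sin(bet)
# The expressions below invert this 2x2 system (Cramer's rule) and propagate the
# radial-velocity errors into w_err and u_err.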
3936 3922 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
3937 3923 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
3938 3924
3939 3925 w_err = numpy.sqrt((w_w_err*numpy.sin(bet))**2.+(w_e_err*numpy.sin(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
3940 3926 u_err = numpy.sqrt((w_w_err*numpy.cos(bet))**2.+(w_e_err*numpy.cos(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
3941 3927
3942 3928 winds = numpy.vstack((w,u))
3943 3929
3944 3930 dataOut.heightList = heiRang1
3945 3931 dataOut.data_output = winds
3946 3932
3947 3933 snr1 = 10*numpy.log10(SNR1[0])
3948 3934 dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0]))
3949 3935 dataOut.utctimeInit = dataOut.utctime
3950 3936 dataOut.outputInterval = dataOut.timeInterval
3951 3937
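# The vertical (w) and zonal (u) drifts are now averaged into coarse height bins starting at
# hei_aver0, with inverse-variance weights: avg = sum(v/err^2)/sum(1/err^2) and
# sigma = sqrt(1/sum(1/err^2)); the averaged profiles go to dataOut.drifts_avg and are appended
# to the daily text file.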
3952 3938 hei_aver0 = 218
3953 3939 jrange = 450 #900 for HA drifts
3954 3940 deltah = 15.0 #dataOut.spacing(0)
3955 3941 h0 = 0.0 #dataOut.first_height(0)
3956 3942 heights = dataOut.heightList
3957 3943 nhei = len(heights)
3958 3944
3959 3945 range1 = numpy.arange(nhei) * deltah + h0
3960 3946
3961 3947 #jhei = WHERE(range1 GE hei_aver0 , jcount)
3962 3948 jhei = (range1 >= hei_aver0).nonzero()
3963 3949 if len(jhei[0]) > 0 :
3964 3950 h0_index = jhei[0][0] # Initial height for getting averages 218km
3965 3951
3966 3952 mynhei = 7
3967 3953 nhei_avg = int(jrange/deltah)
3968 3954 h_avgs = int(nhei_avg/mynhei)
3969 3955 nhei_avg = h_avgs*(mynhei-1)+mynhei
3970 3956
3971 3957 navgs = numpy.zeros(mynhei,dtype='float')
3972 3958 delta_h = numpy.zeros(mynhei,dtype='float')
3973 3959 range_aver = numpy.zeros(mynhei,dtype='float')
3974 3960 for ih in range( mynhei-1 ):
3975 3961 range_aver[ih] = numpy.sum(range1[h0_index+h_avgs*ih:h0_index+h_avgs*(ih+1)-0])/h_avgs
3976 3962 navgs[ih] = h_avgs
3977 3963 delta_h[ih] = deltah*h_avgs
3978 3964
3979 3965 range_aver[mynhei-1] = numpy.sum(range1[h0_index:h0_index+6*h_avgs-0])/(6*h_avgs)
3980 3966 navgs[mynhei-1] = 6*h_avgs
3981 3967 delta_h[mynhei-1] = deltah*6*h_avgs
3982 3968
3983 3969 wA = w[h0_index:h0_index+nhei_avg-0]
3984 3970 wA_err = w_err[h0_index:h0_index+nhei_avg-0]
3985 3971
3986 3972 for i in range(5) :
3987 3973 vals = wA[i*h_avgs:(i+1)*h_avgs-0]
3988 3974 errs = wA_err[i*h_avgs:(i+1)*h_avgs-0]
3989 3975 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
3990 3976 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
3991 3977 wA[6*h_avgs+i] = avg
3992 3978 wA_err[6*h_avgs+i] = sigma
3993 3979
3994 3980
3995 3981 vals = wA[0:6*h_avgs-0]
3996 3982 errs=wA_err[0:6*h_avgs-0]
3997 3983 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2)
3998 3984 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
3999 3985 wA[nhei_avg-1] = avg
4000 3986 wA_err[nhei_avg-1] = sigma
4001 3987
4002 3988 wA = wA[6*h_avgs:nhei_avg-0]
4003 3989 wA_err=wA_err[6*h_avgs:nhei_avg-0]
4004 3990 if my_nbeams == 2 :
4005 3991
4006 3992 uA = u[h0_index:h0_index+nhei_avg]
4007 3993 uA_err=u_err[h0_index:h0_index+nhei_avg]
4008 3994
4009 3995 for i in range(5) :
4010 3996 vals = uA[i*h_avgs:(i+1)*h_avgs-0]
4011 3997 errs=uA_err[i*h_avgs:(i+1)*h_avgs-0]
4012 3998 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
4013 3999 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4014 4000 uA[6*h_avgs+i] = avg
4015 4001 uA_err[6*h_avgs+i]=sigma
4016 4002
4017 4003 vals = uA[0:6*h_avgs-0]
4018 4004 errs = uA_err[0:6*h_avgs-0]
4019 4005 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
4020 4006 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4021 4007 uA[nhei_avg-1] = avg
4022 4008 uA_err[nhei_avg-1] = sigma
4023 4009 uA = uA[6*h_avgs:nhei_avg-0]
4024 4010 uA_err = uA_err[6*h_avgs:nhei_avg-0]
4025 4011
4026 4012 dataOut.drifts_avg = numpy.vstack((wA,uA))
4027 4013
4028 4014 tini=time.localtime(dataOut.utctime)
4029 4015 datefile= str(tini[0]).zfill(4)+str(tini[1]).zfill(2)+str(tini[2]).zfill(2)
4030 4016 nfile = '/home/pcondor/Database/ewdriftsschain2019/jro'+datefile+'drifts_sch3.txt'
4031 4017
4032 4018 f1 = open(nfile,'a')
4033 4019
4034 4020 datedriftavg=str(tini[0])+' '+str(tini[1])+' '+str(tini[2])+' '+str(tini[3])+' '+str(tini[4])
4035 4021 driftavgstr=str(dataOut.drifts_avg)
4036 4022
4037 4023 numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i')
4038 4024 numpy.savetxt(f1,dataOut.drifts_avg,fmt='%10.2f')
4039 4025 f1.close()
4040 4026
4041 4027 return dataOut
4042 4028
4043 4029 #--------------- Non Specular Meteor ----------------
4044 4030
4045 4031 class NonSpecularMeteorDetection(Operation):
4046 4032
4047 4033 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
4048 4034 data_acf = dataOut.data_pre[0]
4049 4035 data_ccf = dataOut.data_pre[1]
4050 4036 pairsList = dataOut.groupList[1]
4051 4037
4052 4038 lamb = dataOut.C/dataOut.frequency
4053 4039 tSamp = dataOut.ippSeconds*dataOut.nCohInt
4054 4040 paramInterval = dataOut.paramInterval
4055 4041
4056 4042 nChannels = data_acf.shape[0]
4057 4043 nLags = data_acf.shape[1]
4058 4044 nProfiles = data_acf.shape[2]
4059 4045 nHeights = dataOut.nHeights
4060 4046 nCohInt = dataOut.nCohInt
4061 4047 sec = numpy.round(nProfiles/dataOut.paramInterval)
4062 4048 heightList = dataOut.heightList
4063 4049 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
4064 4050 utctime = dataOut.utctime
4065 4051
4066 4052 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
4067 4053
4068 4054 #------------------------ SNR --------------------------------------
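# Per-channel noise from the Hildebrand-Sekhon criterion on the zero-lag power; SNR = (power-noise)/noise,
# SNRm is the channel-averaged SNR and SNRdB its value in dB.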
4069 4055 power = data_acf[:,0,:,:].real
4070 4056 noise = numpy.zeros(nChannels)
4071 4057 SNR = numpy.zeros(power.shape)
4072 4058 for i in range(nChannels):
4073 4059 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
4074 4060 SNR[i] = (power[i]-noise[i])/noise[i]
4075 4061 SNRm = numpy.nanmean(SNR, axis = 0)
4076 4062 SNRdB = 10*numpy.log10(SNR)
4077 4063
4078 4064 if mode == 'SA':
4079 4065 dataOut.groupList = dataOut.groupList[1]
4080 4066 nPairs = data_ccf.shape[0]
4081 4067 #---------------------- Coherence and Phase --------------------------
4082 4068 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
4083 4069 # phase1 = numpy.copy(phase)
4084 4070 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
4085 4071
4086 4072 for p in range(nPairs):
4087 4073 ch0 = pairsList[p][0]
4088 4074 ch1 = pairsList[p][1]
4089 4075 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
4090 4076 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
4091 4077 # phase1[p,:,:] = numpy.angle(ccf) #median filter
4092 4078 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
4093 4079 # coh1[p,:,:] = numpy.abs(ccf) #median filter
4094 4080 coh = numpy.nanmax(coh1, axis = 0)
4095 4081 # struc = numpy.ones((5,1))
4096 4082 # coh = ndimage.morphology.grey_dilation(coh, size=(10,1))
4097 4083 #---------------------- Radial Velocity ----------------------------
4098 4084 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
4099 4085 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
4100 4086
4101 4087 if allData:
4102 4088 boolMetFin = ~numpy.isnan(SNRm)
4103 4089 # coh[:-1,:] = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
4104 4090 else:
4105 4091 #------------------------ Meteor mask ---------------------------------
4106 4092 # #SNR mask
4107 4093 # boolMet = (SNRdB>SNRthresh)#|(~numpy.isnan(SNRdB))
4108 4094 #
4109 4095 # #Erase small objects
4110 4096 # boolMet1 = self.__erase_small(boolMet, 2*sec, 5)
4111 4097 #
4112 4098 # auxEEJ = numpy.sum(boolMet1,axis=0)
4113 4099 # indOver = auxEEJ>nProfiles*0.8 #Use this later
4114 4100 # indEEJ = numpy.where(indOver)[0]
4115 4101 # indNEEJ = numpy.where(~indOver)[0]
4116 4102 #
4117 4103 # boolMetFin = boolMet1
4118 4104 #
4119 4105 # if indEEJ.size > 0:
4120 4106 # boolMet1[:,indEEJ] = False #Erase heights with EEJ
4121 4107 #
4122 4108 # boolMet2 = coh > cohThresh
4123 4109 # boolMet2 = self.__erase_small(boolMet2, 2*sec,5)
4124 4110 #
4125 4111 # #Final Meteor mask
4126 4112 # boolMetFin = boolMet1|boolMet2
4127 4113
4128 4114 #Coherence mask
4129 4115 boolMet1 = coh > 0.75
4130 4116 struc = numpy.ones((30,1))
4131 4117 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
4132 4118
4133 4119 #Derivative mask
4134 4120 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
4135 4121 boolMet2 = derPhase < 0.2
4136 4122 # boolMet2 = ndimage.morphology.binary_opening(boolMet2)
4137 4123 # boolMet2 = ndimage.morphology.binary_closing(boolMet2, structure = numpy.ones((10,1)))
4138 4124 boolMet2 = ndimage.median_filter(boolMet2,size=5)
4139 4125 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
4140 4126 # #Final mask
4141 4127 # boolMetFin = boolMet2
4142 4128 boolMetFin = boolMet1&boolMet2
4143 4129 # boolMetFin = ndimage.morphology.binary_dilation(boolMetFin)
4144 4130 #Creating data_param
4145 4131 coordMet = numpy.where(boolMetFin)
4146 4132
4147 4133 tmet = coordMet[0]
4148 4134 hmet = coordMet[1]
4149 4135
4150 4136 data_param = numpy.zeros((tmet.size, 6 + nPairs))
4151 4137 data_param[:,0] = utctime
4152 4138 data_param[:,1] = tmet
4153 4139 data_param[:,2] = hmet
4154 4140 data_param[:,3] = SNRm[tmet,hmet]
4155 4141 data_param[:,4] = velRad[tmet,hmet]
4156 4142 data_param[:,5] = coh[tmet,hmet]
4157 4143 data_param[:,6:] = phase[:,tmet,hmet].T
4158 4144
4159 4145 elif mode == 'DBS':
4160 4146 dataOut.groupList = numpy.arange(nChannels)
4161 4147
4162 4148 #Radial Velocities
4163 4149 phase = numpy.angle(data_acf[:,1,:,:])
4164 4150 # phase = ndimage.median_filter(numpy.angle(data_acf[:,1,:,:]), size = (1,5,1))
4165 4151 velRad = phase*lamb/(4*numpy.pi*tSamp)
4166 4152
4167 4153 #Spectral width
4168 4154 # acf1 = ndimage.median_filter(numpy.abs(data_acf[:,1,:,:]), size = (1,5,1))
4169 4155 # acf2 = ndimage.median_filter(numpy.abs(data_acf[:,2,:,:]), size = (1,5,1))
4170 4156 acf1 = data_acf[:,1,:,:]
4171 4157 acf2 = data_acf[:,2,:,:]
4172 4158
4173 4159 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
4174 4160 # velRad = ndimage.median_filter(velRad, size = (1,5,1))
4175 4161 if allData:
4176 4162 boolMetFin = ~numpy.isnan(SNRdB)
4177 4163 else:
4178 4164 #SNR
4179 4165 boolMet1 = (SNRdB>SNRthresh) #SNR mask
4180 4166 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
4181 4167
4182 4168 #Radial velocity
4183 4169 boolMet2 = numpy.abs(velRad) < 20
4184 4170 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
4185 4171
4186 4172 #Spectral Width
4187 4173 boolMet3 = spcWidth < 30
4188 4174 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
4189 4175 # boolMetFin = self.__erase_small(boolMet1, 10,5)
4190 4176 boolMetFin = boolMet1&boolMet2&boolMet3
4191 4177
4192 4178 #Creating data_param
4193 4179 coordMet = numpy.where(boolMetFin)
4194 4180
4195 4181 cmet = coordMet[0]
4196 4182 tmet = coordMet[1]
4197 4183 hmet = coordMet[2]
4198 4184
4199 4185 data_param = numpy.zeros((tmet.size, 7))
4200 4186 data_param[:,0] = utctime
4201 4187 data_param[:,1] = cmet
4202 4188 data_param[:,2] = tmet
4203 4189 data_param[:,3] = hmet
4204 4190 data_param[:,4] = SNR[cmet,tmet,hmet].T
4205 4191 data_param[:,5] = velRad[cmet,tmet,hmet].T
4206 4192 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
4207 4193
4208 4194 # self.dataOut.data_param = data_int
4209 4195 if len(data_param) == 0:
4210 4196 dataOut.flagNoData = True
4211 4197 else:
4212 4198 dataOut.data_param = data_param
4213 4199
4214 4200 def __erase_small(self, binArray, threshX, threshY):
4215 4201 labarray, numfeat = ndimage.measurements.label(binArray)
4216 4202 binArray1 = numpy.copy(binArray)
4217 4203
4218 4204 for i in range(1,numfeat + 1):
4219 4205 auxBin = (labarray==i)
4220 4206 auxSize = auxBin.sum()
4221 4207
4222 4208 x,y = numpy.where(auxBin)
4223 4209 widthX = x.max() - x.min()
4224 4210 widthY = y.max() - y.min()
4225 4211
4226 4212 #width X: 3 seg -> 12.5*3
4227 4213 #width Y:
4228 4214
4229 4215 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
4230 4216 binArray1[auxBin] = False
4231 4217
4232 4218 return binArray1
4233 4219
4234 4220 #--------------- Specular Meteor ----------------
4235 4221
4236 4222 class SMDetection(Operation):
4237 4223 '''
4238 4224 Function DetectMeteors()
4239 4225 Project developed with paper:
4240 4226 HOLDSWORTH ET AL. 2004
4241 4227
4242 4228 Input:
4243 4229 self.dataOut.data_pre
4244 4230
4245 4231 centerReceiverIndex: Index of the center receiver among the channels
4246 4232
4247 4233 hei_ref: Height reference for the Beacon signal extraction
4248 4234 tauindex:
4249 4235 phaseOffsets: Predefined phase offsets for the voltage signals
4250 4236
4251 4237 cohDetection: Whether to use Coherent detection or not
4252 4238 cohDet_timeStep: Coherent Detection calculation time step
4253 4239 cohDet_thresh: Coherent Detection phase threshold to correct phases
4254 4240
4255 4241 noise_timeStep: Noise calculation time step
4256 4242 noise_multiple: Noise multiple to define signal threshold
4257 4243
4258 4244 multDet_timeLimit: Multiple Detection Removal time limit in seconds
4259 4245 multDet_rangeLimit: Multiple Detection Removal range limit in km
4260 4246
4261 4247 phaseThresh: Maximum phase difference between receivers to be considered a meteor
4262 4248 SNRThresh: Minimum SNR threshold of the meteor signal to be considered a meteor
4263 4249
4264 4250 hmin: Minimum Height of the meteor to use it in the further wind estimations
4265 4251 hmax: Maximum Height of the meteor to use it in the further wind estimations
4266 4252 azimuth: Azimuth angle correction
4267 4253
4268 4254 Affected:
4269 4255 self.dataOut.data_param
4270 4256
4271 4257 Rejection Criteria (Errors):
4272 4258 0: No error; analysis OK
4273 4259 1: SNR < SNR threshold
4274 4260 2: angle of arrival (AOA) ambiguously determined
4275 4261 3: AOA estimate not feasible
4276 4262 4: Large difference in AOAs obtained from different antenna baselines
4277 4263 5: echo at start or end of time series
4278 4264 6: echo less than 5 samples long; too short for analysis
4279 4265 7: echo rise exceeds 0.3s
4280 4266 8: echo decay time less than twice rise time
4281 4267 9: large power level before echo
4282 4268 10: large power level after echo
4283 4269 11: poor fit to amplitude for estimation of decay time
4284 4270 12: poor fit to CCF phase variation for estimation of radial drift velocity
4285 4271 13: height unresolvable echo: not valid height within 70 to 110 km
4286 4272 14: height ambiguous echo: more than one possible height within 70 to 110 km
4287 4273 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
4288 4274 16: oscillatory echo, indicating event most likely not an underdense echo
4289 4275
4290 4276 17: phase difference in meteor Reestimation
4291 4277
4292 4278 Data Storage:
4293 4279 Meteors for Wind Estimation (8):
4294 4280 Utc Time | Range Height
4295 4281 Azimuth Zenith errorCosDir
4296 4282 VelRad errorVelRad
4297 4283 Phase0 Phase1 Phase2 Phase3
4298 4284 TypeError
4299 4285
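Example
--------
A minimal configuration sketch; the threshold and height values shown are the
defaults from run() and the parameter format names follow the usual
controller-script conventions (both are illustrative, not recommendations):

op = proc_unit.addOperation(name='SMDetection', optype='other')
op.addParameter(name='phaseThresh', value='20', format='float')
op.addParameter(name='SNRThresh', value='5', format='float')
op.addParameter(name='hmin', value='50', format='float')
op.addParameter(name='hmax', value='150', format='float')
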
4300 4286 '''
4301 4287
4302 4288 def run(self, dataOut, hei_ref = None, tauindex = 0,
4303 4289 phaseOffsets = None,
4304 4290 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
4305 4291 noise_timeStep = 4, noise_multiple = 4,
4306 4292 multDet_timeLimit = 1, multDet_rangeLimit = 3,
4307 4293 phaseThresh = 20, SNRThresh = 5,
4308 4294 hmin = 50, hmax=150, azimuth = 0,
4309 4295 channelPositions = None) :
4310 4296
4311 4297
4312 4298 #Getting Pairslist
4313 4299 if channelPositions is None:
4314 4300 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
4315 4301 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
4316 4302 meteorOps = SMOperations()
4317 4303 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
4318 4304 heiRang = dataOut.heightList
4319 4305 #Get Beacon signal - No Beacon signal anymore
4320 4306 # newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex])
4321 4307 #
4322 4308 # if hei_ref != None:
4323 4309 # newheis = numpy.where(self.dataOut.heightList>hei_ref)
4324 4310 #
4325 4311
4326 4312
4327 4313 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
4328 4314 # see if the user put in pre defined phase shifts
4329 4315 voltsPShift = dataOut.data_pre.copy()
4330 4316
4331 4317 # if predefinedPhaseShifts != None:
4332 4318 # hardwarePhaseShifts = numpy.array(predefinedPhaseShifts)*numpy.pi/180
4333 4319 #
4334 4320 # # elif beaconPhaseShifts:
4335 4321 # # #get hardware phase shifts using beacon signal
4336 4322 # # hardwarePhaseShifts = self.__getHardwarePhaseDiff(self.dataOut.data_pre, pairslist, newheis, 10)
4337 4323 # # hardwarePhaseShifts = numpy.insert(hardwarePhaseShifts,centerReceiverIndex,0)
4338 4324 #
4339 4325 # else:
4340 4326 # hardwarePhaseShifts = numpy.zeros(5)
4341 4327 #
4342 4328 # voltsPShift = numpy.zeros((self.dataOut.data_pre.shape[0],self.dataOut.data_pre.shape[1],self.dataOut.data_pre.shape[2]), dtype = 'complex')
4343 4329 # for i in range(self.dataOut.data_pre.shape[0]):
4344 4330 # voltsPShift[i,:,:] = self.__shiftPhase(self.dataOut.data_pre[i,:,:], hardwarePhaseShifts[i])
4345 4331
4346 4332 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
4347 4333
4348 4334 #Remove DC
4349 4335 voltsDC = numpy.mean(voltsPShift,1)
4350 4336 voltsDC = numpy.mean(voltsDC,1)
4351 4337 for i in range(voltsDC.shape[0]):
4352 4338 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
4353 4339
4354 4340 #Don't consider the last heights; they're used to calculate the Hardware Phase Shift
4355 4341 # voltsPShift = voltsPShift[:,:,:newheis[0][0]]
4356 4342
4357 4343 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
4358 4344 #Coherent Detection
4359 4345 if cohDetection:
4360 4346 #use coherent detection to get the net power
4361 4347 cohDet_thresh = cohDet_thresh*numpy.pi/180
4362 4348 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
4363 4349
4364 4350 #Non-coherent detection!
4365 4351 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
4366 4352 #********** END OF COH/NON-COH POWER CALCULATION**********************
4367 4353
4368 4354 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
4369 4355 #Get noise
4370 4356 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
4371 4357 # noise = self.getNoise1(powerNet, noise_timeStep, self.dataOut.timeInterval)
4372 4358 #Get signal threshold
4373 4359 signalThresh = noise_multiple*noise
4374 4360 #Meteor echoes detection
4375 4361 listMeteors = self.__findMeteors(powerNet, signalThresh)
4376 4362 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CALCULATION **********
4377 4363
4378 4364 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
4379 4365 #Parameters
4380 4366 heiRange = dataOut.heightList
4381 4367 rangeInterval = heiRange[1] - heiRange[0]
4382 4368 rangeLimit = multDet_rangeLimit/rangeInterval
4383 4369 timeLimit = multDet_timeLimit/dataOut.timeInterval
4384 4370 #Multiple detection removals
4385 4371 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
4386 4372 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
4387 4373
4388 4374 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
4389 4375 #Parameters
4390 4376 phaseThresh = phaseThresh*numpy.pi/180
4391 4377 thresh = [phaseThresh, noise_multiple, SNRThresh]
4392 4378 #Meteor reestimation (Errors N 1, 6, 12, 17)
4393 4379 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
4394 4380 # listMeteors2, listMeteorsPower, listMeteorsVolts = self.meteorReestimation3(listMeteors2, listMeteorsPower, listMeteorsVolts, voltsPShift, pairslist, thresh, noise)
4395 4381 #Estimation of decay times (Errors N 7, 8, 11)
4396 4382 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
4397 4383 #******************* END OF METEOR REESTIMATION *******************
4398 4384
4399 4385 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
4400 4386 #Calculating Radial Velocity (Error N 15)
4401 4387 radialStdThresh = 10
4402 4388 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
4403 4389
4404 4390 if len(listMeteors4) > 0:
4405 4391 #Setting New Array
4406 4392 date = dataOut.utctime
4407 4393 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
4408 4394
4409 4395 #Correcting phase offset
4410 4396 if phaseOffsets != None:
4411 4397 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
4412 4398 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
4413 4399
4414 4400 #Second Pairslist
4415 4401 pairsList = []
4416 4402 pairx = (0,1)
4417 4403 pairy = (2,3)
4418 4404 pairsList.append(pairx)
4419 4405 pairsList.append(pairy)
4420 4406
4421 4407 jph = numpy.array([0,0,0,0])
4422 4408 h = (hmin,hmax)
4423 4409 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
4424 4410
4425 4411 # #Calculate AOA (Error N 3, 4)
4426 4412 # #JONES ET AL. 1998
4427 4413 # error = arrayParameters[:,-1]
4428 4414 # AOAthresh = numpy.pi/8
4429 4415 # phases = -arrayParameters[:,9:13]
4430 4416 # arrayParameters[:,4:7], arrayParameters[:,-1] = meteorOps.getAOA(phases, pairsList, error, AOAthresh, azimuth)
4431 4417 #
4432 4418 # #Calculate Heights (Error N 13 and 14)
4433 4419 # error = arrayParameters[:,-1]
4434 4420 # Ranges = arrayParameters[:,2]
4435 4421 # zenith = arrayParameters[:,5]
4436 4422 # arrayParameters[:,3], arrayParameters[:,-1] = meteorOps.getHeights(Ranges, zenith, error, hmin, hmax)
4437 4423 # error = arrayParameters[:,-1]
4438 4424 #********************* END OF PARAMETERS CALCULATION **************************
4439 4425
4440 4426 #***************************+ PASS DATA TO NEXT STEP **********************
4441 4427 # arrayFinal = arrayParameters.reshape((1,arrayParameters.shape[0],arrayParameters.shape[1]))
4442 4428 dataOut.data_param = arrayParameters
4443 4429
4444 4430 if arrayParameters is None:
4445 4431 dataOut.flagNoData = True
4446 4432 else:
4447 4433 dataOut.flagNoData = False #parameters were computed, so the data is valid
4448 4434
4449 4435 return
4450 4436
4451 4437 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
4452 4438
4453 4439 minIndex = min(newheis[0])
4454 4440 maxIndex = max(newheis[0])
4455 4441
4456 4442 voltage = voltage0[:,:,minIndex:maxIndex+1]
4457 4443 nLength = voltage.shape[1]//n #integer segment length, needed for slicing
4458 4444 nMin = 0
4459 4445 nMax = 0
4460 4446 phaseOffset = numpy.zeros((len(pairslist),n))
4461 4447
4462 4448 for i in range(n):
4463 4449 nMax += nLength
4464 4450 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
4465 4451 phaseCCF = numpy.mean(phaseCCF, axis = 2)
4466 4452 phaseOffset[:,i] = phaseCCF.transpose()
4467 4453 nMin = nMax
4468 4454 # phaseDiff, phaseArrival = self.estimatePhaseDifference(voltage, pairslist)
4469 4455
4470 4456 #Remove Outliers
4471 4457 factor = 2
4472 4458 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
4473 4459 dw = numpy.std(wt,axis = 1)
4474 4460 dw = dw.reshape((dw.size,1))
4475 4461 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
4476 4462 phaseOffset[ind] = numpy.nan
4477 4463 phaseOffset = numpy.nanmean(phaseOffset, axis=1)
4478 4464
4479 4465 return phaseOffset
4480 4466
4481 4467 def __shiftPhase(self, data, phaseShift):
4482 4468 #this will shift the phase of a complex number
4483 4469 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
4484 4470 return dataShifted
4485 4471
4486 4472 def __estimatePhaseDifference(self, array, pairslist):
4487 4473 nChannel = array.shape[0]
4488 4474 nHeights = array.shape[2]
4489 4475 numPairs = len(pairslist)
4490 4476 # phaseCCF = numpy.zeros((nChannel, 5, nHeights))
4491 4477 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
4492 4478
4493 4479 #Correct phases
4494 4480 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
4495 4481 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
4496 4482
4497 4483 if indDer[0].shape[0] > 0:
4498 4484 for i in range(indDer[0].shape[0]):
4499 4485 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
4500 4486 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
4501 4487
4502 4488 # for j in range(numSides):
4503 4489 # phaseCCFAux = self.calculateCCF(arrayCenter, arraySides[j,:,:], [-2,1,0,1,2])
4504 4490 # phaseCCF[j,:,:] = numpy.angle(phaseCCFAux)
4505 4491 #
4506 4492 #Linear
4507 4493 phaseInt = numpy.zeros((numPairs,1))
4508 4494 angAllCCF = phaseCCF[:,[0,1,3,4],0]
4509 4495 for j in range(numPairs):
4510 4496 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
4511 4497 phaseInt[j] = fit[1]
4512 4498 #Phase Differences
4513 4499 phaseDiff = phaseInt - phaseCCF[:,2,:]
4514 4500 phaseArrival = phaseInt.reshape(phaseInt.size)
4515 4501
4516 4502 #Dealias
4517 4503 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
4518 4504 # indAlias = numpy.where(phaseArrival > numpy.pi)
4519 4505 # phaseArrival[indAlias] -= 2*numpy.pi
4520 4506 # indAlias = numpy.where(phaseArrival < -numpy.pi)
4521 4507 # phaseArrival[indAlias] += 2*numpy.pi
4522 4508
4523 4509 return phaseDiff, phaseArrival
4524 4510
4525 4511 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
4526 4512 #this function will run the coherent detection used in Holdsworth et al. 2004 and return the net power
4527 4513 #find the phase shifts of each channel over 1 second intervals
4528 4514 #only look at ranges below the beacon signal
4529 4515 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
4530 4516 numBlocks = int(volts.shape[1]/numProfPerBlock)
4531 4517 numHeights = volts.shape[2]
4532 4518 nChannel = volts.shape[0]
4533 4519 voltsCohDet = volts.copy()
4534 4520
4535 4521 pairsarray = numpy.array(pairslist)
4536 4522 indSides = pairsarray[:,1]
4537 4523 # indSides = numpy.array(range(nChannel))
4538 4524 # indSides = numpy.delete(indSides, indCenter)
4539 4525 #
4540 4526 # listCenter = numpy.array_split(volts[indCenter,:,:], numBlocks, 0)
4541 4527 listBlocks = numpy.array_split(volts, numBlocks, 1)
4542 4528
4543 4529 startInd = 0
4544 4530 endInd = 0
4545 4531
4546 4532 for i in range(numBlocks):
4547 4533 startInd = endInd
4548 4534 endInd = endInd + listBlocks[i].shape[1]
4549 4535
4550 4536 arrayBlock = listBlocks[i]
4551 4537 # arrayBlockCenter = listCenter[i]
4552 4538
4553 4539 #Estimate the Phase Difference
4554 4540 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
4555 4541 #Phase Difference RMS
4556 4542 arrayPhaseRMS = numpy.abs(phaseDiff)
4557 4543 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
4558 4544 indPhase = numpy.where(phaseRMSaux==4)
4559 4545 #Shifting
4560 4546 if indPhase[0].shape[0] > 0:
4561 4547 for j in range(indSides.size):
4562 4548 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
4563 4549 voltsCohDet[:,startInd:endInd,:] = arrayBlock
4564 4550
4565 4551 return voltsCohDet
4566 4552
4567 4553 def __calculateCCF(self, volts, pairslist ,laglist):
4568 4554
4569 4555 nHeights = volts.shape[2]
4570 4556 nPoints = volts.shape[1]
4571 4557 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
4572 4558
4573 4559 for i in range(len(pairslist)):
4574 4560 volts1 = volts[pairslist[i][0]]
4575 4561 volts2 = volts[pairslist[i][1]]
4576 4562
4577 4563 for t in range(len(laglist)):
4578 4564 idxT = laglist[t]
4579 4565 if idxT >= 0:
4580 4566 vStacked = numpy.vstack((volts2[idxT:,:],
4581 4567 numpy.zeros((idxT, nHeights),dtype='complex')))
4582 4568 else:
4583 4569 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
4584 4570 volts2[:(nPoints + idxT),:]))
4585 4571 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
4586 4572
4587 4573 vStacked = None
4588 4574 return voltsCCF
4589 4575
4590 4576 def __getNoise(self, power, timeSegment, timeInterval):
4591 4577 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
4592 4578 numBlocks = int(power.shape[0]/numProfPerBlock)
4593 4579 numHeights = power.shape[1]
4594 4580
4595 4581 listPower = numpy.array_split(power, numBlocks, 0)
4596 4582 noise = numpy.zeros((power.shape[0], power.shape[1]))
4597 4583 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
4598 4584
4599 4585 startInd = 0
4600 4586 endInd = 0
4601 4587
4602 4588 for i in range(numBlocks): #split per time block
4603 4589 startInd = endInd
4604 4590 endInd = endInd + listPower[i].shape[0]
4605 4591
4606 4592 arrayBlock = listPower[i]
4607 4593 noiseAux = numpy.mean(arrayBlock, 0)
4608 4594 # noiseAux = numpy.median(noiseAux)
4609 4595 # noiseAux = numpy.mean(arrayBlock)
4610 4596 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
4611 4597
4612 4598 noiseAux1 = numpy.mean(arrayBlock)
4613 4599 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
4614 4600
4615 4601 return noise, noise1
4616 4602
4617 4603 def __findMeteors(self, power, thresh):
4618 4604 nProf = power.shape[0]
4619 4605 nHeights = power.shape[1]
4620 4606 listMeteors = []
4621 4607
4622 4608 for i in range(nHeights):
4623 4609 powerAux = power[:,i]
4624 4610 threshAux = thresh[:,i]
4625 4611
4626 4612 indUPthresh = numpy.where(powerAux > threshAux)[0]
4627 4613 indDNthresh = numpy.where(powerAux <= threshAux)[0]
4628 4614
4629 4615 j = 0
4630 4616
4631 4617 while (j < indUPthresh.size - 2):
4632 4618 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
4633 4619 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
4634 4620 indDNthresh = indDNthresh[indDNAux]
4635 4621
4636 4622 if (indDNthresh.size > 0):
4637 4623 indEnd = indDNthresh[0] - 1
4638 4624 indInit = indUPthresh[j]
4639 4625
4640 4626 meteor = powerAux[indInit:indEnd + 1]
4641 4627 indPeak = meteor.argmax() + indInit
4642 4628 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
4643 4629
4644 4630 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #CHECK THIS!!!!!
4645 4631 j = numpy.where(indUPthresh == indEnd)[0] + 1
4646 4632 else: j+=1
4647 4633 else: j+=1
4648 4634
4649 4635 return listMeteors
4650 4636
4651 4637 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
4652 4638
4653 4639 arrayMeteors = numpy.asarray(listMeteors)
4654 4640 listMeteors1 = []
4655 4641
4656 4642 while arrayMeteors.shape[0] > 0:
4657 4643 FLAs = arrayMeteors[:,4]
4658 4644 maxFLA = FLAs.argmax()
4659 4645 listMeteors1.append(arrayMeteors[maxFLA,:])
4660 4646
4661 4647 MeteorInitTime = arrayMeteors[maxFLA,1]
4662 4648 MeteorEndTime = arrayMeteors[maxFLA,3]
4663 4649 MeteorHeight = arrayMeteors[maxFLA,0]
4664 4650
4665 4651 #Check neighborhood
4666 4652 maxHeightIndex = MeteorHeight + rangeLimit
4667 4653 minHeightIndex = MeteorHeight - rangeLimit
4668 4654 minTimeIndex = MeteorInitTime - timeLimit
4669 4655 maxTimeIndex = MeteorEndTime + timeLimit
4670 4656
4671 4657 #Check Heights
4672 4658 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
4673 4659 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
4674 4660 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
4675 4661
4676 4662 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
4677 4663
4678 4664 return listMeteors1
4679 4665
4680 4666 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
4681 4667 numHeights = volts.shape[2]
4682 4668 nChannel = volts.shape[0]
4683 4669
4684 4670 thresholdPhase = thresh[0]
4685 4671 thresholdNoise = thresh[1]
4686 4672 thresholdDB = float(thresh[2])
4687 4673
4688 4674 thresholdDB1 = 10**(thresholdDB/10)
4689 4675 pairsarray = numpy.array(pairslist)
4690 4676 indSides = pairsarray[:,1]
4691 4677
4692 4678 pairslist1 = list(pairslist)
4693 4679 pairslist1.append((0,1))
4694 4680 pairslist1.append((3,4))
4695 4681
4696 4682 listMeteors1 = []
4697 4683 listPowerSeries = []
4698 4684 listVoltageSeries = []
4699 4685 #volts has the raw data
4700 4686
4701 4687 if frequency == 30e6:
4702 4688 timeLag = 45*10**-3
4703 4689 else:
4704 4690 timeLag = 15*10**-3
4705 4691 lag = int(numpy.ceil(timeLag/timeInterval)) #integer lag in samples, used for slicing
4706 4692
4707 4693 for i in range(len(listMeteors)):
4708 4694
4709 4695 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
4710 4696 meteorAux = numpy.zeros(16)
4711 4697
4712 4698 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
4713 4699 mHeight = listMeteors[i][0]
4714 4700 mStart = listMeteors[i][1]
4715 4701 mPeak = listMeteors[i][2]
4716 4702 mEnd = listMeteors[i][3]
4717 4703
4718 4704 #get the volt data between the start and end times of the meteor
4719 4705 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
4720 4706 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
4721 4707
4722 4708 #3.6. Phase Difference estimation
4723 4709 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
4724 4710
4725 4711 #3.7. Phase difference removal & meteor start, peak and end times reestimated
4726 4712 #meteorVolts0.- all Channels, all Profiles
4727 4713 meteorVolts0 = volts[:,:,mHeight]
4728 4714 meteorThresh = noise[:,mHeight]*thresholdNoise
4729 4715 meteorNoise = noise[:,mHeight]
4730 4716 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
4731 4717 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
4732 4718
4733 4719 #Times reestimation
4734 4720 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
4735 4721 if mStart1.size > 0:
4736 4722 mStart1 = mStart1[-1] + 1
4737 4723
4738 4724 else:
4739 4725 mStart1 = mPeak
4740 4726
4741 4727 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
4742 4728 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
4743 4729 if mEndDecayTime1.size == 0:
4744 4730 mEndDecayTime1 = powerNet0.size
4745 4731 else:
4746 4732 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
4747 4733 # mPeak1 = meteorVolts0[mStart1:mEnd1 + 1].argmax()
4748 4734
4749 4735 #meteorVolts1.- all Channels, from start to end
4750 4736 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
4751 4737 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
4752 4738 if meteorVolts2.shape[1] == 0:
4753 4739 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
4754 4740 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
4755 4741 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
4756 4742 ##################### END PARAMETERS REESTIMATION #########################
4757 4743
4758 4744 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
4759 4745 # if mEnd1 - mStart1 > 4: #Error Number 6: echo less than 5 samples long; too short for analysis
4760 4746 if meteorVolts2.shape[1] > 0:
4761 4747 #Phase Difference re-estimation
4762 4748 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
4763 4749 # phaseDiff1, phaseDiffint = self.estimatePhaseDifference(meteorVolts2, pairslist)
4764 4750 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
4765 4751 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
4766 4752 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
4767 4753
4768 4754 #Phase Difference RMS
4769 4755 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
4770 4756 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
4771 4757 #Data from Meteor
4772 4758 mPeak1 = powerNet1.argmax() + mStart1
4773 4759 mPeakPower1 = powerNet1.max()
4774 4760 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
4775 4761 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
4776 4762 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
4777 4763 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
4778 4764 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
4779 4765 #Vectorize
4780 4766 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
4781 4767 meteorAux[7:11] = phaseDiffint[0:4]
4782 4768
4783 4769 #Rejection Criteria
4784 4770 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
4785 4771 meteorAux[-1] = 17
4786 4772 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
4787 4773 meteorAux[-1] = 1
4788 4774
4789 4775
4790 4776 else:
4791 4777 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
4792 4778 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
4793 4779 PowerSeries = 0
4794 4780
4795 4781 listMeteors1.append(meteorAux)
4796 4782 listPowerSeries.append(PowerSeries)
4797 4783 listVoltageSeries.append(meteorVolts1)
4798 4784
4799 4785 return listMeteors1, listPowerSeries, listVoltageSeries
4800 4786
4801 4787 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
4802 4788
4803 4789 threshError = 10
4804 4790 #Depending if it is 30 or 50 MHz
4805 4791 if frequency == 30e6:
4806 4792 timeLag = 45*10**-3
4807 4793 else:
4808 4794 timeLag = 15*10**-3
4809 4795 lag = int(numpy.ceil(timeLag/timeInterval)) #integer lag in samples, used for slicing
4810 4796
4811 4797 listMeteors1 = []
4812 4798
4813 4799 for i in range(len(listMeteors)):
4814 4800 meteorPower = listPower[i]
4815 4801 meteorAux = listMeteors[i]
4816 4802
4817 4803 if meteorAux[-1] == 0:
4818 4804
4819 4805 try:
4820 4806 indmax = meteorPower.argmax()
4821 4807 indlag = indmax + lag
4822 4808
4823 4809 y = meteorPower[indlag:]
4824 4810 x = numpy.arange(0, y.size)*timeLag
4825 4811
4826 4812 #first guess
4827 4813 a = y[0]
4828 4814 tau = timeLag
4829 4815 #exponential fit
4830 4816 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
4831 4817 y1 = self.__exponential_function(x, *popt)
4832 4818 #error estimation
4833 4819 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
4834 4820
4835 4821 decayTime = popt[1]
4836 4822 riseTime = indmax*timeInterval
4837 4823 meteorAux[11:13] = [decayTime, error]
4838 4824
4839 4825 #Table items 7, 8 and 11
4840 4826 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
4841 4827 meteorAux[-1] = 7
4842 4828 elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than twice rise time
4843 4829 meteorAux[-1] = 8
4844 4830 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
4845 4831 meteorAux[-1] = 11
4846 4832
4847 4833
4848 4834 except:
4849 4835 meteorAux[-1] = 11
4850 4836
4851 4837
4852 4838 listMeteors1.append(meteorAux)
4853 4839
4854 4840 return listMeteors1
4855 4841
4856 4842 #Exponential Function
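# Model used by __estimateDecayTime: the post-peak power of an underdense echo is
# fitted to a*exp(-x/tau); tau gives the decay time and the normalized residual gives the fit error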
4857 4843
4858 4844 def __exponential_function(self, x, a, tau):
4859 4845 y = a*numpy.exp(-x/tau)
4860 4846 return y
4861 4847
4862 4848 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
4863 4849
4864 4850 pairslist1 = list(pairslist)
4865 4851 pairslist1.append((0,1))
4866 4852 pairslist1.append((3,4))
4867 4853 numPairs = len(pairslist1)
4868 4854 #Time Lag
4869 4855 timeLag = 45*10**-3
4870 4856 c = 3e8
4871 4857 lag = numpy.ceil(timeLag/timeInterval)
4872 4858 freq = 30e6
4873 4859
4874 4860 listMeteors1 = []
4875 4861
4876 4862 for i in range(len(listMeteors)):
4877 4863 meteorAux = listMeteors[i]
4878 4864 if meteorAux[-1] == 0:
4879 4865 mStart = listMeteors[i][1]
4880 4866 mPeak = listMeteors[i][2]
4881 4867 mLag = mPeak - mStart + lag
4882 4868
4883 4869 #get the volt data between the start and end times of the meteor
4884 4870 meteorVolts = listVolts[i]
4885 4871 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
4886 4872
4887 4873 #Get CCF
4888 4874 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
4889 4875
4890 4876 #Method 2
4891 4877 slopes = numpy.zeros(numPairs)
4892 4878 time = numpy.array([-2,-1,1,2])*timeInterval
4893 4879 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
4894 4880
4895 4881 #Correct phases
4896 4882 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
4897 4883 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
4898 4884
4899 4885 if indDer[0].shape[0] > 0:
4900 4886 for i in range(indDer[0].shape[0]):
4901 4887 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i]])
4902 4888 angAllCCF[indDer[0][i],indDer[1][i]+1:] += signo*2*numpy.pi
4903 4889
4904 4890 # fit = scipy.stats.linregress(numpy.array([-2,-1,1,2])*timeInterval, numpy.array([phaseLagN2s[i],phaseLagN1s[i],phaseLag1s[i],phaseLag2s[i]]))
4905 4891 for j in range(numPairs):
4906 4892 fit = stats.linregress(time, angAllCCF[j,:])
4907 4893 slopes[j] = fit[0]
4908 4894
4909 4895 #Remove Outlier
4910 4896 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
4911 4897 # slopes = numpy.delete(slopes,indOut)
4912 4898 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
4913 4899 # slopes = numpy.delete(slopes,indOut)
4914 4900
4915 4901 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
4916 4902 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
4917 4903 meteorAux[-2] = radialError
4918 4904 meteorAux[-3] = radialVelocity
4919 4905
4920 4906 #Setting Error
4921 4907 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
4922 4908 if numpy.abs(radialVelocity) > 200:
4923 4909 meteorAux[-1] = 15
4924 4910 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
4925 4911 elif radialError > radialStdThresh:
4926 4912 meteorAux[-1] = 12
4927 4913
4928 4914 listMeteors1.append(meteorAux)
4929 4915 return listMeteors1
4930 4916
4931 4917 def __setNewArrays(self, listMeteors, date, heiRang):
4932 4918
4933 4919 #New arrays
4934 4920 arrayMeteors = numpy.array(listMeteors)
4935 4921 arrayParameters = numpy.zeros((len(listMeteors), 13))
4936 4922
4937 4923 #Date inclusion
4938 4924 # date = re.findall(r'\((.*?)\)', date)
4939 4925 # date = date[0].split(',')
4940 4926 # date = map(int, date)
4941 4927 #
4942 4928 # if len(date)<6:
4943 4929 # date.append(0)
4944 4930 #
4945 4931 # date = [date[0]*10000 + date[1]*100 + date[2], date[3]*10000 + date[4]*100 + date[5]]
4946 4932 # arrayDate = numpy.tile(date, (len(listMeteors), 1))
4947 4933 arrayDate = numpy.tile(date, (len(listMeteors)))
4948 4934
4949 4935 #Meteor array
4950 4936 # arrayMeteors[:,0] = heiRang[arrayMeteors[:,0].astype(int)]
4951 4937 # arrayMeteors = numpy.hstack((arrayDate, arrayMeteors))
4952 4938
4953 4939 #Parameters Array
4954 4940 arrayParameters[:,0] = arrayDate #Date
4955 4941 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
4956 4942 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
4957 4943 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
4958 4944 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
4959 4945
4960 4946
4961 4947 return arrayParameters
4962 4948
4963 4949 class CorrectSMPhases(Operation):
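'''
Applies known receiver phase offsets (in degrees) to the specular-meteor phases
in data_param and recomputes AOA and heights through SMOperations.

Example
--------
A configuration sketch; the offset values are placeholders and the 'floatlist'
format name is assumed from the usual controller-script conventions:

op = proc_unit.addOperation(name='CorrectSMPhases', optype='other')
op.addParameter(name='phaseOffsets', value='0,0,0,0', format='floatlist')
'''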
4964 4950
4965 4951 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
4966 4952
4967 4953 arrayParameters = dataOut.data_param
4968 4954 pairsList = []
4969 4955 pairx = (0,1)
4970 4956 pairy = (2,3)
4971 4957 pairsList.append(pairx)
4972 4958 pairsList.append(pairy)
4973 4959 jph = numpy.zeros(4)
4974 4960
4975 4961 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
4976 4962 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
4977 4963 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
4978 4964
4979 4965 meteorOps = SMOperations()
4980 4966 if channelPositions is None:
4981 4967 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
4982 4968 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
4983 4969
4984 4970 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
4985 4971 h = (hmin,hmax)
4986 4972
4987 4973 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
4988 4974
4989 4975 dataOut.data_param = arrayParameters
4990 4976 return
4991 4977
4992 4978 class SMPhaseCalibration(Operation):
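'''
Accumulates specular-meteor parameters for nHours and searches for the receiver
phase offsets (returned in degrees) that maximize the number of meteors passing
the AOA and height validity checks (Holdsworth et al. 2004).

Example
--------
A configuration sketch; hmin/hmax are illustrative meteor-region limits:

op = proc_unit.addOperation(name='SMPhaseCalibration', optype='other')
op.addParameter(name='hmin', value='70', format='float')
op.addParameter(name='hmax', value='110', format='float')
op.addParameter(name='nHours', value='1', format='float')
'''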
4993 4979
4994 4980 __buffer = None
4995 4981
4996 4982 __initime = None
4997 4983
4998 4984 __dataReady = False
4999 4985
5000 4986 __isConfig = False
5001 4987
5002 4988 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
5003 4989
5004 4990 dataTime = currentTime + paramInterval
5005 4991 deltaTime = dataTime - initTime
5006 4992
5007 4993 if deltaTime >= outputInterval or deltaTime < 0:
5008 4994 return True
5009 4995
5010 4996 return False
5011 4997
5012 4998 def __getGammas(self, pairs, d, phases):
5013 4999 gammas = numpy.zeros(2)
5014 5000
5015 5001 for i in range(len(pairs)):
5016 5002
5017 5003 pairi = pairs[i]
5018 5004
5019 5005 phip3 = phases[:,pairi[0]]
5020 5006 d3 = d[pairi[0]]
5021 5007 phip2 = phases[:,pairi[1]]
5022 5008 d2 = d[pairi[1]]
5023 5009 #Calculating gamma
5024 5010 # jdcos = alp1/(k*d1)
5025 5011 # jgamma = numpy.angle(numpy.exp(1j*(d0*alp1/d1 - alp0)))
5026 5012 jgamma = -phip2*d3/d2 - phip3
5027 5013 jgamma = numpy.angle(numpy.exp(1j*jgamma))
5028 5014 # jgamma[jgamma>numpy.pi] -= 2*numpy.pi
5029 5015 # jgamma[jgamma<-numpy.pi] += 2*numpy.pi
5030 5016
5031 5017 #Revised distribution
5032 5018 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
5033 5019
5034 5020 #Histogram
5035 5021 nBins = 64
5036 5022 rmin = -0.5*numpy.pi
5037 5023 rmax = 0.5*numpy.pi
5038 5024 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
5039 5025
5040 5026 meteorsY = phaseHisto[0]
5041 5027 phasesX = phaseHisto[1][:-1]
5042 5028 width = phasesX[1] - phasesX[0]
5043 5029 phasesX += width/2
5044 5030
5045 5031 #Gaussian approximation
5046 5032 bpeak = meteorsY.argmax()
5047 5033 peak = meteorsY.max()
5048 5034 jmin = bpeak - 5
5049 5035 jmax = bpeak + 5 + 1
5050 5036
5051 5037 if jmin<0:
5052 5038 jmin = 0
5053 5039 jmax = 6
5054 5040 elif jmax > meteorsY.size:
5055 5041 jmin = meteorsY.size - 6
5056 5042 jmax = meteorsY.size
5057 5043
5058 5044 x0 = numpy.array([peak,bpeak,50])
5059 5045 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
5060 5046
5061 5047 #Gammas
5062 5048 gammas[i] = coeff[0][1]
5063 5049
5064 5050 return gammas
5065 5051
5066 5052 def __residualFunction(self, coeffs, y, t):
5067 5053
5068 5054 return y - self.__gauss_function(t, coeffs)
5069 5055
5070 5056 def __gauss_function(self, t, coeffs):
5071 5057
5072 5058 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
5073 5059
5074 5060 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
5075 5061 meteorOps = SMOperations()
5076 5062 nchan = 4
5077 5063 pairx = pairsList[0] #x is 0
5078 5064 pairy = pairsList[1] #y is 1
5079 5065 center_xangle = 0
5080 5066 center_yangle = 0
5081 5067 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
5082 5068 ntimes = len(range_angle)
5083 5069
5084 5070 nstepsx = 20
5085 5071 nstepsy = 20
5086 5072
5087 5073 for iz in range(ntimes):
5088 5074 min_xangle = -range_angle[iz]/2 + center_xangle
5089 5075 max_xangle = range_angle[iz]/2 + center_xangle
5090 5076 min_yangle = -range_angle[iz]/2 + center_yangle
5091 5077 max_yangle = range_angle[iz]/2 + center_yangle
5092 5078
5093 5079 inc_x = (max_xangle-min_xangle)/nstepsx
5094 5080 inc_y = (max_yangle-min_yangle)/nstepsy
5095 5081
5096 5082 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
5097 5083 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
5098 5084 penalty = numpy.zeros((nstepsx,nstepsy))
5099 5085 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
5100 5086 jph = numpy.zeros(nchan)
5101 5087
5102 5088 # Iterations looking for the offset
5103 5089 for iy in range(int(nstepsy)):
5104 5090 for ix in range(int(nstepsx)):
5105 5091 d3 = d[pairsList[1][0]]
5106 5092 d2 = d[pairsList[1][1]]
5107 5093 d5 = d[pairsList[0][0]]
5108 5094 d4 = d[pairsList[0][1]]
5109 5095
5110 5096 alp2 = alpha_y[iy] #gamma 1
5111 5097 alp4 = alpha_x[ix] #gamma 0
5112 5098
5113 5099 alp3 = -alp2*d3/d2 - gammas[1]
5114 5100 alp5 = -alp4*d5/d4 - gammas[0]
5115 5101 # jph[pairy[1]] = alpha_y[iy]
5116 5102 # jph[pairy[0]] = -gammas[1] - alpha_y[iy]*d[pairy[1]]/d[pairy[0]]
5117 5103
5118 5104 # jph[pairx[1]] = alpha_x[ix]
5119 5105 # jph[pairx[0]] = -gammas[0] - alpha_x[ix]*d[pairx[1]]/d[pairx[0]]
5120 5106 jph[pairsList[0][1]] = alp4
5121 5107 jph[pairsList[0][0]] = alp5
5122 5108 jph[pairsList[1][0]] = alp3
5123 5109 jph[pairsList[1][1]] = alp2
5124 5110 jph_array[:,ix,iy] = jph
5125 5111 # d = [2.0,2.5,2.5,2.0]
5126 5112 #still need to check that the meteors are read correctly
5127 5113 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
5128 5114 error = meteorsArray1[:,-1]
5129 5115 ind1 = numpy.where(error==0)[0]
5130 5116 penalty[ix,iy] = ind1.size
5131 5117
5132 5118 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
5133 5119 phOffset = jph_array[:,i,j]
5134 5120
5135 5121 center_xangle = phOffset[pairx[1]]
5136 5122 center_yangle = phOffset[pairy[1]]
5137 5123
5138 5124 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
5139 5125 phOffset = phOffset*180/numpy.pi
5140 5126 return phOffset
5141 5127
5142 5128
5143 5129 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
5144 5130
5145 5131 dataOut.flagNoData = True
5146 5132 self.__dataReady = False
5147 5133 dataOut.outputInterval = nHours*3600
5148 5134
5149 5135 if self.__isConfig == False:
5150 5136 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
5151 5137 #Get Initial LTC time
5152 5138 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5153 5139 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5154 5140
5155 5141 self.__isConfig = True
5156 5142
5157 5143 if self.__buffer is None:
5158 5144 self.__buffer = dataOut.data_param.copy()
5159 5145
5160 5146 else:
5161 5147 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5162 5148
5163 5149 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5164 5150
5165 5151 if self.__dataReady:
5166 5152 dataOut.utctimeInit = self.__initime
5167 5153 self.__initime += dataOut.outputInterval #to erase time offset
5168 5154
5169 5155 freq = dataOut.frequency
5170 5156 c = dataOut.C #m/s
5171 5157 lamb = c/freq
5172 5158 k = 2*numpy.pi/lamb
5173 5159 azimuth = 0
5174 5160 h = (hmin, hmax)
5175 5161 # pairs = ((0,1),(2,3)) #Estrella
5176 5162 # pairs = ((1,0),(2,3)) #T
5177 5163
5178 5164 if channelPositions is None:
5179 5165 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
5180 5166 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
5181 5167 meteorOps = SMOperations()
5182 5168 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
5183 5169
5184 5170 #Checking correct order of pairs
5185 5171 pairs = []
5186 5172 if distances[1] > distances[0]:
5187 5173 pairs.append((1,0))
5188 5174 else:
5189 5175 pairs.append((0,1))
5190 5176
5191 5177 if distances[3] > distances[2]:
5192 5178 pairs.append((3,2))
5193 5179 else:
5194 5180 pairs.append((2,3))
5195 5181 # distances1 = [-distances[0]*lamb, distances[1]*lamb, -distances[2]*lamb, distances[3]*lamb]
5196 5182
5197 5183 meteorsArray = self.__buffer
5198 5184 error = meteorsArray[:,-1]
5199 5185 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
5200 5186 ind1 = numpy.where(boolError)[0]
5201 5187 meteorsArray = meteorsArray[ind1,:]
5202 5188 meteorsArray[:,-1] = 0
5203 5189 phases = meteorsArray[:,8:12]
5204 5190
5205 5191 #Calculate Gammas
5206 5192 gammas = self.__getGammas(pairs, distances, phases)
5207 5193 # gammas = numpy.array([-21.70409463,45.76935864])*numpy.pi/180
5208 5194 #Calculate Phases
5209 5195 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
5210 5196 phasesOff = phasesOff.reshape((1,phasesOff.size))
5211 5197 dataOut.data_output = -phasesOff
5212 5198 dataOut.flagNoData = False
5213 5199 self.__buffer = None
5214 5200
5215 5201
5216 5202 return
5217 5203
5218 5204 class SMOperations():
5219 5205
5220 5206 def __init__(self):
5221 5207
5222 5208 return
5223 5209
5224 5210 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
5225 5211
5226 5212 arrayParameters = arrayParameters0.copy()
5227 5213 hmin = h[0]
5228 5214 hmax = h[1]
5229 5215
5230 5216 #Calculate AOA (Error N 3, 4)
5231 5217 #JONES ET AL. 1998
5232 5218 AOAthresh = numpy.pi/8
5233 5219 error = arrayParameters[:,-1]
5234 5220 phases = -arrayParameters[:,8:12] + jph
5235 5221 # phases = numpy.unwrap(phases)
5236 5222 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
5237 5223
5238 5224 #Calculate Heights (Error N 13 and 14)
5239 5225 error = arrayParameters[:,-1]
5240 5226 Ranges = arrayParameters[:,1]
5241 5227 zenith = arrayParameters[:,4]
5242 5228 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
5243 5229
5244 5230 #----------------------- Get Final data ------------------------------------
5245 5231 # error = arrayParameters[:,-1]
5246 5232 # ind1 = numpy.where(error==0)[0]
5247 5233 # arrayParameters = arrayParameters[ind1,:]
5248 5234
5249 5235 return arrayParameters
5250 5236
5251 5237 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
5252 5238
5253 5239 arrayAOA = numpy.zeros((phases.shape[0],3))
5254 5240 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
5255 5241
5256 5242 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
5257 5243 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
5258 5244 arrayAOA[:,2] = cosDirError
5259 5245
5260 5246 azimuthAngle = arrayAOA[:,0]
5261 5247 zenithAngle = arrayAOA[:,1]
5262 5248
5263 5249 #Setting Error
5264 5250 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
5265 5251 error[indError] = 0
5266 5252 #Number 3: AOA not feasible
5267 5253 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
5268 5254 error[indInvalid] = 3
5269 5255 #Number 4: Large difference in AOAs obtained from different antenna baselines
5270 5256 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
5271 5257 error[indInvalid] = 4
5272 5258 return arrayAOA, error
5273 5259
5274 5260 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
5275 5261
5276 5262 #Initializing some variables
5277 5263 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
5278 5264 ang_aux = ang_aux.reshape(1,ang_aux.size)
5279 5265
5280 5266 cosdir = numpy.zeros((arrayPhase.shape[0],2))
5281 5267 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
5282 5268
5283 5269
5284 5270 for i in range(2):
5285 5271 ph0 = arrayPhase[:,pairsList[i][0]]
5286 5272 ph1 = arrayPhase[:,pairsList[i][1]]
5287 5273 d0 = distances[pairsList[i][0]]
5288 5274 d1 = distances[pairsList[i][1]]
5289 5275
5290 5276 ph0_aux = ph0 + ph1
5291 5277 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
5292 5278 # ph0_aux[ph0_aux > numpy.pi] -= 2*numpy.pi
5293 5279 # ph0_aux[ph0_aux < -numpy.pi] += 2*numpy.pi
5294 5280 #First Estimation
5295 5281 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
5296 5282
5297 5283 #Most-Accurate Second Estimation
5298 5284 phi1_aux = ph0 - ph1
5299 5285 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
5300 5286 #Direction Cosine 1
5301 5287 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
5302 5288
5303 5289 #Searching the correct Direction Cosine
5304 5290 cosdir0_aux = cosdir0[:,i]
5305 5291 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
5306 5292 #Minimum Distance
5307 5293 cosDiff = (cosdir1 - cosdir0_aux)**2
5308 5294 indcos = cosDiff.argmin(axis = 1)
5309 5295 #Saving Value obtained
5310 5296 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
5311 5297
5312 5298 return cosdir0, cosdir
5313 5299
5314 5300 def __calculateAOA(self, cosdir, azimuth):
5315 5301 cosdirX = cosdir[:,0]
5316 5302 cosdirY = cosdir[:,1]
5317 5303
5318 5304 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
5319 5305 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth#0 deg north, 90 deg east
5320 5306 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
5321 5307
5322 5308 return angles
5323 5309
5324 5310 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
5325 5311
5326 5312 Ramb = 375 #Ramb = c/(2*PRF)
5327 5313 Re = 6371 #Earth Radius
5328 5314 heights = numpy.zeros(Ranges.shape)
5329 5315
5330 5316 R_aux = numpy.array([0,1,2])*Ramb
5331 5317 R_aux = R_aux.reshape(1,R_aux.size)
5332 5318
5333 5319 Ranges = Ranges.reshape(Ranges.size,1)
5334 5320
5335 5321 Ri = Ranges + R_aux
5336 5322 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
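#Law of cosines: the distance from the Earth's center to the echo is
#sqrt(Re^2 + Ri^2 + 2*Re*Ri*cos(zenith)), so the height is that distance minus Re;
#Ri spans the range-aliased candidates Ranges + n*Ramb, n = 0, 1, 2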
5337 5323
5338 5324 #Check if there is a height between 70 and 110 km
5339 5325 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
5340 5326 ind_h = numpy.where(h_bool == 1)[0]
5341 5327
5342 5328 hCorr = hi[ind_h, :]
5343 5329 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
5344 5330
5345 5331 hCorr = hi[ind_hCorr][:len(ind_h)]
5346 5332 heights[ind_h] = hCorr
5347 5333
5348 5334 #Setting Error
5349 5335 #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
5350 5336 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
5351 5337 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
5352 5338 error[indError] = 0
5353 5339 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
5354 5340 error[indInvalid2] = 14
5355 5341 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
5356 5342 error[indInvalid1] = 13
5357 5343
5358 5344 return heights, error
5359 5345
5360 5346 def getPhasePairs(self, channelPositions):
5361 5347 chanPos = numpy.array(channelPositions)
5362 5348 listOper = list(itertools.combinations(list(range(5)),2))
5363 5349
5364 5350 distances = numpy.zeros(4)
5365 5351 axisX = []
5366 5352 axisY = []
5367 5353 distX = numpy.zeros(3)
5368 5354 distY = numpy.zeros(3)
5369 5355 ix = 0
5370 5356 iy = 0
5371 5357
5372 5358 pairX = numpy.zeros((2,2))
5373 5359 pairY = numpy.zeros((2,2))
5374 5360
5375 5361 for i in range(len(listOper)):
5376 5362 pairi = listOper[i]
5377 5363
5378 5364 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
5379 5365
5380 5366 if posDif[0] == 0:
5381 5367 axisY.append(pairi)
5382 5368 distY[iy] = posDif[1]
5383 5369 iy += 1
5384 5370 elif posDif[1] == 0:
5385 5371 axisX.append(pairi)
5386 5372 distX[ix] = posDif[0]
5387 5373 ix += 1
5388 5374
5389 5375 for i in range(2):
5390 5376 if i==0:
5391 5377 dist0 = distX
5392 5378 axis0 = axisX
5393 5379 else:
5394 5380 dist0 = distY
5395 5381 axis0 = axisY
5396 5382
5397 5383 side = numpy.argsort(dist0)[:-1]
5398 5384 axis0 = numpy.array(axis0)[side,:]
5399 5385 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
5400 5386 axis1 = numpy.unique(numpy.reshape(axis0,4))
5401 5387 side = axis1[axis1 != chanC]
5402 5388 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
5403 5389 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
5404 5390 if diff1<0:
5405 5391 chan2 = side[0]
5406 5392 d2 = numpy.abs(diff1)
5407 5393 chan1 = side[1]
5408 5394 d1 = numpy.abs(diff2)
5409 5395 else:
5410 5396 chan2 = side[1]
5411 5397 d2 = numpy.abs(diff2)
5412 5398 chan1 = side[0]
5413 5399 d1 = numpy.abs(diff1)
5414 5400
5415 5401 if i==0:
5416 5402 chanCX = chanC
5417 5403 chan1X = chan1
5418 5404 chan2X = chan2
5419 5405 distances[0:2] = numpy.array([d1,d2])
5420 5406 else:
5421 5407 chanCY = chanC
5422 5408 chan1Y = chan1
5423 5409 chan2Y = chan2
5424 5410 distances[2:4] = numpy.array([d1,d2])
5425 5411 # axisXsides = numpy.reshape(axisX[ix,:],4)
5426 5412 #
5427 5413 # channelCentX = int(numpy.intersect1d(pairX[0,:], pairX[1,:])[0])
5428 5414 # channelCentY = int(numpy.intersect1d(pairY[0,:], pairY[1,:])[0])
5429 5415 #
5430 5416 # ind25X = numpy.where(pairX[0,:] != channelCentX)[0][0]
5431 5417 # ind20X = numpy.where(pairX[1,:] != channelCentX)[0][0]
5432 5418 # channel25X = int(pairX[0,ind25X])
5433 5419 # channel20X = int(pairX[1,ind20X])
5434 5420 # ind25Y = numpy.where(pairY[0,:] != channelCentY)[0][0]
5435 5421 # ind20Y = numpy.where(pairY[1,:] != channelCentY)[0][0]
5436 5422 # channel25Y = int(pairY[0,ind25Y])
5437 5423 # channel20Y = int(pairY[1,ind20Y])
5438 5424
5439 5425 # pairslist = [(channelCentX, channel25X),(channelCentX, channel20X),(channelCentY,channel25Y),(channelCentY, channel20Y)]
5440 5426 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
5441 5427
5442 5428 return pairslist, distances
5443 5429 # def __getAOA(self, phases, pairsList, error, AOAthresh, azimuth):
5444 5430 #
5445 5431 # arrayAOA = numpy.zeros((phases.shape[0],3))
5446 5432 # cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList)
5447 5433 #
5448 5434 # arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
5449 5435 # cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
5450 5436 # arrayAOA[:,2] = cosDirError
5451 5437 #
5452 5438 # azimuthAngle = arrayAOA[:,0]
5453 5439 # zenithAngle = arrayAOA[:,1]
5454 5440 #
5455 5441 # #Setting Error
5456 5442 # #Number 3: AOA not fesible
5457 5443 # indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
5458 5444 # error[indInvalid] = 3
5459 5445 # #Number 4: Large difference in AOAs obtained from different antenna baselines
5460 5446 # indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
5461 5447 # error[indInvalid] = 4
5462 5448 # return arrayAOA, error
5463 5449 #
5464 5450 # def __getDirectionCosines(self, arrayPhase, pairsList):
5465 5451 #
5466 5452 # #Initializing some variables
5467 5453 # ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
5468 5454 # ang_aux = ang_aux.reshape(1,ang_aux.size)
5469 5455 #
5470 5456 # cosdir = numpy.zeros((arrayPhase.shape[0],2))
5471 5457 # cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
5472 5458 #
5473 5459 #
5474 5460 # for i in range(2):
5475 5461 # #First Estimation
5476 5462 # phi0_aux = arrayPhase[:,pairsList[i][0]] + arrayPhase[:,pairsList[i][1]]
5477 5463 # #Dealias
5478 5464 # indcsi = numpy.where(phi0_aux > numpy.pi)
5479 5465 # phi0_aux[indcsi] -= 2*numpy.pi
5480 5466 # indcsi = numpy.where(phi0_aux < -numpy.pi)
5481 5467 # phi0_aux[indcsi] += 2*numpy.pi
5482 5468 # #Direction Cosine 0
5483 5469 # cosdir0[:,i] = -(phi0_aux)/(2*numpy.pi*0.5)
5484 5470 #
5485 5471 # #Most-Accurate Second Estimation
5486 5472 # phi1_aux = arrayPhase[:,pairsList[i][0]] - arrayPhase[:,pairsList[i][1]]
5487 5473 # phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
5488 5474 # #Direction Cosine 1
5489 5475 # cosdir1 = -(phi1_aux + ang_aux)/(2*numpy.pi*4.5)
5490 5476 #
5491 5477 # #Searching the correct Direction Cosine
5492 5478 # cosdir0_aux = cosdir0[:,i]
5493 5479 # cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
5494 5480 # #Minimum Distance
5495 5481 # cosDiff = (cosdir1 - cosdir0_aux)**2
5496 5482 # indcos = cosDiff.argmin(axis = 1)
5497 5483 # #Saving Value obtained
5498 5484 # cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
5499 5485 #
5500 5486 # return cosdir0, cosdir
5501 5487 #
5502 5488 # def __calculateAOA(self, cosdir, azimuth):
5503 5489 # cosdirX = cosdir[:,0]
5504 5490 # cosdirY = cosdir[:,1]
5505 5491 #
5506 5492 # zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
5507 5493 # azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
5508 5494 # angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
5509 5495 #
5510 5496 # return angles
5511 5497 #
5512 5498 # def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
5513 5499 #
5514 5500 # Ramb = 375 #Ramb = c/(2*PRF)
5515 5501 # Re = 6371 #Earth Radius
5516 5502 # heights = numpy.zeros(Ranges.shape)
5517 5503 #
5518 5504 # R_aux = numpy.array([0,1,2])*Ramb
5519 5505 # R_aux = R_aux.reshape(1,R_aux.size)
5520 5506 #
5521 5507 # Ranges = Ranges.reshape(Ranges.size,1)
5522 5508 #
5523 5509 # Ri = Ranges + R_aux
5524 5510 # hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
5525 5511 #
5526 5512 # #Check if there is a height between 70 and 110 km
5527 5513 # h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
5528 5514 # ind_h = numpy.where(h_bool == 1)[0]
5529 5515 #
5530 5516 # hCorr = hi[ind_h, :]
5531 5517 # ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
5532 5518 #
5533 5519 # hCorr = hi[ind_hCorr]
5534 5520 # heights[ind_h] = hCorr
5535 5521 #
5536 5522 # #Setting Error
5537 5523 # #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
5538 5524 # #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
5539 5525 #
5540 5526 # indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
5541 5527 # error[indInvalid2] = 14
5542 5528 # indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
5543 5529 # error[indInvalid1] = 13
5544 5530 #
5545 5531 # return heights, error
5546 5532
5547 5533
5548 5534
5549 5535 class IGRFModel(Operation):
5550 5536 """Operation to calculate Geomagnetic parameters.
5551 5537
5552 5538 Parameters:
5553 5539 -----------
5554 5540 None
5555 5541
5556 5542 Example
5557 5543 --------
5558 5544
5559 5545 op = proc_unit.addOperation(name='IGRFModel', optype='other')
5560 5546
5561 5547 """
5562 5548
5563 5549 def __init__(self, **kwargs):
5564 5550
5565 5551 Operation.__init__(self, **kwargs)
5566 5552
5567 5553 self.aux=1
5568 5554
5569 5555 def run(self,dataOut):
5570 5556
5571 5557 try:
5572 5558 from schainpy.model.proc import mkfact_short_2020
5573 5559 except:
5574 5560 log.warning('You should install "mkfact_short_2020" module to process IGRF Model')
5575 5561
5576 5562 if self.aux==1:
5577 5563
5578 5564 #dataOut.TimeBlockSeconds_First_Time=time.mktime(time.strptime(dataOut.TimeBlockDate))
5579 5565 #### we do not use dataOut.datatime.ctime() because it's the time of the second (next) block
5580 5566 dataOut.TimeBlockSeconds_First_Time=dataOut.TimeBlockSeconds
5581 5567 dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_First_Time)
5582 5568 dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0
5583 5569 dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0
5584 5570
5585 5571 self.aux=0
5586 5572
5587 5573 dataOut.h=numpy.arange(0.0,15.0*dataOut.MAXNRANGENDT,15.0,dtype='float32')
5588 5574 dataOut.bfm=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
5589 5575 dataOut.bfm=numpy.array(dataOut.bfm,order='F')
5590 5576 dataOut.thb=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
5591 5577 dataOut.thb=numpy.array(dataOut.thb,order='F')
5592 5578 dataOut.bki=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
5593 5579 dataOut.bki=numpy.array(dataOut.bki,order='F')
5594 5580
5595 5581 mkfact_short_2020.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT)
5596 5582
5597 5583 return dataOut