Update from schain_tmp Joab Version - AVP
sebastianVP -
r1753:0a6e6a51ad73
@@ -1,7573 +1,7613
1 1 # MASTER
2 2 import numpy
3 3 import math
4 4 from scipy import optimize, interpolate, signal, stats, ndimage
5 5 from scipy.fftpack import fft
6 6 import scipy
7 7 import re
8 8 import datetime
9 9 import copy
10 10 import sys
11 11 import importlib
12 12 import itertools
13 13 from multiprocessing import Pool, TimeoutError
14 14 from multiprocessing.pool import ThreadPool
15 15 import time
16 16
17 17 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state parameters
18 18 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
19 19 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
20 20 from schainpy.model.data.jrodata import Spectra
21 21 #from scipy import asarray as ar,exp
22 22 from scipy.optimize import fmin, curve_fit
23 23 from schainpy.utils import log
24 24 import warnings
25 25 from numpy import NaN
26 26 from scipy.optimize.optimize import OptimizeWarning
27 27 warnings.filterwarnings('ignore')
28 28
29 import os
30 import csv
31 from scipy import signal
32 import matplotlib.pyplot as plt
29 33
30 34 SPEED_OF_LIGHT = 299792458
31 35
32 36 '''solving pickling issue'''
33 37
34 38 def _pickle_method(method):
35 39 func_name = method.__func__.__name__
36 40 obj = method.__self__
37 41 cls = method.__self__.__class__
38 42 return _unpickle_method, (func_name, obj, cls)
39 43
40 44 def _unpickle_method(func_name, obj, cls):
41 45 for cls in cls.mro():
42 46 try:
43 47 func = cls.__dict__[func_name]
44 48 except KeyError:
45 49 pass
46 50 else:
47 51 break
48 52 return func.__get__(obj, cls)
49 53
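# A minimal sketch (assumption: the registration is not done elsewhere in this module)
# of how the two helpers above are meant to be hooked up so that bound methods can be
# pickled and handed to a multiprocessing.Pool:
#
#   import copyreg, types
#   copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)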
50 54
51 55 class ParametersProc(ProcessingUnit):
52 56
53 57 METHODS = {}
54 58 nSeconds = None
55 59
56 60 def __init__(self):
57 61 ProcessingUnit.__init__(self)
58 62
59 63 self.buffer = None
60 64 self.firstdatatime = None
61 65 self.profIndex = 0
62 66 self.dataOut = Parameters()
63 67 self.setupReq = False #Agregar a todas las unidades de proc
64 68
65 69 def __updateObjFromInput(self):
66 70
67 71 self.dataOut.inputUnit = self.dataIn.type
68 72
69 73 self.dataOut.timeZone = self.dataIn.timeZone
70 74 self.dataOut.dstFlag = self.dataIn.dstFlag
71 75 self.dataOut.errorCount = self.dataIn.errorCount
72 76 self.dataOut.useLocalTime = self.dataIn.useLocalTime
73 77
74 78 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
79 self.dataOut.processingHeaderObj = self.dataIn.processingHeaderObj.copy()
75 80 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
81
76 82 self.dataOut.channelList = self.dataIn.channelList
77 83 self.dataOut.heightList = self.dataIn.heightList
84 self.dataOut.ipp = self.dataIn.ipp
85 self.dataOut.ippSeconds = self.dataIn.ippSeconds
86 self.dataOut.deltaHeight = self.dataIn.deltaHeight
78 87 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
79 # self.dataOut.nBaud = self.dataIn.nBaud
80 # self.dataOut.nCode = self.dataIn.nCode
81 # self.dataOut.code = self.dataIn.code
88
89 self.dataOut.nBaud = self.dataIn.nBaud
90 self.dataOut.nCode = self.dataIn.nCode
91 self.dataOut.code = self.dataIn.code
92 self.dataOut.nProfiles = self.dataIn.nProfiles
93
82 94 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
83 95 self.dataOut.utctime = self.dataIn.utctime
84 96 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData #assume the data is already decoded
85 97 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData #assume the data has not been flipped
86 98 self.dataOut.nCohInt = self.dataIn.nCohInt
99 self.dataOut.nIncohInt = self.dataIn.nIncohInt
100 self.dataOut.ippSeconds = self.dataIn.ippSeconds
101 self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
102
87 103 self.dataOut.timeInterval1 = self.dataIn.timeInterval
88 104 self.dataOut.heightList = self.dataIn.heightList
89 105 self.dataOut.frequency = self.dataIn.frequency
106 self.dataOut.codeList = self.dataIn.codeList
107 self.dataOut.azimuthList = self.dataIn.azimuthList
108 self.dataOut.elevationList = self.dataIn.elevationList
90 109 self.dataOut.runNextUnit = self.dataIn.runNextUnit
91 110
92 111 def run(self, runNextUnit=0):
93 112
94 113 self.dataIn.runNextUnit = runNextUnit
95 114 #---------------------- Voltage Data ---------------------------
115 try:
116 intype = self.dataIn.type.decode("utf-8")
117 self.dataIn.type = intype
118 except:
119 pass
96 120
97 121 if self.dataIn.type == "Voltage":
98 122
99 123 self.__updateObjFromInput()
100 124 self.dataOut.data_pre = self.dataIn.data.copy()
101 125 self.dataOut.flagNoData = False
102 126 self.dataOut.utctimeInit = self.dataIn.utctime
103 127 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
104 128 if hasattr(self.dataIn, 'dataPP_POW'):
105 129 self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
106 130
107 131 if hasattr(self.dataIn, 'dataPP_POWER'):
108 132 self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
109 133
110 134 if hasattr(self.dataIn, 'dataPP_DOP'):
111 135 self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
112 136
113 137 if hasattr(self.dataIn, 'dataPP_SNR'):
114 138 self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
115 139
116 140 if hasattr(self.dataIn, 'dataPP_WIDTH'):
117 141 self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
118 142 return
119 143
120 144 #---------------------- Spectra Data ---------------------------
121 145
122 146 if self.dataIn.type == "Spectra":
123 147
124 148 self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
125 149 self.dataOut.data_spc = self.dataIn.data_spc
126 150 self.dataOut.data_cspc = self.dataIn.data_cspc
151 if hasattr(self.dataIn, 'data_outlier'):
152 self.dataOut.data_outlier = self.dataIn.data_outlier
153 if hasattr(self.dataIn,'flagProfilesByRange'):
154 self.dataOut.flagProfilesByRange = self.dataIn.flagProfilesByRange
155 if hasattr(self.dataIn,'nProfilesByRange'):
156 self.dataOut.nProfilesByRange = self.dataIn.nProfilesByRange
157 if hasattr(self.dataIn,'deltaHeight'):
158 self.dataOut.deltaHeight = self.dataIn.deltaHeight
159 if hasattr(self.dataIn,'noise_estimation'):
160 self.dataOut.noise_estimation = self.dataIn.noise_estimation
161 if hasattr(self.dataIn, 'channelList'):
162 self.dataOut.channelList = self.dataIn.channelList
163 if hasattr(self.dataIn, 'pairsList'):
164 self.dataOut.pairsList = self.dataIn.pairsList
165 self.dataOut.groupList = self.dataIn.pairsList
127 166 self.dataOut.nProfiles = self.dataIn.nProfiles
128 167 self.dataOut.nIncohInt = self.dataIn.nIncohInt
129 168 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
130 169 self.dataOut.ippFactor = self.dataIn.ippFactor
131 170 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
132 171 self.dataOut.spc_noise = self.dataIn.getNoise()
133 172 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
134 173 # self.dataOut.normFactor = self.dataIn.normFactor
135 self.dataOut.pairsList = self.dataIn.pairsList
136 self.dataOut.groupList = self.dataIn.pairsList
137 174 self.dataOut.flagNoData = False
138 175
139 176 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
140 177 self.dataOut.ChanDist = self.dataIn.ChanDist
141 178 else: self.dataOut.ChanDist = None
142 179
143 180 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
144 181 # self.dataOut.VelRange = self.dataIn.VelRange
145 182 #else: self.dataOut.VelRange = None
146 183
147 184 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
148 185 self.dataOut.RadarConst = self.dataIn.RadarConst
149 186
150 187 if hasattr(self.dataIn, 'NPW'): #NPW
151 188 self.dataOut.NPW = self.dataIn.NPW
152 189
153 190 if hasattr(self.dataIn, 'COFA'): #COFA
154 191 self.dataOut.COFA = self.dataIn.COFA
155 192
156 193
157 194
158 195 #---------------------- Correlation Data ---------------------------
159 196
160 197 if self.dataIn.type == "Correlation":
161 198 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
162 199
163 200 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
164 201 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
165 202 self.dataOut.groupList = (acf_pairs, ccf_pairs)
166 203
167 204 self.dataOut.abscissaList = self.dataIn.lagRange
168 205 self.dataOut.noise = self.dataIn.noise
169 206 self.dataOut.data_snr = self.dataIn.SNR
170 207 self.dataOut.flagNoData = False
171 208 self.dataOut.nAvg = self.dataIn.nAvg
172 209
173 210 #---------------------- Parameters Data ---------------------------
174 211
175 212 if self.dataIn.type == "Parameters":
176 213 self.dataOut.copy(self.dataIn)
214 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
215 self.dataOut.processingHeaderObj = self.dataIn.processingHeaderObj.copy()
177 216 self.dataOut.flagNoData = False
178
217 if isinstance(self.dataIn.nIncohInt,numpy.ndarray):
218 nch, nheis = self.dataIn.nIncohInt.shape
219 if nch != self.dataIn.nChannels:
220 aux = numpy.repeat(self.dataIn.nIncohInt, self.dataIn.nChannels, axis=0)
221 self.dataOut.nIncohInt = aux
179 222 return True
180 223
181 224 self.__updateObjFromInput()
182 225 self.dataOut.utctimeInit = self.dataIn.utctime
183 226 self.dataOut.paramInterval = self.dataIn.timeInterval
184 227 return
185 228
186 229
187 230 def target(tups):
188 231
189 232 obj, args = tups
190 233
191 234 return obj.FitGau(args)
192 235
193 236 class RemoveWideGC(Operation):
194 237 ''' This class removes wide ground clutter and replaces it with simple interpolated points.
195 238 This mainly applies to the CLAIRE radar.
196 239
197 240 ClutterWidth : Width to look for the clutter peak
198 241
199 242 Input:
200 243
201 244 self.dataOut.data_pre : SPC and CSPC
202 245 self.dataOut.spc_range : To select wind and rainfall velocities
203 246
204 247 Affected:
205 248
206 249 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
207 250
208 251 Written by D. Scipión 25.02.2021
209 252 '''
210 253 def __init__(self):
211 254 Operation.__init__(self)
212 255 self.i = 0
213 256 self.ich = 0
214 257 self.ir = 0
215 258
216 259 def run(self, dataOut, ClutterWidth=2.5):
217 260
218 261 self.spc = dataOut.data_pre[0].copy()
219 262 self.spc_out = dataOut.data_pre[0].copy()
220 263 self.Num_Chn = self.spc.shape[0]
221 264 self.Num_Hei = self.spc.shape[2]
222 265 VelRange = dataOut.spc_range[2][:-1]
223 266 dv = VelRange[1]-VelRange[0]
224 267
225 268 # Find the velocities that correspond to zero
226 269 gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))
227 270
228 271 # Removing novalid data from the spectra
229 272 for ich in range(self.Num_Chn) :
230 273 for ir in range(self.Num_Hei) :
231 274 # Estimate the noise at each range
232 275 HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)
233 276
234 277 # Removing the noise floor at each range
235 278 novalid = numpy.where(self.spc[ich,:,ir] < HSn)
236 279 self.spc[ich,novalid,ir] = HSn
237 280
238 281 junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
239 282 j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
240 283 j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
241 284 if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
242 285 continue
243 286 junk3 = numpy.squeeze(numpy.diff(j1index))
244 287 junk4 = numpy.squeeze(numpy.diff(j2index))
245 288 valleyindex = j2index[numpy.where(junk4>1)]
246 289 peakindex = j1index[numpy.where(junk3>1)]
247 290
248 291 isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
249 292 if numpy.size(isvalid) == 0 :
250 293 continue
251 294 if numpy.size(isvalid) >1 :
252 295 vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
253 296 isvalid = isvalid[vindex]
254 297 # clutter peak
255 298 gcpeak = peakindex[isvalid]
256 299 vl = numpy.where(valleyindex < gcpeak)
257 300 if numpy.size(vl) == 0:
258 301 continue
259 302 gcvl = valleyindex[vl[0][-1]]
260 303 vr = numpy.where(valleyindex > gcpeak)
261 304 if numpy.size(vr) == 0:
262 305 continue
263 306 gcvr = valleyindex[vr[0][0]]
264 307
265 308 # Removing the clutter
266 309 interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
267 310 gcindex = gc_values[gcvl+1:gcvr-1]
268 311 self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])
269 312
270 313 dataOut.data_pre[0] = self.spc_out
271 314
272 315 return dataOut
273 316
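# Hedged illustration of the interpolation step used above (all values are made up;
# gcindex/interpindex mirror the clutter bins and the valley bins kept on each side):
#
#   import numpy
#   VelRange = numpy.array([-2., -1., 0., 1., 2.])
#   spc_r    = numpy.array([ 3.,  9., 20., 8., 3.])   # clutter peak around 0 m/s
#   gcindex  = numpy.array([1, 2, 3])                 # bins flagged as clutter
#   interpindex = numpy.array([0, 4])                 # valley bins kept on each side
#   spc_r[gcindex] = numpy.interp(VelRange[gcindex], VelRange[interpindex], spc_r[interpindex])
#   # spc_r is now [3., 3., 3., 3., 3.]: the clutter bins follow the valley-to-valley line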
274 317 class SpectralFilters(Operation):
275 318 ''' This class replaces the novalid (invalid) values with noise for each channel
276 319 This applies to CLAIRE RADAR
277 320
278 321 PositiveLimit : RightLimit of novalid data
279 322 NegativeLimit : LeftLimit of novalid data
280 323
281 324 Input:
282 325
283 326 self.dataOut.data_pre : SPC and CSPC
284 327 self.dataOut.spc_range : To select wind and rainfall velocities
285 328
286 329 Affected:
287 330
288 331 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
289 332
290 333 Written by D. Scipión 29.01.2021
291 334 '''
292 335 def __init__(self):
293 336 Operation.__init__(self)
294 337 self.i = 0
295 338
296 339 def run(self, dataOut, NegativeLimit=-1.5, PositiveLimit=1.5):
297 340
298 341 self.spc = dataOut.data_pre[0].copy()
299 342 self.Num_Chn = self.spc.shape[0]
300 343 VelRange = dataOut.spc_range[2]
301 344
302 345 # novalid corresponds to data within the Negative and Positive limits
# (the default limits above are placeholders; pass the values that suit the experiment)
novalid = numpy.where(numpy.logical_and(VelRange >= NegativeLimit, VelRange <= PositiveLimit))[0]
303 346 # Removing novalid data from the spectra
304 347 for i in range(self.Num_Chn):
305 348 self.spc[i,novalid,:] = dataOut.noise[i]
306 349 dataOut.data_pre[0] = self.spc
307 350 return dataOut
308 351
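# Hedged mini-example of the replacement done in run() (made-up numbers; the value
# 0.7 stands in for dataOut.noise[i] of one channel):
#
#   import numpy
#   VelRange = numpy.array([-3., -1.5, 0., 1.5, 3.])
#   spc_ch   = numpy.ones((5, 4))      # velocity bins x heights, one channel
#   novalid  = numpy.where(numpy.logical_and(VelRange >= -1.5, VelRange <= 1.5))[0]
#   spc_ch[novalid, :] = 0.7           # those velocity bins are replaced by the channel noise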
309 352
310 353 class GaussianFit(Operation):
311 354
312 355 '''
313 356 Function that fits one or two generalized Gaussians (gg) based
314 357 on the PSD shape across a "power band" identified from a cumulative sum of
315 358 the measured spectrum minus the noise.
316 359
317 360 Input:
318 361 self.dataOut.data_pre : SelfSpectra
319 362
320 363 Output:
321 364 self.dataOut.SPCparam : SPC_ch1, SPC_ch2
322 365
323 366 '''
324 367 def __init__(self):
325 368 Operation.__init__(self)
326 369 self.i=0
327 370
328 371 def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
329 372 """This routine will find a couple of generalized Gaussians to a power spectrum
330 373 methods: generalized, squared
331 374 input: spc
332 375 output:
333 376 noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1
334 377 """
335 378 print ('Entering ',method,' double Gaussian fit')
336 379 self.spc = dataOut.data_pre[0].copy()
337 380 self.Num_Hei = self.spc.shape[2]
338 381 self.Num_Bin = self.spc.shape[1]
339 382 self.Num_Chn = self.spc.shape[0]
340 383
341 384 start_time = time.time()
342 385
343 386 pool = Pool(processes=self.Num_Chn)
344 387 args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
345 388 objs = [self for __ in range(self.Num_Chn)]
346 389 attrs = list(zip(objs, args))
347 390 DGauFitParam = pool.map(target, attrs)
pool.close() # no more tasks after the per-channel fits
pool.join() # wait for the workers before continuing
348 391 # Parameters:
349 392 # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
350 393 dataOut.DGauFitParams = numpy.asarray(DGauFitParam)
351 394
352 395 # Double Gaussian Curves
353 396 gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
354 397 gau0[:] = numpy.NaN
355 398 gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
356 399 gau1[:] = numpy.NaN
357 400 x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
358 401 for iCh in range(self.Num_Chn):
359 402 N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
360 403 N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
361 404 A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
362 405 A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
363 406 v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
364 407 v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
365 408 s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
366 409 s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
367 410 if method == 'generalized':
368 411 p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
369 412 p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
370 413 elif method == 'squared':
371 414 p0 = 2.
372 415 p1 = 2.
373 416 gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
374 417 gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
375 418 dataOut.GaussFit0 = gau0
376 419 dataOut.GaussFit1 = gau1
377 420
378 421 print('Leaving ',method ,' double Gaussian fit')
379 422 return dataOut
380 423
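# A hedged, self-contained sketch of the per-channel model that run() rebuilds from
# DGauFitParams (N, A, v0, s, p mirror the parameter ordering noted above; the
# numbers are illustrative only):
#
#   import numpy
#   v = numpy.linspace(-10., 10., 64)            # velocity axis (m/s)
#   N, A, v0, s, p = 0.3, 5.0, 1.5, 2.0, 2.0     # noise, amplitude, shift, width, power
#   S = A * numpy.exp(-0.5 * numpy.abs((v - v0) / s) ** p) + N   # one generalized Gaussian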
381 424 def FitGau(self, X):
382 425 # print('Entering FitGau')
383 426 # Assigning the variables
384 427 Vrange, ch, wnoise, num_intg, SNRlimit = X
385 428 # Noise Limits
386 429 noisebl = wnoise * 0.9
387 430 noisebh = wnoise * 1.1
388 431 # Radar Velocity
389 432 Va = max(Vrange)
390 433 deltav = Vrange[1] - Vrange[0]
391 434 x = numpy.arange(self.Num_Bin)
392 435
393 436 # print ('stop 0')
394 437
395 438 # 5 parameters, 2 Gaussians
396 439 DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
397 440 DGauFitParam[:] = numpy.NaN
398 441
399 442 # SPCparam = []
400 443 # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei])
401 444 # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei])
402 445 # SPC_ch1[:] = 0 #numpy.NaN
403 446 # SPC_ch2[:] = 0 #numpy.NaN
404 447 # print ('stop 1')
405 448 for ht in range(self.Num_Hei):
406 449 # print (ht)
407 450 # print ('stop 2')
408 451 # Spectra at each range
409 452 spc = numpy.asarray(self.spc)[ch,:,ht]
410 453 snr = ( spc.mean() - wnoise ) / wnoise
411 454 snrdB = 10.*numpy.log10(snr)
412 455
413 456 #print ('stop 3')
414 457 if snrdB < SNRlimit :
415 458 # snr = numpy.NaN
416 459 # SPC_ch1[:,ht] = 0#numpy.NaN
417 460 # SPC_ch1[:,ht] = 0#numpy.NaN
418 461 # SPCparam = (SPC_ch1,SPC_ch2)
419 462 # print ('SNR less than SNRth')
420 463 continue
421 464 # wnoise = hildebrand_sekhon(spc,num_intg)
422 465 # print ('stop 2.01')
423 466 #############################################
424 467 # normalizing spc and noise
425 468 # This part differs from gg1
426 469 # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
427 470 #spc = spc / spc_norm_max
428 471 # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021
429 472 #############################################
430 473
431 474 # print ('stop 2.1')
432 475 fatspectra=1.0
433 476 # noise per channel.... we might want to use the noise at each range
434 477 # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021
435 478 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
436 479 #if wnoise>1.1*pnoise: # to be tested later
437 480 # wnoise=pnoise
438 481 # noisebl = wnoise*0.9
439 482 # noisebh = wnoise*1.1
440 483 spc = spc - wnoise # signal
441 484
442 485 # print ('stop 2.2')
443 486 minx = numpy.argmin(spc)
444 487 #spcs=spc.copy()
445 488 spcs = numpy.roll(spc,-minx)
446 489 cum = numpy.cumsum(spcs)
447 490 # tot_noise = wnoise * self.Num_Bin #64;
448 491
449 492 # print ('stop 2.3')
450 493 # snr = sum(spcs) / tot_noise
451 494 # snrdB = 10.*numpy.log10(snr)
452 495 #print ('stop 3')
453 496 # if snrdB < SNRlimit :
454 497 # snr = numpy.NaN
455 498 # SPC_ch1[:,ht] = 0#numpy.NaN
456 499 # SPC_ch1[:,ht] = 0#numpy.NaN
457 500 # SPCparam = (SPC_ch1,SPC_ch2)
458 501 # print ('SNR less than SNRth')
459 502 # continue
460 503
461 504
462 505 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
463 506 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
464 507 # print ('stop 4')
465 508 cummax = max(cum)
466 509 epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
467 510 cumlo = cummax * epsi
468 511 cumhi = cummax * (1-epsi)
469 512 powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
470 513
471 514 # print ('stop 5')
472 515 if len(powerindex) < 1:# case for powerindex 0
473 516 # print ('powerindex < 1')
474 517 continue
475 518 powerlo = powerindex[0]
476 519 powerhi = powerindex[-1]
477 520 powerwidth = powerhi-powerlo
478 521 if powerwidth <= 1:
479 522 # print('powerwidth <= 1')
480 523 continue
481 524
482 525 # print ('stop 6')
483 526 firstpeak = powerlo + powerwidth/10.# first gaussian energy location
484 527 secondpeak = powerhi - powerwidth/10. #second gaussian energy location
485 528 midpeak = (firstpeak + secondpeak)/2.
486 529 firstamp = spcs[int(firstpeak)]
487 530 secondamp = spcs[int(secondpeak)]
488 531 midamp = spcs[int(midpeak)]
489 532
490 533 y_data = spc + wnoise
491 534
492 535 ''' single Gaussian '''
493 536 shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
494 537 width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
495 538 power0 = 2.
496 539 amplitude0 = midamp
497 540 state0 = [shift0,width0,amplitude0,power0,wnoise]
498 541 bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
499 542 lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)
500 543 # print ('stop 7.1')
501 544 # print (bnds)
502 545
503 546 chiSq1=lsq1[1]
504 547
505 548 # print ('stop 8')
506 549 if fatspectra<1.0 and powerwidth<4:
507 550 choice=0
508 551 Amplitude0=lsq1[0][2]
509 552 shift0=lsq1[0][0]
510 553 width0=lsq1[0][1]
511 554 p0=lsq1[0][3]
512 555 Amplitude1=0.
513 556 shift1=0.
514 557 width1=0.
515 558 p1=0.
516 559 noise=lsq1[0][4]
517 560 #return (numpy.array([shift0,width0,Amplitude0,p0]),
518 561 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
519 562 # print ('stop 9')
520 563 ''' two Gaussians '''
521 564 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
522 565 shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
523 566 shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
524 567 width0 = powerwidth/6.
525 568 width1 = width0
526 569 power0 = 2.
527 570 power1 = power0
528 571 amplitude0 = firstamp
529 572 amplitude1 = secondamp
530 573 state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
531 574 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
532 575 bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
533 576 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
534 577
535 578 # print ('stop 10')
536 579 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
537 580
538 581 # print ('stop 11')
539 582 chiSq2 = lsq2[1]
540 583
541 584 # print ('stop 12')
542 585
543 586 oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
544 587
545 588 # print ('stop 13')
546 589 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
547 590 if oneG:
548 591 choice = 0
549 592 else:
550 593 w1 = lsq2[0][1]; w2 = lsq2[0][5]
551 594 a1 = lsq2[0][2]; a2 = lsq2[0][6]
552 595 p1 = lsq2[0][3]; p2 = lsq2[0][7]
553 596 s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
554 597 s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
555 598 gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each generalized Gaussian with proper p scaling
556 599
557 600 if gp1>gp2:
558 601 if a1>0.7*a2:
559 602 choice = 1
560 603 else:
561 604 choice = 2
562 605 elif gp2>gp1:
563 606 if a2>0.7*a1:
564 607 choice = 2
565 608 else:
566 609 choice = 1
567 610 else:
568 611 choice = numpy.argmax([a1,a2])+1
569 612 #else:
570 613 #choice=argmin([std2a,std2b])+1
571 614
572 615 else: # with low SNR go to the most energetic peak
573 616 choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
574 617
575 618 # print ('stop 14')
576 619 shift0 = lsq2[0][0]
577 620 vel0 = Vrange[0] + shift0 * deltav
578 621 shift1 = lsq2[0][4]
579 622 # vel1=Vrange[0] + shift1 * deltav
580 623
581 624 # max_vel = 1.0
582 625 # Va = max(Vrange)
583 626 # deltav = Vrange[1]-Vrange[0]
584 627 # print ('stop 15')
585 628 #first peak will be 0, second peak will be 1
586 629 # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D.Scipión 19.03.2021
587 630 if vel0 > -Va and vel0 < Va : #first peak is in the correct range
588 631 shift0 = lsq2[0][0]
589 632 width0 = lsq2[0][1]
590 633 Amplitude0 = lsq2[0][2]
591 634 p0 = lsq2[0][3]
592 635
593 636 shift1 = lsq2[0][4]
594 637 width1 = lsq2[0][5]
595 638 Amplitude1 = lsq2[0][6]
596 639 p1 = lsq2[0][7]
597 640 noise = lsq2[0][8]
598 641 else:
599 642 shift1 = lsq2[0][0]
600 643 width1 = lsq2[0][1]
601 644 Amplitude1 = lsq2[0][2]
602 645 p1 = lsq2[0][3]
603 646
604 647 shift0 = lsq2[0][4]
605 648 width0 = lsq2[0][5]
606 649 Amplitude0 = lsq2[0][6]
607 650 p0 = lsq2[0][7]
608 651 noise = lsq2[0][8]
609 652
610 653 if Amplitude0<0.05: # in case the peak is noise
611 654 shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
612 655 if Amplitude1<0.05:
613 656 shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]
614 657
615 658 # print ('stop 16 ')
616 659 # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0)
617 660 # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1)
618 661 # SPCparam = (SPC_ch1,SPC_ch2)
619 662
620 663 DGauFitParam[0,ht,0] = noise
621 664 DGauFitParam[0,ht,1] = noise
622 665 DGauFitParam[1,ht,0] = Amplitude0
623 666 DGauFitParam[1,ht,1] = Amplitude1
624 667 DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
625 668 DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
626 669 DGauFitParam[3,ht,0] = width0 * deltav
627 670 DGauFitParam[3,ht,1] = width1 * deltav
628 671 DGauFitParam[4,ht,0] = p0
629 672 DGauFitParam[4,ht,1] = p1
630 673
631 674 return DGauFitParam
632 675
633 676 def y_model1(self,x,state):
634 677 shift0, width0, amplitude0, power0, noise = state
635 678 model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
636 679 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
637 680 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
638 681 return model0 + model0u + model0d + noise
639 682
640 683 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
641 684 shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
642 685 model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
643 686 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
644 687 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
645 688
646 689 model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
647 690 model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
648 691 model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
649 692 return model0 + model0u + model0d + model1 + model1u + model1d + noise
650 693
651 694 def misfit1(self,state,y_data,x,num_intg): # This function measures how close the real data is to the model data; the closer it is, the better.
652 695
653 696 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
654 697
655 698 def misfit2(self,state,y_data,x,num_intg):
656 699 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
657 700
658 701 class Oblique_Gauss_Fit(Operation):
659 702 '''
660 703 Written by R. Flores
661 704 '''
662 705 def __init__(self):
663 706 Operation.__init__(self)
664 707
665 708 def Gauss_fit(self,spc,x,nGauss):
666 709
667 710
668 711 def gaussian(x, a, b, c, d):
669 712 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
670 713 return val
671 714
672 715 if nGauss == 'first':
673 716 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
674 717 spc_2_aux = numpy.flip(spc_1_aux)
675 718 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
676 719
677 720 len_dif = len(x)-len(spc_3_aux)
678 721
679 722 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
680 723
681 724 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
682 725
683 726 y = spc_new
684 727
685 728 elif nGauss == 'second':
686 729 y = spc
687 730
688 731
689 732 # estimate starting values from the data
690 733 a = y.max()
691 734 b = x[numpy.argmax(y)]
692 735 if nGauss == 'first':
693 736 c = 1.#b#b#numpy.std(spc)
694 737 elif nGauss == 'second':
695 738 c = b
696 739 else:
697 740 print("ERROR")
698 741
699 742 d = numpy.mean(y[-100:])
700 743
701 744 # define a least squares function to optimize
702 745 def minfunc(params):
703 746 return sum((y-gaussian(x,params[0],params[1],params[2],params[3]))**2)
704 747
705 748 # fit
706 749 popt = fmin(minfunc,[a,b,c,d],disp=False)
707 750 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
708 751
709 752
710 753 return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
711 754
712 755 def Gauss_fit_2(self,spc,x,nGauss):
713 756
714 757
715 758 def gaussian(x, a, b, c, d):
716 759 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
717 760 return val
718 761
719 762 if nGauss == 'first':
720 763 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
721 764 spc_2_aux = numpy.flip(spc_1_aux)
722 765 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
723 766
724 767 len_dif = len(x)-len(spc_3_aux)
725 768
726 769 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
727 770
728 771 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
729 772
730 773 y = spc_new
731 774
732 775 elif nGauss == 'second':
733 776 y = spc
734 777
735 778
736 779 # estimate starting values from the data
737 780 a = y.max()
738 781 b = x[numpy.argmax(y)]
739 782 if nGauss == 'first':
740 783 c = 1.#b#b#numpy.std(spc)
741 784 elif nGauss == 'second':
742 785 c = b
743 786 else:
744 787 print("ERROR")
745 788
746 789 d = numpy.mean(y[-100:])
747 790 popt,pcov = curve_fit(gaussian,x,y,p0=[a,b,c,d])
748 791 return gaussian(x, popt[0], popt[1], popt[2], popt[3]),popt[0], popt[1], popt[2], popt[3]
749 792
750 793 def Double_Gauss_fit(self,spc,x,A1,B1,C1,A2,B2,C2,D):
751 794
752 795 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
753 796 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
754 797 return val
755 798
756 799
757 800 y = spc
758 801
759 802 # estimate starting values from the data
760 803 a1 = A1
761 804 b1 = B1
762 805 c1 = C1#numpy.std(spc)
763 806
764 807 a2 = A2#y.max()
765 808 b2 = B2#x[numpy.argmax(y)]
766 809 c2 = C2#numpy.std(spc)
767 810 d = D
768 811
769 812 # define a least squares function to optimize
770 813 def minfunc(params):
771 814 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2)
772 815
773 816 # fit
774 817 popt = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],disp=False)
775 818
776 819 return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
777 820
778 821 def Double_Gauss_fit_2(self,spc,x,A1,B1,C1,A2,B2,C2,D):
779 822
780 823 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
781 824 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
782 825 return val
783 826
784 827
785 828 y = spc
786 829
787 830 # estimate starting values from the data
788 831 a1 = A1
789 832 b1 = B1
790 833 c1 = C1#numpy.std(spc)
791 834
792 835 a2 = A2#y.max()
793 836 b2 = B2#x[numpy.argmax(y)]
794 837 c2 = C2#numpy.std(spc)
795 838 d = D
796 839
797 840 # fit
798 841 popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
799 842 error = numpy.sqrt(numpy.diag(pcov))
800 843
801 844 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
802 845
803 846 def windowing_double(self,spc,x,A1,B1,C1,A2,B2,C2,D):
804 847 from scipy.optimize import curve_fit,fmin
805 848
806 849 def R_gaussian(x, a, b, c):
807 850 N = int(numpy.shape(x)[0])
808 851 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
809 852 return val
810 853
811 854 def T(x,N):
812 855 T = 1-abs(x)/N
813 856 return T
814 857
815 858 def R_T_spc_fun(x, a1, b1, c1, a2, b2, c2, d):
816 859
817 860 N = int(numpy.shape(x)[0])
818 861
819 862 x_max = x[-1]
820 863
821 864 x_pos = x[1600:]
822 865 x_neg = x[:1600]
823 866
824 867 R_T_neg_1 = R_gaussian(x, a1, b1, c1)[:1600]*T(x_neg,-x[0])
825 868 R_T_pos_1 = R_gaussian(x, a1, b1, c1)[1600:]*T(x_pos,x[-1])
826 869 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
827 870 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
828 871 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
829 872 max_val_1 = numpy.max(R_T_spc_1)
830 873 R_T_spc_1 = R_T_spc_1*a1/max_val_1
831 874
832 875 R_T_neg_2 = R_gaussian(x, a2, b2, c2)[:1600]*T(x_neg,-x[0])
833 876 R_T_pos_2 = R_gaussian(x, a2, b2, c2)[1600:]*T(x_pos,x[-1])
834 877 R_T_sum_2 = R_T_pos_2 + R_T_neg_2
835 878 R_T_spc_2 = numpy.fft.fft(R_T_sum_2).real
836 879 R_T_spc_2 = numpy.fft.fftshift(R_T_spc_2)
837 880 max_val_2 = numpy.max(R_T_spc_2)
838 881 R_T_spc_2 = R_T_spc_2*a2/max_val_2
839 882
840 883 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
841 884 R_T_d_neg = R_T_d[:1600]*T(x_neg,-x[0])
842 885 R_T_d_pos = R_T_d[1600:]*T(x_pos,x[-1])
843 886 R_T_d_sum = R_T_d_pos + R_T_d_neg
844 887 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
845 888 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
846 889
847 890 R_T_final = R_T_spc_1 + R_T_spc_2 + R_T_spc_3
848 891
849 892 return R_T_final
850 893
851 894 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
852 895
853 896 from scipy.stats import norm
854 897 mean,std=norm.fit(spc)
855 898
856 899 # estimate starting values from the data
857 900 a1 = A1
858 901 b1 = B1
859 902 c1 = C1#numpy.std(spc)
860 903
861 904 a2 = A2#y.max()
862 905 b2 = B2#x[numpy.argmax(y)]
863 906 c2 = C2#numpy.std(spc)
864 907 d = D
865 908
866 909 ippSeconds = 250*20*1.e-6/3
867 910
868 911 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
869 912
870 913 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
871 914
872 915 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
873 916 x_freq = numpy.fft.fftshift(x_freq)
874 917
875 918 # define a least squares function to optimize
876 919 def minfunc(params):
877 920 return sum((y-R_T_spc_fun(x_t,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/1)#y**2)
878 921
879 922 # fit
880 923 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],full_output=True)
881 924 popt = popt_full[0]
882 925
883 926 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
884 927
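# Hedged aside on the T(x, N) = 1 - |x|/N factor used above: it is a triangular
# (Bartlett-like) taper applied to the model ACF before the FFT. A quick way to
# build the taper itself (illustrative sizes only):
#
#   import numpy
#   N = 16
#   lag    = numpy.arange(-N, N)
#   taper  = 1. - numpy.abs(lag) / N                          # same shape as T(x, N)
#   kernel = numpy.fft.fftshift(numpy.fft.fft(taper).real)    # its footprint in the spectral domain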
885 928 def Double_Gauss_fit_weight(self,spc,x,A1,B1,C1,A2,B2,C2,D):
886 929 from scipy.optimize import curve_fit,fmin
887 930
888 931 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
889 932 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
890 933 return val
891 934
892 935 y = spc
893 936
894 937 from scipy.stats import norm
895 938 mean,std=norm.fit(spc)
896 939
897 940 # estimate starting values from the data
898 941 a1 = A1
899 942 b1 = B1
900 943 c1 = C1#numpy.std(spc)
901 944
902 945 a2 = A2#y.max()
903 946 b2 = B2#x[numpy.argmax(y)]
904 947 c2 = C2#numpy.std(spc)
905 948 d = D
906 949
907 950 y_clean = signal.medfilt(y)
908 951 # define a least squares function to optimize
909 952 def minfunc(params):
910 953 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/(y_clean**2/1))
911 954
912 955 # fit
913 956 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d], disp =False, full_output=True)
914 957 #print("nIter", popt_full[2])
915 958 popt = popt_full[0]
916 959 #popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
917 960
918 961 #return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
919 962 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
920 963
921 964 def DH_mode(self,spectra,VelRange):
922 965
923 966 from scipy.optimize import curve_fit
924 967
925 968 def double_gauss(x, a1,b1,c1, a2,b2,c2, d):
926 969 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
927 970 return val
928 971
929 972 spec = (spectra.copy()).flatten()
930 973 amp=spec.max()
931 974 params=numpy.array([amp,-400,30,amp/4,-200,150,1.0e7])
932 975 #try:
933 976 popt,pcov=curve_fit(double_gauss, VelRange, spec, p0=params,bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf]))
934 977
935 978 error = numpy.sqrt(numpy.diag(pcov))
936 979 #doppler_2=popt[4]
937 980 #err_2 = numpy.sqrt(pcov[4][4])
938 981
939 982 #except:
940 983 #pass
941 984 #doppler_2=numpy.NAN
942 985 #err_2 = numpy.NAN
943 986
944 987 #return doppler_2, err_2
945 988
946 989 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
947 990
948 991 def Tri_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d):
949 992
950 993 from scipy.optimize import least_squares
951 994
952 995 freq_max = numpy.max(numpy.abs(freq))
953 996 spc_max = numpy.max(spc)
954 997
955 998 def tri_gaussian(x, a1, b1, c1, a2, b2, c2, a3, b3, c3, d):
956 999 z1 = (x-b1)/c1
957 1000 z2 = (x-b2)/c2
958 1001 z3 = (x-b3)/c3
959 1002 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + a3 * numpy.exp(-z3**2/2) + d
960 1003 return val
961 1004
962 1005 from scipy.signal import medfilt
963 1006 Nincoh = 20
964 1007 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
965 1008 c1 = abs(c1)
966 1009 c2 = abs(c2)
967 1010
968 1011 # define a least squares function to optimize
969 1012 def lsq_func(params):
970 1013 return (spc-tri_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9]))/spcm
971 1014
972 1015 # fit
973 1016 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,600,numpy.inf,numpy.inf])
974 1017
975 1018 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
976 1019 #print(a1,b1,c1,a2,b2,c2,d)
977 1020 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,a2/4,-b1,c1,d],x_scale=params_scale,bounds=bounds)
978 1021
979 1022 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
980 1023 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
981 1024 A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
982 1025 Df = popt.x[9]
983 1026
984 1027 return A1f, B1f, C1f, A2f, B2f, C2f, Df
985 1028
986 1029 def Tri_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d):
987 1030
988 1031 from scipy.optimize import least_squares
989 1032
990 1033 freq_max = numpy.max(numpy.abs(freq))
991 1034 spc_max = numpy.max(spc)
992 1035
993 1036 def duo_gaussian(x, a1, b1, c1, a2, b2, c2, d):
994 1037 z1 = (x-b1)/c1
995 1038 z2 = (x-b2)/c2
996 1039 #z3 = (x-b3)/c3
997 1040 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
998 1041 return val
999 1042
1000 1043 from scipy.signal import medfilt
1001 1044 Nincoh = 20
1002 1045 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1003 1046 c1 = abs(c1)
1004 1047 c2 = abs(c2)
1005 1048
1006 1049 # define a least squares function to optimize
1007 1050 def lsq_func(params):
1008 1051 return (spc-duo_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm
1009 1052
1010 1053 # fit
1011 1054 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1012 1055
1013 1056 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1014 1057 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,d],x_scale=params_scale,bounds=bounds)
1015 1058
1016 1059 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1017 1060 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1018 1061 #A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
1019 1062 Df = popt.x[6]
1020 1063
1021 1064 return A1f, B1f, C1f, A2f, B2f, C2f, Df
1022 1065
1023 1066 def double_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, d):
1024 1067 z1 = (x-b1)/c1
1025 1068 z2 = (x-b2)/c2
1026 1069 h2 = 1-k2*z2
1027 1070 h2[h2<0] = 0
1028 1071 y2 = -1/k2*numpy.log(h2)
1029 1072 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1030 1073 return val
1031 1074
1032 1075 def gaussian(self, x, a, b, c, d):
1033 1076 z = (x-b)/c
1034 1077 val = a * numpy.exp(-z**2/2) + d
1035 1078 return val
1036 1079
1037 1080 def double_gaussian(self, x, a1, b1, c1, a2, b2, c2, d):
1038 1081 z1 = (x-b1)/c1
1039 1082 z2 = (x-b2)/c2
1040 1083 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
1041 1084 return val
1042 1085
1043 1086 def double_gaussian_double_skew(self,x, a1, b1, c1, k1, a2, b2, c2, k2, d):
1044 1087
1045 1088 z1 = (x-b1)/c1
1046 1089 h1 = 1-k1*z1
1047 1090 h1[h1<0] = 0
1048 1091 y1 = -1/k1*numpy.log(h1)
1049 1092
1050 1093 z2 = (x-b2)/c2
1051 1094 h2 = 1-k2*z2
1052 1095 h2[h2<0] = 0
1053 1096 y2 = -1/k2*numpy.log(h2)
1054 1097
1055 1098 val = a1 * numpy.exp(-y1**2/2)/(1-k1*z1) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1056 1099 return val
1057 1100
1058 1101 def gaussian_skew(self,x, a2, b2, c2, k2, d):
1059 1102 z2 = (x-b2)/c2
1060 1103 h2 = 1-k2*z2
1061 1104 h2[h2<0] = 0
1062 1105 y2 = -1/k2*numpy.log(h2)
1063 1106 val = a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1064 1107 return val
1065 1108
1066 1109 def triple_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, a3, b3, c3, k3, d):
1067 1110 z1 = (x-b1)/c1
1068 1111 z2 = (x-b2)/c2
1069 1112 z3 = (x-b3)/c3
1070 1113 h2 = 1-k2*z2
1071 1114 h2[h2<0] = 0
1072 1115 y2 = -1/k2*numpy.log(h2)
1073 1116 h3 = 1-k3*z3
1074 1117 h3[h3<0] = 0
1075 1118 y3 = -1/k3*numpy.log(h3)
1076 1119 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + a3 * numpy.exp(-y3**2/2)/(1-k3*z3) + d
1077 1120 return val
1078 1121
1079 1122 def Double_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1080 1123
1081 1124 from scipy.optimize import least_squares
1082 1125
1083 1126 freq_max = numpy.max(numpy.abs(freq))
1084 1127 spc_max = numpy.max(spc)
1085 1128
1086 1129 from scipy.signal import medfilt
1087 1130 Nincoh = 20
1088 1131 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1089 1132
1090 1133 # define a least squares function to optimize
1091 1134 def lsq_func(params):
1092 1135 return (spc-self.double_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7]))/spcm
1093 1136
1094 1137 # fit
1095 1138 bounds=([0,-numpy.inf,0,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1096 1139 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max]
1097 1140 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,1.0e7])
1098 1141 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1099 1142 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1100 1143 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1101 1144 Df = popt.x[7]
1102 1145
1103 1146 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1104 1147 doppler = freq[numpy.argmax(aux)]
1105 1148
1106 1149 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, Df, doppler
1107 1150
1108 1151 def Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh,hei):
1109 1152
1110 1153 from scipy.optimize import least_squares
1111 1154
1112 1155 freq_max = numpy.max(numpy.abs(freq))
1113 1156 spc_max = numpy.max(spc)
1114 1157
1115 1158 #from scipy.signal import medfilt
1116 1159 #Nincoh = 20
1117 1160 #Nincoh = 80
1118 1161 Nincoh = Nincoh
1119 1162 #spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1120 1163 spcm = spc/numpy.sqrt(Nincoh)
1121 1164
1122 1165 # define a least squares function to optimize
1123 1166 def lsq_func(params):
1124 1167 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1125 1168
1126 1169 # fit
1127 1170 bounds=([0,-numpy.inf,0,-5,0,-400,0,0,0],[numpy.inf,-200,numpy.inf,5,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1128 1171
1129 1172 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1130 1173
1131 1174 dop1_x0 = freq[numpy.argmax(spc)]
1132 1175 if dop1_x0 < 0:
1133 1176 dop2_x0 = dop1_x0 + 100
1134 1177 else:
1135 1178 dop2_x0 = dop1_x0 - 100
1136 1179
1137 1180 x0_value = numpy.array([spc_max,dop1_x0,30,-.1,spc_max/4, dop2_x0,150,1,1.0e7])
1138 1181 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1139 1182 J = popt.jac
1140 1183
1141 1184 try:
1142 1185 cov = numpy.linalg.inv(J.T.dot(J))
1143 1186 error = numpy.sqrt(numpy.diagonal(cov))
1144 1187 except:
1145 1188 error = numpy.ones((9))*numpy.NAN
1146 1189
1147 1190 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1148 1191 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1149 1192 Df = popt.x[8]
1150 1193 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1151 1194 doppler1 = freq[numpy.argmax(aux1)]
1152 1195
1153 1196 aux2 = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1154 1197 doppler2 = freq[numpy.argmax(aux2)]
1155 1198 #print("error",error)
1156 1199 #exit(1)
1157 1200
1158 1201
1159 1202 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler1, doppler2, error
1160 1203
1161 1204 def Double_Gauss_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1162 1205
1163 1206 from scipy.optimize import least_squares
1164 1207
1165 1208 freq_max = numpy.max(numpy.abs(freq))
1166 1209 spc_max = numpy.max(spc)
1167 1210
1168 1211 from scipy.signal import medfilt
1169 1212 Nincoh = 20
1170 1213 Nincoh = 80
1171 1214 Nincoh = Nincoh
1172 1215 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1173 1216
1174 1217 # define a least squares function to optimize
1175 1218 def lsq_func(params):
1176 1219 return (spc-self.double_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm
1177 1220
1178 1221 # fit
1179 1222 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1180 1223 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1181 1224 #print(a1,b1,c1,a2,b2,c2,k2,d)
1182 1225
1183 1226 dop1_x0 = freq[numpy.argmax(spcm)]
1184 1227
1185 1228 bounds=([0,-numpy.inf,0,0,dop1_x0-50,0,0],[numpy.inf,-300,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1186 1229 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1187 1230 x0_value = numpy.array([spc_max,-400.5,30,spc_max/4,dop1_x0,150,1.0e7])
1188 1231 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1189 1232 J = popt.jac
1190 1233
1191 1234 try:
1192 1235 cov = numpy.linalg.inv(J.T.dot(J))
1193 1236 error = numpy.sqrt(numpy.diagonal(cov))
1194 1237 except:
1195 1238 error = numpy.ones((7))*numpy.NAN
1196 1239
1197 1240 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1198 1241 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1199 1242 Df = popt.x[6]
1200 1243 return A1f, B1f, C1f, A2f, B2f, C2f, Df, error
1201 1244
1202 1245 def Double_Gauss_Double_Skew_fit_weight_bound_with_inputs(self, spc, freq, a1, b1, c1, a2, b2, c2, k2, d):
1203 1246
1204 1247 from scipy.optimize import least_squares
1205 1248
1206 1249 freq_max = numpy.max(numpy.abs(freq))
1207 1250 spc_max = numpy.max(spc)
1208 1251
1209 1252 from scipy.signal import medfilt
1210 1253 Nincoh = 20 # assumption: fixed value as in the sibling fit methods, since dataOut is not in scope here
1211 1254 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1212 1255
1213 1256 # define a least squares function to optimize
1214 1257 def lsq_func(params):
1215 1258 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1216 1259
1217 1260
1218 1261 bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1219 1262
1220 1263 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1221 1264
1222 1265 x0_value = numpy.array([a1,b1,c1,-.1,a2,b2,c2,k2,d])
1223 1266
1224 1267 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1225 1268
1226 1269 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1227 1270 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1228 1271 Df = popt.x[8]
1229 1272
1230 1273 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1231 1274 doppler = freq[numpy.argmax(aux)]
1232 1275
1233 1276 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler
1234 1277
1235 1278 def Triple_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1236 1279
1237 1280 from scipy.optimize import least_squares
1238 1281
1239 1282 freq_max = numpy.max(numpy.abs(freq))
1240 1283 spc_max = numpy.max(spc)
1241 1284
1242 1285 from scipy.signal import medfilt
1243 1286 Nincoh = 20
1244 1287 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1245 1288
1246 1289 # define a least squares function to optimize
1247 1290 def lsq_func(params):
1248 1291 return (spc-self.triple_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9],params[10],params[11]))/spcm
1249 1292
1250 1293 # fit
1251 1294 bounds=([0,-numpy.inf,0,0,-400,0,0,0,0,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1252 1295
1253 1296 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1254 1297 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,spc_max/4,400,150,1,1.0e7])
1255 1298 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1256 1299
1257 1300 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1258 1301 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1259 1302 A3f = popt.x[7]; B3f = popt.x[8]; C3f = popt.x[9]; K3f = popt.x[10]
1260 1303 Df = popt.x[11]
1261 1304
1262 1305 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1263 1306 doppler = freq[numpy.argmax(aux)]
1264 1307
1265 1308 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, A3f, B3f, C3f, K3f, Df, doppler
1266 1309
1267 1310 def CEEJ_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1268 1311
1269 1312 from scipy.optimize import least_squares
1270 1313
1271 1314 freq_max = numpy.max(numpy.abs(freq))
1272 1315 spc_max = numpy.max(spc)
1273 1316
1274 1317 from scipy.signal import medfilt
1275 1318 Nincoh = 20
1276 1319 Nincoh = 80
1277 1320 Nincoh = Nincoh
1278 1321 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1279 1322
1280 1323 # define a least squares function to optimize
1281 1324 def lsq_func(params):
1282 1325 return (spc-self.gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4]))#/spcm
1283 1326
1284 1327
1285 1328 bounds=([0,0,0,-numpy.inf,0],[numpy.inf,numpy.inf,numpy.inf,0,numpy.inf])
1286 1329
1287 1330 params_scale = [spc_max,freq_max,freq_max,1,spc_max]
1288 1331
1289 1332 x0_value = numpy.array([spc_max,freq[numpy.argmax(spc)],30,-.1,numpy.mean(spc[:50])])
1290 1333
1291 1334 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1292 1335
1293 1336 J = popt.jac
1294 1337
1295 1338 try:
1296 1339 error = numpy.ones((9))*numpy.NAN
1297 1340 cov = numpy.linalg.inv(J.T.dot(J))
1298 1341 error[:4] = numpy.sqrt(numpy.diagonal(cov))[:4]
1299 1342 error[-1] = numpy.sqrt(numpy.diagonal(cov))[-1]
1300 1343 except:
1301 1344 error = numpy.ones((9))*numpy.NAN
1302 1345
1303 1346 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1304 1347 Df = popt.x[4]
1305 1348
1306 1349 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1307 1350 doppler1 = freq[numpy.argmax(aux1)]
1308 1351 #print("CEEJ ERROR:",error)
1309 1352
1310 1353 return A1f, B1f, C1f, K1f, numpy.NAN, numpy.NAN, numpy.NAN, numpy.NAN, Df, doppler1, numpy.NAN, error
1311 1354
1312 1355 def CEEJ_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1313 1356
1314 1357 from scipy.optimize import least_squares
1315 1358
1316 1359 freq_max = numpy.max(numpy.abs(freq))
1317 1360 spc_max = numpy.max(spc)
1318 1361
1319 1362 from scipy.signal import medfilt
1320 1363 Nincoh = 20
1321 1364 Nincoh = 80
1322 1365 Nincoh = Nincoh
1323 1366 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1324 1367
1325 1368 # define a least squares function to optimize
1326 1369 def lsq_func(params):
1327 1370 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))#/spcm
1328 1371
1329 1372
1330 1373 bounds=([0,0,0,0],[numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1331 1374
1332 1375 params_scale = [spc_max,freq_max,freq_max,spc_max]
1333 1376
1334 1377 x0_value = numpy.array([spc_max,freq[numpy.argmax(spcm)],30,numpy.mean(spc[:50])])
1335 1378
1336 1379 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1337 1380
1338 1381 J = popt.jac
1339 1382
1340 1383 try:
1341 1384 error = numpy.ones((4))*numpy.NAN
1342 1385 cov = numpy.linalg.inv(J.T.dot(J))
1343 1386 error = numpy.sqrt(numpy.diagonal(cov))
1344 1387 except:
1345 1388 error = numpy.ones((4))*numpy.NAN
1346 1389
1347 1390 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1348 1391 Df = popt.x[3]
1349 1392
1350 1393 return A1f, B1f, C1f, Df, error
1351 1394
1352 1395 def Simple_fit_bound(self,spc,freq,Nincoh):
1353 1396
from scipy.optimize import least_squares # needed here: least_squares is not imported at module level
1354 1397 freq_max = numpy.max(numpy.abs(freq))
1355 1398 spc_max = numpy.max(spc)
1356 1399
1357 1400 Nincoh = Nincoh
1358 1401
1359 1402 def lsq_func(params):
1360 1403 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))
1361 1404
1362 1405 bounds=([0,-50,0,0],[numpy.inf,+50,numpy.inf,numpy.inf])
1363 1406
1364 1407 params_scale = [spc_max,freq_max,freq_max,spc_max]
1365 1408
1366 1409 x0_value = numpy.array([spc_max,-20.5,5,1.0e7])
1367 1410
1368 1411 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1369 1412
1370 1413 J = popt.jac
1371 1414
1372 1415 try:
1373 1416 cov = numpy.linalg.inv(J.T.dot(J))
1374 1417 error = numpy.sqrt(numpy.diagonal(cov))
1375 1418 except:
1376 1419 error = numpy.ones((4))*numpy.NAN
1377 1420
1378 1421 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1379 1422 Df = popt.x[3]
1380 1423
1381 1424 return A1f, B1f, C1f, Df, error
1382 1425
1383 1426 def clean_outliers(self,param):
1384 1427
1385 1428 threshold = 700
1386 1429
1387 1430 param = numpy.where(param < -threshold, numpy.nan, param)
1388 1431 param = numpy.where(param > +threshold, numpy.nan, param)
1389 1432
1390 1433 return param
1391 1434
1392 1435 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1393 1436 from scipy.optimize import curve_fit,fmin
1394 1437
1395 1438 def R_gaussian(x, a, b, c):
1396 1439 N = int(numpy.shape(x)[0])
1397 1440 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1398 1441 return val
1399 1442
1400 1443 def T(x,N):
1401 1444 T = 1-abs(x)/N
1402 1445 return T
1403 1446
1404 1447 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
1405 1448
1406 1449 N = int(numpy.shape(x)[0])
1407 1450
1408 1451 x_max = x[-1]
1409 1452
1410 1453 x_pos = x[int(nFFTPoints/2):]
1411 1454 x_neg = x[:int(nFFTPoints/2)]
1412 1455
1413 1456 R_T_neg_1 = R_gaussian(x, a, b, c)[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1414 1457 R_T_pos_1 = R_gaussian(x, a, b, c)[int(nFFTPoints/2):]*T(x_pos,x[-1])
1415 1458 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1416 1459 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1417 1460 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1418 1461 max_val_1 = numpy.max(R_T_spc_1)
1419 1462 R_T_spc_1 = R_T_spc_1*a/max_val_1
1420 1463
1421 1464 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1422 1465 R_T_d_neg = R_T_d[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1423 1466 R_T_d_pos = R_T_d[int(nFFTPoints/2):]*T(x_pos,x[-1])
1424 1467 R_T_d_sum = R_T_d_pos + R_T_d_neg
1425 1468 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1426 1469 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1427 1470
1428 1471 R_T_final = R_T_spc_1 + R_T_spc_3
1429 1472
1430 1473 return R_T_final
1431 1474
1432 1475 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1433 1476
1434 1477 from scipy.stats import norm
1435 1478 mean,std=norm.fit(spc)
1436 1479
1437 1480 # estimate starting values from the data
1438 1481 a = A
1439 1482 b = B
1440 1483 c = C#numpy.std(spc)
1441 1484 d = D
1442 1485 '''
1443 1486 ippSeconds = 250*20*1.e-6/3
1444 1487
1445 1488 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
1446 1489
1447 1490 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1448 1491
1449 1492 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1450 1493 x_freq = numpy.fft.fftshift(x_freq)
1451 1494 '''
1452 1495 # define a least squares function to optimize
1453 1496 def minfunc(params):
1454 1497             return sum((y-R_T_spc_fun(x,params[0],params[1],params[2],params[3],nFFTPoints))**2) #optionally weight by y**2
1455 1498
1456 1499 # fit
1457 1500
1458 1501 popt_full = fmin(minfunc,[a,b,c,d],full_output=True)
1459 1502 #print("nIter", popt_full[2])
1460 1503 popt = popt_full[0]
1461 1504
1462 1505 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1463 1506 return popt[0], popt[1], popt[2], popt[3]
1464 1507
1465 1508 def run(self, dataOut, mode = 0, Hmin1 = None, Hmax1 = None, Hmin2 = None, Hmax2 = None, Dop = 'Shift'):
1466 1509
1467 1510 pwcode = 1
1468 1511
1469 1512 if dataOut.flagDecodeData:
1470 1513 pwcode = numpy.sum(dataOut.code[0]**2)
1471 1514 #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
1472 1515 normFactor = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * pwcode * dataOut.windowOfFilter
1473 1516 factor = normFactor
1474 1517 z = dataOut.data_spc / factor
1475 1518 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
1476 1519 dataOut.power = numpy.average(z, axis=1)
1477 1520 dataOut.powerdB = 10 * numpy.log10(dataOut.power)
1478 1521
1479 1522 x = dataOut.getVelRange(0)
1480 1523
1481 1524 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1482 1525 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1483 1526 dataOut.dplr_2_u = numpy.ones((1,1,dataOut.nHeights))*numpy.NAN
1484 1527
1485 1528 if mode == 6:
1486 1529 dataOut.Oblique_params = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1487 1530 elif mode == 7:
1488 1531 dataOut.Oblique_params = numpy.ones((1,13,dataOut.nHeights))*numpy.NAN
1489 1532 elif mode == 8:
1490 1533 dataOut.Oblique_params = numpy.ones((1,10,dataOut.nHeights))*numpy.NAN
1491 1534 elif mode == 9:
1492 1535 dataOut.Oblique_params = numpy.ones((1,11,dataOut.nHeights))*numpy.NAN
1493 1536 dataOut.Oblique_param_errors = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1494 1537 elif mode == 11:
1495 1538 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1496 1539 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1497 1540 elif mode == 10: #150 km
1498 1541 dataOut.Oblique_params = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1499 1542 dataOut.Oblique_param_errors = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1500 1543 dataOut.snr_log10 = numpy.ones((1,dataOut.nHeights))*numpy.NAN
1501 1544
1502 1545 dataOut.VelRange = x
1503 1546
1504 1547
1505 1548
1506 1549 #l1=range(22,36) #+62
1507 1550 #l1=range(32,36)
1508 1551 #l2=range(58,99) #+62
1509 1552
1510 1553 #if Hmin1 == None or Hmax1 == None or Hmin2 == None or Hmax2 == None:
1511 1554
1512 1555 minHei1 = 105.
1513 1556 maxHei1 = 122.5
1514 1557 maxHei1 = 130.5
1515 1558
1516 1559 if mode == 10: #150 km
1517 1560 minHei1 = 100
1518 1561 maxHei1 = 100
1519 1562
1520 1563 inda1 = numpy.where(dataOut.heightList >= minHei1)
1521 1564 indb1 = numpy.where(dataOut.heightList <= maxHei1)
1522 1565
1523 1566 minIndex1 = inda1[0][0]
1524 1567 maxIndex1 = indb1[0][-1]
1525 1568
1526 1569 minHei2 = 150.
1527 1570 maxHei2 = 201.25
1528 1571 maxHei2 = 225.3
1529 1572
1530 1573 if mode == 10: #150 km
1531 1574 minHei2 = 110
1532 1575 maxHei2 = 165
1533 1576
1534 1577 inda2 = numpy.where(dataOut.heightList >= minHei2)
1535 1578 indb2 = numpy.where(dataOut.heightList <= maxHei2)
1536 1579
1537 1580 minIndex2 = inda2[0][0]
1538 1581 maxIndex2 = indb2[0][-1]
1539 1582
1540 1583 l1=range(minIndex1,maxIndex1)
1541 1584 l2=range(minIndex2,maxIndex2)
1542 1585
1543 1586 if mode == 4:
1544 1587 '''
1545 1588 for ind in range(dataOut.nHeights):
1546 1589 if(dataOut.heightList[ind]>=168 and dataOut.heightList[ind]<188):
1547 1590 try:
1548 1591 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
1549 1592 except:
1550 1593 pass
1551 1594 '''
1552 1595 for ind in itertools.chain(l1, l2):
1553 1596
1554 1597 try:
1555 1598 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
1556 1599 dataOut.dplr_2_u[0,0,ind] = dataOut.Oblique_params[0,4,ind]/numpy.sin(numpy.arccos(102/dataOut.heightList[ind]))
1557 1600 except:
1558 1601 pass
1559 1602
1560 1603 else:
1561 1604 for hei in itertools.chain(l1, l2):
1562 1605 if numpy.isnan(dataOut.snl[0,hei]) or dataOut.snl[0,hei]<.0:
1563 1606
1564 1607 continue #Avoids the analysis when there is only noise
1565 1608
1566 1609 try:
1567 1610 spc = dataOut.data_spc[0,:,hei]
1568 1611
1569 1612 if mode == 6: #Skew Weighted Bounded
1570 1613 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei] = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1571 1614 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,8,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1572 1615
1573 1616 elif mode == 7: #Triple Skew Weighted Bounded
1574 1617 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_params[0,11,hei],dataOut.Oblique_params[0,12,hei] = self.Triple_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1575 1618 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,12,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1576 1619
1577 1620 elif mode == 8: #Double Skewed Weighted Bounded with inputs
1578 1621 a1, b1, c1, a2, b2, c2, k2, d, dopp = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1579 1622 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei] = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x, a1, b1, c1, a2, b2, c2, k2, d)
1580 1623 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,9,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1581 1624
1582 1625 elif mode == 9: #Double Skewed Weighted Bounded no inputs
1583 1626 #if numpy.max(spc) <= 0:
1584 1627 from scipy.signal import medfilt
1585 1628 spcm = medfilt(spc,11)
1586 1629 if x[numpy.argmax(spcm)] <= 0:
1587 1630 #print("EEJ", dataOut.heightList[hei], hei)
1588 1631 #if hei != 70:
1589 1632 #continue
1590 1633 #else:
1591 1634 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt,dataOut.heightList[hei])
1592 1635 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1593 1636 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1594 1637 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1595 1638
1596 1639 else:
1597 1640 #print("CEEJ")
1598 1641 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt)
1599 1642 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1600 1643 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1601 1644 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1602 1645 elif mode == 11: #Double Weighted Bounded no inputs
1603 1646 #if numpy.max(spc) <= 0:
1604 1647 from scipy.signal import medfilt
1605 1648 spcm = medfilt(spc,11)
1606 1649
1607 1650 if x[numpy.argmax(spcm)] <= 0:
1608 1651 #print("EEJ")
1609 1652 #print("EEJ",dataOut.heightList[hei])
1610 1653 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1611 1654 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1612 1655 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1613 1656 else:
1614 1657 #print("CEEJ",dataOut.heightList[hei])
1615 1658 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1616 1659
1617 1660 elif mode == 10: #150km
1618 1661 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Simple_fit_bound(spc,x,dataOut.nIncohInt)
1619 1662 snr = (dataOut.power[0,hei]*factor - dataOut.Oblique_params[0,3,hei])/dataOut.Oblique_params[0,3,hei]
1620 1663 dataOut.snr_log10[0,hei] = numpy.log10(snr)
1621 1664
1622 1665 else:
1623 1666 spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first')
1624 1667
1625 1668 spc_diff = spc - spc_fit
1626 1669 spc_diff[spc_diff < 0] = 0
1627 1670
1628 1671 spc_fit_diff, A2, B2, C2, D2 = self.Gauss_fit_2(spc_diff,x,'second')
1629 1672
1630 1673 D = (D1+D2)
1631 1674
1632 1675 if mode == 0: #Double Fit
1633 1676 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,0,hei],dataOut.Oblique_param_errors[0,1,hei],dataOut.Oblique_param_errors[0,2,hei],dataOut.Oblique_param_errors[0,3,hei],dataOut.Oblique_param_errors[0,4,hei],dataOut.Oblique_param_errors[0,5,hei],dataOut.Oblique_param_errors[0,6,hei] = self.Double_Gauss_fit_2(spc,x,A1,B1,C1,A2,B2,C2,D)
1634 1677 #spc_double_fit,dataOut.Oblique_params = self.Double_Gauss_fit(spc,x,A1,B1,C1,A2,B2,C2,D)
1635 1678
1636 1679 elif mode == 1: #Double Fit Windowed
1637 1680 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.windowing_double(spc,dataOut.getFreqRange(0),A1,B1,C1,A2,B2,C2,D)
1638 1681
1639 1682 elif mode == 2: #Double Fit Weight
1640 1683 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1641 1684
1642 1685 elif mode == 3: #Simple Fit
1643 1686 dataOut.Oblique_params[0,0,hei] = A1
1644 1687 dataOut.Oblique_params[0,1,hei] = B1
1645 1688 dataOut.Oblique_params[0,2,hei] = C1
1646 1689 dataOut.Oblique_params[0,3,hei] = A2
1647 1690 dataOut.Oblique_params[0,4,hei] = B2
1648 1691 dataOut.Oblique_params[0,5,hei] = C2
1649 1692 dataOut.Oblique_params[0,6,hei] = D
1650 1693
1651 1694 elif mode == 5: #Triple Fit Weight
1652 1695 if hei in l1:
1653 1696 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.duo_Marco(spc,x,A1,B1,C1,A2,B2,C2,D)
1654 1697 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1655 1698 #print(dataOut.Oblique_params[0,0,hei])
1656 1699 #print(dataOut.dplr_2_u[0,0,hei])
1657 1700 else:
1658 1701 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1659 1702 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1660 1703
1661 1704
1662 1705 except:
1663 1706 ###dataOut.Oblique_params[0,:,hei] = dataOut.Oblique_params[0,:,hei]*numpy.NAN
1664 1707 pass
1665 1708
1666 1709 #exit(1)
1667 1710 dataOut.paramInterval = dataOut.nProfiles*dataOut.nCohInt*dataOut.ippSeconds
1668 1711 dataOut.lat=-11.95
1669 1712 dataOut.lon=-76.87
1670 1713 '''
1671 1714 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params<-700, numpy.nan, dop_t1)
1672 1715 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params<+700, numpy.nan, dop_t1)
1673 1716 The amplitudes should be excluded here
1674 1717 '''
1675 1718 if mode == 9: #Double Skew Gaussian
1676 1719 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1677 1720 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1678 1721 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1679 1722 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1680 1723 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1681 1724 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,6,:]
1682 1725 if Dop == 'Shift':
1683 1726 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1684 1727 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1685 1728 elif Dop == 'Max':
1686 1729 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1687 1730 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1688 1731
1689 1732 dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:] #Is this actually the error?
1690 1733 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1691 1734 dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,5,:] #Is this actually the error?
1692 1735 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,6,:]
1693 1736
1694 1737 elif mode == 11: #Double Gaussian
1695 1738 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:]
1696 1739 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1697 1740 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,4,:]
1698 1741 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,5,:]
1699 1742
1700 1743 dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:]
1701 1744 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1702 1745 dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,4,:]
1703 1746 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,5,:]
1704 1747
1705 1748 #print("Before: ", dataOut.Dop_EEJ_T2)
1706 1749 dataOut.Spec_W_T1 = self.clean_outliers(dataOut.Spec_W_T1)
1707 1750 dataOut.Spec_W_T2 = self.clean_outliers(dataOut.Spec_W_T2)
1708 1751 dataOut.Dop_EEJ_T1 = self.clean_outliers(dataOut.Dop_EEJ_T1)
1709 1752 dataOut.Dop_EEJ_T2 = self.clean_outliers(dataOut.Dop_EEJ_T2)
1710 1753 #print("After: ", dataOut.Dop_EEJ_T2)
1711 1754 dataOut.Err_Spec_W_T1 = self.clean_outliers(dataOut.Err_Spec_W_T1)
1712 1755 dataOut.Err_Spec_W_T2 = self.clean_outliers(dataOut.Err_Spec_W_T2)
1713 1756 dataOut.Err_Dop_EEJ_T1 = self.clean_outliers(dataOut.Err_Dop_EEJ_T1)
1714 1757 dataOut.Err_Dop_EEJ_T2 = self.clean_outliers(dataOut.Err_Dop_EEJ_T2)
1715 1758 #print("Before data_snr: ", dataOut.data_snr)
1716 1759 #dataOut.data_snr = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.data_snr)
1717 1760 dataOut.snl = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.snl)
1718 1761
1719 1762 #print("After data_snr: ", dataOut.data_snr)
1720 1763 dataOut.mode = mode
1721 1764 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.Dop_EEJ_T1)) #If every value is NaN, do not proceed
1722 1765 ###dataOut.flagNoData = False #Uncomment only for plotting; otherwise keep commented (needed for saving)
1723 1766
1724 1767 return dataOut
1725 1768
1726 1769 class Gaussian_Windowed(Operation):
1727 1770 '''
1728 1771 Written by R. Flores
1729 1772 '''
1730 1773 def __init__(self):
1731 1774 Operation.__init__(self)
1732 1775
1733 1776 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1734 1777 from scipy.optimize import curve_fit,fmin
1735 1778
1736 1779 def gaussian(x, a, b, c, d):
1737 1780 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
1738 1781 return val
1739 1782
1740 1783 def R_gaussian(x, a, b, c):
1741 1784 N = int(numpy.shape(x)[0])
1742 1785 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1743 1786 return val
1744 1787
1745 1788 def T(x,N):
1746 1789 T = 1-abs(x)/N
1747 1790 return T
1748 1791
1749 1792 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
1750 1793
1751 1794 N = int(numpy.shape(x)[0])
1752 1795
1753 1796 x_max = x[-1]
1754 1797
1755 1798 x_pos = x[nFFTPoints:]
1756 1799 x_neg = x[:nFFTPoints]
1757 1800 #print([int(nFFTPoints/2))
1758 1801 #print("x: ", x)
1759 1802 #print("x_neg: ", x_neg)
1760 1803 #print("x_pos: ", x_pos)
1761 1804
1762 1805
1763 1806 R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0])
1764 1807 R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1])
1765 1808 #print(T(x_pos,x[-1]),x_pos,x[-1])
1766 1809 #print(R_T_neg_1.shape,R_T_pos_1.shape)
1767 1810 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1768 1811 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1769 1812 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1770 1813 max_val_1 = numpy.max(R_T_spc_1)
1771 1814 R_T_spc_1 = R_T_spc_1*a/max_val_1
1772 1815
1773 1816 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1774 1817 R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0])
1775 1818 R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1])
1776 1819 R_T_d_sum = R_T_d_pos + R_T_d_neg
1777 1820 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1778 1821 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1779 1822
1780 1823 R_T_final = R_T_spc_1 + R_T_spc_3
1781 1824
1782 1825 return R_T_final
1783 1826
1784 1827 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1785 1828
1786 1829 from scipy.stats import norm
1787 1830 mean,std=norm.fit(spc)
1788 1831
1789 1832 # estimate starting values from the data
1790 1833 a = A
1791 1834 b = B
1792 1835 c = C#numpy.std(spc)
1793 1836 d = D
1794 1837 #'''
1795 1838 #ippSeconds = 250*20*1.e-6/3
1796 1839
1797 1840 #x_t = ippSeconds * (numpy.arange(nFFTPoints) - nFFTPoints / 2.)
1798 1841
1799 1842 #x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1800 1843 #print("x_t: ", x_t)
1801 1844 #print("nFFTPoints: ", nFFTPoints)
1802 1845 x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints))
1803 1846 #print("x_vel: ", x_vel)
1804 1847 #x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1805 1848 #x_freq = numpy.fft.fftshift(x_freq)
1806 1849 #'''
1807 1850 # define a least squares function to optimize
1808 1851 def minfunc(params):
1809 1852 #print("y.shape: ", numpy.shape(y))
1810 1853 return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
1811 1854
1812 1855 # fit
1813 1856
1814 1857 popt_full = fmin(minfunc,[a,b,c,d], disp=False)
1815 1858 #print("nIter", popt_full[2])
1816 1859 popt = popt_full#[0]
1817 1860
1818 1861 fun = gaussian(x, popt[0], popt[1], popt[2], popt[3])
1819 1862
1820 1863 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1821 1864 return fun, popt[0], popt[1], popt[2], popt[3]
1822 1865
1823 1866 def run(self, dataOut):
1824 1867
1825 1868 from scipy.signal import medfilt
1826 1869 import matplotlib.pyplot as plt
1827 1870 dataOut.moments = numpy.ones((dataOut.nChannels,4,dataOut.nHeights))*numpy.NAN
1828 1871 dataOut.VelRange = dataOut.getVelRange(0)
1829 1872 for nChannel in range(dataOut.nChannels):
1830 1873 for hei in range(dataOut.heightList.shape[0]):
1831 1874 #print("ipp: ", dataOut.ippSeconds)
1832 1875 spc = numpy.copy(dataOut.data_spc[nChannel,:,hei])
1833 1876
1834 1877 #print(VelRange)
1835 1878 #print(dataOut.getFreqRange(64))
1836 1879 spcm = medfilt(spc,11)
1837 1880 spc_max = numpy.max(spcm)
1838 1881 dop1_x0 = dataOut.VelRange[numpy.argmax(spcm)]
1839 1882 D = numpy.min(spcm)
1840 1883
1841 1884 fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints)
1842 1885 dataOut.moments[nChannel,0,hei] = A
1843 1886 dataOut.moments[nChannel,1,hei] = B
1844 1887 dataOut.moments[nChannel,2,hei] = C
1845 1888 dataOut.moments[nChannel,3,hei] = D
1846 1889 '''
1847 1890 plt.figure()
1848 1891 plt.plot(VelRange,spc,marker='*',linestyle='')
1849 1892 plt.plot(VelRange,fun)
1850 1893 plt.title(dataOut.heightList[hei])
1851 1894 plt.show()
1852 1895 '''
1853 1896
1854 1897 return dataOut
1855 1898
1856 1899 class PrecipitationProc(Operation):
1857 1900
1858 1901 '''
1859 1902 Operator that estimates the reflectivity factor (Z) and the rainfall rate (R)
1860 1903
1861 1904 Input:
1862 1905 self.dataOut.data_pre : SelfSpectra
1863 1906
1864 1907 Output:
1865 1908
1866 1909 self.dataOut.data_output : Reflectivity factor, rainfall Rate
1867 1910
1868 1911
1869 1912 Parameters affected:
1870 1913 '''
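    # Minimal usage sketch (an assumption: the usual schainpy controller API with
    # addOperation/addParameter is available; adapt names to the actual experiment script):
    #
    #   opObj = procUnitConf.addOperation(name='PrecipitationProc', optype='other')
    #   opObj.addParameter(name='SNRdBlimit', value='-30', format='float')
    #   opObj.addParameter(name='channel', value='0', format='int')
    #
    # The remaining keyword arguments of run() (Pt, Gt, Gr, Lambda, aL, tauW, ThetaT, ThetaR,
    # Km2, Altitude) default to the radar constants given in the signature.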
1871 1914
1872 1915 def __init__(self):
1873 1916 Operation.__init__(self)
1874 1917 self.i=0
1875 1918
1876 1919 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
1877 1920 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30,channel=None):
1878 1921
1879 1922 # print ('Entering PrecipitationProc ... ')
1880 1923
1881 1924 if radar == "MIRA35C" :
1882 1925
1883 1926 self.spc = dataOut.data_pre[0].copy()
1884 1927 self.Num_Hei = self.spc.shape[2]
1885 1928 self.Num_Bin = self.spc.shape[1]
1886 1929 self.Num_Chn = self.spc.shape[0]
1887 1930 Ze = self.dBZeMODE2(dataOut)
1888 1931
1889 1932 else:
1890 1933
1891 1934 self.spc = dataOut.data_pre[0].copy()
1892 1935
1893 1936 #NOTE: the TX pulse range must be removed
1894 1937 self.spc[:,:,0:7]= numpy.NaN
1895 1938
1896 1939 self.Num_Hei = self.spc.shape[2]
1897 1940 self.Num_Bin = self.spc.shape[1]
1898 1941 self.Num_Chn = self.spc.shape[0]
1899 1942
1900 1943 VelRange = dataOut.spc_range[2]
1901 1944
1902 1945 ''' The radar constant is obtained '''
1903 1946
1904 1947 self.Pt = Pt
1905 1948 self.Gt = Gt
1906 1949 self.Gr = Gr
1907 1950 self.Lambda = Lambda
1908 1951 self.aL = aL
1909 1952 self.tauW = tauW
1910 1953 self.ThetaT = ThetaT
1911 1954 self.ThetaR = ThetaR
1912 1955 self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
1913 1956 self.lt = 10**(1.67/10) # Tx cable loss 1.67 dB
1914 1957 self.lr = 10**(5.73/10) # Rx cable loss 5.73 dB
1915 1958
1916 1959 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
1917 1960 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
1918 1961 RadarConstant = 10e-26 * Numerator / Denominator #
1919 1962 ExpConstant = 10**(40/10) #Experimental constant
1920 1963
1921 1964 SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
1922 1965 for i in range(self.Num_Chn):
1923 1966 SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
1924 1967 SignalPower[numpy.where(SignalPower < 0)] = 1e-20
1925 1968
1926 1969 if channel is None:
1927 1970 SPCmean = numpy.mean(SignalPower, 0)
1928 1971 else:
1929 1972 SPCmean = SignalPower[channel]
1930 1973 Pr = SPCmean[:,:]/dataOut.normFactor
1931 1974
1932 1975 # Declaring auxiliary variables
1933 1976 Range = dataOut.heightList*1000. #Range in m
1934 1977 # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
1935 1978 rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
1936 1979 zMtrx = rMtrx+Altitude
1937 1980 # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
1938 1981 VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))
1939 1982
1940 1983 # height dependence of air density, Foote and Du Toit (1969)
1941 1984 delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
1942 1985 VMtrx = VelMtrx / delv_z #Normalized velocity
1943 1986 VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
1944 1987 # Diameter is related to the fall speed of falling drops
1945 1988 D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
1946 1989 # Only valid for D>= 0.16 mm
1947 1990 D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN
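            # Example: a density-normalized fall speed of 5 m/s gives
            # D = -1.667*ln(0.9369 - 0.097087*5) ~ 1.33 mm, well inside the valid range.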
1948 1991
1949 1992 #Calculate Radar Reflectivity ETAn
1950 1993 ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
1951 1994 ETAd = ETAn * 6.18 * numpy.exp( -0.6 * D_Vz ) * delv_z
1952 1995 # Radar Cross Section
1953 1996 sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
1954 1997 # Drop Size Distribution
1955 1998 DSD = ETAn / sigmaD
1956 1999 # Equivalent reflectivity
1957 2000 Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
1958 2001 Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
1959 2002 # RainFall Rate
1960 2003 RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr
1961 2004
1962 2005 # Censoring the data
1963 2006 # Removing data with SNR below SNRth; the SNR should be evaluated per channel
1964 2007 SNRth = 10**(SNRdBlimit/10) #-30dB
1965 2008 novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition. Maybe OR condition better
1966 2009 W = numpy.nanmean(dataOut.data_dop,0)
1967 2010 W[novalid] = numpy.NaN
1968 2011 Ze_org[novalid] = numpy.NaN
1969 2012 RR[novalid] = numpy.NaN
1970 2013
1971 2014 dataOut.data_output = RR[8]
1972 2015 dataOut.data_param = numpy.ones([3,self.Num_Hei])
1973 2016 dataOut.channelList = [0,1,2]
1974 2017
1975 2018 dataOut.data_param[0]=10*numpy.log10(Ze_org)
1976 2019 dataOut.data_param[1]=-W
1977 2020 dataOut.data_param[2]=RR
1978 2021
1979 2022 # print ('Leaving PrecipitationProc ... ')
1980 2023 return dataOut
1981 2024
1982 2025 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
1983 2026
1984 2027 NPW = dataOut.NPW
1985 2028 COFA = dataOut.COFA
1986 2029
1987 2030 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
1988 2031 RadarConst = dataOut.RadarConst
1989 2032 #frequency = 34.85*10**9
1990 2033
1991 2034 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
1992 2035 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
1993 2036
1994 2037 ETA = numpy.sum(SNR,1)
1995 2038
1996 2039 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
1997 2040
1998 2041 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
1999 2042
2000 2043 for r in range(self.Num_Hei):
2001 2044
2002 2045 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
2003 2046 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
2004 2047
2005 2048 return Ze
2006 2049
2007 2050 # def GetRadarConstant(self):
2008 2051 #
2009 2052 # """
2010 2053 # Constants:
2011 2054 #
2012 2055 # Pt: Transmission Power dB 5kW 5000
2013 2056 # Gt: Transmission Gain dB 24.7 dB 295.1209
2014 2057 # Gr: Reception Gain dB 18.5 dB 70.7945
2015 2058 # Lambda: Wavelength m 0.6741 m 0.6741
2016 2059 # aL: Attenuation losses dB 4dB 2.5118
2017 2060 # tauW: Width of transmission pulse s 4us 4e-6
2018 2061 # ThetaT: Transmission antenna beam angle rad 0.1656317 rad 0.1656317
2019 2062 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
2020 2063 #
2021 2064 # """
2022 2065 #
2023 2066 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
2024 2067 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
2025 2068 # RadarConstant = Numerator / Denominator
2026 2069 #
2027 2070 # return RadarConstant
2028 2071
2029 2072
2030 2073 class FullSpectralAnalysis(Operation):
2031 2074
2032 2075 """
2033 2076 Function that implements the Full Spectral Analysis technique.
2034 2077
2035 2078 Input:
2036 2079 self.dataOut.data_pre : SelfSpectra and CrossSpectra data
2037 2080 self.dataOut.groupList : Pairlist of channels
2038 2081 self.dataOut.ChanDist : Physical distance between receivers
2039 2082
2040 2083
2041 2084 Output:
2042 2085
2043 2086 self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind
2044 2087
2045 2088
2046 2089 Parameters affected: Winds, height range, SNR
2047 2090
2048 2091 """
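    # Minimal usage sketch (an assumption: the usual schainpy controller API; the baseline terms
    # Xi01..Eta12 are only required when dataOut.ChanDist has not been set upstream):
    #
    #   opObj = procUnitConf.addOperation(name='FullSpectralAnalysis', optype='other')
    #   opObj.addParameter(name='SNRdBlimit', value='-16', format='float')
    #   opObj.addParameter(name='NegativeLimit', value='-20', format='float')
    #   opObj.addParameter(name='PositiveLimit', value='20', format='float')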
2049 2092 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
2050 2093
2051 2094 spc = dataOut.data_pre[0].copy()
2052 2095 cspc = dataOut.data_pre[1]
2053 2096 nHeights = spc.shape[2]
2054 2097
2055 2098 # first_height = 0.75 #km (ref: data header 20170822)
2056 2099 # resolution_height = 0.075 #km
2057 2100 '''
2058 2101 finding height range. check this when radar parameters are changed!
2059 2102 '''
2060 2103 if maxheight is not None:
2061 2104 # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
2062 2105 range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
2063 2106 else:
2064 2107 range_max = nHeights
2065 2108 if minheight is not None:
2066 2109 # range_min = int((minheight - first_height) / resolution_height) # theoretical
2067 2110 range_min = int(13.26 * minheight - 5) # empirical, works better
2068 2111 if range_min < 0:
2069 2112 range_min = 0
2070 2113 else:
2071 2114 range_min = 0
2072 2115
2073 2116 pairsList = dataOut.groupList
2074 2117 if dataOut.ChanDist is not None :
2075 2118 ChanDist = dataOut.ChanDist
2076 2119 else:
2077 2120 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
2078 2121
2079 2122 # 4 variables: zonal, meridional, vertical, and average SNR
2080 2123 data_param = numpy.zeros([4,nHeights]) * numpy.NaN
2081 2124 velocityX = numpy.zeros([nHeights]) * numpy.NaN
2082 2125 velocityY = numpy.zeros([nHeights]) * numpy.NaN
2083 2126 velocityZ = numpy.zeros([nHeights]) * numpy.NaN
2084 2127
2085 2128 dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))
2086 2129
2087 2130 '''***********************************************WIND ESTIMATION**************************************'''
2088 2131 for Height in range(nHeights):
2089 2132
2090 2133 if Height >= range_min and Height < range_max:
2091 2134 # error_code will be useful in future analysis
2092 2135 [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
2093 2136 ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)
2094 2137
2095 2138 if abs(Vzon) < 100. and abs(Vmer) < 100.:
2096 2139 velocityX[Height] = Vzon
2097 2140 velocityY[Height] = -Vmer
2098 2141 velocityZ[Height] = Vver
2099 2142
2100 2143 # Censoring data with SNR threshold
2101 2144 dbSNR [dbSNR < SNRdBlimit] = numpy.NaN
2102 2145
2103 2146 data_param[0] = velocityX
2104 2147 data_param[1] = velocityY
2105 2148 data_param[2] = velocityZ
2106 2149 data_param[3] = dbSNR
2107 2150 dataOut.data_param = data_param
2108 2151 return dataOut
2109 2152
2110 2153 def moving_average(self,x, N=2):
2111 2154 """ convolution for smoothenig data. note that last N-1 values are convolution with zeroes """
2112 2155 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
2113 2156
2114 2157 def gaus(self,xSamples,Amp,Mu,Sigma):
2115 2158 return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)
2116 2159
2117 2160 def Moments(self, ySamples, xSamples):
2118 2161 Power = numpy.nanmean(ySamples) # Power, 0th Moment
2119 2162 yNorm = ySamples / numpy.nansum(ySamples)
2120 2163 RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
2121 2164 Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
2122 2165 StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Standard deviation, spectral width
2123 2166 return numpy.array([Power,RadVel,StdDev])
2124 2167
2125 2168 def StopWindEstimation(self, error_code):
2126 2169 Vzon = numpy.NaN
2127 2170 Vmer = numpy.NaN
2128 2171 Vver = numpy.NaN
2129 2172 return Vzon, Vmer, Vver, error_code
2130 2173
2131 2174 def AntiAliasing(self, interval, maxstep):
2132 2175 """
2133 2176 function to prevent errors from aliased values when computing the phase slope
2134 2177 """
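        # Example with maxstep=4.5: [0.5, 1.0, -5.5, -5.0] has a jump of -6.5 between the 2nd and
        # 3rd samples, so the remaining values are shifted by +2*pi, giving roughly
        # [0.5, 1.0, 0.78, 1.28], i.e. a continuous phase ramp.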
2135 2178 antialiased = numpy.zeros(len(interval))
2136 2179 copyinterval = interval.copy()
2137 2180
2138 2181 antialiased[0] = copyinterval[0]
2139 2182
2140 2183 for i in range(1,len(antialiased)):
2141 2184 step = interval[i] - interval[i-1]
2142 2185 if step > maxstep:
2143 2186 copyinterval -= 2*numpy.pi
2144 2187 antialiased[i] = copyinterval[i]
2145 2188 elif step < maxstep*(-1):
2146 2189 copyinterval += 2*numpy.pi
2147 2190 antialiased[i] = copyinterval[i]
2148 2191 else:
2149 2192 antialiased[i] = copyinterval[i].copy()
2150 2193
2151 2194 return antialiased
2152 2195
2153 2196 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
2154 2197 """
2155 2198 Function that Calculates Zonal, Meridional and Vertical wind velocities.
2156 2199 Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.
2157 2200
2158 2201 Input:
2159 2202 spc, cspc : self spectra and cross spectra data. In Briggs notation, roughly S_i*conj(S_i) and S_i*conj(S_j) respectively.
2160 2203 pairsList : Pairlist of channels
2161 2204 ChanDist : array of xi_ij and eta_ij
2162 2205 Height : height at which data is processed
2163 2206 noise : noise in [channels] format for specific height
2164 2207 AbbsisaRange : range of the frequencies or velocities
2165 2208 dbSNR, SNRlimit : signal to noise ratio in db, lower limit
2166 2209
2167 2210 Output:
2168 2211 Vzon, Vmer, Vver : wind velocities
2169 2212 error_code : int that states where code is terminated
2170 2213
2171 2214 0 : no error detected
2172 2215 1 : Gaussian of mean spc exceeds widthlimit
2173 2216 2 : no Gaussian of mean spc found
2174 2217 3 : SNR too low or velocity too high -> e.g. precipitation
2175 2218 4 : at least one Gaussian of cspc exceeds widthlimit
2176 2219 5 : zero out of three cspc Gaussian fits converged
2177 2220 6 : phase slope fit could not be found
2178 2221 7 : arrays used to fit phase have different length
2179 2222 8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
2180 2223
2181 2224 """
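        # Processing chain (summary): normalize the self- and cross-spectra, fit a Gaussian to the
        # channel-averaged spectrum to locate the wind peak, derive Fij and the per-pair phase
        # slopes over that peak, then solve the Briggs & Vincent (1992) linear systems for the
        # constants F, G and A, B, H, from which the horizontal winds follow; the vertical wind
        # comes from the Doppler shift of the mean spectrum.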
2182 2225
2183 2226 error_code = 0
2184 2227
2185 2228 nChan = spc.shape[0]
2186 2229 nProf = spc.shape[1]
2187 2230 nPair = cspc.shape[0]
2188 2231
2189 2232 SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
2190 2233 CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
2191 2234 phase = numpy.zeros([nPair, nProf]) # phase between channels
2192 2235 PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
2193 2236 PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
2194 2237 xFrec = AbbsisaRange[0][:-1] # frequency range
2195 2238 xVel = AbbsisaRange[2][:-1] # velocity range
2196 2239 xSamples = xFrec # the frequency range is taken
2197 2240 delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x
2198 2241
2199 2242 # only consider velocities with in NegativeLimit and PositiveLimit
2200 2243 if (NegativeLimit is None):
2201 2244 NegativeLimit = numpy.min(xVel)
2202 2245 if (PositiveLimit is None):
2203 2246 PositiveLimit = numpy.max(xVel)
2204 2247 xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
2205 2248 xSamples_zoom = xSamples[xvalid]
2206 2249
2207 2250 '''Getting Eij and Nij'''
2208 2251 Xi01, Xi02, Xi12 = ChanDist[:,0]
2209 2252 Eta01, Eta02, Eta12 = ChanDist[:,1]
2210 2253
2211 2254 # spwd limit - updated by D. Scipión 30.03.2021
2212 2255 widthlimit = 10
2213 2256 '''************************* SPC is normalized ********************************'''
2214 2257 spc_norm = spc.copy()
2215 2258 # For each channel
2216 2259 for i in range(nChan):
2217 2260 spc_sub = spc_norm[i,:] - noise[i] # only the signal power
2218 2261 SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)
2219 2262
2220 2263 '''********************** FITTING MEAN SPC GAUSSIAN **********************'''
2221 2264
2222 2265 """ the gaussian of the mean: first subtract noise, then normalize. this is legal because
2223 2266 you only fit the curve and don't need the absolute value of height for calculation,
2224 2267 only for estimation of width. for normalization of cross spectra, you need initial,
2225 2268 unnormalized self-spectra with noise.
2226 2269
2227 2270 Technically, you don't even need to normalize the self-spectra, as you only need the
2228 2271 width of the peak. However, it was left this way. Note that the normalization has a flaw:
2229 2272 due to subtraction of the noise, some values are below zero. Raw "spc" values should be
2230 2273 >= 0, as it is the modulus squared of the signals (complex * its conjugate)
2231 2274 """
2232 2275 # initial conditions
2233 2276 popt = [1e-10,0,1e-10]
2234 2277 # Spectra average
2235 2278 SPCMean = numpy.average(SPC_Samples,0)
2236 2279 # Moments in frequency
2237 2280 SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)
2238 2281
2239 2282 # Gauss Fit SPC in frequency domain
2240 2283 if dbSNR > SNRlimit: # only if SNR > SNRth
2241 2284 try:
2242 2285 popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
2243 2286 if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
2244 2287 return self.StopWindEstimation(error_code = 1)
2245 2288 FitGauss = self.gaus(xSamples_zoom,*popt)
2246 2289 except :#RuntimeError:
2247 2290 return self.StopWindEstimation(error_code = 2)
2248 2291 else:
2249 2292 return self.StopWindEstimation(error_code = 3)
2250 2293
2251 2294 '''***************************** CSPC Normalization *************************
2252 2295 The Spc spectra are used to normalize the crossspectra. Peaks from precipitation
2253 2296 influence the norm which is not desired. First, a range is identified where the
2254 2297 wind peak is estimated -> sum_wind is sum of those frequencies. Next, the area
2255 2298 around it gets cut off and values replaced by mean determined by the boundary
2256 2299 data -> sum_noise (spc is not normalized here, that's why the noise is important)
2257 2300
2258 2301 The sums are then added and multiplied by range/datapoints, because you need
2259 2302 an integral and not a sum for normalization.
2260 2303
2261 2304 A norm is found according to Briggs 92.
2262 2305 '''
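        # In the loop below this amounts to
        #   CSPC_ij(f) <- S_ij(f) / ( sqrt( sum_f S_i(f) * sum_f S_j(f) ) * delta_x ),
        # i.e. the discrete form of dividing by the square root of the product of the
        # integrated (noise-included) self-spectra, following Briggs 92.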
2263 2306 # for each pair
2264 2307 for i in range(nPair):
2265 2308 cspc_norm = cspc[i,:].copy()
2266 2309 chan_index0 = pairsList[i][0]
2267 2310 chan_index1 = pairsList[i][1]
2268 2311 CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
2269 2312 phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
2270 2313
2271 2314 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
2272 2315 self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
2273 2316 self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])
2274 2317
2275 2318 popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
2276 2319 FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))
2277 2320
2278 2321 '''*******************************FIT GAUSS CSPC************************************'''
2279 2322 try:
2280 2323 popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
2281 2324 if popt01[2] > widthlimit: # CONDITION
2282 2325 return self.StopWindEstimation(error_code = 4)
2283 2326 popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
2284 2327 if popt02[2] > widthlimit: # CONDITION
2285 2328 return self.StopWindEstimation(error_code = 4)
2286 2329 popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
2287 2330 if popt12[2] > widthlimit: # CONDITION
2288 2331 return self.StopWindEstimation(error_code = 4)
2289 2332
2290 2333 FitGauss01 = self.gaus(xSamples_zoom, *popt01)
2291 2334 FitGauss02 = self.gaus(xSamples_zoom, *popt02)
2292 2335 FitGauss12 = self.gaus(xSamples_zoom, *popt12)
2293 2336 except:
2294 2337 return self.StopWindEstimation(error_code = 5)
2295 2338
2296 2339
2297 2340 '''************* Getting Fij ***************'''
2298 2341 # x-axis point of the gaussian where the center is located from GaussFit of spectra
2299 2342 GaussCenter = popt[1]
2300 2343 ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
2301 2344 PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]
2302 2345
2303 2346 # Point where e^-1 is located in the gaussian
2304 2347 PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
2305 2348 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to "PeMinus1" in "FitGauss"
2306 2349 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
2307 2350 Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])
2308 2351
2309 2352 '''********** Taking frequency ranges from mean SPCs **********'''
2310 2353 GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
2311 2354 Range = numpy.empty(2)
2312 2355 Range[0] = GaussCenter - GauWidth
2313 2356 Range[1] = GaussCenter + GauWidth
2314 2357 # Point in x-axis where the bandwidth is located (min:max)
2315 2358 ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
2316 2359 ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
2317 2360 PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
2318 2361 PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
2319 2362 Range = numpy.array([ PointRangeMin, PointRangeMax ])
2320 2363 FrecRange = xSamples_zoom[ Range[0] : Range[1] ]
2321 2364
2322 2365 '''************************** Getting Phase Slope ***************************'''
2323 2366 for i in range(nPair):
2324 2367 if len(FrecRange) > 5:
2325 2368 PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
2326 2369 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
2327 2370 if len(FrecRange) == len(PhaseRange):
2328 2371 try:
2329 2372 slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
2330 2373 PhaseSlope[i] = slope
2331 2374 PhaseInter[i] = intercept
2332 2375 except:
2333 2376 return self.StopWindEstimation(error_code = 6)
2334 2377 else:
2335 2378 return self.StopWindEstimation(error_code = 7)
2336 2379 else:
2337 2380 return self.StopWindEstimation(error_code = 8)
2338 2381
2339 2382 '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
2340 2383
2341 2384 '''Getting constant C'''
2342 2385 cC=(Fij*numpy.pi)**2
2343 2386
2344 2387 '''****** Getting constants F and G ******'''
2345 2388 MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
2346 2389 # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
2347 2390 # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
2348 2391 MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
2349 2392 MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
2350 2393 # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
2351 2394 MijResults = numpy.array([MijResult1, MijResult2])
2352 2395 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
2353 2396
2354 2397 '''****** Getting constants A, B and H ******'''
2355 2398 W01 = numpy.nanmax( FitGauss01 )
2356 2399 W02 = numpy.nanmax( FitGauss02 )
2357 2400 W12 = numpy.nanmax( FitGauss12 )
2358 2401
2359 2402 WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
2360 2403 WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
2361 2404 WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
2362 2405 WijResults = numpy.array([WijResult01, WijResult02, WijResult12])
2363 2406
2364 2407 WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
2365 2408 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
2366 2409
2367 2410 VxVy = numpy.array([[cA,cH],[cH,cB]])
2368 2411 VxVyResults = numpy.array([-cF,-cG])
2369 2412 (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
2370 2413 Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
2371 2414 error_code = 0
2372 2415
2373 2416 return Vzon, Vmer, Vver, error_code
2374 2417
2375 2418 class SpectralMoments(Operation):
2376 2419
2377 2420 '''
2378 2421 Function SpectralMoments()
2379 2422
2380 2423 Calculates moments (power, mean, standard deviation) and SNR of the signal
2381 2424
2382 2425 Type of dataIn: Spectra
2383 2426
2384 2427 Configuration Parameters:
2385 2428
2386 2429 dirCosx : Direction cosine along the X axis
2387 2430 dirCosy : Direction cosine along the Y axis
2388 2431
2389 2432 elevation :
2390 2433 azimuth :
2391 2434
2392 2435 Input:
2393 2436 channelList : simple channel list to select e.g. [2,3,7]
2394 2437 self.dataOut.data_pre : Spectral data
2395 2438 self.dataOut.abscissaList : List of frequencies
2396 2439 self.dataOut.noise : Noise level per channel
2397 2440
2398 2441 Affected:
2399 2442 self.dataOut.moments : Parameters per channel
2400 2443 self.dataOut.data_snr : SNR per channel
2401 2444
2402 2445 '''
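    # Minimal usage sketch (an assumption: the usual schainpy controller API). proc_type=0 keeps
    # the classical moment estimates; proc_type=1 adds the windowed Gaussian fit per channel:
    #
    #   opObj = procUnitConf.addOperation(name='SpectralMoments', optype='other')
    #   opObj.addParameter(name='proc_type', value='1', format='int')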
2403 2446
2404 2447 def run(self, dataOut, proc_type=0):
2405 2448
2406 2449 absc = dataOut.abscissaList[:-1]
2407 2450 nChannel = dataOut.data_pre[0].shape[0]
2408 2451 nHei = dataOut.data_pre[0].shape[2]
2409 2452 data_param = numpy.zeros((nChannel, 4 + proc_type*3, nHei))
2410 2453
2411 2454 if proc_type == 1:
2412 2455 fwindow = numpy.zeros(absc.size) + 1
2413 2456 b=64
2414 2457 #b=16
2415 2458 fwindow[0:absc.size//2 - b] = 0
2416 2459 fwindow[absc.size//2 + b:] = 0
2417 2460 type1 = 1 # moments calculation & Gaussian fitting
2418 2461 nProfiles = dataOut.nProfiles
2419 2462 nCohInt = dataOut.nCohInt
2420 2463 nIncohInt = dataOut.nIncohInt
2421 2464 M = numpy.power(numpy.array(1/(nProfiles * nCohInt) ,dtype='float32'),2)
2422 2465 N = numpy.array(M / nIncohInt,dtype='float32')
2423 2466 data = dataOut.data_pre[0] * N
2424 2467 #noise = dataOut.noise * N
2425 2468 noise = numpy.zeros(nChannel)
2426 2469 for ind in range(nChannel):
2427 2470 noise[ind] = self.__NoiseByChannel(nProfiles, nIncohInt, data[ind,:,:])
2428 2471 smooth=3
2429 2472 else:
2430 2473 data = dataOut.data_pre[0]
2431 2474 noise = dataOut.noise
2432 2475 fwindow = None
2433 2476 type1 = 0
2434 2477 nIncohInt = None
2435 2478 smooth=None
2436 2479
2437 2480 for ind in range(nChannel):
2438 2481 data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind], nicoh=nIncohInt, smooth=smooth, type1=type1, fwindow=fwindow, id_ch=ind)
2439 2482
2440 2483 if proc_type == 1:
2441 2484 dataOut.moments = data_param[:,1:,:]
2442 2485 dataOut.data_dop = data_param[:,2]
2443 2486 dataOut.data_width = data_param[:,1]
2444 2487 dataOut.data_snr = data_param[:,0]
2445 2488 dataOut.data_pow = data_param[:,6] # to compare with type0 proccessing
2446 2489 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, data_param[:,3], data_param[:,4],data_param[:,5]),axis=2)
2447 2490 else:
2448 2491 dataOut.moments = data_param[:,1:,:]
2449 2492 dataOut.data_snr = data_param[:,0]
2450 2493 dataOut.data_pow = data_param[:,1]
2451 2494 dataOut.data_dop = data_param[:,2]
2452 2495 dataOut.data_width = data_param[:,3]
2453 2496 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, dataOut.data_pow),axis=2)
2454 2497
2455 2498 return dataOut
2456 2499
2457 def __calculateMoments(self, oldspec, oldfreq, n0,
2458 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None,id_ch=0):
2500 def __calculateMoments(self, oldspec, oldfreq, n0, normFactor = 1,nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None,id_ch=0):
2459 2501
2460 2502 def __GAUSSWINFIT1(A, flagPDER=0):
2461 2503 nonlocal truex, xvalid
2462 2504 nparams = 4
2463 2505 M=truex.size
2464 2506 mm=numpy.arange(M,dtype='f4')
2465 2507 delta = numpy.zeros(M,dtype='f4')
2466 2508 delta[0] = 1.0
2467 2509 Ts = numpy.array([1.0/(2*truex[0])],dtype='f4')[0]
2468 2510 jj = -1j
2469 2511 #if self.winauto is None: self.winauto = (1.0 - mm/M)
2470 2512 winauto = (1.0 - mm/M)
2471 2513 winauto = winauto/winauto.max() # Normalized to 1
2472 2514 #ON_ERROR,2 # IDL sentence: Return to caller if an error occurs
2473 2515 A[0] = numpy.abs(A[0])
2474 2516 A[2] = numpy.abs(A[2])
2475 2517 A[3] = numpy.abs(A[3])
2476 2518 pi=numpy.array([numpy.pi],dtype='f4')[0]
2477 2519 if A[2] != 0:
2478 2520 Z = numpy.exp(-2*numpy.power((pi*A[2]*mm*Ts),2,dtype='f4')+jj*2*pi*A[1]*mm*Ts, dtype='c8') # Get Z
2479 2521 else:
2480 2522 Z = mm*0.0
2481 2523 A[0] = 0.0
2482 2524 junkF = numpy.roll(2*fft(winauto*(A[0]*Z+A[3]*delta)).real - \
2483 2525 winauto[0]*(A[0]+A[3]), M//2) # *M scale for fft not needed in python
2484 2526 F = junkF[xvalid]
2485 2527 if flagPDER == 0: #NEED PARTIAL?
2486 2528 return F
2487 2529 PDER = numpy.zeros((M,nparams)) #YES, MAKE ARRAY.
2488 2530 PDER[:,0] = numpy.roll(2*(fft(winauto*Z)*M) - winauto[0], M//2)
2489 2531 PDER[:,1] = numpy.roll(2*(fft(winauto*jj*2*numpy.pi*mm*Ts*A[0]*Z)*M), M//2)
2490 2532 PDER[:,2] = numpy.roll(2*(fft(winauto*(-4*numpy.power(numpy.pi*mm*Ts,2)*A[2]*A[0]*Z))*M), M//2)
2491 2533 PDER[:,3] = numpy.roll(2*(fft(winauto*delta)*M) - winauto[0], M//2)
2492 2534 PDER = PDER[xvalid,:]
2493 2535 return F, PDER
2494 2536
2495 2537 def __curvefit_koki(y, a, Weights, FlagNoDerivative=1,
2496 2538 itmax=20, tol=None):
2497 2539 #ON_ERROR,2 IDL SENTENCE: RETURN TO THE CALLER IF ERROR
2498 2540 if tol == None:
2499 2541 tol = numpy.array([1.e-3],dtype='f4')[0]
2500 2542 typ=a.dtype
2501 2543 double = 1 if typ == numpy.float64 else 0
2502 2544 if typ != numpy.float32:
2503 2545 a=a.astype(numpy.float32) #Make params floating
2504 2546 # if we will be estimating partial derivates then compute machine precision
2505 2547 if FlagNoDerivative == 1:
2506 2548 res=numpy.MachAr(float_conv=numpy.float32)
2507 2549 eps=numpy.sqrt(res.eps)
2508 2550
2509 2551 nterms = a.size # Number of parameters
2510 2552 nfree=numpy.array([numpy.size(y) - nterms],dtype='f4')[0] # Degrees of freedom
2511 2553 if nfree <= 0: print('Curvefit - not enough data points.')
2512 2554 flambda= numpy.array([0.001],dtype='f4')[0] # Initial lambda
2513 2555 #diag=numpy.arange(nterms)*(nterms+1) # Subscripts of diagonal elements
2514 2556 # Use diag method in python
2515 2557 converge=1
2516 2558
2517 2559 #Define the partial derivative array
2518 2560 PDER = numpy.zeros((nterms,numpy.size(y)),dtype='f8') if double == 1 else numpy.zeros((nterms,numpy.size(y)),dtype='f4')
2519 2561
2520 2562 for Niter in range(itmax): #Iteration loop
2521 2563
2522 2564 if FlagNoDerivative == 1:
2523 2565 #Evaluate function and estimate partial derivatives
2524 2566 yfit = __GAUSSWINFIT1(a)
2525 2567 for term in range(nterms):
2526 2568 p=a.copy() # Copy current parameters
2527 2569 #Increment size for forward difference derivative
2528 2570 inc = eps * abs(p[term])
2529 2571 if inc == 0: inc = eps
2530 2572 p[term] = p[term] + inc
2531 2573 yfit1 = __GAUSSWINFIT1(p)
2532 2574 PDER[term,:] = (yfit1-yfit)/inc
2533 2575 else:
2534 2576 #The user's procedure will return partial derivatives
2535 2577 yfit,PDER=__GAUSSWINFIT1(a, flagPDER=1)
2536 2578
2537 2579 beta = numpy.dot(PDER,(y-yfit)*Weights)
2538 2580 alpha = numpy.dot(PDER * numpy.tile(Weights,(nterms,1)), numpy.transpose(PDER))
2539 2581 # save current values of return parameters
2540 2582 sigma1 = numpy.sqrt( 1.0 / numpy.diag(alpha) ) # Current sigma.
2541 2583 sigma = sigma1
2542 2584
2543 2585 chisq1 = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # Current chi squared.
2544 2586 chisq = chisq1
2545 2587 yfit1 = yfit
2546 2588 elev7=numpy.array([1.0e7],dtype='f4')[0]
2547 2589 compara =numpy.sum(abs(y))/elev7/nfree
2548 2590 done_early = chisq1 < compara
2549 2591
2550 2592 if done_early:
2551 2593 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2552 2594 if done_early: Niter -= 1
2553 2595 #save_tp(chisq,Niter,yfit)
2554 2596 return yfit, a, converge, sigma, chisq, chi2 # return result
2555 2597 #c = numpy.dot(c, c) # this operator implemented at the next lines
2556 2598 c_tmp = numpy.sqrt(numpy.diag(alpha))
2557 2599 siz=len(c_tmp)
2558 2600 c=numpy.dot(c_tmp.reshape(siz,1),c_tmp.reshape(1,siz))
2559 2601 lambdaCount = 0
2560 2602 while True:
2561 2603 lambdaCount += 1
2562 2604 # Normalize alpha to have unit diagonal.
2563 2605 array = alpha / c
2564 2606 # Augment the diagonal.
2565 2607 one=numpy.array([1.],dtype='f4')[0]
2566 2608 numpy.fill_diagonal(array,numpy.diag(array)*(one+flambda))
2567 2609 # Invert modified curvature matrix to find new parameters.
2568 2610 try:
2569 2611 array = (1.0/array) if array.size == 1 else numpy.linalg.inv(array)
2570 2612 except Exception as e:
2571 2613 print(e)
2572 2614 array[:]=numpy.NaN
2573 2615
2574 2616 b = a + numpy.dot(numpy.transpose(beta),array/c) # New params
2575 2617 yfit = __GAUSSWINFIT1(b) # Evaluate function
2576 2618 chisq = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # New chisq
2577 2619 sigma = numpy.sqrt(numpy.diag(array)/numpy.diag(alpha)) # New sigma
2578 2620 if (numpy.isfinite(chisq) == 0) or \
2579 2621 (lambdaCount > 30 and chisq >= chisq1):
2580 2622 # Reject changes made this iteration, use old values.
2581 2623 yfit = yfit1
2582 2624 sigma = sigma1
2583 2625 chisq = chisq1
2584 2626 converge = 0
2585 2627 #print('Failed to converge.')
2586 2628 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2587 2629 if done_early: Niter -= 1
2588 2630 #save_tp(chisq,Niter,yfit)
2589 2631 return yfit, a, converge, sigma, chisq, chi2 # return result
2590 2632 ten=numpy.array([10.0],dtype='f4')[0]
2591 2633 flambda *= ten # Assume fit got worse
2592 2634 if chisq <= chisq1:
2593 2635 break
2594 2636 hundred=numpy.array([100.0],dtype='f4')[0]
2595 2637 flambda /= hundred
2596 2638
2597 2639 a=b # Save new parameter estimate.
2598 2640 if ((chisq1-chisq)/chisq1) <= tol: # Finished?
2599 2641 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2600 2642 if done_early: Niter -= 1
2601 2643 #save_tp(chisq,Niter,yfit)
2602 2644 return yfit, a, converge, sigma, chisq, chi2 # return result
2603 2645 converge = 0
2604 2646 chi2 = chisq
2605 2647 #print('Failed to converge.')
2606 2648 #save_tp(chisq,Niter,yfit)
2607 2649 return yfit, a, converge, sigma, chisq, chi2
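        # --------------------------------------------------------------------------------------------
        # Note (added for documentation): the block above is a Levenberg-Marquardt style "gradient
        # expansion" fit. The curvature matrix alpha is normalized to unit diagonal, its diagonal is
        # inflated by (1 + flambda), and the damped matrix is inverted to obtain the parameter update.
        # flambda is multiplied by 10 on every trial step; when a trial finally lowers chi-square the
        # inner loop exits and flambda is divided by 100 (a net factor of 1/10 for a first-try
        # success), and the outer iteration stops once the relative chi-square improvement is <= tol.
        # --------------------------------------------------------------------------------------------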
2608 2650
2609 2651 if (nicoh is None): nicoh = 1
2610 2652 if (graph is None): graph = 0
2611 2653 if (smooth is None): smooth = 0
2612 2654 elif (smooth < 3): smooth = 0
2613 2655
2614 2656 if (type1 is None): type1 = 0
2615 2657 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
2616 2658 if (snrth is None): snrth = -3 #-20.0
2617 2659 if (dc is None): dc = 0
2618 2660 if (aliasing is None): aliasing = 0
2619 2661 if (oldfd is None): oldfd = 0
2620 2662 if (wwauto is None): wwauto = 0
2621 2663
2622 2664 if (n0 < 1.e-20): n0 = 1.e-20
2623 2665
2624 2666 xvalid = numpy.where(fwindow == 1)[0]
2625 2667 freq = oldfreq
2626 2668 truex = oldfreq
2627 2669 vec_power = numpy.zeros(oldspec.shape[1])
2628 2670 vec_fd = numpy.zeros(oldspec.shape[1])
2629 2671 vec_w = numpy.zeros(oldspec.shape[1])
2630 2672 vec_snr = numpy.zeros(oldspec.shape[1])
2631 2673 vec_n1 = numpy.empty(oldspec.shape[1])
2632 2674 vec_fp = numpy.empty(oldspec.shape[1])
2633 2675 vec_sigma_fd = numpy.empty(oldspec.shape[1])
2676 norm = 1
2634 2677
2635 2678 for ind in range(oldspec.shape[1]):
2636 2679
2637 2680 spec = oldspec[:,ind]
2638 2681 if (smooth == 0):
2639 2682 spec2 = spec
2640 2683 else:
2641 2684 spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
2642 2685
2643 2686 aux = spec2*fwindow
2644 2687 max_spec = aux.max()
2645 2688 m = aux.tolist().index(max_spec)
2646 2689
2690 if hasattr(normFactor, "ndim"):
2691 if normFactor.ndim >= 1:
2692 norm = normFactor[ind]
2693
2647 2694 if m > 2 and m < oldfreq.size - 3:
2648 2695 newindex = m + numpy.array([-2,-1,0,1,2])
2649 2696 newfreq = numpy.arange(20)/20.0*(numpy.max(freq[newindex])-numpy.min(freq[newindex]))+numpy.min(freq[newindex])
2650 2697 #peakspec = SPLINE(,)
2651 2698 tck = interpolate.splrep(freq[newindex], spec2[newindex])
2652 2699 peakspec = interpolate.splev(newfreq, tck)
2653 2700 # max_spec = MAX(peakspec,)
2654 2701 max_spec = numpy.max(peakspec)
2655 2702 mnew = numpy.argmax(peakspec)
2656 2703 #fp = newfreq(mnew)
2657 2704 fp = newfreq[mnew]
2658 2705 else:
2659 2706 fp = freq[m]
2660 2707
2661 2708 if type1==0:
2662 2709
2663 2710 # Moments Estimation
2664 2711 bb = spec2[numpy.arange(m,spec2.size)]
2665 2712 bb = (bb<n0).nonzero()
2666 2713 bb = bb[0]
2667 2714
2668 2715 ss = spec2[numpy.arange(0,m + 1)]
2669 2716 ss = (ss<n0).nonzero()
2670 2717 ss = ss[0]
2671 2718
2672 2719 if (bb.size == 0):
2673 2720 bb0 = spec.size - 1 - m
2674 2721 else:
2675 2722 bb0 = bb[0] - 1
2676 2723 if (bb0 < 0):
2677 2724 bb0 = 0
2678 2725
2679 2726 if (ss.size == 0):
2680 2727 ss1 = 1
2681 2728 else:
2682 2729 ss1 = max(ss) + 1
2683 2730
2684 2731 if (ss1 > m):
2685 2732 ss1 = m
2686 2733
2687 2734 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
2688 2735
2689 2736 signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipión added with correct definition
2690 2737 total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipión added with correct definition
2691 2738 power = ((spec2[valid] - n0) * fwindow[valid]).sum()
2692 2739 fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
2693 2740 w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
2741 spec2 /= norm # compensation for satellite removal
2694 2742 snr = (spec2.mean()-n0)/n0
2695 2743 if (snr < 1.e-20): snr = 1.e-20
2696 2744
2697 2745 vec_power[ind] = total_power
2698 2746 vec_fd[ind] = fd
2699 2747 vec_w[ind] = w
2700 2748 vec_snr[ind] = snr
2701 2749 else:
2702 2750 # Noise by heights
2703 2751 n1, stdv = self.__get_noise2(spec, nicoh)
2704 2752 # Moments Estimation
2705 2753 bb = spec2[numpy.arange(m,spec2.size)]
2706 2754 bb = (bb<n1).nonzero()
2707 2755 bb = bb[0]
2708 2756
2709 2757 ss = spec2[numpy.arange(0,m + 1)]
2710 2758 ss = (ss<n1).nonzero()
2711 2759 ss = ss[0]
2712 2760
2713 2761 if (bb.size == 0):
2714 2762 bb0 = spec.size - 1 - m
2715 2763 else:
2716 2764 bb0 = bb[0] - 1
2717 2765 if (bb0 < 0):
2718 2766 bb0 = 0
2719 2767
2720 2768 if (ss.size == 0):
2721 2769 ss1 = 1
2722 2770 else:
2723 2771 ss1 = max(ss) + 1
2724 2772
2725 2773 if (ss1 > m):
2726 2774 ss1 = m
2727 2775
2728 2776 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
2729 2777 power = ((spec[valid] - n1)*fwindow[valid]).sum()
2730 2778 fd = ((spec[valid]- n1)*freq[valid]*fwindow[valid]).sum()/power
2731 2779 try:
2732 2780 w = numpy.sqrt(((spec[valid] - n1)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
2733 2781 except:
2734 2782 w = float("NaN")
2735 2783 snr = power/(n0*fwindow.sum())
2736 2784 if snr < 1.e-20: snr = 1.e-20
2737 2785
2738 2786 # The Gaussian adjustment starts here
2739 2787
2740 2788 if snr > numpy.power(10,0.1*snrth):
2741 2789
2742 2790 a = numpy.zeros(4,dtype='f4')
2743 2791 a[0] = snr * n0
2744 2792 a[1] = fd
2745 2793 a[2] = w
2746 2794 a[3] = n0
2747 2795
2748 2796 np = spec.size
2749 2797 aold = a.copy()
2750 2798 spec2 = spec.copy()
2751 2799 oldxvalid = xvalid.copy()
2752 2800
2753 2801 for i in range(2):
2754 2802
2755 2803 ww = 1.0/(numpy.power(spec2,2)/nicoh)
2756 2804 ww[np//2] = 0.0
2757 2805
2758 2806 a = aold.copy()
2759 2807 xvalid = oldxvalid.copy()
2760 2808 #self.show_var(xvalid)
2761 2809
2762 2810 gaussfn = __curvefit_koki(spec[xvalid], a, ww[xvalid])
2763 2811 a = gaussfn[1]
2764 2812 converge = gaussfn[2]
2765 2813
2766 2814 xvalid = numpy.arange(np)
2767 2815 spec2 = __GAUSSWINFIT1(a)
2768 2816
2769 2817 xvalid = oldxvalid.copy()
2770 2818 power = a[0] * np
2771 2819 fd = a[1]
2772 2820 sigma_fd = gaussfn[3][1]
2773 2821 snr = max(power/ (max(a[3],n0) * len(oldxvalid)) * converge, 1e-20)
2774 2822 w = numpy.abs(a[2])
2775 2823 n1 = max(a[3], n0)
2776 2824
2777 2825 #gauss_adj=[fd,w,snr,n1,fp,sigma_fd]
2778 2826 else:
2779 2827 sigma_fd=numpy.nan # to avoid UnboundLocalError: local variable 'sigma_fd' referenced before assignment
2780 2828
2781 2829 vec_fd[ind] = fd
2782 2830 vec_w[ind] = w
2783 2831 vec_snr[ind] = snr
2784 2832 vec_n1[ind] = n1
2785 2833 vec_fp[ind] = fp
2786 2834 vec_sigma_fd[ind] = sigma_fd
2787 2835 vec_power[ind] = power # to compare with type 0 processing
2788 2836
2789 2837 if type1==1:
2790 2838 return numpy.vstack((vec_snr, vec_w, vec_fd, vec_n1, vec_fp, vec_sigma_fd, vec_power)) # snr and fd exchanged to compare doppler of both types
2791 2839 else:
2792 2840 return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
2793 2841
2794 2842 def __get_noise2(self,POWER, fft_avg, TALK=0):
2795 2843 '''
2796 2844 Noise estimation per height (n1). Similar to the IDL routine.
2797 2845 '''
2798 2846 SPECT_PTS = len(POWER)
2799 2847 fft_avg = fft_avg*1.0
2800 2848 NOMIT = 0
2801 2849 NN = SPECT_PTS - NOMIT
2802 2850 N = NN//2
2803 2851 ARR = numpy.concatenate((POWER[0:N+1],POWER[N+NOMIT+1:SPECT_PTS]))
2804 2852 ARR = numpy.sort(ARR)
2805 2853 NUMS_MIN = (SPECT_PTS+7)//8
2806 2854 RTEST = (1.0+1.0/fft_avg)
2807 2855 SUM = 0.0
2808 2856 SUMSQ = 0.0
2809 2857 J = 0
2810 2858 for I in range(NN):
2811 2859 J = J + 1
2812 2860 SUM = SUM + ARR[I]
2813 2861 SUMSQ = SUMSQ + ARR[I]*ARR[I]
2814 2862 AVE = SUM*1.0/J
2815 2863 if J > NUMS_MIN:
2816 2864 if (SUMSQ*J <= RTEST*SUM*SUM): RNOISE = AVE
2817 2865 else:
2818 2866 if J == NUMS_MIN: RNOISE = AVE
2819 2867 if TALK == 1: print('Noise Power (2):%4.4f' %RNOISE)
2820 2868 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
2821 2869 return RNOISE, stdv
2822 2870
2823 2871 def __get_noise1(self, power, fft_avg, TALK=0):
2824 2872 '''
2825 2873 Noise estimation per height (n0). Similar to the IDL routine.
2826 2874 '''
2827 2875 num_pts = numpy.size(power)
2828 2876 #print('num_pts',num_pts)
2829 2877 #print('power',power.shape)
2830 2878 #print(power[256:267,0:2])
2831 2879 fft_avg = fft_avg*1.0
2832 2880
2833 2881 ind = numpy.argsort(power, axis=None, kind='stable')
2834 2882 #ind = numpy.argsort(numpy.reshape(power,-1))
2835 2883 #print(ind.shape)
2836 2884 #print(ind[0:11])
2837 2885 #print(numpy.reshape(power,-1)[ind[0:11]])
2838 2886 ARR = numpy.reshape(power,-1)[ind]
2839 2887 #print('ARR',len(ARR))
2840 2888 #print('ARR',ARR.shape)
2841 2889 NUMS_MIN = num_pts//10
2842 2890 RTEST = (1.0+1.0/fft_avg)
2843 2891 SUM = 0.0
2844 2892 SUMSQ = 0.0
2845 2893 J = 0
2846 2894 cont = 1
2847 2895 while cont == 1 and J < num_pts:
2848 2896
2849 2897 SUM = SUM + ARR[J]
2850 2898 SUMSQ = SUMSQ + ARR[J]*ARR[J]
2851 2899 J = J + 1
2852 2900
2853 2901 if J > NUMS_MIN:
2854 2902 if (SUMSQ*J <= RTEST*SUM*SUM):
2855 2903 LNOISE = SUM*1.0/J
2856 2904 else:
2857 2905 J = J - 1
2858 2906 SUM = SUM - ARR[J]
2859 2907 SUMSQ = SUMSQ - ARR[J]*ARR[J]
2860 2908 cont = 0
2861 2909 else:
2862 2910 if J == NUMS_MIN: LNOISE = SUM*1.0/J
2863 2911 if TALK == 1: print('Noise Power (1):%8.8f' %LNOISE)
2864 2912 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
2865 2913 return LNOISE, stdv
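    # ----------------------------------------------------------------------------------------------
    # Illustrative sketch (documentation only, not called by this unit): __get_noise1/__get_noise2
    # implement a sorted-power noise estimate similar in spirit to the Hildebrand-Sekhon criterion.
    # Spectral points are sorted in ascending order and accumulated while the sample statistics stay
    # consistent with pure noise, i.e. while SUMSQ*J <= (1 + 1/fft_avg)*SUM*SUM once at least
    # NUMS_MIN points have been taken. A minimal stand-alone version of that criterion, assuming a
    # 1-D power array, would look like this:
    #
    #     def noise_sorted_power(power, fft_avg, nums_min):
    #         arr = numpy.sort(power, axis=None)
    #         rtest = 1.0 + 1.0/float(fft_avg)
    #         s = sq = 0.0
    #         noise = arr[0]
    #         for j, p in enumerate(arr, start=1):
    #             s += p
    #             sq += p*p
    #             if j > nums_min and sq*j > rtest*s*s:
    #                 break                      # variance exceeds the noise-only expectation
    #             noise = s/j                    # running mean of the accepted points
    #         return noise
    # ----------------------------------------------------------------------------------------------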
2866 2914
2867 2915 def __NoiseByChannel(self, num_prof, num_incoh, spectra,talk=0):
2868 2916
2869 2917 val_frq = numpy.arange(num_prof-2)+1
2870 2918 val_frq[(num_prof-2)//2:] = val_frq[(num_prof-2)//2:] + 1
2871 2919 junkspc = numpy.sum(spectra[val_frq,:], axis=1)
2872 2920 junkid = numpy.argsort(junkspc)
2873 2921 noisezone = val_frq[junkid[0:num_prof//2]]
2874 2922 specnoise = spectra[noisezone,:]
2875 2923 noise, stdvnoise = self.__get_noise1(specnoise,num_incoh)
2876 2924
2877 2925 if talk:
2878 2926 print('noise =', noise)
2879 2927 return noise
2880 2928 #------------------ Get SA Parameters --------------------------
2881 2929
2882 2930 def GetSAParameters(self):
2883 2931 #SA en frecuencia
2884 2932 pairslist = self.dataOut.groupList
2885 2933 num_pairs = len(pairslist)
2886 2934
2887 2935 vel = self.dataOut.abscissaList
2888 2936 spectra = self.dataOut.data_pre
2889 2937 cspectra = self.dataIn.data_cspc
2890 2938 delta_v = vel[1] - vel[0]
2891 2939
2892 2940 #Calculating the power spectrum
2893 2941 spc_pow = numpy.sum(spectra, 3)*delta_v
2894 2942 #Normalizing Spectra
2895 2943 norm_spectra = spectra/spc_pow
2896 2944 #Calculating the norm_spectra at peak
2897 2945 max_spectra = numpy.max(norm_spectra, 3)
2898 2946
2899 2947 #Normalizing Cross Spectra
2900 2948 norm_cspectra = numpy.zeros(cspectra.shape)
2901 2949
2902 2950 for i in range(num_pairs): # loop runs over cross-pairs (num_chan was undefined here)
2903 2951 norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])
2904 2952
2905 2953 max_cspectra = numpy.max(norm_cspectra,2)
2906 2954 max_cspectra_index = numpy.argmax(norm_cspectra, 2)
2907 2955
2908 2956 for i in range(num_pairs):
2909 2957 cspc_par[i,:,:] = self.__calculateMoments(norm_cspectra) # NOTE: cspc_par is never preallocated in this routine
2910 2958 #------------------- Get Lags ----------------------------------
2911 2959
2912 2960 class JULIADriftsEstimation(Operation):
2913 2961
2914 2962 def __init__(self):
2915 2963 Operation.__init__(self)
2916 2964
2917 2965 def newtotal(self, data):
2918 2966 return numpy.nansum(data)
2919 2967
2920 2968 def data_filter(self, parm, snrth=-20, swth=20, wErrth=500):
2921 2969
2922 2970 Sz0 = parm.shape # Sz0: h,p
2923 2971 drift = parm[:,0]
2924 2972 sw = 2*parm[:,1]
2925 2973 snr = 10*numpy.log10(parm[:,2])
2926 2974 Sz = drift.shape # Sz: h
2927 2975 mask = numpy.ones((Sz[0]))
2928 2976 th=0
2929 2977 valid=numpy.where(numpy.isfinite(snr))
2930 2978 cvalid = len(valid[0])
2931 2979 if cvalid >= 1:
2932 2980 # Mean SNR noise estimate for the i-th group of heights
2933 2981 nbins = int(numpy.max(snr)-numpy.min(snr))+1 # bin size = 1, similar to IDL
2934 2982 h = numpy.histogram(snr,bins=nbins)
2935 2983 hist = h[0]
2936 2984 values = numpy.round_(h[1])
2937 2985 moda = values[numpy.where(hist == numpy.max(hist))]
2938 2986 indNoise = numpy.where(numpy.abs(snr - numpy.min(moda)) < 3)[0]
2939 2987
2940 2988 noise = snr[indNoise]
2941 2989 noise_mean = numpy.sum(noise)/len(noise)
2942 2990 # SNR median
2943 2991 med = numpy.median(snr)
2944 2992 # Set the SNR threshold
2945 2993 if noise_mean > med + 3:
2946 2994 th = med
2947 2995 else:
2948 2996 th = noise_mean + 3
2949 2997 # Build the mask
2950 2998 novalid = numpy.where(snr <= th)[0]
2951 2999 mask[novalid] = numpy.nan
2952 3000 # Remove data that do not exceed the threshold: PARAMETER
2953 3001 novalid = numpy.where(snr <= snrth)
2954 3002 cnovalid = len(novalid[0])
2955 3003 if cnovalid > 0:
2956 3004 mask[novalid] = numpy.nan
2957 3005 novalid = numpy.where(numpy.isnan(snr))
2958 3006 cnovalid = len(novalid[0])
2959 3007 if cnovalid > 0:
2960 3008 mask[novalid] = numpy.nan
2961 3009 new_parm = numpy.zeros((Sz0[0],Sz0[1]))
2962 3010 for h in range(Sz0[0]):
2963 3011 for p in range(Sz0[1]):
2964 3012 if numpy.isnan(mask[h]):
2965 3013 new_parm[h,p]=numpy.nan
2966 3014 else:
2967 3015 new_parm[h,p]=parm[h,p]
2968 3016
2969 3017 return new_parm, th
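    # ----------------------------------------------------------------------------------------------
    # Note (added for documentation): data_filter() estimates the SNR noise level from the mode of a
    # 1-dB histogram of the per-height SNR (samples within 3 dB of the mode are averaged as "noise"),
    # sets the threshold to the median SNR when that noise estimate is unusually high and to
    # noise_mean + 3 dB otherwise, and masks heights below this threshold, below the explicit snrth
    # argument, or with non-finite SNR. The threshold actually applied is returned as th.
    # ----------------------------------------------------------------------------------------------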
2970 3018
2971 3019 def run(self, dataOut, zenith, zenithCorrection,heights=None, statistics=0, otype=0):
2972 3020
2973 3021 dataOut.lat=-11.95
2974 3022 dataOut.lon=-76.87
2975 3023 nCh=dataOut.spcpar.shape[0]
2976 3024 nHei=dataOut.spcpar.shape[1]
2977 3025 nParam=dataOut.spcpar.shape[2]
2978 3026 # Height selection
2979 3027
2980 3028 if not heights:
2981 3029 parm = numpy.zeros((nCh,nHei,nParam))
2982 3030 parm[:] = dataOut.spcpar[:]
2983 3031 else:
2984 3032 hei=dataOut.heightList
2985 3033 hvalid = numpy.where((hei >= heights[0]) & (hei <= heights[1]))[0]
2986 3034 nhvalid=len(hvalid)
2987 3035 dataOut.heightList = hei[hvalid]
2988 3036 parm = numpy.zeros((nCh,nhvalid,nParam))
2989 3037 parm[:] = dataOut.spcpar[:,hvalid,:]
2990 3038
2991 3039
2992 3040 # First filtering: SNR threshold
2993 3041 for i in range(nCh):
2994 3042 parm[i,:,:] = self.data_filter(parm[i,:,:])[0]
2995 3043
2996 3044 zenith = numpy.array(zenith)
2997 3045 zenith -= zenithCorrection
2998 3046 zenith *= numpy.pi/180
2999 3047 alpha = zenith[0]
3000 3048 beta = zenith[1]
3001 3049 dopplerCH0 = parm[0,:,0]
3002 3050 dopplerCH1 = parm[1,:,0]
3003 3051 swCH0 = parm[0,:,1]
3004 3052 swCH1 = parm[1,:,1]
3005 3053 snrCH0 = 10*numpy.log10(parm[0,:,2])
3006 3054 snrCH1 = 10*numpy.log10(parm[1,:,2])
3007 3055 noiseCH0 = parm[0,:,3]
3008 3056 noiseCH1 = parm[1,:,3]
3009 3057 wErrCH0 = parm[0,:,5]
3010 3058 wErrCH1 = parm[1,:,5]
3011 3059
3012 3060 # Vertical and zonal calculation according to geometry
3013 3061 sinB_A = numpy.sin(beta)*numpy.cos(alpha) - numpy.sin(alpha)* numpy.cos(beta)
3014 3062 drift = -(dopplerCH0 * numpy.sin(beta) - dopplerCH1 * numpy.sin(alpha))/ sinB_A
3015 3063 zonal = (dopplerCH0 * numpy.cos(beta) - dopplerCH1 * numpy.cos(alpha))/ sinB_A
3016 3064 snr = (snrCH0 + snrCH1)/2
3017 3065 noise = (noiseCH0 + noiseCH1)/2
3018 3066 sw = (swCH0 + swCH1)/2
3019 3067 w_w_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.sin(beta)/numpy.abs(sinB_A),2) + numpy.power(wErrCH1 * numpy.sin(alpha)/numpy.abs(sinB_A),2))
3020 3068 w_e_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.cos(beta)/numpy.abs(-1*sinB_A),2) + numpy.power(wErrCH1 * numpy.cos(alpha)/numpy.abs(-1*sinB_A),2))
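        # ----------------------------------------------------------------------------------------
        # Note (added for documentation): drift and zonal above come from inverting the two-beam
        # projection of the wind onto the oblique radial directions. With the (sign-convention
        # dependent) model d0 = u*sin(alpha) + w*cos(alpha), d1 = u*sin(beta) + w*cos(beta), the
        # determinant of the 2x2 system is
        #     sinB_A = sin(beta)*cos(alpha) - sin(alpha)*cos(beta) = sin(beta - alpha),
        # and w_w_err / w_e_err propagate the per-channel wErr values through the same inverse,
        # combining the two channel contributions in quadrature.
        # ----------------------------------------------------------------------------------------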
3021 3069
3022 3070 # for statistics150km
3023 3071 if statistics:
3024 3072 print('Implemented offline.')
3025 3073
3026 3074 if otype == 0:
3027 3075 winds = numpy.vstack((snr, drift, zonal, noise, sw, w_w_err, w_e_err)) # to process statistics drifts
3028 3076 elif otype == 3:
3029 3077 winds = numpy.vstack((snr, drift, zonal)) # to generic plot: 3 RTI's
3030 3078 elif otype == 4:
3031 3079 winds = numpy.vstack((snrCH0, drift, snrCH1, zonal)) # to generic plot: 4 RTI's
3032 3080
3033 3081 snr1 = numpy.vstack((snrCH0, snrCH1))
3034 3082 dataOut.data_output = winds
3035 3083 dataOut.data_snr = snr1
3036 3084
3037 3085 dataOut.utctimeInit = dataOut.utctime
3038 3086 dataOut.outputInterval = dataOut.timeInterval
3039 3087 try:
3040 3088 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.data_output[0])) # NAN vectors are not written MADRIGAL CASE
3041 3089 except:
3042 3090 print("Check there is no Data")
3043 3091
3044 3092 return dataOut
3045 3093
3046 3094 class SALags(Operation):
3047 3095 '''
3048 3096 Operation SALags()
3049 3097
3050 3098 Input:
3051 3099 self.dataOut.data_pre
3052 3100 self.dataOut.abscissaList
3053 3101 self.dataOut.noise
3054 3102 self.dataOut.normFactor
3055 3103 self.dataOut.data_snr
3056 3104 self.dataOut.groupList
3057 3105 self.dataOut.nChannels
3058 3106
3059 3107 Affected:
3060 3108 self.dataOut.data_param
3061 3109
3062 3110 '''
3063 3111 def run(self, dataOut):
3064 3112 data_acf = dataOut.data_pre[0]
3065 3113 data_ccf = dataOut.data_pre[1]
3066 3114 normFactor_acf = dataOut.normFactor[0]
3067 3115 normFactor_ccf = dataOut.normFactor[1]
3068 3116 pairs_acf = dataOut.groupList[0]
3069 3117 pairs_ccf = dataOut.groupList[1]
3070 3118
3071 3119 nHeights = dataOut.nHeights
3072 3120 absc = dataOut.abscissaList
3073 3121 noise = dataOut.noise
3074 3122 SNR = dataOut.data_snr
3075 3123 nChannels = dataOut.nChannels
3076 3124 for l in range(len(pairs_acf)):
3077 3125 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
3078 3126
3079 3127 for l in range(len(pairs_ccf)):
3080 3128 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
3081 3129
3082 3130 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
3083 3131 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
3084 3132 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
3085 3133 return
3086 3134
3087 3135 def __calculateTaus(self, data_acf, data_ccf, lagRange):
3088 3136
3089 3137 lag0 = data_acf.shape[1]//2 # integer division: lag0 is used as an array index
3090 3138 # Autocorrelation function
3091 3139 mean_acf = stats.nanmean(data_acf, axis = 0)
3092 3140
3093 3141 # Get the TauCross index
3094 3142 ind_ccf = data_ccf.argmax(axis = 1)
3095 3143 # Get the TauAuto index
3096 3144 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
3097 3145 ccf_lag0 = data_ccf[:,lag0,:]
3098 3146
3099 3147 for i in range(ccf_lag0.shape[0]):
3100 3148 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
3101 3149
3102 3150 # Get TauCross and TauAuto
3103 3151 tau_ccf = lagRange[ind_ccf]
3104 3152 tau_acf = lagRange[ind_acf]
3105 3153
3106 3154 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
3107 3155
3108 3156 tau_ccf[Nan1,Nan2] = numpy.nan
3109 3157 tau_acf[Nan1,Nan2] = numpy.nan
3110 3158 tau = numpy.vstack((tau_ccf,tau_acf))
3111 3159
3112 3160 return tau
3113 3161
3114 3162 def __calculateLag1Phase(self, data, lagTRange):
3115 3163 data1 = stats.nanmean(data, axis = 0)
3116 3164 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
3117 3165
3118 3166 phase = numpy.angle(data1[lag1,:])
3119 3167
3120 3168 return phase
3121 3169
3122 3170 def fit_func( x, a0, a1, a2): #, a3, a4, a5):
3123 3171 z = (x - a1) / a2
3124 3172 y = a0 * numpy.exp(-z**2 / a2) #+ a3 + a4 * x + a5 * x**2
3125 3173 return y
3126 3174
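# Illustrative sketch (added for documentation; hypothetical helper, not used by the processing
# chain): fit_func above is the Gaussian-like model that CleanRayleigh() fits to the histogram of
# log-power values; the fitted centre (a1) and width (a2) are then used to flag outlier echoes.
# The demo below uses synthetic data only.
def _fit_func_demo():
    x_dist = numpy.linspace(-40.0, 10.0, 51)                   # histogram bin centres (dB)
    y_dist = 100.0 * numpy.exp(-(x_dist + 15.0)**2 / 18.0)     # synthetic, noise-free histogram
    p0 = [y_dist.max(), numpy.sum(x_dist*y_dist)/numpy.sum(y_dist), 3.0]
    gauss_fit, _ = curve_fit(fit_func, x_dist, y_dist, p0=p0)
    mode, stdv = gauss_fit[1], gauss_fit[2]                    # centre and width of the fitted peak
    return mode, stdv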
3127 3175
3128 3176 class SpectralFitting(Operation):
3129 3177 '''
3130 3178 Operation SpectralFitting()
3131 3179
3132 3180 Input:
3133 3181 Output:
3134 3182 Variables modified:
3135 3183 '''
3136 3184 isConfig = False
3137 3185 __dataReady = False
3138 3186 bloques = None
3139 3187 bloque0 = None
3140 3188
3141 3189 def __init__(self):
3142 3190 Operation.__init__(self)
3143 3191 self.i=0
3144 3192 self.isConfig = False
3145 3193
3146 3194 def setup(self,nChan,nProf,nHei,nBlocks):
3147 3195 self.__dataReady = False
3148 3196 self.bloques = numpy.zeros([2, nProf, nHei,nBlocks], dtype= complex)
3149 3197 self.bloque0 = numpy.zeros([nChan, nProf, nHei, nBlocks])
3150 3198
3151 3199 def __calculateMoments(self,oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
3200
3152 3201 if (nicoh is None): nicoh = 1
3153 3202 if (graph is None): graph = 0
3154 3203 if (smooth is None): smooth = 0
3155 3204 elif (smooth < 3): smooth = 0
3156 3205
3157 3206 if (type1 is None): type1 = 0
3158 3207 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
3159 3208 if (snrth is None): snrth = -3
3160 3209 if (dc is None): dc = 0
3161 3210 if (aliasing is None): aliasing = 0
3162 3211 if (oldfd is None): oldfd = 0
3163 3212 if (wwauto is None): wwauto = 0
3164 3213
3165 3214 if (n0 < 1.e-20): n0 = 1.e-20
3166 3215 freq = oldfreq
3167 3216 vec_power = numpy.zeros(oldspec.shape[1])
3168 3217 vec_fd = numpy.zeros(oldspec.shape[1])
3169 3218 vec_w = numpy.zeros(oldspec.shape[1])
3170 3219 vec_snr = numpy.zeros(oldspec.shape[1])
3171 3220 oldspec = numpy.ma.masked_invalid(oldspec)
3172 3221
3173 3222 for ind in range(oldspec.shape[1]):
3174 3223 spec = oldspec[:,ind]
3175 3224 aux = spec*fwindow
3176 3225 max_spec = aux.max()
3177 3226 m = list(aux).index(max_spec)
3178 3227 #Smooth
3179 3228 if (smooth == 0): spec2 = spec
3180 3229 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
3181 3230
3182 3231 # Moments calculation
3183 3232 bb = spec2[list(range(m,spec2.size))]
3184 3233 bb = (bb<n0).nonzero()
3185 3234 bb = bb[0]
3186 3235
3187 3236 ss = spec2[list(range(0,m + 1))]
3188 3237 ss = (ss<n0).nonzero()
3189 3238 ss = ss[0]
3190 3239
3191 3240 if (bb.size == 0):
3192 3241 bb0 = spec.size - 1 - m
3193 3242 else:
3194 3243 bb0 = bb[0] - 1
3195 3244 if (bb0 < 0):
3196 3245 bb0 = 0
3197 3246
3198 3247 if (ss.size == 0): ss1 = 1
3199 3248 else: ss1 = max(ss) + 1
3200 3249
3201 3250 if (ss1 > m): ss1 = m
3202 3251
3203 3252 valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1
3204 3253 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
3205 3254 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
3206 3255 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
3207 3256 snr = (spec2.mean()-n0)/n0
3208 3257
3209 3258 if (snr < 1.e-20) :
3210 3259 snr = 1.e-20
3211 3260
3212 3261 vec_power[ind] = power
3213 3262 vec_fd[ind] = fd
3214 3263 vec_w[ind] = w
3215 3264 vec_snr[ind] = snr
3216 3265
3217 3266 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
3218 3267 return moments
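    # ----------------------------------------------------------------------------------------------
    # Note (added for documentation): for each height, __calculateMoments() integrates the windowed
    # spectrum above the noise level n0 over the contiguous region around the peak:
    #     power = sum_v (S(v) - n0) * fwindow(v)                               (zeroth moment)
    #     fd    = sum_v (S(v) - n0) * v * fwindow(v) / power                   (first moment, Doppler)
    #     w     = sqrt( sum_v (S(v) - n0) * (v - fd)**2 * fwindow(v) / power ) (spectral width)
    #     snr   = (mean(S) - n0) / n0
    # so the returned array stacks (snr, power, fd, w) per height.
    # ----------------------------------------------------------------------------------------------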
3219 3268
3220 3269 def __DiffCoherent(self, spectra, cspectra, dataOut, noise, snrth, coh_th, hei_th):
3221 3270
3222 3271 nProf = dataOut.nProfiles
3223 3272 heights = dataOut.heightList
3224 3273 nHei = len(heights)
3225 3274 channels = dataOut.channelList
3226 3275 nChan = len(channels)
3227 3276 crosspairs = dataOut.groupList
3228 3277 nPairs = len(crosspairs)
3229 3278 # Separate incoherent from coherent spectra (SNR > 20 dB)
3230 3279 snr_th = 10**(snrth/10.0)
3231 3280 my_incoh_spectra = numpy.zeros([nChan, nProf,nHei], dtype='float')
3232 3281 my_incoh_cspectra = numpy.zeros([nPairs,nProf, nHei], dtype='complex')
3233 3282 my_incoh_aver = numpy.zeros([nChan, nHei])
3234 3283 my_coh_aver = numpy.zeros([nChan, nHei])
3235 3284
3236 3285 coh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
3237 3286 coh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
3238 3287 coh_aver = numpy.zeros([nChan, nHei])
3239 3288
3240 3289 incoh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
3241 3290 incoh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
3242 3291 incoh_aver = numpy.zeros([nChan, nHei])
3243 3292 power = numpy.sum(spectra, axis=1)
3244 3293
3245 3294 if coh_th == None : coh_th = numpy.array([0.75,0.65,0.15]) # 0.65
3246 3295 if hei_th == None : hei_th = numpy.array([60,300,650])
3247 3296 for ic in range(nPairs):
3248 3297 pair = crosspairs[ic]
3249 3298 # If the SNR is above the SNR threshold, the data are taken as coherent
3250 3299 s_n0 = power[pair[0],:]/noise[pair[0]]
3251 3300 s_n1 = power[pair[1],:]/noise[pair[1]]
3252 3301 valid1 =(s_n0>=snr_th).nonzero()
3253 3302 valid2 = (s_n1>=snr_th).nonzero()
3254 3303 valid1 = numpy.array(valid1[0])
3255 3304 valid2 = numpy.array(valid2[0])
3256 3305 valid = valid1
3257 3306 for iv in range(len(valid2)):
3258 3307 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3259 3308 if len(indv[0]) == 0 :
3260 3309 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
3261 3310 if len(valid)>0:
3262 3311 my_coh_aver[pair[0],valid]=1
3263 3312 my_coh_aver[pair[1],valid]=1
3264 3313 # If the coherence is above the coherence threshold, the data are taken as coherent
3265 3314 coh = numpy.squeeze(numpy.nansum(cspectra[ic,:,:], axis=0)/numpy.sqrt(numpy.nansum(spectra[pair[0],:,:], axis=0)*numpy.nansum(spectra[pair[1],:,:], axis=0)))
3266 3315 for ih in range(len(hei_th)):
3267 3316 hvalid = (heights>hei_th[ih]).nonzero()
3268 3317 hvalid = hvalid[0]
3269 3318 if len(hvalid)>0:
3270 3319 valid = (numpy.absolute(coh[hvalid])>coh_th[ih]).nonzero()
3271 3320 valid = valid[0]
3272 3321 if len(valid)>0:
3273 3322 my_coh_aver[pair[0],hvalid[valid]] =1
3274 3323 my_coh_aver[pair[1],hvalid[valid]] =1
3275 3324
3276 3325 coh_echoes = (my_coh_aver[pair[0],:] == 1).nonzero()
3277 3326 incoh_echoes = (my_coh_aver[pair[0],:] != 1).nonzero()
3278 3327 incoh_echoes = incoh_echoes[0]
3279 3328 if len(incoh_echoes) > 0:
3280 3329 my_incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
3281 3330 my_incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
3282 3331 my_incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
3283 3332 my_incoh_aver[pair[0],incoh_echoes] = 1
3284 3333 my_incoh_aver[pair[1],incoh_echoes] = 1
3285 3334
3286 3335
3287 3336 for ic in range(nPairs):
3288 3337 pair = crosspairs[ic]
3289 3338
3290 3339 valid1 =(my_coh_aver[pair[0],:]==1 ).nonzero()
3291 3340 valid2 = (my_coh_aver[pair[1],:]==1).nonzero()
3292 3341 valid1 = numpy.array(valid1[0])
3293 3342 valid2 = numpy.array(valid2[0])
3294 3343 valid = valid1
3295 3344
3296 3345 for iv in range(len(valid2)):
3297 3346
3298 3347 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3299 3348 if len(indv[0]) == 0 :
3300 3349 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
3301 3350 valid1 =(my_coh_aver[pair[0],:] !=1 ).nonzero()
3302 3351 valid2 = (my_coh_aver[pair[1],:] !=1).nonzero()
3303 3352 valid1 = numpy.array(valid1[0])
3304 3353 valid2 = numpy.array(valid2[0])
3305 3354 incoh_echoes = valid1
3306 3355 for iv in range(len(valid2)):
3307 3356
3308 3357 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3309 3358 if len(indv[0]) == 0 :
3310 3359 incoh_echoes = numpy.concatenate(( incoh_echoes,valid2[iv]), axis=None)
3311 3360
3312 3361 if len(valid)>0:
3313 3362 coh_spectra[pair[0],:,valid] = spectra[pair[0],:,valid]
3314 3363 coh_spectra[pair[1],:,valid] = spectra[pair[1],:,valid]
3315 3364 coh_cspectra[ic,:,valid] = cspectra[ic,:,valid]
3316 3365 coh_aver[pair[0],valid]=1
3317 3366 coh_aver[pair[1],valid]=1
3318 3367 if len(incoh_echoes)>0:
3319 3368 incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
3320 3369 incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
3321 3370 incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
3322 3371 incoh_aver[pair[0],incoh_echoes]=1
3323 3372 incoh_aver[pair[1],incoh_echoes]=1
3324 3373
3325 3374 return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver
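    # ----------------------------------------------------------------------------------------------
    # Note (added for documentation): __DiffCoherent() splits the data of each cross-pair into
    # coherent and incoherent sets. A height is flagged coherent when either channel of the pair
    # exceeds the SNR threshold (snr_th = 10**(snrth/10)), or when the pair coherence
    # |<S01>| / sqrt(<S0><S1>) exceeds the height-dependent thresholds coh_th above the heights
    # listed in hei_th; everything else is returned as incoherent, together with per-channel
    # averaging masks for both populations.
    # ----------------------------------------------------------------------------------------------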
3326 3375
3327 3376 def __CleanCoherent(self,snrth, spectra, cspectra, coh_aver,dataOut, noise,clean_coh_echoes,index):
3328 3377
3329 3378 nProf = dataOut.nProfiles
3330 3379 heights = dataOut.heightList
3331 3380 nHei = len(heights)
3332 3381 channels = dataOut.channelList
3333 3382 nChan = len(channels)
3334 3383 crosspairs = dataOut.groupList
3335 3384 nPairs = len(crosspairs)
3336 3385
3337 3386 absc = dataOut.abscissaList[:-1]
3338 3387 data_param = numpy.zeros((nChan, 4, spectra.shape[2]))
3339 3388
3340 3389 clean_coh_spectra = spectra.copy()
3341 3390 clean_coh_cspectra = cspectra.copy()
3342 3391 clean_coh_aver = coh_aver.copy()
3343 3392
3344 3393 spwd_th=[10,6] #spwd_th[0] --> For satellites ; spwd_th[1] --> For special events like SUN.
3345 3394 coh_th = 0.75
3346 3395
3347 3396 rtime0 = [6,18] # period without ESF
3348 3397 rtime1 = [10.5,13.5] # period with high coherence and high (expected) spectral width: SUN.
3349 3398
3350 3399 time = index*5./60 # based on 5-minute processing blocks
3351 3400 if clean_coh_echoes == 1 :
3352 3401 for ind in range(nChan):
3353 3402 data_param[ind,:,:] = self.__calculateMoments( spectra[ind,:,:] , absc , noise[ind] )
3354 3403 spwd = data_param[:,3]
3355 3404 # SPECB_JULIA,header=anal_header,jspectra=spectra,vel=velocities,hei=heights, num_aver=1, mode_fit=0,smoothing=smoothing,jvelr=velr,jspwd=spwd,jsnr=snr,jnoise=noise,jstdvnoise=stdvnoise
3356 3405 # to obtain spwd
3357 3406 for ic in range(nPairs):
3358 3407 pair = crosspairs[ic]
3359 3408 coh = numpy.squeeze(numpy.sum(cspectra[ic,:,:], axis=1)/numpy.sqrt(numpy.sum(spectra[pair[0],:,:], axis=1)*numpy.sum(spectra[pair[1],:,:], axis=1)))
3360 3409 for ih in range(nHei) :
3361 3410 # Considering heights higher than 200km in order to avoid removing phenomena like EEJ.
3362 3411 if heights[ih] >= 200 and coh_aver[pair[0],ih] == 1 and coh_aver[pair[1],ih] == 1 :
3363 3412 # Checking coherence
3364 3413 if (numpy.abs(coh[ih]) <= coh_th) or (time >= rtime0[0] and time <= rtime0[1]) :
3365 3414 # Checking spectral widths
3366 3415 if (spwd[pair[0],ih] > spwd_th[0]) or (spwd[pair[1],ih] > spwd_th[0]) :
3367 3416 # satellite
3368 3417 clean_coh_spectra[pair,:,ih] = 0.0
3369 3418 clean_coh_cspectra[ic,:,ih] = 0.0
3370 3419 clean_coh_aver[pair,ih] = 0
3371 3420 else :
3372 3421 if ((spwd[pair[0],ih] < spwd_th[1]) or (spwd[pair[1],ih] < spwd_th[1])) :
3373 3422 # Special event like the sun.
3374 3423 clean_coh_spectra[pair,:,ih] = 0.0
3375 3424 clean_coh_cspectra[ic,:,ih] = 0.0
3376 3425 clean_coh_aver[pair,ih] = 0
3377 3426
3378 3427 return clean_coh_spectra, clean_coh_cspectra, clean_coh_aver
3379 3428
3380 3429 def CleanRayleigh(self,dataOut,spectra,cspectra,save_drifts):
3381 3430
3382 3431 rfunc = cspectra.copy()
3383 3432 n_funct = len(rfunc[0,:,0,0])
3384 3433 val_spc = spectra*0.0
3385 3434 val_cspc = cspectra*0.0
3386 3435 in_sat_spectra = spectra.copy()
3387 3436 in_sat_cspectra = cspectra.copy()
3388 3437
3389 3438 min_hei = 200
3390 3439 nProf = dataOut.nProfiles
3391 3440 heights = dataOut.heightList
3392 3441 nHei = len(heights)
3393 3442 channels = dataOut.channelList
3394 3443 nChan = len(channels)
3395 3444 crosspairs = dataOut.groupList
3396 3445 nPairs = len(crosspairs)
3397 3446 hval=(heights >= min_hei).nonzero()
3398 3447 ih=hval[0]
3399 3448
3400 3449 for ih in range(hval[0][0],nHei):
3401 3450 for ifreq in range(nProf):
3402 3451 for ii in range(n_funct):
3403 3452
3404 3453 func2clean = 10*numpy.log10(numpy.absolute(rfunc[:,ii,ifreq,ih]))
3405 3454 val = (numpy.isfinite(func2clean)==True).nonzero()
3406 3455 if len(val)>0:
3407 3456 min_val = numpy.around(numpy.amin(func2clean)-2) #> (-40)
3408 3457 if min_val <= -40 : min_val = -40
3409 3458 max_val = numpy.around(numpy.amax(func2clean)+2) #< 200
3410 3459 if max_val >= 200 : max_val = 200
3411 3460 step = 1
3412 3461 #Getting bins and the histogram
3413 3462 x_dist = min_val + numpy.arange(1 + ((max_val-(min_val))/step))*step
3414 3463 y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step))
3415 3464 mean = numpy.sum(x_dist * y_dist) / numpy.sum(y_dist)
3416 3465 sigma = numpy.sqrt(numpy.sum(y_dist * (x_dist - mean)**2) / numpy.sum(y_dist))
3417 3466 parg = [numpy.amax(y_dist),mean,sigma]
3418 3467 try :
3419 3468 gauss_fit, covariance = curve_fit(fit_func, x_dist, y_dist,p0=parg)
3420 3469 mode = gauss_fit[1]
3421 3470 stdv = gauss_fit[2]
3422 3471 except:
3423 3472 mode = mean
3424 3473 stdv = sigma
3425 3474
3426 3475 # Removing echoes deviating more than factor_stdv*stdv from the mode
3427 3476 factor_stdv = 2.5
3428 3477 noval = (abs(func2clean - mode)>=(factor_stdv*stdv)).nonzero()
3429 3478
3430 3479 if len(noval[0]) > 0:
3431 3480 novall = ((func2clean - mode) >= (factor_stdv*stdv)).nonzero()
3432 3481 cross_pairs = crosspairs[ii]
3433 3482 #Getting coherent echoes which are removed.
3434 3483 if len(novall[0]) > 0:
3435 3484 val_spc[novall[0],cross_pairs[0],ifreq,ih] = 1
3436 3485 val_spc[novall[0],cross_pairs[1],ifreq,ih] = 1
3437 3486 val_cspc[novall[0],ii,ifreq,ih] = 1
3438 3487 #Removing coherent from ISR data
3439 3488 spectra[noval,cross_pairs[0],ifreq,ih] = numpy.nan
3440 3489 spectra[noval,cross_pairs[1],ifreq,ih] = numpy.nan
3441 3490 cspectra[noval,ii,ifreq,ih] = numpy.nan
3442 3491
3443 3492 #Getting average of the spectra and cross-spectra from incoherent echoes.
3444 3493 out_spectra = numpy.zeros([nChan,nProf,nHei], dtype=float) #+numpy.nan
3445 3494 out_cspectra = numpy.zeros([nPairs,nProf,nHei], dtype=complex) #+numpy.nan
3446 3495 for ih in range(nHei):
3447 3496 for ifreq in range(nProf):
3448 3497 for ich in range(nChan):
3449 3498 tmp = spectra[:,ich,ifreq,ih]
3450 3499 valid = (numpy.isfinite(tmp[:])==True).nonzero()
3451 3500 if len(valid[0]) >0 :
3452 3501 out_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3453 3502
3454 3503 for icr in range(nPairs):
3455 3504 tmp = numpy.squeeze(cspectra[:,icr,ifreq,ih])
3456 3505 valid = (numpy.isfinite(tmp)==True).nonzero()
3457 3506 if len(valid[0]) > 0:
3458 3507 out_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3459 3508 #Removing fake coherent echoes (at least 4 points around the point)
3460 3509 val_spectra = numpy.sum(val_spc,0)
3461 3510 val_cspectra = numpy.sum(val_cspc,0)
3462 3511
3463 3512 val_spectra = self.REM_ISOLATED_POINTS(val_spectra,4)
3464 3513 val_cspectra = self.REM_ISOLATED_POINTS(val_cspectra,4)
3465 3514
3466 3515 for i in range(nChan):
3467 3516 for j in range(nProf):
3468 3517 for k in range(nHei):
3469 3518 if numpy.isfinite(val_spectra[i,j,k]) and val_spectra[i,j,k] < 1 :
3470 3519 val_spc[:,i,j,k] = 0.0
3471 3520 for i in range(nPairs):
3472 3521 for j in range(nProf):
3473 3522 for k in range(nHei):
3474 3523 if numpy.isfinite(val_cspectra[i,j,k]) and val_cspectra[i,j,k] < 1 :
3475 3524 val_cspc[:,i,j,k] = 0.0
3476 3525
3477 3526 tmp_sat_spectra = spectra.copy()
3478 3527 tmp_sat_spectra = tmp_sat_spectra*numpy.nan
3479 3528 tmp_sat_cspectra = cspectra.copy()
3480 3529 tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan
3481 3530
3482 3531 val = (val_spc > 0).nonzero()
3483 3532 if len(val[0]) > 0:
3484 3533 tmp_sat_spectra[val] = in_sat_spectra[val]
3485 3534
3486 3535 val = (val_cspc > 0).nonzero()
3487 3536 if len(val[0]) > 0:
3488 3537 tmp_sat_cspectra[val] = in_sat_cspectra[val]
3489 3538
3490 3539 #Getting average of the spectra and cross-spectra from incoherent echoes.
3491 3540 sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
3492 3541 sat_cspectra = numpy.zeros((nPairs,nProf,nHei), dtype=complex)
3493 3542 for ih in range(nHei):
3494 3543 for ifreq in range(nProf):
3495 3544 for ich in range(nChan):
3496 3545 tmp = numpy.squeeze(tmp_sat_spectra[:,ich,ifreq,ih])
3497 3546 valid = (numpy.isfinite(tmp)).nonzero()
3498 3547 if len(valid[0]) > 0:
3499 3548 sat_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3500 3549
3501 3550 for icr in range(nPairs):
3502 3551 tmp = numpy.squeeze(tmp_sat_cspectra[:,icr,ifreq,ih])
3503 3552 valid = (numpy.isfinite(tmp)).nonzero()
3504 3553 if len(valid[0]) > 0:
3505 3554 sat_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3506 3555
3507 3556 return out_spectra, out_cspectra,sat_spectra,sat_cspectra
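    # ----------------------------------------------------------------------------------------------
    # Note (added for documentation): CleanRayleigh() works on the stack of spectra accumulated over
    # an averaging interval. For every (pair, frequency, height) it histograms the log cross-power
    # across the stack, fits fit_func() to obtain the mode and width of the distribution, flags
    # samples deviating more than factor_stdv (2.5) widths from the mode as coherent/satellite
    # echoes, and averages the remaining samples into the "clean" incoherent spectra; the flagged
    # samples are averaged separately into sat_spectra / sat_cspectra.
    # ----------------------------------------------------------------------------------------------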
3508 3557
3509 3558 def REM_ISOLATED_POINTS(self,array,rth):
3510 3559 if rth == None : rth = 4
3511 3560 num_prof = len(array[0,:,0])
3512 3561 num_hei = len(array[0,0,:])
3513 3562 n2d = len(array[:,0,0])
3514 3563
3515 3564 for ii in range(n2d) :
3516 3565 tmp = array[ii,:,:]
3517 3566 tmp = numpy.reshape(tmp,num_prof*num_hei)
3518 3567 indxs1 = (numpy.isfinite(tmp)==True).nonzero()
3519 3568 indxs2 = (tmp > 0).nonzero()
3520 3569 indxs1 = (indxs1[0])
3521 3570 indxs2 = indxs2[0]
3522 3571 indxs = None
3523 3572
3524 3573 for iv in range(len(indxs2)):
3525 3574 indv = numpy.array((indxs1 == indxs2[iv]).nonzero())
3526 3575 if len(indv[0]) > 0 :
3527 3576 indxs = numpy.concatenate((indxs,indxs2[iv]), axis=None)
3528 3577
3529 3578 indxs = indxs[1:]
3530 3579 if len(indxs) < 4 :
3531 3580 array[ii,:,:] = 0.
3532 3581 return array # return the array instead of None so the caller can keep indexing it
3533 3582
3534 3583 xpos = numpy.mod(indxs ,num_prof)
3535 3584 ypos = (indxs // num_prof) # integer division so the positions can be used as indices
3536 3585 sx = numpy.argsort(xpos) # Ordering respect to "x" (time)
3537 3586 xpos = xpos[sx]
3538 3587 ypos = ypos[sx]
3539 3588
3540 3589 # *********************************** Cleaning isolated points **********************************
3541 3590 ic = 0
3542 3591 while True :
3543 3592 r = numpy.sqrt(list(numpy.power((xpos[ic]-xpos),2)+ numpy.power((ypos[ic]-ypos),2)))
3544 3593
3545 3594 no_coh1 = (numpy.isfinite(r)==True).nonzero()
3546 3595 no_coh2 = (r <= rth).nonzero()
3547 3596 no_coh1 = numpy.array(no_coh1[0])
3548 3597 no_coh2 = numpy.array(no_coh2[0])
3549 3598 no_coh = None
3550 3599 for iv in range(len(no_coh2)):
3551 3600 indv = numpy.array((no_coh1 == no_coh2[iv]).nonzero())
3552 3601 if len(indv[0]) > 0 :
3553 3602 no_coh = numpy.concatenate((no_coh,no_coh2[iv]), axis=None)
3554 3603 no_coh = no_coh[1:]
3555 3604 if len(no_coh) < 4 :
3556 3605 xpos[ic] = numpy.nan
3557 3606 ypos[ic] = numpy.nan
3558 3607
3559 3608 ic = ic + 1
3560 3609 if (ic == len(indxs)) :
3561 3610 break
3562 3611 indxs = (numpy.isfinite(list(xpos))==True).nonzero()
3563 3612 if len(indxs[0]) < 4 :
3564 3613 array[ii,:,:] = 0.
3565 3614 return array # return the array instead of None so the caller can keep indexing it
3566 3615
3567 3616 xpos = xpos[indxs[0]]
3568 3617 ypos = ypos[indxs[0]]
3569 3618 for i in range(0,len(ypos)):
3570 3619 ypos[i]=int(ypos[i])
3571 3620 junk = tmp
3572 3621 tmp = junk*0.0
3573 3622
3574 3623 tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))]
3575 3624 array[ii,:,:] = numpy.reshape(tmp,(num_prof,num_hei))
3576 3625
3577 3626 return array
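    # ----------------------------------------------------------------------------------------------
    # Note (added for documentation): REM_ISOLATED_POINTS() keeps a flagged (profile, height) sample
    # only if at least 4 flagged samples (itself included) lie within a Euclidean radius rth of it in
    # the (profile, height) plane; isolated flags are assumed to be spurious and are zeroed out.
    # ----------------------------------------------------------------------------------------------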
3578 3627
3579 3628 def moments(self,doppler,yarray,npoints):
3580 3629 ytemp = yarray
3581 3630 val = (ytemp > 0).nonzero()
3582 3631 val = val[0]
3583 3632 if len(val) == 0 : val = range(npoints-1)
3584 3633
3585 3634 ynew = 0.5*(ytemp[val[0]]+ytemp[val[len(val)-1]])
3586 3635 ytemp[len(ytemp):] = [ynew]
3587 3636
3588 3637 index = 0
3589 3638 index = numpy.argmax(ytemp)
3590 3639 ytemp = numpy.roll(ytemp,int(npoints/2)-1-index)
3591 3640 ytemp = ytemp[0:npoints-1]
3592 3641
3593 3642 fmom = numpy.sum(doppler*ytemp)/numpy.sum(ytemp)+(index-(npoints/2-1))*numpy.abs(doppler[1]-doppler[0])
3594 3643 smom = numpy.sum(doppler*doppler*ytemp)/numpy.sum(ytemp)
3595 3644 return [fmom,numpy.sqrt(smom)]
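    # ----------------------------------------------------------------------------------------------
    # Note (added for documentation): moments() recentres the spectrum on its peak and returns the
    # first moment (mean Doppler, corrected for the applied circular shift) and the square root of
    # the second moment about zero:
    #     fmom = sum(v*S)/sum(S) + shift_correction,   smom = sum(v*v*S)/sum(S)
    # ----------------------------------------------------------------------------------------------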
3596 3645
3597 3646 def windowing_single_old(self,spc,x,A,B,C,D,nFFTPoints):
3598 3647 '''
3599 3648 Written by R. Flores
3600 3649 '''
3601 3650 from scipy.optimize import curve_fit,fmin
3602 3651
3603 3652 def gaussian(x, a, b, c, d):
3604 3653 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
3605 3654 return val
3606 3655
3607 3656 def R_gaussian(x, a, b, c):
3608 3657 N = int(numpy.shape(x)[0])
3609 3658 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
3610 3659 return val
3611 3660
3612 3661 def T(x,N):
3613 3662 T = 1-abs(x)/N
3614 3663 return T
3615 3664
3616 3665 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
3617 3666
3618 3667 N = int(numpy.shape(x)[0])
3619 3668
3620 3669 x_max = x[-1]
3621 3670
3622 3671 x_pos = x[nFFTPoints:]
3623 3672 x_neg = x[:nFFTPoints]
3624 3673
3625 3674 R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0])
3626 3675 R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1])
3627 3676 #print(T(x_pos,x[-1]),x_pos,x[-1])
3628 3677 #print(R_T_neg_1.shape,R_T_pos_1.shape)
3629 3678 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
3630 3679 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
3631 3680 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
3632 3681 max_val_1 = numpy.max(R_T_spc_1)
3633 3682 R_T_spc_1 = R_T_spc_1*a/max_val_1
3634 3683 print("R_T_spc_1: ", R_T_spc_1)
3635 3684
3636 3685 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
3637 3686 R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0])
3638 3687 R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1])
3639 3688 R_T_d_sum = R_T_d_pos + R_T_d_neg
3640 3689 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
3641 3690 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
3642 3691
3643 3692 R_T_final = R_T_spc_1# + R_T_spc_3
3644 3693
3645 3694 return R_T_final
3646 3695
3647 3696 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
3648 3697
3649 3698 from scipy.stats import norm
3650 3699 mean,std=norm.fit(spc)
3651 3700
3652 3701 # estimate starting values from the data
3653 3702 print("A: ", A)
3654 3703 a = A-D
3655 3704 b = B
3656 3705 c = C#numpy.std(spc) #C
3657 3706 d = D
3658 3707 #'''
3659 3708 #ippSeconds = 250*20*1.e-6/3
3660 3709
3661 3710 #x_t = ippSeconds * (numpy.arange(nFFTPoints) - nFFTPoints / 2.)
3662 3711
3663 3712 #x_t = numpy.linspace(x_t[0],x_t[-1],3200)
3664 3713 #print("x_t: ", x_t)
3665 3714 #print("nFFTPoints: ", nFFTPoints)
3666 3715 x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints))
3667 3716 #print("x_vel: ", x_vel)
3668 3717 #x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
3669 3718 #x_freq = numpy.fft.fftshift(x_freq)
3670 3719 #'''
3671 3720 # define a least squares function to optimize
3672 3721 import matplotlib.pyplot as plt
3673 3722 aui = R_T_spc_fun(x_vel,a,b,c,d,nFFTPoints)
3674 3723 print("aux_max: ", numpy.nanmax(aui))
3675 3724 #print(dataOut.heightList[hei])
3676 3725 plt.figure()
3677 3726 plt.plot(x,spc,marker='*',linestyle='--')
3678 3727 plt.plot(x,gaussian(x, a, b, c, d),color='b',marker='^',linestyle='')
3679 3728 plt.plot(x,aui,color='k')
3680 3729 #plt.title(dataOut.heightList[hei])
3681 3730 plt.show()
3682 3731
3683 3732 def minfunc(params):
3684 3733 #print("y.shape: ", numpy.shape(y))
3685 3734 return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
3686 3735
3687 3736 # fit
3688 3737
3689 3738 popt_full = fmin(minfunc,[a,b,c,d], disp=False)
3690 3739 #print("nIter", popt_full[2])
3691 3740 popt = popt_full#[0]
3692 3741
3693 3742 fun = gaussian(x, popt[0], popt[1], popt[2], popt[3])
3694 3743 print("pop1[0]: ", popt[0])
3695 3744 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
3696 3745 return fun, popt[0], popt[1], popt[2], popt[3]
3697 3746
3698 3747 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
3699 3748 '''
3700 3749 Written by R. Flores
3701 3750 '''
3702 3751 from scipy.optimize import curve_fit,fmin
3703 3752
3704 3753 def gaussian(x, a, b, c, d):
3705 3754 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
3706 3755 return val
3707 3756
3708 3757 def R_gaussian(x, a, b, c):
3709 3758 N = int(numpy.shape(x)[0])
3710 3759
3711 3760 val = (a*numpy.exp((-(1/2)*x*(x*c**2 + 2*1.j*b)))/numpy.sqrt(1/c**2))
3712 3761
3713 3762 return val
3714 3763
3715 3764 def T(x,N):
3716 3765 T = 1-abs(x)/N
3717 3766 return T
3718 3767
3719 3768 def R_T_spc_fun(x, a, id_dop, c, d, nFFTPoints):
3720 3769
3721 3770 N = int(numpy.shape(x)[0])
3722 3771 b = 0
3723 3772 x_max = x[-1]
3724 3773
3725 3774 x_pos = x[nFFTPoints:]
3726 3775 x_neg = x[:nFFTPoints]
3727 3776 R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0])
3728 3777 R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1])
3729 3778
3730 3779 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
3731 3780 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
3732 3781 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
3733 3782 max_val_1 = numpy.max(R_T_spc_1)
3734 3783 R_T_spc_1 = R_T_spc_1*a/max_val_1
3735 3784 #raise NotImplementedError
3736 3785 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
3737 3786 R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0])
3738 3787 R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1])
3739 3788 R_T_d_sum = R_T_d_pos + R_T_d_neg
3740 3789 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
3741 3790 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
3742 3791
3743 3792 R_T_final = R_T_spc_1 + R_T_spc_3
3744 3793
3745 3794 id_dop = int(id_dop)
3746 3795
3747 3796 R_T_final = numpy.roll(R_T_final,-id_dop)
3748 3797
3749 3798 return R_T_final
3750 3799
3751 3800 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
3752 3801
3753 3802 from scipy.stats import norm
3754 3803 mean,std=norm.fit(spc)
3755 3804
3756 3805 # estimate starting values from the data
3757 3806 a = A-D
3758 3807 b = B
3759 3808 c = C#numpy.std(spc) #C
3760 3809 d = D
3761 3810
3762 3811 id_dop = numpy.argmax(spc)
3763 3812 id_dop = int(spc.shape[0]/2 - id_dop)
3764 3813
3765 3814 x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints))
3766 3815
3767 3816 # define a least squares function to optimize
3768 3817
3769 3818 def minfunc(params):
3770 3819 #print("y.shape: ", numpy.shape(y))
3771 3820 return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
3772 3821
3773 3822 # fit
3774 3823 popt_full = fmin(minfunc,[a,id_dop,c,d], disp=False)
3775 3824 popt = popt_full#[0]
3776 3825
3777 3826 fun = gaussian(x, a, 0, popt[2], popt[3])
3778 3827 fun = numpy.roll(fun,-int(popt[1]))
3779 3828
3780 3829 return fun, popt[0], popt[1], popt[2], popt[3]
3781 3830
3782 3831 def windowing_single_direct(self,spc_mod,x,A,B,C,D,nFFTPoints,timeInterval):
3783 3832 '''
3784 3833 Written by R. Flores
3785 3834 '''
3786 3835 from scipy.optimize import curve_fit,fmin
3787 3836
3788 3837 def gaussian(x, a, b, c, d):
3789 3838 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
3790 3839 return val
3791 3840
3792 3841 def R_gaussian(x, a, b, c, d):
3793 3842 N = int(numpy.shape(x)[0])
3794 3843 val = (a*numpy.exp(-2*c**2*x**2 + 2*x*1.j*b))*(numpy.sqrt(2*numpy.pi)*c)/((numpy.pi)) + d*signal.unit_impulse(N)*numpy.shape(x)[0]/2
3795 3844
3796 3845 return 2*val/numpy.shape(val)[0]
3797 3846
3798 3847 def T(x,N):
3799 3848 T = 1-abs(x)/N
3800 3849 return T
3801 3850
3802 3851 def R_T_spc_fun(x, a, b, c, d, nFFTPoints, timeInterval): #"x" should be time
3803 3852
3804 3853 #timeInterval = 2
3805 3854 x_double = numpy.linspace(0,timeInterval,nFFTPoints)
3806 3855 x_double_m = numpy.flip(x_double)
3807 3856 x_double_aux = numpy.linspace(0,x_double[-2],nFFTPoints)
3808 3857 x_double_t = numpy.concatenate((x_double_m,x_double_aux))
3809 3858 x_double_t /= max(x_double_t)
3810 3859
3811 3860
3812 3861 R_T_sum_1 = R_gaussian(x, a, b, c, d)
3813 3862
3814 3863 R_T_sum_1_flip = numpy.copy(numpy.flip(R_T_sum_1))
3815 3864 R_T_sum_1_flip[-1] = R_T_sum_1_flip[0]
3816 3865 R_T_sum_1_flip = numpy.roll(R_T_sum_1_flip,1)
3817 3866
3818 3867 R_T_sum_1_flip.imag *= -1
3819 3868
3820 3869 R_T_sum_1_total = numpy.concatenate((R_T_sum_1,R_T_sum_1_flip))
3821 3870 R_T_sum_1_total *= x_double_t # multiply by the triangular (lag) window
3822 3871
3823 3872 R_T_sum_1_total = R_T_sum_1_total[:nFFTPoints] + R_T_sum_1_total[nFFTPoints:]
3824 3873
3825 3874 R_T_spc_1 = numpy.fft.fft(R_T_sum_1_total).real
3826 3875 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
3827 3876
3828 3877 freq = numpy.fft.fftfreq(nFFTPoints, d=timeInterval/nFFTPoints)
3829 3878
3830 3879 freq = numpy.fft.fftshift(freq)
3831 3880
3832 3881 freq *= 6/2 # lambda/2 (6 m wavelength assumed): convert frequency to radial velocity
3833 3882
3834 3883 return R_T_spc_1
3835 3884
3836 3885 y = spc_mod
3837 3886
3838 3887 #from scipy.stats import norm
3839 3888
3840 3889 # estimate starting values from the data
3841 3890
3842 3891 a = A-D
3843 3892 b = B
3844 3893 c = C
3845 3894 d = D
3846 3895
3847 3896 # define a least squares function to optimize
3848 3897 import matplotlib.pyplot as plt
3849 3898 #ippSeconds = 2
3850 3899 t_range = numpy.linspace(0,timeInterval,nFFTPoints)
3851 3900 #aui = R_T_spc_fun(t_range,a,b,c,d,nFFTPoints,timeInterval)
3852 3901
3853 3902 def minfunc(params):
3854 3903 return sum((y-R_T_spc_fun(t_range,params[0],params[1],params[2],params[3],nFFTPoints,timeInterval))**2/1)#y**2)
3855 3904
3856 3905 # fit
3857 3906 popt_full = fmin(minfunc,[a,b,c,d], disp=False)
3858 3907 popt = popt_full
3859 3908
3860 3909 fun = R_T_spc_fun(t_range,popt[0],popt[1],popt[2],popt[3],nFFTPoints,timeInterval)
3861 3910
3862 3911 return fun, popt[0], popt[1], popt[2], popt[3]
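    # ----------------------------------------------------------------------------------------------
    # Note (added for documentation): the three windowing_single* routines (written by R. Flores)
    # fit the measured spectrum with the spectrum of a Gaussian autocorrelation model multiplied by
    # the triangular lag window T(x) = 1 - |x|/N implied by a finite record length: the model ACF is
    # built, windowed, Fourier transformed, and the parameters (amplitude, Doppler, width, noise
    # floor) are adjusted with fmin() to minimise the squared difference with the data.
    # ----------------------------------------------------------------------------------------------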
3863 3912 # **********************************************************************************************
3864 3913 index = 0
3865 3914 fint = 0
3866 3915 buffer = 0
3867 3916 buffer2 = 0
3868 3917 buffer3 = 0
3869 3918
3870 3919 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None, filec=None,coh_th=None, hei_th=None,taver=None,proc=None,nhei=None,nprofs=None,ipp=None,channelList=None,Gaussian_Windowed=0):
3871 3920
3872 3921 if not numpy.any(proc):
3873 3922 nChannels = dataOut.nChannels
3874 3923 nHeights= dataOut.heightList.size
3875 3924 nProf = dataOut.nProfiles
3876 3925 if numpy.any(taver): taver=int(taver)
3877 3926 else : taver = 5
3878 3927 tini=time.localtime(dataOut.utctime)
3879 3928 if (tini.tm_min % taver) == 0 and (tini.tm_sec < 5 and self.fint==0):
3880 3929 self.index = 0
3881 3930 jspc = self.buffer
3882 3931 jcspc = self.buffer2
3883 3932 jnoise = self.buffer3
3884 3933 self.buffer = dataOut.data_spc
3885 3934 self.buffer2 = dataOut.data_cspc
3886 3935 self.buffer3 = dataOut.noise
3887 3936 self.fint = 1
3888 3937 if numpy.any(jspc) :
3889 3938 jspc= numpy.reshape(jspc,(int(len(jspc)/nChannels),nChannels,nProf,nHeights))
3890 3939 jcspc= numpy.reshape(jcspc,(int(len(jcspc)/int(nChannels/2)),int(nChannels/2),nProf,nHeights))
3891 3940 jnoise= numpy.reshape(jnoise,(int(len(jnoise)/nChannels),nChannels))
3892 3941 else:
3893 3942 dataOut.flagNoData = True
3894 3943 return dataOut
3895 3944 else:
3896 3945 if (tini.tm_min % taver) == 0 : self.fint = 1
3897 3946 else : self.fint = 0
3898 3947 self.index += 1
3899 3948 if numpy.any(self.buffer):
3900 3949 self.buffer = numpy.concatenate((self.buffer,dataOut.data_spc), axis=0)
3901 3950 self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0)
3902 3951 self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0)
3903 3952 else:
3904 3953 self.buffer = dataOut.data_spc
3905 3954 self.buffer2 = dataOut.data_cspc
3906 3955 self.buffer3 = dataOut.noise
3907 3956 dataOut.flagNoData = True
3908 3957 return dataOut
3909 3958 if path != None:
3910 3959 sys.path.append(path)
3911 3960 self.library = importlib.import_module(file)
3912 3961 if filec != None:
3913 3962 self.weightf = importlib.import_module(filec)
3914 3963 #self.weightf = importlib.import_module('weightfit')
3915 3964
3916 3965 #To be inserted as a parameter
3917 3966 groupArray = numpy.array(groupList)
3918 3967 #groupArray = numpy.array([[0,1],[2,3]])
3919 3968 dataOut.groupList = groupArray
3920 3969
3921 3970 nGroups = groupArray.shape[0]
3922 3971 nChannels = dataOut.nChannels
3923 3972 nHeights = dataOut.heightList.size
3924 3973
3925 3974 #Parameters Array
3926 3975 dataOut.data_param = None
3927 3976 dataOut.data_paramC = None
3928 3977 dataOut.clean_num_aver = None
3929 3978 dataOut.coh_num_aver = None
3930 3979 dataOut.tmp_spectra_i = None
3931 3980 dataOut.tmp_cspectra_i = None
3932 3981 dataOut.tmp_spectra_c = None
3933 3982 dataOut.tmp_cspectra_c = None
3934 3983 dataOut.sat_spectra = None
3935 3984 dataOut.sat_cspectra = None
3936 3985 dataOut.index = None
3937 3986
3938 3987 #Set constants
3939 3988 constants = self.library.setConstants(dataOut)
3940 3989 dataOut.constants = constants
3941 3990 M = dataOut.normFactor
3942 3991 N = dataOut.nFFTPoints
3943 3992
3944 3993 ippSeconds = dataOut.ippSeconds
3945 3994 K = dataOut.nIncohInt
3946 3995 pairsArray = numpy.array(dataOut.pairsList)
3947 3996
3948 3997 snrth= 15
3949 3998 spectra = dataOut.data_spc
3950 3999 cspectra = dataOut.data_cspc
3951 4000 nProf = dataOut.nProfiles
3952 4001 heights = dataOut.heightList
3953 4002 nHei = len(heights)
3954 4003 channels = dataOut.channelList
3955 4004 nChan = len(channels)
3956 4005 nIncohInt = dataOut.nIncohInt
3957 4006 crosspairs = dataOut.groupList
3958 4007 noise = dataOut.noise
3959 4008 jnoise = jnoise/N
3960 4009 noise = numpy.nansum(jnoise,axis=0)#/len(jnoise)
3961 4010 power = numpy.sum(spectra, axis=1)
3962 4011 nPairs = len(crosspairs)
3963 4012 absc = dataOut.abscissaList[:-1]
3964 4013 print('paramInterval (for writing h5): ',dataOut.paramInterval)
3965 4014 if not self.isConfig:
3966 4015 self.isConfig = True
3967 4016
3968 4017 index = tini.tm_hour*12+tini.tm_min/taver
3969 4018 dataOut.index= index
3970 4019 jspc = jspc/N/N
3971 4020 jcspc = jcspc/N/N
3972 4021 tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2)
3973 4022 jspectra = tmp_spectra*len(jspc[:,0,0,0])
3974 4023 jcspectra = tmp_cspectra*len(jspc[:,0,0,0])
3975 4024 my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, snrth,coh_th, hei_th)
3976 4025
3977 4026 clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index)
3978 4027 dataOut.data_spc = incoh_spectra
3979 4028 dataOut.data_cspc = incoh_cspectra
3980 4029 dataOut.sat_spectra = sat_spectra
3981 4030 dataOut.sat_cspectra = sat_cspectra
3982 4031 # dataOut.data_spc = tmp_spectra
3983 4032 # dataOut.data_cspc = tmp_cspectra
3984 4033
3985 4034 clean_num_aver = incoh_aver*len(jspc[:,0,0,0])
3986 4035 coh_num_aver = clean_coh_aver*len(jspc[:,0,0,0])
3987 4036 # clean_num_aver = (numpy.zeros([nChan, nHei])+1)*len(jspc[:,0,0,0])
3988 4037 # coh_num_aver = numpy.zeros([nChan, nHei])*0*len(jspc[:,0,0,0])
3989 4038 dataOut.clean_num_aver = clean_num_aver
3990 4039 dataOut.coh_num_aver = coh_num_aver
3991 4040 dataOut.tmp_spectra_i = incoh_spectra
3992 4041 dataOut.tmp_cspectra_i = incoh_cspectra
3993 4042 dataOut.tmp_spectra_c = clean_coh_spectra
3994 4043 dataOut.tmp_cspectra_c = clean_coh_cspectra
3995 4044 #List of possible combinations
3996 4045 listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2)
3997 4046 indCross = numpy.zeros(len(list(listComb)), dtype = 'int')
3998 4047 if Gaussian_Windowed == 1:
3999 4048 #dataOut.data_spc = jspectra
4000 4049 '''
4001 4050 Written by R. Flores
4002 4051 '''
4003 4052 print("normFactor: ", dataOut.normFactor)
4004 4053 data_spc_aux = numpy.copy(dataOut.data_spc)#[:,0,:]
4005 4054 data_spc_aux[:,0,:] = (data_spc_aux[:,1,:]+data_spc_aux[:,-1,:])/2
4006 4055 #'''
4007 4056 from scipy.signal import medfilt
4008 4057 import matplotlib.pyplot as plt
4009 4058 dataOut.moments = numpy.ones((dataOut.nChannels,4,dataOut.nHeights))*numpy.NAN
4010 4059 dataOut.VelRange = dataOut.getVelRange(0)
4011 4060 for nChannel in range(dataOut.nChannels):
4012 4061 for hei in range(dataOut.heightList.shape[0]):
4013 4062 #print("ipp: ", dataOut.ippSeconds)
4014 4063 #spc = numpy.copy(dataOut.data_spc[nChannel,:,hei])
4015 4064 spc = data_spc_aux[nChannel,:,hei]
4016 4065 if spc.all() == 0.:
4017 4066 print("CONTINUE")
4018 4067 continue
4019 4068 #print(VelRange)
4020 4069 #print(dataOut.getFreqRange(64))
4021 4070 #print("Hei: ", dataOut.heightList[hei])
4022 4071
4023 4072 spc_mod = numpy.copy(spc)
4024 4073 spcm = medfilt(spc_mod,11)
4025 4074 spc_max = numpy.max(spcm)
4026 4075 dop1_x0 = dataOut.VelRange[numpy.argmax(spcm)]
4027 4076 #D = numpy.min(spcm)
4028 4077 D_in = (numpy.mean(spcm[:15])+numpy.mean(spcm[-15:]))/2.
4029 4078 #print("spc_max: ", spc_max)
4030 4079 #print("dataOut.ippSeconds: ", dataOut.ippSeconds, dataOut.timeInterval)
4031 4080 ##fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints)
4032 4081 #fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints)
4033 4082 fun, A, B, C, D = self.windowing_single_direct(spc_mod,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0/5),D_in,dataOut.nFFTPoints,dataOut.timeInterval)
4034 4083
4035 4084 dataOut.moments[nChannel,0,hei] = A
4036 4085 dataOut.moments[nChannel,1,hei] = B
4037 4086 dataOut.moments[nChannel,2,hei] = C
4038 4087 dataOut.moments[nChannel,3,hei] = D
4039 4088 '''
4040 4089 if nChannel == 0:
4041 4090 print(dataOut.heightList[hei])
4042 4091 plt.figure()
4043 4092 plt.plot(dataOut.VelRange,spc,marker='*',linestyle='--')
4044 4093 plt.plot(dataOut.VelRange,fun)
4045 4094 plt.title(dataOut.heightList[hei])
4046 4095 plt.show()
4047 4096 '''
4048 4097 #plt.show()
4049 4098 #'''
4050 4099 dataOut.data_spc = jspectra
4051 4100 print("SUCCESS")
4052 4101 return dataOut
4053 4102
4054 4103 elif Gaussian_Windowed == 2: #Only to clean spc
4055 4104 dataOut.VelRange = dataOut.getVelRange(0)
4056 4105 return dataOut
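# Summary sketch of the Gaussian_Windowed == 1 branch above (illustrative only): for each
# channel/height the spectrum is median-filtered with an 11-bin window, the filtered peak
# gives the initial Doppler guess dop1_x0, the noise floor D_in is the mean of the 15
# outermost bins on each edge, and windowing_single_direct (initial width abs(dop1_x0/5))
# returns the fitted parameters (A, B, C, D) that are stored as the four moments.
# Roughly, assuming a synthetic 64-point spectrum:
#
#   from scipy.signal import medfilt
#   import numpy
#   vel = numpy.linspace(-10, 10, 64)                          # hypothetical velocity axis
#   spc = 5*numpy.exp(-0.5*((vel - 2.)/1.5)**2) + 0.3          # line over a flat floor
#   spcm = medfilt(spc, 11)
#   dop1_x0 = vel[numpy.argmax(spcm)]                          # initial Doppler ~ 2
#   D_in = (numpy.mean(spcm[:15]) + numpy.mean(spcm[-15:]))/2. # noise floor ~ 0.3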
4057 4106
4058 4107 if getSNR:
4059 4108 listChannels = groupArray.reshape((groupArray.size))
4060 4109 listChannels.sort()
4110 # norm: this factor should be implemented for plotting or saved as metadata
4111 # norm = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter
4061 4112 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise[listChannels])
4062 4113 else:
4063 4114 if numpy.any(taver): taver=int(taver)
4064 4115 else : taver = 5
4065 4116 tini=time.localtime(dataOut.utctime)
4066 4117 index = tini.tm_hour*12+tini.tm_min/taver
4067 4118 clean_num_aver = dataOut.clean_num_aver
4068 4119 coh_num_aver = dataOut.coh_num_aver
4069 4120 dataOut.data_spc = dataOut.tmp_spectra_i
4070 4121 dataOut.data_cspc = dataOut.tmp_cspectra_i
4071 4122 clean_coh_spectra = dataOut.tmp_spectra_c
4072 4123 clean_coh_cspectra = dataOut.tmp_cspectra_c
4073 4124 jspectra = dataOut.data_spc+clean_coh_spectra
4074 4125 nHeights = len(dataOut.heightList) # nhei
4075 4126 nProf = int(dataOut.nProfiles)
4076 4127 dataOut.nProfiles = nProf
4077 4128 dataOut.data_param = None
4078 4129 dataOut.data_paramC = None
4079 4130 dataOut.code = numpy.array([[-1.,-1.,1.],[1.,1.,-1.]])
4080 4131 #dataOut.paramInterval = 2.0
4081 4132 #M=600
4082 4133 #N=200
4083 4134 dataOut.flagDecodeData=True
4084 4135 M = int(dataOut.normFactor)
4085 4136 N = int(dataOut.nFFTPoints)
4086 4137 dataOut.nFFTPoints = N
4087 4138 dataOut.nIncohInt= int(dataOut.nIncohInt)
4088 4139 dataOut.nProfiles = int(dataOut.nProfiles)
4089 4140 dataOut.nCohInt = int(dataOut.nCohInt)
4090 4141 print('sale',dataOut.nProfiles,dataOut.nHeights)
4091 4142 #dataOut.nFFTPoints=nprofs
4092 4143 #dataOut.normFactor = nprofs
4093 4144 dataOut.channelList = channelList
4094 4145 nChan = len(channelList)
4095 4146 #dataOut.ippFactor=1
4096 4147 #ipp = ipp/150*1.e-3
4097 4148 vmax = (300000000/49920000.0/2) / (dataOut.ippSeconds)
4098 4149 #dataOut.ippSeconds=ipp
4099 4150 absc = vmax*( numpy.arange(nProf,dtype='float')-nProf/2.)/nProf
4100 4151 print('sale 2',dataOut.ippSeconds,M,N)
4101 4152 print('Starting offline processing')
4102 4153 if path != None:
4103 4154 sys.path.append(path)
4104 4155 self.library = importlib.import_module(file)
4105 4156 constants = self.library.setConstants(dataOut)
4106 4157 constants['M'] = M
4107 4158 dataOut.constants = constants
4108 4159 if filec != None:
4109 4160 self.weightf = importlib.import_module(filec)
4110 4161
4111 4162 groupArray = numpy.array(groupList)
4112 4163 dataOut.groupList = groupArray
4113 4164 nGroups = groupArray.shape[0]
4114 4165 #List of possible combinations
4115 4166 listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2)
4116 4167 indCross = numpy.zeros(len(list(listComb)), dtype = 'int')
4117 4168 if dataOut.data_paramC is None:
4118 4169 dataOut.data_paramC = numpy.zeros((nGroups*4, nHeights,2))*numpy.nan
4119 4170 dataOut.data_snr1_i = numpy.zeros((nGroups*2, nHeights))*numpy.nan
4120 4171 # dataOut.smooth_i = numpy.zeros((nGroups*2, nHeights))*numpy.nan
4121 4172
4122 4173 for i in range(nGroups):
4123 4174 coord = groupArray[i,:]
4124 4175 #Input data array
4125 4176 data = dataOut.data_spc[coord,:,:]/(M*N)
4126 4177 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
4127 4178
4128 4179 #Cross Spectra data array for Covariance Matrices
4129 4180 ind = 0
4130 4181 for pairs in listComb:
4131 4182 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
4132 4183 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
4133 4184 ind += 1
4134 4185 dataCross = dataOut.data_cspc[indCross,:,:]/(M*N)
4135 4186 dataCross = dataCross**2
4136 4187 nhei = nHeights
4137 4188 poweri = numpy.sum(dataOut.data_spc[:,1:nProf-0,:],axis=1)/clean_num_aver[:,:]
4138 4189 if i == 0 : my_noises = numpy.zeros(4,dtype=float)
4139 4190 n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(nProf-1)
4140 4191 n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(nProf-1)
4141 4192 n0 = n0i
4142 4193 n1= n1i
4143 4194 my_noises[2*i+0] = n0
4144 4195 my_noises[2*i+1] = n1
4145 4196 snrth = -13 #-13.0 # -4 -16 -25
4146 4197 snrth = 10**(snrth/10.0)
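# -13 dB converted to a linear power ratio: 10**(-13/10.) ~= 0.05; the snr0/snr1 values
# compared against snrth below are linear ratios, not dB.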
4147 4198 jvelr = numpy.zeros(nHeights, dtype = 'float')
4148 4199 #snr0 = numpy.zeros(nHeights, dtype = 'float')
4149 4200 #snr1 = numpy.zeros(nHeights, dtype = 'float')
4150 4201 hvalid = [0]
4151 4202
4152 4203 coh2 = abs(dataOut.data_cspc[i,1:nProf,:])**2/(dataOut.data_spc[0+i*2,1:nProf-0,:]*dataOut.data_spc[1+i*2,1:nProf-0,:])
4153 4204
4154 4205 for h in range(nHeights):
4155 4206 smooth = clean_num_aver[i+1,h]
4156 4207 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
4157 4208 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
4158 4209 signal0 = signalpn0-n0
4159 4210 signal1 = signalpn1-n1
4160 4211 snr0 = numpy.sum(signal0/n0)/(nProf-1)
4161 4212 snr1 = numpy.sum(signal1/n1)/(nProf-1)
4162 4213 #jmax0 = MAX(signal0,maxp0)
4163 4214 #jmax1 = MAX(signal1,maxp1)
4164 4215 gamma = coh2[:,h]
4165 4216
4166 4217 indxs = (numpy.isfinite(list(gamma))==True).nonzero()
4167 4218
4168 4219 if len(indxs) >0:
4169 4220 if numpy.nanmean(gamma) > 0.07:
4170 4221 maxp0 = numpy.argmax(signal0*gamma)
4171 4222 maxp1 = numpy.argmax(signal1*gamma)
4172 4223 #print('using gamma',numpy.nanmean(gamma))
4173 4224 else:
4174 4225 maxp0 = numpy.argmax(signal0)
4175 4226 maxp1 = numpy.argmax(signal1)
4176 4227 jvelr[h] = (absc[maxp0]+absc[maxp1])/2.
4177 4228 else: jvelr[h] = absc[0]
4178 4229 if snr0 > 0.1 and snr1 > 0.1: hvalid = numpy.concatenate((hvalid,h), axis=None)
4179 4230 #print(maxp0,absc[maxp0],snr0,jvelr[h])
4180 4231
4181 4232 if len(hvalid)> 1: fd0 = numpy.median(jvelr[hvalid[1:]])*-1
4182 4233 else: fd0 = numpy.nan
4183 4234 print(fd0)
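# First-guess Doppler: per height the spectral peaks of both channels are located
# (weighted by the magnitude-squared coherence when its mean exceeds 0.07) and averaged,
# jvelr[h] = (absc[maxp0] + absc[maxp1])/2; fd0 is minus the median of jvelr over the
# heights where both channels exceeded an SNR of 0.1, and is used below as the initial
# Doppler p0[3] of the least-squares fit.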
4184 4235 for h in range(nHeights):
4185 4236 d = data[:,h]
4186 4237 smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:]
4187 4238 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
4188 4239 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
4189 4240 signal0 = signalpn0-n0
4190 4241 signal1 = signalpn1-n1
4191 4242 snr0 = numpy.sum(signal0/n0)/(nProf-1)
4192 4243 snr1 = numpy.sum(signal1/n1)/(nProf-1)
4193 4244
4194 4245 if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 :
4195 4246 #Covariance Matrix
4196 4247 D = numpy.diag(d**2)
4197 4248 ind = 0
4198 4249 for pairs in listComb:
4199 4250 #Coordinates in Covariance Matrix
4200 4251 x = pairs[0]
4201 4252 y = pairs[1]
4202 4253 #Channel Index
4203 4254 S12 = dataCross[ind,:,h]
4204 4255 D12 = numpy.diag(S12)
4205 4256 #Completing Covariance Matrix with Cross Spectra
4206 4257 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
4207 4258 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
4208 4259 ind += 1
4209 4260 diagD = numpy.zeros(256)
4210 4261
4211 4262 try:
4212 4263 Dinv=numpy.linalg.inv(D)
4213 4264 L=numpy.linalg.cholesky(Dinv)
4214 4265 except:
4215 4266 Dinv = D*numpy.nan
4216 4267 L= D*numpy.nan
4217 4268 LT=L.T
4218 4269
4219 4270 dp = numpy.dot(LT,d)
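# Whitened (generalized) least squares: D approximates the covariance of the spectral data
# d, Dinv = L L.T with L lower triangular, and Cov(L.T d) = L.T D L = I, so fitting
# dp - L.T*model with the plain optimize.leastsq call below is equivalent to a fit weighted
# by the inverse covariance.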
4220 4271 #Initial values
4221 4272 data_spc = dataOut.data_spc[coord,:,h]
4222 4273 w = data_spc/data_spc
4223 4274 if filec != None:
4224 4275 w = self.weightf.weightfit(w,tini.tm_year,tini.tm_yday,index,h,i)
4225 4276 if (h>6) and (error1[3]<25):
4226 4277 p0 = dataOut.data_param[i,:,h-1].copy()
4227 4278 else:
4228 4279 p0 = numpy.array(self.library.initialValuesFunction(data_spc*w, constants))# without the i (data_spc, constants, i)
4229 4280 p0[3] = fd0
4230 4281 if filec != None:
4231 4282 p0 = self.weightf.Vrfit(p0,tini.tm_year,tini.tm_yday,index,h,i)
4232 4283
4233 4284 try:
4234 4285 #Least Squares
4235 4286 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
4236 4287 #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
4237 4288 #Chi square error
4238 4289 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
4239 4290 #Error with Jacobian
4240 4291 error1 = self.library.errorFunction(minp,constants,LT)
4241 4292
4242 4293 except:
4243 4294 minp = p0*numpy.nan
4244 4295 error0 = numpy.nan
4245 4296 error1 = p0*numpy.nan
4246 4297 else :
4247 4298 data_spc = dataOut.data_spc[coord,:,h]
4248 4299 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))
4249 4300 minp = p0*numpy.nan
4250 4301 error0 = numpy.nan
4251 4302 error1 = p0*numpy.nan
4252 4303 if dataOut.data_param is None:
4253 4304 dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
4254 4305 dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
4255 4306
4256 4307 dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
4257 4308 dataOut.data_param[i,:,h] = minp
4258 4309 dataOut.data_snr1_i[i*2,h] = numpy.sum(signalpn0/(nProf-1))/n0
4259 4310 dataOut.data_snr1_i[i*2+1,h] = numpy.sum(signalpn1/(nProf-1))/n1
4260 4311
4261 4312 for ht in range(nHeights-1) :
4262 4313 smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam]
4263 4314 dataOut.data_paramC[4*i,ht,1] = smooth
4264 4315 signalpn0 = (clean_coh_spectra[i*2 ,1:(nProf-0),ht])/smooth #coh_spectra
4265 4316 signalpn1 = (clean_coh_spectra[i*2+1,1:(nProf-0),ht])/smooth
4266 4317
4267 4318 val0 = (signalpn0 > 0).nonzero()
4268 4319 val0 = val0[0]
4269 4320
4270 4321 if len(val0) == 0 : val0_npoints = nProf
4271 4322 else : val0_npoints = len(val0)
4272 4323
4273 4324 val1 = (signalpn1 > 0).nonzero()
4274 4325 val1 = val1[0]
4275 4326 if len(val1) == 0 : val1_npoints = nProf
4276 4327 else : val1_npoints = len(val1)
4277 4328
4278 4329 dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0
4279 4330 dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1
4280 4331
4281 4332 signal0 = (signalpn0-n0)
4282 4333 vali = (signal0 < 0).nonzero()
4283 4334 vali = vali[0]
4284 4335 if len(vali) > 0 : signal0[vali] = 0
4285 4336 signal1 = (signalpn1-n1)
4286 4337 vali = (signal1 < 0).nonzero()
4287 4338 vali = vali[0]
4288 4339 if len(vali) > 0 : signal1[vali] = 0
4289 4340 snr0 = numpy.sum(signal0/n0)/(nProf-1)
4290 4341 snr1 = numpy.sum(signal1/n1)/(nProf-1)
4291 4342 doppler = absc[1:]
4292 4343 if snr0 >= snrth and snr1 >= snrth and smooth :
4293 4344 signalpn0_n0 = signalpn0
4294 4345 signalpn0_n0[val0] = signalpn0[val0] - n0
4295 4346 mom0 = self.moments(doppler,signalpn0-n0,nProf)
4296 4347
4297 4348 signalpn1_n1 = signalpn1
4298 4349 signalpn1_n1[val1] = signalpn1[val1] - n1
4299 4350 mom1 = self.moments(doppler,signalpn1_n1,nProf)
4300 4351 dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2.
4301 4352 dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2.
4302 4353
4303 4354 dataOut.data_spc = jspectra
4304 4355 dataOut.spc_noise = my_noises*nProf*M
4305 4356
4306 4357 if numpy.any(proc): dataOut.spc_noise = my_noises*nProf*M
4307 4358 if 0:
4308 4359 listChannels = groupArray.reshape((groupArray.size))
4309 4360 listChannels.sort()
4361 # norm: this factor should be implemented for plotting or saved as metadata
4362 # norm = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter
4310 4363 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], my_noises[listChannels])
4311 4364 #print(dataOut.data_snr1_i)
4312 4365 # Adding coherent echoes from possible satellites.
4313 4366 #sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
4314 4367 #sat_spectra = sat_spectra[*,*,anal_header.channels]
4315 4368 isat_spectra = numpy.zeros([2,int(nChan/2),nProf,nhei], dtype=float)
4316 4369
4317 4370 sat_fits = numpy.zeros([4,nhei], dtype=float)
4318 4371 noises = my_noises/nProf
4319 4372 #nchan2 = int(nChan/2)
4320 4373 for beam in range(int(nChan/2)-0) :
4321 4374 n0 = noises[2*beam]
4322 4375 n1 = noises[2*beam+1]
4323 4376 isat_spectra[0:2,beam,:,:] = dataOut.sat_spectra[2*beam +0:2*beam+2 ,:,:]
4324 4377
4325 4378 for ht in range(nhei-1) :
4326 4379 signalpn0 = isat_spectra[0,beam,:,ht]
4327 4380 signalpn0 = numpy.reshape(signalpn0,nProf)
4328 4381 signalpn1 = isat_spectra[1,beam,:,ht]
4329 4382 signalpn1 = numpy.reshape(signalpn1,nProf)
4330 4383
4331 4384 cval0 = len((signalpn0 > 0).nonzero()[0])
4332 4385 if cval0 == 0 : val0_npoints = nProf
4333 4386 else: val0_npoints = cval0
4334 4387
4335 4388 cval1 = len((signalpn1 > 0).nonzero()[0])
4336 4389 if cval1 == 0 : val1_npoints = nProf
4337 4390 else: val1_npoints = cval1
4338 4391
4339 4392 sat_fits[0+2*beam,ht] = numpy.sum(signalpn0/(val0_npoints*nProf))/n0
4340 4393 sat_fits[1+2*beam,ht] = numpy.sum(signalpn1/(val1_npoints*nProf))/n1
4341 4394
4342 4395 dataOut.sat_fits = sat_fits
4343 4396 return dataOut
4344 4397
4345 4398 def __residFunction(self, p, dp, LT, constants):
4346 4399
4347 4400 fm = self.library.modelFunction(p, constants)
4348 4401 fmp=numpy.dot(LT,fm)
4349 4402 return dp-fmp
4350 4403
4351 def __getSNR(self, z, noise):
4404 def __getSNR(self, z, noise, norm=1):
4352 4405
4353 4406 avg = numpy.average(z, axis=1)
4354 4407 SNR = (avg.T-noise)/noise
4355 4408 SNR = SNR.T
4356 4409 return SNR
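# SNR per channel and height is (mean spectral power - noise)/noise. A minimal check with
# made-up numbers (illustrative; __getSNR is a private helper normally reached via run()):
#   z = numpy.full((1, 8, 1), 12.)   # one channel, 8 spectral points, one height
#   noise = numpy.array([4.])
#   -> SNR = (12 - 4)/4 = 2.0 (about 3 dB)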
4357 4410
4358 4411 def __chisq(self, p, chindex, hindex):
4359 4412 #similar to Resid but calculates CHI**2
4360 4413 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
4361 4414 dp=numpy.dot(LT,d)
4362 4415 fmp=numpy.dot(LT,fm)
4363 4416 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
4364 4417 return chisq
4365 4418
4366 class WindProfiler_V0(Operation):
4419 class WindProfiler(Operation):
4367 4420
4368 4421 __isConfig = False
4369 4422
4370 4423 __initime = None
4371 4424 __lastdatatime = None
4372 4425 __integrationtime = None
4373 4426
4374 4427 __buffer = None
4375 4428
4376 4429 __dataReady = False
4377 4430
4378 4431 __firstdata = None
4379 4432
4380 4433 n = None
4381 4434
4382 4435 def __init__(self):
4383 4436 Operation.__init__(self)
4384 4437
4385 4438 def __calculateCosDir(self, elev, azim):
4386 4439 zen = (90 - elev)*numpy.pi/180
4387 4440 azim = azim*numpy.pi/180
4388 4441 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
4389 4442 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
4390 4443
4391 4444 signX = numpy.sign(numpy.cos(azim))
4392 4445 signY = numpy.sign(numpy.sin(azim))
4393 4446
4394 4447 cosDirX = numpy.copysign(cosDirX, signX)
4395 4448 cosDirY = numpy.copysign(cosDirY, signY)
4396 4449 return cosDirX, cosDirY
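# Direction cosines from (elevation, azimuth): with zen = 90 - elev, the returned pair is
# the projection of sin(zen) on x and y, signs taken from cos(azim) and sin(azim).
# Quick checks: elev=90 (vertical beam) -> (0, 0); elev=75, azim=0 -> (~0.259, 0),
# i.e. sin(15 deg) along x only.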
4397 4450
4398 4451 def __calculateAngles(self, theta_x, theta_y, azimuth):
4399 4452
4400 4453 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
4401 4454 zenith_arr = numpy.arccos(dir_cosw)
4402 4455 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
4403 4456
4404 4457 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
4405 4458 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
4406 4459
4407 4460 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
4408 4461
4409 4462 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
4410 4463
4411 4464 if horOnly:
4412 4465 A = numpy.c_[dir_cosu,dir_cosv]
4413 4466 else:
4414 4467 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
4415 4468 A = numpy.asmatrix(A)
4416 4469 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
4417 4470
4418 4471 return A1
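# A1 = (A.T A)^-1 A.T is the left pseudo-inverse of the direction-cosine matrix, so
# A1*velRadial is the least-squares solution of velRadial ~= A*[u, v, w] (or [u, v] when
# horOnly is True). For a full-column-rank A this equals numpy.linalg.pinv(A).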
4419 4472
4420 4473 def __correctValues(self, heiRang, phi, velRadial, SNR):
4421 4474 listPhi = phi.tolist()
4422 4475 maxid = listPhi.index(max(listPhi))
4423 4476 minid = listPhi.index(min(listPhi))
4424 4477
4425 4478 rango = list(range(len(phi)))
4426 # rango = numpy.delete(rango,maxid)
4427 4479
4428 4480 heiRang1 = heiRang*math.cos(phi[maxid])
4429 4481 heiRangAux = heiRang*math.cos(phi[minid])
4430 4482 indOut = (heiRang1 < heiRangAux[0]).nonzero()
4431 4483 heiRang1 = numpy.delete(heiRang1,indOut)
4432 4484
4433 4485 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
4434 4486 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
4435 4487
4436 4488 for i in rango:
4437 4489 x = heiRang*math.cos(phi[i])
4438 4490 y1 = velRadial[i,:]
4439 4491 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
4440 4492
4441 4493 x1 = heiRang1
4442 4494 y11 = f1(x1)
4443 4495
4444 4496 y2 = SNR[i,:]
4445 4497 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
4446 4498 y21 = f2(x1)
4447 4499
4448 4500 velRadial1[i,:] = y11
4449 4501 SNR1[i,:] = y21
4450 4502
4451 4503 return heiRang1, velRadial1, SNR1
4452 4504
4453 4505 def __calculateVelUVW(self, A, velRadial):
4454 4506
4455 4507 #Matrix operation
4456 # velUVW = numpy.zeros((velRadial.shape[1],3))
4457 # for ind in range(velRadial.shape[1]):
4458 # velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
4459 # velUVW = velUVW.transpose()
4460 4508 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
4461 4509 velUVW[:,:] = numpy.dot(A,velRadial)
4462 4510
4463 4511
4464 4512 return velUVW
4465 4513
4466 # def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
4467
4468 4514 def techniqueDBS(self, kwargs):
4469 4515 """
4470 4516 Function that implements Doppler Beam Swinging (DBS) technique.
4471 4517
4472 4518 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
4473 4519 Direction correction (if necessary), Ranges and SNR
4474 4520
4475 4521 Output: Winds estimation (Zonal, Meridional and Vertical)
4476 4522
4477 4523 Parameters affected: Winds, height range, SNR
4478 4524 """
4479 4525 velRadial0 = kwargs['velRadial']
4480 4526 heiRang = kwargs['heightList']
4481 4527 SNR0 = kwargs['SNR']
4482 4528
4483 4529 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
4484 4530 theta_x = numpy.array(kwargs['dirCosx'])
4485 4531 theta_y = numpy.array(kwargs['dirCosy'])
4486 4532 else:
4487 4533 elev = numpy.array(kwargs['elevation'])
4488 4534 azim = numpy.array(kwargs['azimuth'])
4489 4535 theta_x, theta_y = self.__calculateCosDir(elev, azim)
4490 4536 azimuth = kwargs['correctAzimuth']
4491 4537 if 'horizontalOnly' in kwargs:
4492 4538 horizontalOnly = kwargs['horizontalOnly']
4493 4539 else: horizontalOnly = False
4494 4540 if 'correctFactor' in kwargs:
4495 4541 correctFactor = kwargs['correctFactor']
4496 4542 else: correctFactor = 1
4497 4543 if 'channelList' in kwargs:
4498 4544 channelList = kwargs['channelList']
4499 4545 if len(channelList) == 2:
4500 4546 horizontalOnly = True
4501 4547 arrayChannel = numpy.array(channelList)
4502 4548 param = param[arrayChannel,:,:]
4503 4549 theta_x = theta_x[arrayChannel]
4504 4550 theta_y = theta_y[arrayChannel]
4505 4551
4506 4552 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4507 4553 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
4508 4554 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
4509 4555
4510 4556 #Velocity components computed with DBS
4511 4557 winds = self.__calculateVelUVW(A,velRadial1)
4512 4558
4513 4559 return winds, heiRang1, SNR1
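# A minimal, hypothetical call sketch (argument values invented for illustration; the
# method is normally reached through run(technique='DBS')):
#   wp = WindProfiler()
#   winds, heights, snr = wp.techniqueDBS({
#       'velRadial': velRadial,          # (nBeams, nHeights) radial velocities
#       'heightList': heightList,        # (nHeights,) ranges
#       'SNR': snr0,                     # (nBeams, nHeights)
#       'elevation': [80., 80., 80.],    # beam elevations in degrees
#       'azimuth': [0., 120., 240.],     # beam azimuths in degrees
#       'correctAzimuth': 0.,            # required azimuth correction
#       'horizontalOnly': False,
#   })
# winds has shape (3, len(heights)): zonal, meridional and vertical components on the
# corrected height grid returned as heights.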
4514 4560
4515 4561 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
4516 4562
4517 4563 nPairs = len(pairs_ccf)
4518 4564 posx = numpy.asarray(posx)
4519 4565 posy = numpy.asarray(posy)
4520 4566
4521 4567 #Inverse rotation to align with the azimuth
4522 4568 if azimuth!= None:
4523 4569 azimuth = azimuth*math.pi/180
4524 4570 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
4525 4571 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
4526 4572 else:
4527 4573 posx1 = posx
4528 4574 posy1 = posy
4529 4575
4530 4576 #Distance calculation
4531 4577 distx = numpy.zeros(nPairs)
4532 4578 disty = numpy.zeros(nPairs)
4533 4579 dist = numpy.zeros(nPairs)
4534 4580 ang = numpy.zeros(nPairs)
4535 4581
4536 4582 for i in range(nPairs):
4537 4583 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
4538 4584 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
4539 4585 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
4540 4586 ang[i] = numpy.arctan2(disty[i],distx[i])
4541 4587
4542 4588 return distx, disty, dist, ang
4543 4589 #Matrix calculation
4544 # nPairs = len(pairs)
4545 # ang1 = numpy.zeros((nPairs, 2, 1))
4546 # dist1 = numpy.zeros((nPairs, 2, 1))
4547 #
4548 # for j in range(nPairs):
4549 # dist1[j,0,0] = dist[pairs[j][0]]
4550 # dist1[j,1,0] = dist[pairs[j][1]]
4551 # ang1[j,0,0] = ang[pairs[j][0]]
4552 # ang1[j,1,0] = ang[pairs[j][1]]
4553 #
4554 # return distx,disty, dist1,ang1
4555 4590
4556 4591
4557 4592 def __calculateVelVer(self, phase, lagTRange, _lambda):
4558 4593
4559 4594 Ts = lagTRange[1] - lagTRange[0]
4560 4595 velW = -_lambda*phase/(4*math.pi*Ts)
4561 4596
4562 4597 return velW
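# Vertical velocity from the phase at lag Ts: velW = -lambda*phase/(4*pi*Ts).
# Worked example, assuming lambda = 6 m (50 MHz), phase = 0.1 rad, Ts = 0.01 s:
# velW = -6*0.1/(4*pi*0.01) ~= -4.77 m/s.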
4563 4598
4564 4599 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
4565 4600 nPairs = tau1.shape[0]
4566 4601 nHeights = tau1.shape[1]
4567 4602 vel = numpy.zeros((nPairs,3,nHeights))
4568 4603 dist1 = numpy.reshape(dist, (dist.size,1))
4569 4604
4570 4605 angCos = numpy.cos(ang)
4571 4606 angSin = numpy.sin(ang)
4572 4607
4573 4608 vel0 = dist1*tau1/(2*tau2**2)
4574 4609 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
4575 4610 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
4576 4611
4577 4612 ind = numpy.where(numpy.isinf(vel))
4578 4613 vel[ind] = numpy.nan
4579 4614
4580 4615 return vel
4581 4616
4582 # def __getPairsAutoCorr(self, pairsList, nChannels):
4583 #
4584 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
4585 #
4586 # for l in range(len(pairsList)):
4587 # firstChannel = pairsList[l][0]
4588 # secondChannel = pairsList[l][1]
4589 #
4590 # #Getting autocorrelation pairs
4591 # if firstChannel == secondChannel:
4592 # pairsAutoCorr[firstChannel] = int(l)
4593 #
4594 # pairsAutoCorr = pairsAutoCorr.astype(int)
4595 #
4596 # pairsCrossCorr = range(len(pairsList))
4597 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
4598 #
4599 # return pairsAutoCorr, pairsCrossCorr
4600
4601 # def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
4602 4617 def techniqueSA(self, kwargs):
4603 4618
4604 4619 """
4605 4620 Function that implements Spaced Antenna (SA) technique.
4606 4621
4607 4622 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
4608 4623 Direction correction (if necessary), Ranges and SNR
4609 4624
4610 4625 Output: Winds estimation (Zonal, Meridional and Vertical)
4611 4626
4612 4627 Parameters affected: Winds
4613 4628 """
4614 4629 position_x = kwargs['positionX']
4615 4630 position_y = kwargs['positionY']
4616 4631 azimuth = kwargs['azimuth']
4617 4632
4618 4633 if 'correctFactor' in kwargs:
4619 4634 correctFactor = kwargs['correctFactor']
4620 4635 else:
4621 4636 correctFactor = 1
4622 4637
4623 4638 groupList = kwargs['groupList']
4624 4639 pairs_ccf = groupList[1]
4625 4640 tau = kwargs['tau']
4626 4641 _lambda = kwargs['_lambda']
4627 4642
4628 4643 #Cross Correlation pairs obtained
4629 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels)
4630 # pairsArray = numpy.array(pairsList)[pairsCrossCorr]
4631 # pairsSelArray = numpy.array(pairsSelected)
4632 # pairs = []
4633 #
4634 # #Wind estimation pairs obtained
4635 # for i in range(pairsSelArray.shape[0]/2):
4636 # ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
4637 # ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
4638 # pairs.append((ind1,ind2))
4639 4644
4640 4645 indtau = tau.shape[0]//2 # integer division, used below as a slice index
4641 4646 tau1 = tau[:indtau,:]
4642 4647 tau2 = tau[indtau:-1,:]
4643 # tau1 = tau1[pairs,:]
4644 # tau2 = tau2[pairs,:]
4645 4648 phase1 = tau[-1,:]
4646 4649
4647 4650 #---------------------------------------------------------------------
4648 4651 #Direct method
4649 4652 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
4650 4653 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
4651 4654 winds = stats.nanmean(winds, axis=0)
4652 4655 #---------------------------------------------------------------------
4653 4656 #General method
4654 # distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
4655 # #Compute correlation function coefficients
4656 # F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
4657 # #Compute velocities
4658 # winds = self.calculateVelUV(F,G,A,B,H)
4659 4657
4660 4658 #---------------------------------------------------------------------
4661 4659 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
4662 4660 winds = correctFactor*winds
4663 4661 return winds
4664 4662
4665 4663 def __checkTime(self, currentTime, paramInterval, outputInterval):
4666 4664
4667 4665 dataTime = currentTime + paramInterval
4668 4666 deltaTime = dataTime - self.__initime
4669 4667
4670 4668 if deltaTime >= outputInterval or deltaTime < 0:
4671 4669 self.__dataReady = True
4672 4670 return
4673 4671
4674 4672 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
4675 4673 '''
4676 4674 Function that implements the wind estimation technique using detected meteors.
4677 4675
4678 4676 Input: Detected meteors, minimum number of meteors per height bin required for the estimation
4679 4677
4680 4678 Output: Wind estimates (Zonal and Meridional)
4681 4679
4682 4680 Parameters affected: Winds
4683 4681 '''
4684 4682 #Settings
4685 4683 nInt = (heightMax - heightMin)/2
4686 4684 nInt = int(nInt)
4687 4685 winds = numpy.zeros((2,nInt))*numpy.nan
4688 4686
4689 4687 #Filter errors
4690 4688 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
4691 4689 finalMeteor = arrayMeteor[error,:]
4692 4690
4693 4691 #Meteor Histogram
4694 4692 finalHeights = finalMeteor[:,2]
4695 4693 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
4696 4694 nMeteorsPerI = hist[0]
4697 4695 heightPerI = hist[1]
4698 4696
4699 4697 #Sort of meteors
4700 4698 indSort = finalHeights.argsort()
4701 4699 finalMeteor2 = finalMeteor[indSort,:]
4702 4700
4703 4701 # Calculating winds
4704 4702 ind1 = 0
4705 4703 ind2 = 0
4706 4704
4707 4705 for i in range(nInt):
4708 4706 nMet = nMeteorsPerI[i]
4709 4707 ind1 = ind2
4710 4708 ind2 = ind1 + nMet
4711 4709
4712 4710 meteorAux = finalMeteor2[ind1:ind2,:]
4713 4711
4714 4712 if meteorAux.shape[0] >= meteorThresh:
4715 4713 vel = meteorAux[:, 6]
4716 4714 zen = meteorAux[:, 4]*numpy.pi/180
4717 4715 azim = meteorAux[:, 3]*numpy.pi/180
4718 4716
4719 4717 n = numpy.cos(zen)
4720 4718 # m = (1 - n**2)/(1 - numpy.tan(azim)**2)
4721 4719 # l = m*numpy.tan(azim)
4722 4720 l = numpy.sin(zen)*numpy.sin(azim)
4723 4721 m = numpy.sin(zen)*numpy.cos(azim)
4724 4722
4725 4723 A = numpy.vstack((l, m)).transpose()
4726 4724 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
4727 4725 windsAux = numpy.dot(A1, vel)
4728 4726
4729 4727 winds[0,i] = windsAux[0]
4730 4728 winds[1,i] = windsAux[1]
4731 4729
4732 4730 return winds, heightPerI[:-1]
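# Per height bin: each meteor contributes one equation velRadial ~= l*u + m*v with
# l = sin(zen)*sin(azim) and m = sin(zen)*cos(azim); when at least meteorThresh meteors
# fall in the bin, the zonal/meridional winds are the least-squares solution
# winds = (A.T A)^-1 A.T vel, the rows of A being the (l, m) of each meteor.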
4733 4731
4734 4732 def techniqueNSM_SA(self, **kwargs):
4735 4733 metArray = kwargs['metArray']
4736 4734 heightList = kwargs['heightList']
4737 4735 timeList = kwargs['timeList']
4738 4736
4739 4737 rx_location = kwargs['rx_location']
4740 4738 groupList = kwargs['groupList']
4741 4739 azimuth = kwargs['azimuth']
4742 4740 dfactor = kwargs['dfactor']
4743 4741 k = kwargs['k']
4744 4742
4745 4743 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
4746 4744 d = dist*dfactor
4747 4745 #Phase calculation
4748 4746 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
4749 4747
4750 4748 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
4751 4749
4752 4750 velEst = numpy.zeros((heightList.size,2))*numpy.nan
4753 4751 azimuth1 = azimuth1*numpy.pi/180
4754 4752
4755 4753 for i in range(heightList.size):
4756 4754 h = heightList[i]
4757 4755 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
4758 4756 metHeight = metArray1[indH,:]
4759 4757 if metHeight.shape[0] >= 2:
4760 4758 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
4761 4759 iazim = metHeight[:,1].astype(int)
4762 4760 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
4763 4761 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
4764 4762 A = numpy.asmatrix(A)
4765 4763 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
4766 4764 velHor = numpy.dot(A1,velAux)
4767 4765
4768 4766 velEst[i,:] = numpy.squeeze(velHor)
4769 4767 return velEst
4770 4768
4771 4769 def __getPhaseSlope(self, metArray, heightList, timeList):
4772 4770 meteorList = []
4773 4771 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
4774 4772 #Putting back together the meteor matrix
4775 4773 utctime = metArray[:,0]
4776 4774 uniqueTime = numpy.unique(utctime)
4777 4775
4778 4776 phaseDerThresh = 0.5
4779 4777 ippSeconds = timeList[1] - timeList[0]
4780 4778 sec = numpy.where(timeList>1)[0][0]
4781 4779 nPairs = metArray.shape[1] - 6
4782 4780 nHeights = len(heightList)
4783 4781
4784 4782 for t in uniqueTime:
4785 4783 metArray1 = metArray[utctime==t,:]
4786 4784 # phaseDerThresh = numpy.pi/4 #reduce phase threshold
4787 4785 tmet = metArray1[:,1].astype(int)
4788 4786 hmet = metArray1[:,2].astype(int)
4789 4787
4790 4788 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
4791 4789 metPhase[:,:] = numpy.nan
4792 4790 metPhase[:,hmet,tmet] = metArray1[:,6:].T
4793 4791
4794 4792 #Delete short trails
4795 4793 metBool = ~numpy.isnan(metPhase[0,:,:])
4796 4794 heightVect = numpy.sum(metBool, axis = 1)
4797 4795 metBool[heightVect<sec,:] = False
4798 4796 metPhase[:,heightVect<sec,:] = numpy.nan
4799 4797
4800 4798 #Derivative
4801 4799 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
4802 4800 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
4803 4801 metPhase[phDerAux] = numpy.nan
4804 4802
4805 4803 #--------------------------METEOR DETECTION -----------------------------------------
4806 4804 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
4807 4805
4808 4806 for p in numpy.arange(nPairs):
4809 4807 phase = metPhase[p,:,:]
4810 4808 phDer = metDer[p,:,:]
4811 4809
4812 4810 for h in indMet:
4813 4811 height = heightList[h]
4814 4812 phase1 = phase[h,:] #82
4815 4813 phDer1 = phDer[h,:]
4816 4814
4817 4815 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
4818 4816
4819 4817 indValid = numpy.where(~numpy.isnan(phase1))[0]
4820 4818 initMet = indValid[0]
4821 4819 endMet = 0
4822 4820
4823 4821 for i in range(len(indValid)-1):
4824 4822
4825 4823 #Time difference
4826 4824 inow = indValid[i]
4827 4825 inext = indValid[i+1]
4828 4826 idiff = inext - inow
4829 4827 #Phase difference
4830 4828 phDiff = numpy.abs(phase1[inext] - phase1[inow])
4831 4829
4832 4830 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
4833 4831 sizeTrail = inow - initMet + 1
4834 4832 if sizeTrail>3*sec: #Too short meteors
4835 4833 x = numpy.arange(initMet,inow+1)*ippSeconds
4836 4834 y = phase1[initMet:inow+1]
4837 4835 ynnan = ~numpy.isnan(y)
4838 4836 x = x[ynnan]
4839 4837 y = y[ynnan]
4840 4838 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
4841 4839 ylin = x*slope + intercept
4842 4840 rsq = r_value**2
4843 4841 if rsq > 0.5:
4844 4842 vel = slope#*height*1000/(k*d)
4845 4843 estAux = numpy.array([utctime,p,height, vel, rsq])
4846 4844 meteorList.append(estAux)
4847 4845 initMet = inext
4848 4846 metArray2 = numpy.array(meteorList)
4849 4847
4850 4848 return metArray2
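# Trail detection summary: for every pair/height the phase time series is unwrapped, cut
# wherever the gap between valid samples exceeds 'sec' samples or the phase jumps by more
# than pi/4, and each surviving segment longer than 3*sec samples is fitted with
# stats.linregress; the slope (rad/s) is kept as the velocity estimate only when r^2 > 0.5,
# and each row appended to meteorList is [utctime, pair, height, vel, rsq].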
4851 4849
4852 4850 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
4853 4851
4854 4852 azimuth1 = numpy.zeros(len(pairslist))
4855 4853 dist = numpy.zeros(len(pairslist))
4856 4854
4857 4855 for i in range(len(rx_location)):
4858 4856 ch0 = pairslist[i][0]
4859 4857 ch1 = pairslist[i][1]
4860 4858
4861 4859 diffX = rx_location[ch0][0] - rx_location[ch1][0]
4862 4860 diffY = rx_location[ch0][1] - rx_location[ch1][1]
4863 4861 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
4864 4862 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
4865 4863
4866 4864 azimuth1 -= azimuth0
4867 4865 return azimuth1, dist
4868 4866
4869 4867 def techniqueNSM_DBS(self, **kwargs):
4870 4868 metArray = kwargs['metArray']
4871 4869 heightList = kwargs['heightList']
4872 4870 timeList = kwargs['timeList']
4873 4871 azimuth = kwargs['azimuth']
4874 4872 theta_x = numpy.array(kwargs['theta_x'])
4875 4873 theta_y = numpy.array(kwargs['theta_y'])
4876 4874
4877 4875 utctime = metArray[:,0]
4878 4876 cmet = metArray[:,1].astype(int)
4879 4877 hmet = metArray[:,3].astype(int)
4880 4878 SNRmet = metArray[:,4]
4881 4879 vmet = metArray[:,5]
4882 4880 spcmet = metArray[:,6]
4883 4881
4884 4882 nChan = numpy.max(cmet) + 1
4885 4883 nHeights = len(heightList)
4886 4884
4887 4885 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4888 4886 hmet = heightList[hmet]
4889 4887 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
4890 4888
4891 4889 velEst = numpy.zeros((heightList.size,2))*numpy.nan
4892 4890
4893 4891 for i in range(nHeights - 1):
4894 4892 hmin = heightList[i]
4895 4893 hmax = heightList[i + 1]
4896 4894
4897 4895 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
4898 4896 indthisH = numpy.where(thisH)
4899 4897
4900 4898 if numpy.size(indthisH) > 3:
4901 4899
4902 4900 vel_aux = vmet[thisH]
4903 4901 chan_aux = cmet[thisH]
4904 4902 cosu_aux = dir_cosu[chan_aux]
4905 4903 cosv_aux = dir_cosv[chan_aux]
4906 4904 cosw_aux = dir_cosw[chan_aux]
4907 4905
4908 4906 nch = numpy.size(numpy.unique(chan_aux))
4909 4907 if nch > 1:
4910 4908 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
4911 4909 velEst[i,:] = numpy.dot(A,vel_aux)
4912 4910
4913 4911 return velEst
4914 4912
4915 4913 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
4916 4914
4917 4915 param = dataOut.data_param
4918 4916 #if dataOut.abscissaList != None:
4919 4917 if numpy.any(dataOut.abscissaList):
4920 4918 absc = dataOut.abscissaList[:-1]
4921 4919 # noise = dataOut.noise
4922 4920 heightList = dataOut.heightList
4923 4921 SNR = dataOut.data_snr
4924 4922
4925 4923 if technique == 'DBS':
4926 4924
4927 4925 kwargs['velRadial'] = param[:,1,:] #Radial velocity
4928 4926 kwargs['heightList'] = heightList
4929 4927 kwargs['SNR'] = SNR
4930 4928
4931 4929 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
4932 4930 dataOut.utctimeInit = dataOut.utctime
4933 4931 dataOut.outputInterval = dataOut.paramInterval
4934 4932
4935 4933 elif technique == 'SA':
4936 4934
4937 4935 #Parameters
4938 4936 # position_x = kwargs['positionX']
4939 4937 # position_y = kwargs['positionY']
4940 4938 # azimuth = kwargs['azimuth']
4941 4939 #
4942 4940 # if kwargs.has_key('crosspairsList'):
4943 4941 # pairs = kwargs['crosspairsList']
4944 4942 # else:
4945 4943 # pairs = None
4946 4944 #
4947 4945 # if kwargs.has_key('correctFactor'):
4948 4946 # correctFactor = kwargs['correctFactor']
4949 4947 # else:
4950 4948 # correctFactor = 1
4951 4949
4952 4950 # tau = dataOut.data_param
4953 4951 # _lambda = dataOut.C/dataOut.frequency
4954 4952 # pairsList = dataOut.groupList
4955 4953 # nChannels = dataOut.nChannels
4956 4954
4957 4955 kwargs['groupList'] = dataOut.groupList
4958 4956 kwargs['tau'] = dataOut.data_param
4959 4957 kwargs['_lambda'] = dataOut.C/dataOut.frequency
4960 4958 # dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor)
4961 4959 dataOut.data_output = self.techniqueSA(kwargs)
4962 4960 dataOut.utctimeInit = dataOut.utctime
4963 4961 dataOut.outputInterval = dataOut.timeInterval
4964 4962
4965 4963 elif technique == 'Meteors':
4966 4964 dataOut.flagNoData = True
4967 4965 self.__dataReady = False
4968 4966
4969 4967 if 'nHours' in kwargs:
4970 4968 nHours = kwargs['nHours']
4971 4969 else:
4972 4970 nHours = 1
4973 4971
4974 4972 if 'meteorsPerBin' in kwargs:
4975 4973 meteorThresh = kwargs['meteorsPerBin']
4976 4974 else:
4977 4975 meteorThresh = 6
4978 4976
4979 4977 if 'hmin' in kwargs:
4980 4978 hmin = kwargs['hmin']
4981 4979 else: hmin = 70
4982 4980 if 'hmax' in kwargs:
4983 4981 hmax = kwargs['hmax']
4984 4982 else: hmax = 110
4985 4983
4986 4984 dataOut.outputInterval = nHours*3600
4987 4985
4988 4986 if self.__isConfig == False:
4989 4987 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
4990 4988 #Get Initial LTC time
4991 4989 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
4992 4990 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
4993 4991
4994 4992 self.__isConfig = True
4995 4993
4996 4994 if self.__buffer is None:
4997 4995 self.__buffer = dataOut.data_param
4998 4996 self.__firstdata = copy.copy(dataOut)
4999 4997
5000 4998 else:
5001 4999 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5002 5000
5003 5001 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5004 5002
5005 5003 if self.__dataReady:
5006 5004 dataOut.utctimeInit = self.__initime
5007 5005
5008 5006 self.__initime += dataOut.outputInterval #to erase time offset
5009 5007
5010 5008 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
5011 5009 dataOut.flagNoData = False
5012 5010 self.__buffer = None
5013 5011
5014 5012 elif technique == 'Meteors1':
5015 5013 dataOut.flagNoData = True
5016 5014 self.__dataReady = False
5017 5015
5018 5016 if 'nMins' in kwargs:
5019 5017 nMins = kwargs['nMins']
5020 5018 else: nMins = 20
5021 5019 if 'rx_location' in kwargs:
5022 5020 rx_location = kwargs['rx_location']
5023 5021 else: rx_location = [(0,1),(1,1),(1,0)]
5024 5022 if 'azimuth' in kwargs:
5025 5023 azimuth = kwargs['azimuth']
5026 5024 else: azimuth = 51.06
5027 5025 if 'dfactor' in kwargs:
5028 5026 dfactor = kwargs['dfactor']
5029 5027 if 'mode' in kwargs:
5030 5028 mode = kwargs['mode']
5031 5029 if 'theta_x' in kwargs:
5032 5030 theta_x = kwargs['theta_x']
5033 5031 if 'theta_y' in kwargs:
5034 5032 theta_y = kwargs['theta_y']
5035 5033 else: mode = 'SA'
5036 5034
5037 5035 #Remove this later
5038 5036 if dataOut.groupList is None:
5039 5037 dataOut.groupList = [(0,1),(0,2),(1,2)]
5040 5038 groupList = dataOut.groupList
5041 5039 C = 3e8
5042 5040 freq = 50e6
5043 5041 lamb = C/freq
5044 5042 k = 2*numpy.pi/lamb
5045 5043
5046 5044 timeList = dataOut.abscissaList
5047 5045 heightList = dataOut.heightList
5048 5046
5049 5047 if self.__isConfig == False:
5050 5048 dataOut.outputInterval = nMins*60
5051 5049 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
5052 5050 #Get Initial LTC time
5053 5051 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5054 5052 minuteAux = initime.minute
5055 5053 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
5056 5054 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5057 5055
5058 5056 self.__isConfig = True
5059 5057
5060 5058 if self.__buffer is None:
5061 5059 self.__buffer = dataOut.data_param
5062 5060 self.__firstdata = copy.copy(dataOut)
5063 5061
5064 5062 else:
5065 5063 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5066 5064
5067 5065 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5068 5066
5069 5067 if self.__dataReady:
5070 5068 dataOut.utctimeInit = self.__initime
5071 5069 self.__initime += dataOut.outputInterval #to erase time offset
5072 5070
5073 5071 metArray = self.__buffer
5074 5072 if mode == 'SA':
5075 5073 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
5076 5074 elif mode == 'DBS':
5077 5075 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
5078 5076 dataOut.data_output = dataOut.data_output.T
5079 5077 dataOut.flagNoData = False
5080 5078 self.__buffer = None
5081 5079
5082 5080 return
5083 5081
5084 5082 class WindProfiler(Operation):
5085 5083
5086 5084 __isConfig = False
5087 5085
5088 5086 __initime = None
5089 5087 __lastdatatime = None
5090 5088 __integrationtime = None
5091 5089
5092 5090 __buffer = None
5093 5091
5094 5092 __dataReady = False
5095 5093
5096 5094 __firstdata = None
5097 5095
5098 5096 n = None
5099 5097
5100 5098 def __init__(self):
5101 5099 Operation.__init__(self)
5102 5100
5103 5101 def __calculateCosDir(self, elev, azim):
5104 5102 zen = (90 - elev)*numpy.pi/180
5105 5103 azim = azim*numpy.pi/180
5106 5104 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
5107 5105 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
5108 5106
5109 5107 signX = numpy.sign(numpy.cos(azim))
5110 5108 signY = numpy.sign(numpy.sin(azim))
5111 5109
5112 5110 cosDirX = numpy.copysign(cosDirX, signX)
5113 5111 cosDirY = numpy.copysign(cosDirY, signY)
5114 5112 return cosDirX, cosDirY
5115 5113
5116 5114 def __calculateAngles(self, theta_x, theta_y, azimuth):
5117 5115
5118 5116 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
5119 5117 zenith_arr = numpy.arccos(dir_cosw)
5120 5118 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
5121 5119
5122 5120 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
5123 5121 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
5124 5122
5125 5123 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
5126 5124
5127 5125 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
5128 5126
5129 5127 if horOnly:
5130 5128 A = numpy.c_[dir_cosu,dir_cosv]
5131 5129 else:
5132 5130 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
5133 5131 A = numpy.asmatrix(A)
5134 5132 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
5135 5133
5136 5134 return A1
5137 5135
5138 5136 def __correctValues(self, heiRang, phi, velRadial, SNR):
5139 5137 listPhi = phi.tolist()
5140 5138 maxid = listPhi.index(max(listPhi))
5141 5139 minid = listPhi.index(min(listPhi))
5142 5140
5143 5141 rango = list(range(len(phi)))
5144 5142
5145 5143 heiRang1 = heiRang*math.cos(phi[maxid])
5146 5144 heiRangAux = heiRang*math.cos(phi[minid])
5147 5145 indOut = (heiRang1 < heiRangAux[0]).nonzero()
5148 5146 heiRang1 = numpy.delete(heiRang1,indOut)
5149 5147
5150 5148 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
5151 5149 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
5152 5150
5153 5151 for i in rango:
5154 5152 x = heiRang*math.cos(phi[i])
5155 5153 y1 = velRadial[i,:]
5156 5154 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
5157 5155
5158 5156 x1 = heiRang1
5159 5157 y11 = f1(x1)
5160 5158
5161 5159 y2 = SNR[i,:]
5162 5160 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
5163 5161 y21 = f2(x1)
5164 5162
5165 5163 velRadial1[i,:] = y11
5166 5164 SNR1[i,:] = y21
5167 5165
5168 5166 return heiRang1, velRadial1, SNR1
5169 5167
5170 5168 def __calculateVelUVW(self, A, velRadial):
5171 5169
5172 5170 #Matrix operation
5173 5171 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
5174 5172 velUVW[:,:] = numpy.dot(A,velRadial)
5175 5173
5176 5174
5177 5175 return velUVW
5178 5176
5179 5177 def techniqueDBS(self, kwargs):
5180 5178 """
5181 5179 Function that implements Doppler Beam Swinging (DBS) technique.
5182 5180
5183 5181 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
5184 5182 Direction correction (if necessary), Ranges and SNR
5185 5183
5186 5184 Output: Winds estimation (Zonal, Meridional and Vertical)
5187 5185
5188 5186 Parameters affected: Winds, height range, SNR
5189 5187 """
5190 5188 velRadial0 = kwargs['velRadial']
5191 5189 heiRang = kwargs['heightList']
5192 5190 SNR0 = kwargs['SNR']
5193 5191
5194 5192 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
5195 5193 theta_x = numpy.array(kwargs['dirCosx'])
5196 5194 theta_y = numpy.array(kwargs['dirCosy'])
5197 5195 else:
5198 5196 elev = numpy.array(kwargs['elevation'])
5199 5197 azim = numpy.array(kwargs['azimuth'])
5200 5198 theta_x, theta_y = self.__calculateCosDir(elev, azim)
5201 5199 azimuth = kwargs['correctAzimuth']
5202 5200 if 'horizontalOnly' in kwargs:
5203 5201 horizontalOnly = kwargs['horizontalOnly']
5204 5202 else: horizontalOnly = False
5205 5203 if 'correctFactor' in kwargs:
5206 5204 correctFactor = kwargs['correctFactor']
5207 5205 else: correctFactor = 1
5208 5206 if 'channelList' in kwargs:
5209 5207 channelList = kwargs['channelList']
5210 5208 if len(channelList) == 2:
5211 5209 horizontalOnly = True
5212 5210 arrayChannel = numpy.array(channelList)
5213 5211 param = param[arrayChannel,:,:]
5214 5212 theta_x = theta_x[arrayChannel]
5215 5213 theta_y = theta_y[arrayChannel]
5216 5214
5217 5215 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
5218 5216 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
5219 5217 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
5220 5218
5221 5219 #Velocity components computed with DBS
5222 5220 winds = self.__calculateVelUVW(A,velRadial1)
5223 5221
5224 5222 return winds, heiRang1, SNR1
5225 5223
5226 5224 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
5227 5225
5228 5226 nPairs = len(pairs_ccf)
5229 5227 posx = numpy.asarray(posx)
5230 5228 posy = numpy.asarray(posy)
5231 5229
5232 5230 #Inverse rotation to align with the azimuth
5233 5231 if azimuth!= None:
5234 5232 azimuth = azimuth*math.pi/180
5235 5233 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
5236 5234 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
5237 5235 else:
5238 5236 posx1 = posx
5239 5237 posy1 = posy
5240 5238
5241 5239 #Distance calculation
5242 5240 distx = numpy.zeros(nPairs)
5243 5241 disty = numpy.zeros(nPairs)
5244 5242 dist = numpy.zeros(nPairs)
5245 5243 ang = numpy.zeros(nPairs)
5246 5244
5247 5245 for i in range(nPairs):
5248 5246 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
5249 5247 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
5250 5248 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
5251 5249 ang[i] = numpy.arctan2(disty[i],distx[i])
5252 5250
5253 5251 return distx, disty, dist, ang
5254 5252 #Matrix calculation
5255 5253
5256 5254 def __calculateVelVer(self, phase, lagTRange, _lambda):
5257 5255
5258 5256 Ts = lagTRange[1] - lagTRange[0]
5259 5257 velW = -_lambda*phase/(4*math.pi*Ts)
5260 5258
5261 5259 return velW
5262 5260
5263 5261 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
5264 5262 nPairs = tau1.shape[0]
5265 5263 nHeights = tau1.shape[1]
5266 5264 vel = numpy.zeros((nPairs,3,nHeights))
5267 5265 dist1 = numpy.reshape(dist, (dist.size,1))
5268 5266
5269 5267 angCos = numpy.cos(ang)
5270 5268 angSin = numpy.sin(ang)
5271 5269
5272 5270 vel0 = dist1*tau1/(2*tau2**2)
5273 5271 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
5274 5272 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
5275 5273
5276 5274 ind = numpy.where(numpy.isinf(vel))
5277 5275 vel[ind] = numpy.nan
5278 5276
5279 5277 return vel
5280 5278
5281 5279 def techniqueSA(self, kwargs):
5282 5280
5283 5281 """
5284 5282 Function that implements Spaced Antenna (SA) technique.
5285 5283
5286 5284 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
5287 5285 Direction correction (if necessary), Ranges and SNR
5288 5286
5289 5287 Output: Winds estimation (Zonal, Meridional and Vertical)
5290 5288
5291 5289 Parameters affected: Winds
5292 5290 """
5293 5291 position_x = kwargs['positionX']
5294 5292 position_y = kwargs['positionY']
5295 5293 azimuth = kwargs['azimuth']
5296 5294
5297 5295 if 'correctFactor' in kwargs:
5298 5296 correctFactor = kwargs['correctFactor']
5299 5297 else:
5300 5298 correctFactor = 1
5301 5299
5302 5300 groupList = kwargs['groupList']
5303 5301 pairs_ccf = groupList[1]
5304 5302 tau = kwargs['tau']
5305 5303 _lambda = kwargs['_lambda']
5306 5304
5307 5305 #Cross Correlation pairs obtained
5308 5306
5309 5307 indtau = tau.shape[0]//2 # integer division, used below as a slice index
5310 5308 tau1 = tau[:indtau,:]
5311 5309 tau2 = tau[indtau:-1,:]
5312 5310 phase1 = tau[-1,:]
5313 5311
5314 5312 #---------------------------------------------------------------------
5315 5313 #Direct method
5316 5314 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
5317 5315 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
5318 5316 winds = stats.nanmean(winds, axis=0)
5319 5317 #---------------------------------------------------------------------
5320 5318 #General method
5321 5319
5322 5320 #---------------------------------------------------------------------
5323 5321 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
5324 5322 winds = correctFactor*winds
5325 5323 return winds
5326 5324
5327 5325 def __checkTime(self, currentTime, paramInterval, outputInterval):
5328 5326
5329 5327 dataTime = currentTime + paramInterval
5330 5328 deltaTime = dataTime - self.__initime
5331 5329
5332 5330 if deltaTime >= outputInterval or deltaTime < 0:
5333 5331 self.__dataReady = True
5334 5332 return
5335 5333
5336 5334 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
5337 5335 '''
5338 5336 Function that implements the wind estimation technique using detected meteors.
5339 5337
5340 5338 Input: Detected meteors, minimum number of meteors per height bin required for the estimation
5341 5339
5342 5340 Output: Wind estimates (Zonal and Meridional)
5343 5341
5344 5342 Parameters affected: Winds
5345 5343 '''
5346 5344 #Settings
5347 5345 nInt = (heightMax - heightMin)/2
5348 5346 nInt = int(nInt)
5349 5347 winds = numpy.zeros((2,nInt))*numpy.nan
5350 5348
5351 5349 #Filter errors
5352 5350 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
5353 5351 finalMeteor = arrayMeteor[error,:]
5354 5352
5355 5353 #Meteor Histogram
5356 5354 finalHeights = finalMeteor[:,2]
5357 5355 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
5358 5356 nMeteorsPerI = hist[0]
5359 5357 heightPerI = hist[1]
5360 5358
5361 5359 #Sort of meteors
5362 5360 indSort = finalHeights.argsort()
5363 5361 finalMeteor2 = finalMeteor[indSort,:]
5364 5362
5365 5363 # Calculating winds
5366 5364 ind1 = 0
5367 5365 ind2 = 0
5368 5366
5369 5367 for i in range(nInt):
5370 5368 nMet = nMeteorsPerI[i]
5371 5369 ind1 = ind2
5372 5370 ind2 = ind1 + nMet
5373 5371
5374 5372 meteorAux = finalMeteor2[ind1:ind2,:]
5375 5373
5376 5374 if meteorAux.shape[0] >= meteorThresh:
5377 5375 vel = meteorAux[:, 6]
5378 5376 zen = meteorAux[:, 4]*numpy.pi/180
5379 5377 azim = meteorAux[:, 3]*numpy.pi/180
5380 5378
5381 5379 n = numpy.cos(zen)
5382 5380 l = numpy.sin(zen)*numpy.sin(azim)
5383 5381 m = numpy.sin(zen)*numpy.cos(azim)
5384 5382
5385 5383 A = numpy.vstack((l, m)).transpose()
5386 5384 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
5387 5385 windsAux = numpy.dot(A1, vel)
5388 5386
5389 5387 winds[0,i] = windsAux[0]
5390 5388 winds[1,i] = windsAux[1]
5391 5389
5392 5390 return winds, heightPerI[:-1]
5393 5391
5394 5392 def techniqueNSM_SA(self, **kwargs):
5395 5393 metArray = kwargs['metArray']
5396 5394 heightList = kwargs['heightList']
5397 5395 timeList = kwargs['timeList']
5398 5396
5399 5397 rx_location = kwargs['rx_location']
5400 5398 groupList = kwargs['groupList']
5401 5399 azimuth = kwargs['azimuth']
5402 5400 dfactor = kwargs['dfactor']
5403 5401 k = kwargs['k']
5404 5402
5405 5403 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
5406 5404 d = dist*dfactor
5407 5405 #Phase calculation
5408 5406 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
5409 5407
5410 5408 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
5411 5409
5412 5410 velEst = numpy.zeros((heightList.size,2))*numpy.nan
5413 5411 azimuth1 = azimuth1*numpy.pi/180
5414 5412
5415 5413 for i in range(heightList.size):
5416 5414 h = heightList[i]
5417 5415 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
5418 5416 metHeight = metArray1[indH,:]
5419 5417 if metHeight.shape[0] >= 2:
5420 5418 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
5421 5419 iazim = metHeight[:,1].astype(int)
5422 5420 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
5423 5421 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
5424 5422 A = numpy.asmatrix(A)
5425 5423 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
5426 5424 velHor = numpy.dot(A1,velAux)
5427 5425
5428 5426 velEst[i,:] = numpy.squeeze(velHor)
5429 5427 return velEst
5430 5428
5431 5429 def __getPhaseSlope(self, metArray, heightList, timeList):
5432 5430 meteorList = []
5433 5431 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
5434 5432 #Putting back together the meteor matrix
5435 5433 utctime = metArray[:,0]
5436 5434 uniqueTime = numpy.unique(utctime)
5437 5435
5438 5436 phaseDerThresh = 0.5
5439 5437 ippSeconds = timeList[1] - timeList[0]
5440 5438 sec = numpy.where(timeList>1)[0][0]
5441 5439 nPairs = metArray.shape[1] - 6
5442 5440 nHeights = len(heightList)
5443 5441
5444 5442 for t in uniqueTime:
5445 5443 metArray1 = metArray[utctime==t,:]
5446 5444 tmet = metArray1[:,1].astype(int)
5447 5445 hmet = metArray1[:,2].astype(int)
5448 5446
5449 5447 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
5450 5448 metPhase[:,:] = numpy.nan
5451 5449 metPhase[:,hmet,tmet] = metArray1[:,6:].T
5452 5450
5453 5451 #Delete short trails
5454 5452 metBool = ~numpy.isnan(metPhase[0,:,:])
5455 5453 heightVect = numpy.sum(metBool, axis = 1)
5456 5454 metBool[heightVect<sec,:] = False
5457 5455 metPhase[:,heightVect<sec,:] = numpy.nan
5458 5456
5459 5457 #Derivative
5460 5458 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
5461 5459 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
5462 5460 metPhase[phDerAux] = numpy.nan
5463 5461
5464 5462 #--------------------------METEOR DETECTION -----------------------------------------
5465 5463 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
5466 5464
5467 5465 for p in numpy.arange(nPairs):
5468 5466 phase = metPhase[p,:,:]
5469 5467 phDer = metDer[p,:,:]
5470 5468
5471 5469 for h in indMet:
5472 5470 height = heightList[h]
5473 5471 phase1 = phase[h,:] #82
5474 5472 phDer1 = phDer[h,:]
5475 5473
5476 5474 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
5477 5475
5478 5476 indValid = numpy.where(~numpy.isnan(phase1))[0]
5479 5477 initMet = indValid[0]
5480 5478 endMet = 0
5481 5479
5482 5480 for i in range(len(indValid)-1):
5483 5481
5484 5482 #Time difference
5485 5483 inow = indValid[i]
5486 5484 inext = indValid[i+1]
5487 5485 idiff = inext - inow
5488 5486 #Phase difference
5489 5487 phDiff = numpy.abs(phase1[inext] - phase1[inow])
5490 5488
5491 5489 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
5492 5490 sizeTrail = inow - initMet + 1
5493 5491 if sizeTrail>3*sec: #Too short meteors
5494 5492 x = numpy.arange(initMet,inow+1)*ippSeconds
5495 5493 y = phase1[initMet:inow+1]
5496 5494 ynnan = ~numpy.isnan(y)
5497 5495 x = x[ynnan]
5498 5496 y = y[ynnan]
5499 5497 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
5500 5498 ylin = x*slope + intercept
5501 5499 rsq = r_value**2
5502 5500 if rsq > 0.5:
5503 5501 vel = slope#*height*1000/(k*d)
5504 5502 estAux = numpy.array([utctime,p,height, vel, rsq])
5505 5503 meteorList.append(estAux)
5506 5504 initMet = inext
5507 5505 metArray2 = numpy.array(meteorList)
5508 5506
5509 5507 return metArray2
5510 5508
5511 5509 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
5512 5510
5513 5511 azimuth1 = numpy.zeros(len(pairslist))
5514 5512 dist = numpy.zeros(len(pairslist))
5515 5513
5516 5514 for i in range(len(rx_location)):
5517 5515 ch0 = pairslist[i][0]
5518 5516 ch1 = pairslist[i][1]
5519 5517
5520 5518 diffX = rx_location[ch0][0] - rx_location[ch1][0]
5521 5519 diffY = rx_location[ch0][1] - rx_location[ch1][1]
5522 5520 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
5523 5521 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
5524 5522
5525 5523 azimuth1 -= azimuth0
5526 5524 return azimuth1, dist
5527 5525
5528 5526 def techniqueNSM_DBS(self, **kwargs):
5529 5527 metArray = kwargs['metArray']
5530 5528 heightList = kwargs['heightList']
5531 5529 timeList = kwargs['timeList']
5532 5530 azimuth = kwargs['azimuth']
5533 5531 theta_x = numpy.array(kwargs['theta_x'])
5534 5532 theta_y = numpy.array(kwargs['theta_y'])
5535 5533
5536 5534 utctime = metArray[:,0]
5537 5535 cmet = metArray[:,1].astype(int)
5538 5536 hmet = metArray[:,3].astype(int)
5539 5537 SNRmet = metArray[:,4]
5540 5538 vmet = metArray[:,5]
5541 5539 spcmet = metArray[:,6]
5542 5540
5543 5541 nChan = numpy.max(cmet) + 1
5544 5542 nHeights = len(heightList)
5545 5543
5546 5544 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
5547 5545 hmet = heightList[hmet]
5548 5546 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
5549 5547
5550 5548 velEst = numpy.zeros((heightList.size,2))*numpy.nan
5551 5549
5552 5550 for i in range(nHeights - 1):
5553 5551 hmin = heightList[i]
5554 5552 hmax = heightList[i + 1]
5555 5553
5556 5554 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
5557 5555 indthisH = numpy.where(thisH)
5558 5556
5559 5557 if numpy.size(indthisH) > 3:
5560 5558
5561 5559 vel_aux = vmet[thisH]
5562 5560 chan_aux = cmet[thisH]
5563 5561 cosu_aux = dir_cosu[chan_aux]
5564 5562 cosv_aux = dir_cosv[chan_aux]
5565 5563 cosw_aux = dir_cosw[chan_aux]
5566 5564
5567 5565 nch = numpy.size(numpy.unique(chan_aux))
5568 5566 if nch > 1:
5569 5567 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
5570 5568 velEst[i,:] = numpy.dot(A,vel_aux)
5571 5569
5572 5570 return velEst
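    # The per-bin inversion above relies on self.__calculateMatA (defined
    # elsewhere in this class).  A minimal sketch of the same idea with plain
    # least squares: each detection contributes one radial velocity
    # v_r = u*cos_u + v*cos_v + w*cos_w, and the wind components are recovered
    # from the overdetermined system.  All numbers below are hypothetical.
    #
    #   cosu = numpy.array([0.5, -0.5, 0.0])        # direction cosines per echo
    #   cosv = numpy.array([0.0,  0.0, 0.5])
    #   vr   = numpy.array([10., -10., 5.])         # measured radial velocities
    #   G = numpy.column_stack((cosu, cosv))        # 2-D sketch, w neglected
    #   uv, *_ = numpy.linalg.lstsq(G, vr, rcond=None)   # -> [u, v] estimate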
5573 5571
5574 5572 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
5575 5573
5576 5574 param = dataOut.moments
5577 5575 if numpy.any(dataOut.abscissaList):
5578 5576 absc = dataOut.abscissaList[:-1]
5579 5577 # noise = dataOut.noise
5580 5578 heightList = dataOut.heightList
5581 5579 SNR = dataOut.data_snr
5582 5580
5583 5581 if technique == 'DBS':
5584 5582
5585 5583 kwargs['velRadial'] = param[:,1,:] #Radial velocity
5586 5584 kwargs['heightList'] = heightList
5587 5585 kwargs['SNR'] = SNR
5588 5586
5589 5587 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
5590 5588 dataOut.utctimeInit = dataOut.utctime
5591 5589 dataOut.outputInterval = dataOut.paramInterval
5592 5590
5593 5591 elif technique == 'SA':
5594 5592
5595 5593 #Parameters
5596 5594 kwargs['groupList'] = dataOut.groupList
5597 5595 kwargs['tau'] = dataOut.data_param
5598 5596 kwargs['_lambda'] = dataOut.C/dataOut.frequency
5599 5597 dataOut.data_output = self.techniqueSA(kwargs)
5600 5598 dataOut.utctimeInit = dataOut.utctime
5601 5599 dataOut.outputInterval = dataOut.timeInterval
5602 5600
5603 5601 elif technique == 'Meteors':
5604 5602 dataOut.flagNoData = True
5605 5603 self.__dataReady = False
5606 5604
5607 5605 if 'nHours' in kwargs:
5608 5606 nHours = kwargs['nHours']
5609 5607 else:
5610 5608 nHours = 1
5611 5609
5612 5610 if 'meteorsPerBin' in kwargs:
5613 5611 meteorThresh = kwargs['meteorsPerBin']
5614 5612 else:
5615 5613 meteorThresh = 6
5616 5614
5617 5615 if 'hmin' in kwargs:
5618 5616 hmin = kwargs['hmin']
5619 5617 else: hmin = 70
5620 5618 if 'hmax' in kwargs:
5621 5619 hmax = kwargs['hmax']
5622 5620 else: hmax = 110
5623 5621
5624 5622 dataOut.outputInterval = nHours*3600
5625 5623
5626 5624 if self.__isConfig == False:
5627 5625 #Get Initial LTC time
5628 5626 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5629 5627 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5630 5628
5631 5629 self.__isConfig = True
5632 5630
5633 5631 if self.__buffer is None:
5634 5632 self.__buffer = dataOut.data_param
5635 5633 self.__firstdata = copy.copy(dataOut)
5636 5634
5637 5635 else:
5638 5636 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5639 5637
5640 5638 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5641 5639
5642 5640 if self.__dataReady:
5643 5641 dataOut.utctimeInit = self.__initime
5644 5642
5645 5643 self.__initime += dataOut.outputInterval #to erase time offset
5646 5644
5647 5645 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
5648 5646 dataOut.flagNoData = False
5649 5647 self.__buffer = None
5650 5648
5651 5649 elif technique == 'Meteors1':
5652 5650 dataOut.flagNoData = True
5653 5651 self.__dataReady = False
5654 5652
5655 5653 if 'nMins' in kwargs:
5656 5654 nMins = kwargs['nMins']
5657 5655 else: nMins = 20
5658 5656 if 'rx_location' in kwargs:
5659 5657 rx_location = kwargs['rx_location']
5660 5658 else: rx_location = [(0,1),(1,1),(1,0)]
5661 5659 if 'azimuth' in kwargs:
5662 5660 azimuth = kwargs['azimuth']
5663 5661 else: azimuth = 51.06
5664 5662 if 'dfactor' in kwargs:
5665 5663 dfactor = kwargs['dfactor']
5666 5664 if 'mode' in kwargs:
5667 5665 mode = kwargs['mode']
5668 5666 else: mode = 'SA'
5669 5667 if 'theta_x' in kwargs:
5670 5668 theta_x = kwargs['theta_x']
5671 5669 if 'theta_y' in kwargs:
5672 5670 theta_y = kwargs['theta_y']
5673 5671
5674 5672 #TODO: remove this later
5675 5673 if dataOut.groupList is None:
5676 5674 dataOut.groupList = [(0,1),(0,2),(1,2)]
5677 5675 groupList = dataOut.groupList
5678 5676 C = 3e8
5679 5677 freq = 50e6
5680 5678 lamb = C/freq
5681 5679 k = 2*numpy.pi/lamb
5682 5680
5683 5681 timeList = dataOut.abscissaList
5684 5682 heightList = dataOut.heightList
5685 5683
5686 5684 if self.__isConfig == False:
5687 5685 dataOut.outputInterval = nMins*60
5688 5686 #Get Initial LTC time
5689 5687 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5690 5688 minuteAux = initime.minute
5691 5689 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
5692 5690 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5693 5691
5694 5692 self.__isConfig = True
5695 5693
5696 5694 if self.__buffer is None:
5697 5695 self.__buffer = dataOut.data_param
5698 5696 self.__firstdata = copy.copy(dataOut)
5699 5697
5700 5698 else:
5701 5699 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5702 5700
5703 5701 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5704 5702
5705 5703 if self.__dataReady:
5706 5704 dataOut.utctimeInit = self.__initime
5707 5705 self.__initime += dataOut.outputInterval #to erase time offset
5708 5706
5709 5707 metArray = self.__buffer
5710 5708 if mode == 'SA':
5711 5709 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
5712 5710 elif mode == 'DBS':
5713 5711 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
5714 5712 dataOut.data_output = dataOut.data_output.T
5715 5713 dataOut.flagNoData = False
5716 5714 self.__buffer = None
5717 5715
5718 5716 return dataOut
5719 5717
5720 5718 class EWDriftsEstimation(Operation):
5721 5719
5722 5720 def __init__(self):
5723 5721 Operation.__init__(self)
5724 5722
5725 5723 def __correctValues(self, heiRang, phi, velRadial, SNR):
5726 5724 listPhi = phi.tolist()
5727 5725 maxid = listPhi.index(max(listPhi))
5728 5726 minid = listPhi.index(min(listPhi))
5729 5727
5730 5728 rango = list(range(len(phi)))
5731 5729 heiRang1 = heiRang*math.cos(phi[maxid])
5732 5730 heiRangAux = heiRang*math.cos(phi[minid])
5733 5731 indOut = (heiRang1 < heiRangAux[0]).nonzero()
5734 5732 heiRang1 = numpy.delete(heiRang1,indOut)
5735 5733
5736 5734 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
5737 5735 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
5738 5736
5739 5737 for i in rango:
5740 5738 x = heiRang*math.cos(phi[i])
5741 5739 y1 = velRadial[i,:]
5742 5740 vali= (numpy.isfinite(y1)==True).nonzero()
5743 5741 y1=y1[vali]
5744 5742 x = x[vali]
5745 5743 f1 = interpolate.interp1d(x,y1,kind = 'cubic',bounds_error=False)
5746 5744 x1 = heiRang1
5747 5745 y11 = f1(x1)
5748 5746 y2 = SNR[i,:]
5749 5747 x = heiRang*math.cos(phi[i])
5750 5748 vali= (y2 != -1).nonzero()
5751 5749 y2 = y2[vali]
5752 5750 x = x[vali]
5753 5751 f2 = interpolate.interp1d(x,y2,kind = 'cubic',bounds_error=False)
5754 5752 y21 = f2(x1)
5755 5753
5756 5754 velRadial1[i,:] = y11
5757 5755 SNR1[i,:] = y21
5758 5756
5759 5757 return heiRang1, velRadial1, SNR1
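    # __correctValues re-grids each beam onto the common axis heiRang*cos(phi)
    # with a cubic spline.  A standalone sketch with hypothetical values
    # (scipy.interpolate is already imported as interpolate):
    #
    #   heiRang = numpy.arange(100., 200., 5.)      # apparent ranges (km)
    #   phi = 3.0*numpy.pi/180                      # off-zenith angle (rad)
    #   vel = numpy.sin(heiRang/20.)                # fake radial velocities
    #   x = heiRang*numpy.cos(phi)                  # zenith-corrected heights
    #   f = interpolate.interp1d(x, vel, kind='cubic', bounds_error=False)
    #   vel1 = f(x)                                 # evaluated on the new grid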
5760 5758
5761 5759 def run(self, dataOut, zenith, zenithCorrection,fileDrifts):
5762 5760
5763 5761 dataOut.lat = -11.95
5764 5762 dataOut.lon = -76.87
5765 5763 dataOut.spcst = 0.00666
5766 5764 dataOut.pl = 0.0003
5767 5765 dataOut.cbadn = 3
5768 5766 dataOut.inttms = 300
5769 5767 dataOut.azw = -115.687
5770 5768 dataOut.elw = 86.1095
5771 5769 dataOut.aze = 130.052
5772 5770 dataOut.ele = 87.6558
5773 5771 dataOut.jro14 = numpy.log10(dataOut.spc_noise[0]/dataOut.normFactor)
5774 5772 dataOut.jro15 = numpy.log10(dataOut.spc_noise[1]/dataOut.normFactor)
5775 5773 dataOut.jro16 = numpy.log10(dataOut.spc_noise[2]/dataOut.normFactor)
5776 5774 dataOut.nwlos = numpy.log10(dataOut.spc_noise[3]/dataOut.normFactor)
5777 5775
5778 5776 heiRang = dataOut.heightList
5779 5777 velRadial = dataOut.data_param[:,3,:]
5780 5778 velRadialm = dataOut.data_param[:,2:4,:]*-1
5781 5779
5782 5780 rbufc=dataOut.data_paramC[:,:,0]
5783 5781 ebufc=dataOut.data_paramC[:,:,1]
5784 #SNR = dataOut.data_snr
5785 5782 SNR = dataOut.data_snr1_i
5786 5783 rbufi = dataOut.data_snr1_i
5787 5784 velRerr = dataOut.data_error[:,4,:]
5788 5785 range1 = dataOut.heightList
5789 5786 nhei = len(range1)
5790 5787
5791 5788 sat_fits = dataOut.sat_fits
5792 5789
5793 5790 channels = dataOut.channelList
5794 5791 nChan = len(channels)
5795 5792 my_nbeams = nChan/2
5796 5793 if my_nbeams == 2:
5797 5794 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]],[velRadialm[1,:]],[velRadialm[1,:]]))
5798 5795 else :
5799 5796 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]]))
5800 5797 dataOut.moments=moments
5801 5798 #Incoherent
5802 5799 smooth_w = dataOut.clean_num_aver[0,:]
5803 5800 chisq_w = dataOut.data_error[0,0,:]
5804 5801 p_w0 = rbufi[0,:]
5805 5802 p_w1 = rbufi[1,:]
5806 5803
5807 5804 # Coherent
5808 5805 smooth_wC = ebufc[0,:]
5809 5806 p_w0C = rbufc[0,:]
5810 5807 p_w1C = rbufc[1,:]
5811 5808 w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1)
5812 5809 t_wC = rbufc[3,:]
5813 5810 val = (numpy.isfinite(p_w0)==False).nonzero()
5814 5811 p_w0[val]=0
5815 5812 val = (numpy.isfinite(p_w1)==False).nonzero()
5816 5813 p_w1[val]=0
5817 5814 val = (numpy.isfinite(p_w0C)==False).nonzero()
5818 5815 p_w0C[val]=0
5819 5816 val = (numpy.isfinite(p_w1C)==False).nonzero()
5820 5817 p_w1C[val]=0
5821 5818 val = (numpy.isfinite(smooth_w)==False).nonzero()
5822 5819 smooth_w[val]=0
5823 5820 val = (numpy.isfinite(smooth_wC)==False).nonzero()
5824 5821 smooth_wC[val]=0
5825 5822
5826 5823 #p_w0 = (p_w0*smooth_w+p_w0C*smooth_wC)/(smooth_w+smooth_wC)
5827 5824 #p_w1 = (p_w1*smooth_w+p_w1C*smooth_wC)/(smooth_w+smooth_wC)
5828 5825
5829 5826 if len(sat_fits) >0 :
5830 5827 p_w0C = p_w0C + sat_fits[0,:]
5831 5828 p_w1C = p_w1C + sat_fits[1,:]
5832 5829
5833 5830 if my_nbeams == 1:
5834 5831 w = velRadial[0,:]
5835 5832 winds = velRadial.copy()
5836 5833 w_err = velRerr[0,:]
5837 5834 u = w*numpy.nan
5838 5835 u_err = w_err*numpy.nan
5839 5836 p_e0 = p_w0*numpy.nan
5840 5837 p_e1 = p_w1*numpy.nan
5841 5838 #snr1 = 10*numpy.log10(SNR[0])
5842 5839 if my_nbeams == 2:
5843 5840
5844 5841 zenith = numpy.array(zenith)
5845 5842 zenith -= zenithCorrection
5846 5843 zenith *= numpy.pi/180
5847 5844 if zenithCorrection != 0 :
5848 5845 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
5849 5846 else :
5850 5847 heiRang1 = heiRang
5851 5848 velRadial1 = velRadial
5852 5849 SNR1 = SNR
5853 5850
5854 5851 alp = zenith[0]
5855 5852 bet = zenith[1]
5856 5853
5857 5854 w_w = velRadial1[0,:]
5858 5855 w_e = velRadial1[1,:]
5859 5856 w_w_err = velRerr[0,:]
5860 5857 w_e_err = velRerr[1,:]
5861 5858 smooth_e = dataOut.clean_num_aver[2,:]
5862 5859 chisq_e = dataOut.data_error[1,0,:]
5863 5860 p_e0 = rbufi[2,:]
5864 5861 p_e1 = rbufi[3,:]
5865 5862
5866 5863 tini=time.localtime(dataOut.utctime)
5867 5864
5868 5865 if tini[3] >= 6 and tini[3] < 18 :
5869 5866 w_wtmp = numpy.where(numpy.isfinite(w_wC)==True,w_wC,w_w)
5870 5867 w_w_errtmp = numpy.where(numpy.isfinite(w_wC)==True,numpy.nan,w_w_err)
5871 5868 else:
5872 5869 w_wtmp = numpy.where(numpy.isfinite(w_wC)==True,w_wC,w_w)
5873 5870 w_wtmp = numpy.where(range1 > 200,w_w,w_wtmp)
5874 5871 w_w_errtmp = numpy.where(numpy.isfinite(w_wC)==True,numpy.nan,w_w_err)
5875 5872 w_w_errtmp = numpy.where(range1 > 200,w_w_err,w_w_errtmp)
5876 5873 w_w = w_wtmp
5877 5874 w_w_err = w_w_errtmp
5878 5875
5879 5876 #if my_nbeams == 2:
5880 5877 smooth_eC=ebufc[4,:]
5881 5878 p_e0C = rbufc[4,:]
5882 5879 p_e1C = rbufc[5,:]
5883 5880 w_eC = rbufc[6,:]*-1
5884 5881 t_eC = rbufc[7,:]
5885 5882 val = (numpy.isfinite(p_e0)==False).nonzero()
5886 5883 p_e0[val]=0
5887 5884 val = (numpy.isfinite(p_e1)==False).nonzero()
5888 5885 p_e1[val]=0
5889 5886 val = (numpy.isfinite(p_e0C)==False).nonzero()
5890 5887 p_e0C[val]=0
5891 5888 val = (numpy.isfinite(p_e1C)==False).nonzero()
5892 5889 p_e1C[val]=0
5893 5890 val = (numpy.isfinite(smooth_e)==False).nonzero()
5894 5891 smooth_e[val]=0
5895 5892 val = (numpy.isfinite(smooth_eC)==False).nonzero()
5896 5893 smooth_eC[val]=0
5897 5894 #p_e0 = (p_e0*smooth_e+p_e0C*smooth_eC)/(smooth_e+smooth_eC)
5898 5895 #p_e1 = (p_e1*smooth_e+p_e1C*smooth_eC)/(smooth_e+smooth_eC)
5899 5896
5900 5897 if len(sat_fits) >0 :
5901 5898 p_e0C = p_e0C + sat_fits[2,:]
5902 5899 p_e1C = p_e1C + sat_fits[3,:]
5903 5900
5904 5901 if tini[3] >= 6 and tini[3] < 18 :
5905 5902 w_etmp = numpy.where(numpy.isfinite(w_eC)==True,w_eC,w_e)
5906 5903 w_e_errtmp = numpy.where(numpy.isfinite(w_eC)==True,numpy.nan,w_e_err)
5907 5904 else:
5908 5905 w_etmp = numpy.where(numpy.isfinite(w_eC)==True,w_eC,w_e)
5909 5906 w_etmp = numpy.where(range1 > 200,w_e,w_etmp)
5910 5907 w_e_errtmp = numpy.where(numpy.isfinite(w_eC)==True,numpy.nan,w_e_err)
5911 5908 w_e_errtmp = numpy.where(range1 > 200,w_e_err,w_e_errtmp)
5912 5909 w_e = w_etmp
5913 5910 w_e_err = w_e_errtmp
5914 5911
5915 5912 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
5916 5913 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
5917 5914
5918 5915 w_err = numpy.sqrt((w_w_err*numpy.sin(bet))**2.+(w_e_err*numpy.sin(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
5919 5916 u_err = numpy.sqrt((w_w_err*numpy.cos(bet))**2.+(w_e_err*numpy.cos(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
5920 5917
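        # The four expressions above are Cramer's rule applied to the 2x2
        # system tying the two line-of-sight velocities to (w, u), assuming
        # each beam measures w_beam = w*cos(zenith) + u*sin(zenith):
        #
        #   w_w = w*cos(alp) + u*sin(alp)
        #   w_e = w*cos(bet) + u*sin(bet)
        #
        #   => w = (w_w*sin(bet) - w_e*sin(alp)) / (cos(alp)*sin(bet) - cos(bet)*sin(alp))
        #      u = (w_w*cos(bet) - w_e*cos(alp)) / (sin(alp)*cos(bet) - sin(bet)*cos(alp))
        #
        # The error lines propagate the independent beam errors through the
        # same linear combinations.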
5921 5918 winds = numpy.vstack((w,u))
5922 5919 dataOut.heightList = heiRang1
5923 5920 #snr1 = 10*numpy.log10(SNR1[0])
5924 5921 dataOut.data_output = winds
5925 5922 range1 = dataOut.heightList
5926 5923 nhei = len(range1)
5927 5924 #print('alt ',range1*numpy.sin(86.1*numpy.pi/180))
5928 5925 #print(numpy.min([dataOut.eldir7,dataOut.eldir8]))
5929 5926 galt = range1*numpy.sin(numpy.min([dataOut.elw,dataOut.ele])*numpy.pi/180.)
5930 5927 dataOut.params = numpy.vstack((range1,galt,w,w_err,u,u_err,w_w,w_w_err,w_e,w_e_err,numpy.log10(p_w0),numpy.log10(p_w0C),numpy.log10(p_w1),numpy.log10(p_w1C),numpy.log10(p_e0),numpy.log10(p_e0C),numpy.log10(p_e1),numpy.log10(p_e1C),chisq_w,chisq_e))
5931 5928 #snr1 = 10*numpy.log10(SNR1[0])
5932 5929 #print(min(snr1), max(snr1))
5933 5930 snr1 = numpy.vstack((p_w0,p_w1,p_e0,p_e1))
5934 5931 snr1db = 10*numpy.log10(snr1[0])
5935 5932
5936 5933 #dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0]))
5937 5934 dataOut.data_snr1 = numpy.reshape(snr1db,(1,snr1db.shape[0]))
5938 5935 dataOut.utctimeInit = dataOut.utctime
5939 5936 dataOut.outputInterval = dataOut.timeInterval
5940 5937
5941 5938 hei_aver0 = 218
5942 5939 jrange = 450 #900 for HA drifts
5943 5940 deltah = 15.0 #dataOut.spacing(0) 25 HAD
5944 5941 h0 = 0.0 #dataOut.first_height(0)
5945 5942
5946 5943 range1 = numpy.arange(nhei) * deltah + h0
5947 5944 jhei = (range1 >= hei_aver0).nonzero()
5948 5945 if len(jhei[0]) > 0 :
5949 5946 h0_index = jhei[0][0] # Initial height for getting averages 218km
5950 5947
5951 5948 mynhei = 7
5952 5949 nhei_avg = int(jrange/deltah)
5953 5950 h_avgs = int(nhei_avg/mynhei)
5954 5951 nhei_avg = h_avgs*(mynhei-1)+mynhei
5955 5952
5956 5953 navgs = numpy.zeros(mynhei,dtype='float')
5957 5954 delta_h = numpy.zeros(mynhei,dtype='float')
5958 5955 range_aver = numpy.zeros(mynhei,dtype='float')
5959 5956 for ih in range( mynhei-1 ):
5960 5957 range_aver[ih] = numpy.sum(range1[h0_index+h_avgs*ih:h0_index+h_avgs*(ih+1)-0])/h_avgs
5961 5958 navgs[ih] = h_avgs
5962 5959 delta_h[ih] = deltah*h_avgs
5963 5960
5964 5961 range_aver[mynhei-1] = numpy.sum(range1[h0_index:h0_index+6*h_avgs-0])/(6*h_avgs)
5965 5962 navgs[mynhei-1] = 6*h_avgs
5966 5963 delta_h[mynhei-1] = deltah*6*h_avgs
5967 5964
5968 5965 wA = w[h0_index:h0_index+nhei_avg-0]
5969 5966 wA_err = w_err[h0_index:h0_index+nhei_avg-0]
5970 5967 for i in range(5) :
5971 5968 vals = wA[i*h_avgs:(i+1)*h_avgs-0]
5972 5969 errs = wA_err[i*h_avgs:(i+1)*h_avgs-0]
5973 5970 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
5974 5971 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5975 5972 wA[6*h_avgs+i] = avg
5976 5973 wA_err[6*h_avgs+i] = sigma
5977 5974
5978 5975 vals = wA[0:6*h_avgs-0]
5979 5976 errs=wA_err[0:6*h_avgs-0]
5980 5977 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2)
5981 5978 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5982 5979 wA[nhei_avg-1] = avg
5983 5980 wA_err[nhei_avg-1] = sigma
5984 5981
5985 5982 wA = wA[6*h_avgs:nhei_avg-0]
5986 5983 wA_err=wA_err[6*h_avgs:nhei_avg-0]
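        # Each averaged bin above is an inverse-variance weighted mean:
        #   avg   = sum(v_i/sigma_i^2) / sum(1/sigma_i^2)
        #   sigma = sqrt(1 / sum(1/sigma_i^2))
        # A numeric sketch with hypothetical values:
        #   vals = numpy.array([10., 12., 11.])
        #   errs = numpy.array([1., 2., 1.])
        #   numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)   # -> ~10.67
        #   numpy.sqrt(1./numpy.nansum(1./errs**2.))                # -> ~0.67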
5987 5984 if my_nbeams == 2 :
5988 5985 uA = u[h0_index:h0_index+nhei_avg]
5989 5986 uA_err=u_err[h0_index:h0_index+nhei_avg]
5990 5987
5991 5988 for i in range(5) :
5992 5989 vals = uA[i*h_avgs:(i+1)*h_avgs-0]
5993 5990 errs=uA_err[i*h_avgs:(i+1)*h_avgs-0]
5994 5991 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
5995 5992 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
5996 5993 uA[6*h_avgs+i] = avg
5997 5994 uA_err[6*h_avgs+i]=sigma
5998 5995
5999 5996 vals = uA[0:6*h_avgs-0]
6000 5997 errs = uA_err[0:6*h_avgs-0]
6001 5998 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
6002 5999 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
6003 6000 uA[nhei_avg-1] = avg
6004 6001 uA_err[nhei_avg-1] = sigma
6005 6002 uA = uA[6*h_avgs:nhei_avg-0]
6006 6003 uA_err = uA_err[6*h_avgs:nhei_avg-0]
6007 6004 dataOut.drifts_avg = numpy.vstack((wA,uA))
6008 6005
6009 6006 if my_nbeams == 1: dataOut.drifts_avg = wA
6010 6007 #deltahavg= wA*0.0+deltah
6011 6008 dataOut.range = range1
6012 6009 galtavg = range_aver*numpy.sin(numpy.min([dataOut.elw,dataOut.ele])*numpy.pi/180.)
6013 6010 dataOut.params_avg = numpy.vstack((wA,wA_err,uA,uA_err,range_aver,galtavg,delta_h))
6014 6011
6015 6012 #print('comparando dim de avg ',wA.shape,deltahavg.shape,range_aver.shape)
6016 6013 tini=time.localtime(dataOut.utctime)
6017 6014 datefile= str(tini[0]).zfill(4)+str(tini[1]).zfill(2)+str(tini[2]).zfill(2)
6018 6015 nfile = fileDrifts+'/jro'+datefile+'drifts_sch3.txt'
6019 6016
6020 6017 f1 = open(nfile,'a')
6021 6018 datedriftavg=str(tini[0])+' '+str(tini[1])+' '+str(tini[2])+' '+str(tini[3])+' '+str(tini[4])
6022 6019 driftavgstr=str(dataOut.drifts_avg)
6023 6020 numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i')
6024 6021 numpy.savetxt(f1,numpy.reshape(range_aver,(1,len(range_aver))) ,fmt='%10.2f')
6025 6022 numpy.savetxt(f1,dataOut.drifts_avg[:,:],fmt='%10.2f')
6026 6023 f1.close()
6027 6024
6028 6025 swfile = fileDrifts+'/jro'+datefile+'drifts_sw.txt'
6029 6026 f1 = open(swfile,'a')
6030 6027 numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i')
6031 6028 numpy.savetxt(f1,numpy.reshape(heiRang,(1,len(heiRang))),fmt='%10.2f')
6032 6029 numpy.savetxt(f1,dataOut.data_param[:,0,:],fmt='%10.2f')
6033 6030 f1.close()
6034 6031 dataOut.heightListtmp = dataOut.heightList
6035 6032 '''
6036 6033 #Send drifts data to MySQL
6037 6034 fechad = str(tini[0]).zfill(4)+'-'+str(tini[1]).zfill(2)+'-'+str(tini[2]).zfill(2)+' '+str(tini[3]).zfill(2)+':'+str(tini[4]).zfill(2)+':'+str(0).zfill(2)
6038 6035 mydb = mysql.connector.connect(
6039 6036 host="10.10.110.213",
6040 6037 user="user_clima",
6041 6038 password="5D.bh(B2)Y_wRNz9",
6042 6039 database="clima_espacial"
6043 6040 )
6044 6041
6045 6042 mycursor = mydb.cursor()
6046 6043 #mycursor.execute("CREATE TABLE drifts_vertical (id INT AUTO_INCREMENT PRIMARY KEY, fecha DATETIME(6), Vertical FLOAT(10,2))")
6047 6044
6048 6045 sql = "INSERT INTO drifts_vertical (datetime, value) VALUES (%s, %s)"
6049 6046 if numpy.isfinite(dataOut.drifts_avg[0,6]): vdql = dataOut.drifts_avg[0,6]
6050 6047 else : vdql = 999
6051 6048 val = (fechad, vdql)
6052 6049 mycursor.execute(sql, val)
6053 6050 mydb.commit()
6054 6051 sql = "INSERT INTO drifts_zonal (datetime, value) VALUES (%s, %s)"
6055 6052 if numpy.isfinite(dataOut.drifts_avg[1,6]): zdql = dataOut.drifts_avg[1,6]
6056 6053 else : zdql = 999
6057 6054 val = (fechad, zdql)
6058 6055 mycursor.execute(sql, val)
6059 6056 mydb.commit()
6060 6057
6061 6058 print(mycursor.rowcount, "record inserted.")
6062 6059 '''
6063 6060 return dataOut
6064 6061
6065 6062 class setHeightDrifts(Operation):
6066 6063
6067 6064 def __init__(self):
6068 6065 Operation.__init__(self)
6069 6066 def run(self, dataOut):
6070 6067 #print('h inicial ',dataOut.heightList,dataOut.heightListtmp)
6071 6068 dataOut.heightList = dataOut.heightListtmp
6072 6069 #print('regresa H ',dataOut.heightList)
6073 6070 return dataOut
6074 6071 class setHeightDriftsavg(Operation):
6075 6072
6076 6073 def __init__(self):
6077 6074 Operation.__init__(self)
6078 6075 def run(self, dataOut):
6079 6076 #print('h inicial ',dataOut.heightList)
6080 6077 dataOut.heightList = dataOut.params_avg[4]
6081 6078 #print('cambia H ',dataOut.params_avg[4],dataOut.heightList)
6082 6079 return dataOut
6083 6080
6084 6081 #--------------- Non Specular Meteor ----------------
6085 6082
6086 6083 class NonSpecularMeteorDetection(Operation):
6087 6084
6088 6085 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
6089 6086 data_acf = dataOut.data_pre[0]
6090 6087 data_ccf = dataOut.data_pre[1]
6091 6088 pairsList = dataOut.groupList[1]
6092 6089
6093 6090 lamb = dataOut.C/dataOut.frequency
6094 6091 tSamp = dataOut.ippSeconds*dataOut.nCohInt
6095 6092 paramInterval = dataOut.paramInterval
6096 6093
6097 6094 nChannels = data_acf.shape[0]
6098 6095 nLags = data_acf.shape[1]
6099 6096 nProfiles = data_acf.shape[2]
6100 6097 nHeights = dataOut.nHeights
6101 6098 nCohInt = dataOut.nCohInt
6102 6099 sec = numpy.round(nProfiles/dataOut.paramInterval)
6103 6100 heightList = dataOut.heightList
6104 6101 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
6105 6102 utctime = dataOut.utctime
6106 6103
6107 6104 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
6108 6105
6109 6106 #------------------------ SNR --------------------------------------
6110 6107 power = data_acf[:,0,:,:].real
6111 6108 noise = numpy.zeros(nChannels)
6112 6109 SNR = numpy.zeros(power.shape)
6113 6110 for i in range(nChannels):
6114 6111 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
6115 6112 SNR[i] = (power[i]-noise[i])/noise[i]
6116 6113 SNRm = numpy.nanmean(SNR, axis = 0)
6117 6114 SNRdB = 10*numpy.log10(SNR)
6118 6115
6119 6116 if mode == 'SA':
6120 6117 dataOut.groupList = dataOut.groupList[1]
6121 6118 nPairs = data_ccf.shape[0]
6122 6119 #---------------------- Coherence and Phase --------------------------
6123 6120 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
6124 6121 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
6125 6122
6126 6123 for p in range(nPairs):
6127 6124 ch0 = pairsList[p][0]
6128 6125 ch1 = pairsList[p][1]
6129 6126 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
6130 6127 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
6131 6128 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
6132 6129 coh = numpy.nanmax(coh1, axis = 0)
6133 6130 #---------------------- Radial Velocity ----------------------------
6134 6131 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
6135 6132 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
6136 6133
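        # The lag-1 ACF phase maps to a radial velocity through the standard
        # pulse-pair relation v_r = lambda*dphi/(4*pi*tau).  Worked example
        # with hypothetical numbers:
        #   lamb, tSamp = 6.0, 0.01             # ~50 MHz wavelength (m), lag (s)
        #   dphi = 0.1                          # rad of phase at the first lag
        #   6.0*0.1/(4*numpy.pi*0.01)           # -> ~4.77 m/s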
6137 6134 if allData:
6138 6135 boolMetFin = ~numpy.isnan(SNRm)
6139 6136 else:
6140 6137 #------------------------ Meteor mask ---------------------------------
6141 6138
6142 6139 #Coherence mask
6143 6140 boolMet1 = coh > 0.75
6144 6141 struc = numpy.ones((30,1))
6145 6142 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
6146 6143
6147 6144 #Derivative mask
6148 6145 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
6149 6146 boolMet2 = derPhase < 0.2
6150 6147 boolMet2 = ndimage.median_filter(boolMet2,size=5)
6151 6148 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
6152 6149 boolMetFin = boolMet1&boolMet2
6153 6150 #Creating data_param
6154 6151 coordMet = numpy.where(boolMetFin)
6155 6152
6156 6153 tmet = coordMet[0]
6157 6154 hmet = coordMet[1]
6158 6155
6159 6156 data_param = numpy.zeros((tmet.size, 6 + nPairs))
6160 6157 data_param[:,0] = utctime
6161 6158 data_param[:,1] = tmet
6162 6159 data_param[:,2] = hmet
6163 6160 data_param[:,3] = SNRm[tmet,hmet]
6164 6161 data_param[:,4] = velRad[tmet,hmet]
6165 6162 data_param[:,5] = coh[tmet,hmet]
6166 6163 data_param[:,6:] = phase[:,tmet,hmet].T
6167 6164
6168 6165 elif mode == 'DBS':
6169 6166 dataOut.groupList = numpy.arange(nChannels)
6170 6167
6171 6168 #Radial Velocities
6172 6169 phase = numpy.angle(data_acf[:,1,:,:])
6173 6170 velRad = phase*lamb/(4*numpy.pi*tSamp)
6174 6171
6175 6172 #Spectral width
6176 6173 acf1 = data_acf[:,1,:,:]
6177 6174 acf2 = data_acf[:,2,:,:]
6178 6175
6179 6176 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
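            # Spectral width from the ratio of the ACF at lags tau and 2*tau,
            # assuming a Gaussian correlation function
            #   |rho(tau)| = exp(-8*pi^2*sigma_v^2*tau^2/lambda^2)
            # so that
            #   ln(acf1/acf2) = 24*pi^2*sigma_v^2*tau^2/lambda^2
            #   => sigma_v = lambda/(2*sqrt(6)*pi*tau) * sqrt(ln(acf1/acf2))
            # which is exactly the expression used above.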
6180 6177 if allData:
6181 6178 boolMetFin = ~numpy.isnan(SNRdB)
6182 6179 else:
6183 6180 #SNR
6184 6181 boolMet1 = (SNRdB>SNRthresh) #SNR mask
6185 6182 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
6186 6183
6187 6184 #Radial velocity
6188 6185 boolMet2 = numpy.abs(velRad) < 20
6189 6186 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
6190 6187
6191 6188 #Spectral Width
6192 6189 boolMet3 = spcWidth < 30
6193 6190 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
6194 6191 boolMetFin = boolMet1&boolMet2&boolMet3
6195 6192
6196 6193 #Creating data_param
6197 6194 coordMet = numpy.where(boolMetFin)
6198 6195
6199 6196 cmet = coordMet[0]
6200 6197 tmet = coordMet[1]
6201 6198 hmet = coordMet[2]
6202 6199
6203 6200 data_param = numpy.zeros((tmet.size, 7))
6204 6201 data_param[:,0] = utctime
6205 6202 data_param[:,1] = cmet
6206 6203 data_param[:,2] = tmet
6207 6204 data_param[:,3] = hmet
6208 6205 data_param[:,4] = SNR[cmet,tmet,hmet].T
6209 6206 data_param[:,5] = velRad[cmet,tmet,hmet].T
6210 6207 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
6211 6208
6212 6209 if len(data_param) == 0:
6213 6210 dataOut.flagNoData = True
6214 6211 else:
6215 6212 dataOut.data_param = data_param
6216 6213
6217 6214 def __erase_small(self, binArray, threshX, threshY):
6218 6215 labarray, numfeat = ndimage.measurements.label(binArray)
6219 6216 binArray1 = numpy.copy(binArray)
6220 6217
6221 6218 for i in range(1,numfeat + 1):
6222 6219 auxBin = (labarray==i)
6223 6220 auxSize = auxBin.sum()
6224 6221
6225 6222 x,y = numpy.where(auxBin)
6226 6223 widthX = x.max() - x.min()
6227 6224 widthY = y.max() - y.min()
6228 6225
6229 6226 #width X: 3 seg -> 12.5*3
6230 6227 #width Y:
6231 6228
6232 6229 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
6233 6230 binArray1[auxBin] = False
6234 6231
6235 6232 return binArray1
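    # __erase_small keeps only connected regions that are large enough in both
    # axes.  A minimal sketch on a tiny hypothetical mask:
    #
    #   mask = numpy.zeros((10, 10), dtype=bool)
    #   mask[2:4, 2:4] = True                       # a small blob
    #   lab, n = ndimage.label(mask)
    #   for k in range(1, n + 1):
    #       x, y = numpy.where(lab == k)
    #       if (lab == k).sum() < 50 or x.ptp() < 3 or y.ptp() < 3:
    #           mask[lab == k] = False              # blob removed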
6236 6233
6237 6234 #--------------- Specular Meteor ----------------
6238 6235
6239 6236 class SMDetection(Operation):
6240 6237 '''
6241 6238 Function DetectMeteors()
6242 6239 Method developed following the paper:
6243 6240 Holdsworth et al. (2004)
6244 6241
6245 6242 Input:
6246 6243 self.dataOut.data_pre
6247 6244
6248 6245 centerReceiverIndex: Index of the center receiver among the channels
6249 6246
6250 6247 hei_ref: Height reference for the Beacon signal extraction
6251 6248 tauindex:
6252 6249 predefinedPhaseShifts: Predefined phase offset for the voltage signals
6253 6250
6254 6251 cohDetection: Whether to use coherent detection or not
6255 6252 cohDet_timeStep: Coherent Detection calculation time step
6256 6253 cohDet_thresh: Coherent Detection phase threshold to correct phases
6257 6254
6258 6255 noise_timeStep: Noise calculation time step
6259 6256 noise_multiple: Noise multiple to define signal threshold
6260 6257
6261 6258 multDet_timeLimit: Multiple Detection Removal time limit in seconds
6262 6259 multDet_rangeLimit: Multiple Detection Removal range limit in km
6263 6260
6264 6261 phaseThresh: Maximum phase difference between receivers to be considered a meteor
6265 6262 SNRThresh: Minimum SNR threshold of the meteor signal to be considered a meteor
6266 6263
6267 6264 hmin: Minimum Height of the meteor to use it in the further wind estimations
6268 6265 hmax: Maximum Height of the meteor to use it in the further wind estimations
6269 6266 azimuth: Azimuth angle correction
6270 6267
6271 6268 Affected:
6272 6269 self.dataOut.data_param
6273 6270
6274 6271 Rejection Criteria (Errors):
6275 6272 0: No error; analysis OK
6276 6273 1: SNR < SNR threshold
6277 6274 2: angle of arrival (AOA) ambiguously determined
6278 6275 3: AOA estimate not feasible
6279 6276 4: Large difference in AOAs obtained from different antenna baselines
6280 6277 5: echo at start or end of time series
6281 6278 6: echo less than 5 samples long; too short for analysis
6282 6279 7: echo rise exceeds 0.3s
6283 6280 8: echo decay time less than twice rise time
6284 6281 9: large power level before echo
6285 6282 10: large power level after echo
6286 6283 11: poor fit to amplitude for estimation of decay time
6287 6284 12: poor fit to CCF phase variation for estimation of radial drift velocity
6288 6285 13: height unresolvable echo: not valid height within 70 to 110 km
6289 6286 14: height ambiguous echo: more than one possible height within 70 to 110 km
6290 6287 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
6291 6288 16: oscillatory echo, indicating event most likely not an underdense echo
6292 6289
6293 6290 17: phase difference in meteor Reestimation
6294 6291
6295 6292 Data Storage:
6296 6293 Meteors for Wind Estimation (8):
6297 6294 Utc Time | Range Height
6298 6295 Azimuth Zenith errorCosDir
6299 6296 VelRad errorVelRad
6300 6297 Phase0 Phase1 Phase2 Phase3
6301 6298 TypeError
6302 6299
6303 6300 '''
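    # Hypothetical usage sketch (parameter names taken from run() below); the
    # surrounding schainpy controller/project setup is assumed and not shown:
    #
    #   op = SMDetection()
    #   op.run(dataOut,
    #          cohDetection=True, cohDet_timeStep=1, cohDet_thresh=25,
    #          noise_timeStep=4, noise_multiple=4,
    #          phaseThresh=20, SNRThresh=5,
    #          hmin=70, hmax=110, azimuth=45)
    #   # dataOut.data_param then holds one row per detected meteor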
6304 6301
6305 6302 def run(self, dataOut, hei_ref = None, tauindex = 0,
6306 6303 phaseOffsets = None,
6307 6304 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
6308 6305 noise_timeStep = 4, noise_multiple = 4,
6309 6306 multDet_timeLimit = 1, multDet_rangeLimit = 3,
6310 6307 phaseThresh = 20, SNRThresh = 5,
6311 6308 hmin = 50, hmax=150, azimuth = 0,
6312 6309 channelPositions = None) :
6313 6310
6314 6311
6315 6312 #Getting Pairslist
6316 6313 if channelPositions is None:
6317 6314 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
6318 6315 meteorOps = SMOperations()
6319 6316 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
6320 6317 heiRang = dataOut.heightList
6321 6318 #Get Beacon signal - No Beacon signal anymore
6322 6319 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
6323 6320 # see if the user put in pre defined phase shifts
6324 6321 voltsPShift = dataOut.data_pre.copy()
6325 6322
6326 6323 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
6327 6324
6328 6325 #Remove DC
6329 6326 voltsDC = numpy.mean(voltsPShift,1)
6330 6327 voltsDC = numpy.mean(voltsDC,1)
6331 6328 for i in range(voltsDC.shape[0]):
6332 6329 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
6333 6330
6334 6331 #Don't consider the last heights; they're used to calculate the Hardware Phase Shift
6335 6332
6336 6333 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
6337 6334 #Coherent Detection
6338 6335 if cohDetection:
6339 6336 #use coherent detection to get the net power
6340 6337 cohDet_thresh = cohDet_thresh*numpy.pi/180
6341 6338 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
6342 6339
6343 6340 #Non-coherent detection!
6344 6341 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
6345 6342 #********** END OF COH/NON-COH POWER CALCULATION**********************
6346 6343
6347 6344 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
6348 6345 #Get noise
6349 6346 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
6350 6347 #Get signal threshold
6351 6348 signalThresh = noise_multiple*noise
6352 6349 #Meteor echoes detection
6353 6350 listMeteors = self.__findMeteors(powerNet, signalThresh)
6354 6351 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CACULATION **********
6355 6352
6356 6353 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
6357 6354 #Parameters
6358 6355 heiRange = dataOut.heightList
6359 6356 rangeInterval = heiRange[1] - heiRange[0]
6360 6357 rangeLimit = multDet_rangeLimit/rangeInterval
6361 6358 timeLimit = multDet_timeLimit/dataOut.timeInterval
6362 6359 #Multiple detection removals
6363 6360 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
6364 6361 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
6365 6362
6366 6363 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
6367 6364 #Parameters
6368 6365 phaseThresh = phaseThresh*numpy.pi/180
6369 6366 thresh = [phaseThresh, noise_multiple, SNRThresh]
6370 6367 #Meteor reestimation (Errors N 1, 6, 12, 17)
6371 6368 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
6372 6369 #Estimation of decay times (Errors N 7, 8, 11)
6373 6370 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
6374 6371 #******************* END OF METEOR REESTIMATION *******************
6375 6372
6376 6373 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
6377 6374 #Calculating Radial Velocity (Error N 15)
6378 6375 radialStdThresh = 10
6379 6376 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
6380 6377
6381 6378 if len(listMeteors4) > 0:
6382 6379 #Setting New Array
6383 6380 date = dataOut.utctime
6384 6381 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
6385 6382
6386 6383 #Correcting phase offset
6387 6384 if phaseOffsets != None:
6388 6385 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
6389 6386 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
6390 6387
6391 6388 #Second Pairslist
6392 6389 pairsList = []
6393 6390 pairx = (0,1)
6394 6391 pairy = (2,3)
6395 6392 pairsList.append(pairx)
6396 6393 pairsList.append(pairy)
6397 6394
6398 6395 jph = numpy.array([0,0,0,0])
6399 6396 h = (hmin,hmax)
6400 6397 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
6401 6398 dataOut.data_param = arrayParameters
6402 6399
6403 6400 if arrayParameters is None:
6404 6401 dataOut.flagNoData = True
6405 6402 else:
6406 6403 dataOut.flagNoData = True
6407 6404
6408 6405 return
6409 6406
6410 6407 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
6411 6408
6412 6409 minIndex = min(newheis[0])
6413 6410 maxIndex = max(newheis[0])
6414 6411
6415 6412 voltage = voltage0[:,:,minIndex:maxIndex+1]
6416 6413 nLength = voltage.shape[1]//n
6417 6414 nMin = 0
6418 6415 nMax = 0
6419 6416 phaseOffset = numpy.zeros((len(pairslist),n))
6420 6417
6421 6418 for i in range(n):
6422 6419 nMax += nLength
6423 6420 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
6424 6421 phaseCCF = numpy.mean(phaseCCF, axis = 2)
6425 6422 phaseOffset[:,i] = phaseCCF.transpose()
6426 6423 nMin = nMax
6427 6424
6428 6425 #Remove Outliers
6429 6426 factor = 2
6430 6427 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
6431 6428 dw = numpy.std(wt,axis = 1)
6432 6429 dw = dw.reshape((dw.size,1))
6433 6430 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
6434 6431 phaseOffset[ind] = numpy.nan
6435 6432 phaseOffset = numpy.nanmean(phaseOffset, axis=1)
6436 6433
6437 6434 return phaseOffset
6438 6435
6439 6436 def __shiftPhase(self, data, phaseShift):
6440 6437 #this will shift the phase of a complex number
6441 6438 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
6442 6439 return dataShifted
6443 6440
6444 6441 def __estimatePhaseDifference(self, array, pairslist):
6445 6442 nChannel = array.shape[0]
6446 6443 nHeights = array.shape[2]
6447 6444 numPairs = len(pairslist)
6448 6445 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
6449 6446
6450 6447 #Correct phases
6451 6448 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
6452 6449 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
6453 6450
6454 6451 if indDer[0].shape[0] > 0:
6455 6452 for i in range(indDer[0].shape[0]):
6456 6453 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
6457 6454 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
6458 6455
6459 6456 #Linear
6460 6457 phaseInt = numpy.zeros((numPairs,1))
6461 6458 angAllCCF = phaseCCF[:,[0,1,3,4],0]
6462 6459 for j in range(numPairs):
6463 6460 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
6464 6461 phaseInt[j] = fit[1]
6465 6462 #Phase Differences
6466 6463 phaseDiff = phaseInt - phaseCCF[:,2,:]
6467 6464 phaseArrival = phaseInt.reshape(phaseInt.size)
6468 6465
6469 6466 #Dealias
6470 6467 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
6471 6468
6472 6469 return phaseDiff, phaseArrival
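    # __estimatePhaseDifference fits a line through the CCF phase at lags
    # [-2, -1, 1, 2] and keeps the intercept as the zero-lag phase estimate.
    # A minimal sketch with a hypothetical linear phase ramp:
    #
    #   lags = numpy.array([-2, -1, 1, 2])
    #   ang = 0.3*lags + 0.7                        # fake CCF phases (rad)
    #   fit = stats.linregress(lags, ang)
    #   fit[1]                                      # intercept -> 0.7 rad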
6473 6470
6474 6471 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
6475 6472 #this function runs the coherent detection used in Holdsworth et al. 2004 and returns the phase-corrected voltages
6476 6473 #find the phase shifts of each channel over 1 second intervals
6477 6474 #only look at ranges below the beacon signal
6478 6475 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
6479 6476 numBlocks = int(volts.shape[1]/numProfPerBlock)
6480 6477 numHeights = volts.shape[2]
6481 6478 nChannel = volts.shape[0]
6482 6479 voltsCohDet = volts.copy()
6483 6480
6484 6481 pairsarray = numpy.array(pairslist)
6485 6482 indSides = pairsarray[:,1]
6486 6483 listBlocks = numpy.array_split(volts, numBlocks, 1)
6487 6484
6488 6485 startInd = 0
6489 6486 endInd = 0
6490 6487
6491 6488 for i in range(numBlocks):
6492 6489 startInd = endInd
6493 6490 endInd = endInd + listBlocks[i].shape[1]
6494 6491
6495 6492 arrayBlock = listBlocks[i]
6496 6493
6497 6494 #Estimate the Phase Difference
6498 6495 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
6499 6496 #Phase Difference RMS
6500 6497 arrayPhaseRMS = numpy.abs(phaseDiff)
6501 6498 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
6502 6499 indPhase = numpy.where(phaseRMSaux==4)
6503 6500 #Shifting
6504 6501 if indPhase[0].shape[0] > 0:
6505 6502 for j in range(indSides.size):
6506 6503 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
6507 6504 voltsCohDet[:,startInd:endInd,:] = arrayBlock
6508 6505
6509 6506 return voltsCohDet
6510 6507
6511 6508 def __calculateCCF(self, volts, pairslist ,laglist):
6512 6509
6513 6510 nHeights = volts.shape[2]
6514 6511 nPoints = volts.shape[1]
6515 6512 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
6516 6513
6517 6514 for i in range(len(pairslist)):
6518 6515 volts1 = volts[pairslist[i][0]]
6519 6516 volts2 = volts[pairslist[i][1]]
6520 6517
6521 6518 for t in range(len(laglist)):
6522 6519 idxT = laglist[t]
6523 6520 if idxT >= 0:
6524 6521 vStacked = numpy.vstack((volts2[idxT:,:],
6525 6522 numpy.zeros((idxT, nHeights),dtype='complex')))
6526 6523 else:
6527 6524 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
6528 6525 volts2[:(nPoints + idxT),:]))
6529 6526 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
6530 6527
6531 6528 vStacked = None
6532 6529 return voltsCCF
6533 6530
6534 6531 def __getNoise(self, power, timeSegment, timeInterval):
6535 6532 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
6536 6533 numBlocks = int(power.shape[0]/numProfPerBlock)
6537 6534 numHeights = power.shape[1]
6538 6535
6539 6536 listPower = numpy.array_split(power, numBlocks, 0)
6540 6537 noise = numpy.zeros((power.shape[0], power.shape[1]))
6541 6538 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
6542 6539
6543 6540 startInd = 0
6544 6541 endInd = 0
6545 6542
6546 6543 for i in range(numBlocks): #loop over time blocks
6547 6544 startInd = endInd
6548 6545 endInd = endInd + listPower[i].shape[0]
6549 6546
6550 6547 arrayBlock = listPower[i]
6551 6548 noiseAux = numpy.mean(arrayBlock, 0)
6552 6549 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
6553 6550
6554 6551 noiseAux1 = numpy.mean(arrayBlock)
6555 6552 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
6556 6553
6557 6554 return noise, noise1
6558 6555
6559 6556 def __findMeteors(self, power, thresh):
6560 6557 nProf = power.shape[0]
6561 6558 nHeights = power.shape[1]
6562 6559 listMeteors = []
6563 6560
6564 6561 for i in range(nHeights):
6565 6562 powerAux = power[:,i]
6566 6563 threshAux = thresh[:,i]
6567 6564
6568 6565 indUPthresh = numpy.where(powerAux > threshAux)[0]
6569 6566 indDNthresh = numpy.where(powerAux <= threshAux)[0]
6570 6567
6571 6568 j = 0
6572 6569
6573 6570 while (j < indUPthresh.size - 2):
6574 6571 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
6575 6572 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
6576 6573 indDNthresh = indDNthresh[indDNAux]
6577 6574
6578 6575 if (indDNthresh.size > 0):
6579 6576 indEnd = indDNthresh[0] - 1
6580 6577 indInit = indUPthresh[j]
6581 6578
6582 6579 meteor = powerAux[indInit:indEnd + 1]
6583 6580 indPeak = meteor.argmax() + indInit
6584 6581 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
6585 6582
6586 6583 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #TODO: check this
6587 6584 j = numpy.where(indUPthresh == indEnd)[0] + 1
6588 6585 else: j+=1
6589 6586 else: j+=1
6590 6587
6591 6588 return listMeteors
6592 6589
6593 6590 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
6594 6591
6595 6592 arrayMeteors = numpy.asarray(listMeteors)
6596 6593 listMeteors1 = []
6597 6594
6598 6595 while arrayMeteors.shape[0] > 0:
6599 6596 FLAs = arrayMeteors[:,4]
6600 6597 maxFLA = FLAs.argmax()
6601 6598 listMeteors1.append(arrayMeteors[maxFLA,:])
6602 6599
6603 6600 MeteorInitTime = arrayMeteors[maxFLA,1]
6604 6601 MeteorEndTime = arrayMeteors[maxFLA,3]
6605 6602 MeteorHeight = arrayMeteors[maxFLA,0]
6606 6603
6607 6604 #Check neighborhood
6608 6605 maxHeightIndex = MeteorHeight + rangeLimit
6609 6606 minHeightIndex = MeteorHeight - rangeLimit
6610 6607 minTimeIndex = MeteorInitTime - timeLimit
6611 6608 maxTimeIndex = MeteorEndTime + timeLimit
6612 6609
6613 6610 #Check Heights
6614 6611 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
6615 6612 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
6616 6613 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
6617 6614
6618 6615 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
6619 6616
6620 6617 return listMeteors1
6621 6618
6622 6619 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
6623 6620 numHeights = volts.shape[2]
6624 6621 nChannel = volts.shape[0]
6625 6622
6626 6623 thresholdPhase = thresh[0]
6627 6624 thresholdNoise = thresh[1]
6628 6625 thresholdDB = float(thresh[2])
6629 6626
6630 6627 thresholdDB1 = 10**(thresholdDB/10)
6631 6628 pairsarray = numpy.array(pairslist)
6632 6629 indSides = pairsarray[:,1]
6633 6630
6634 6631 pairslist1 = list(pairslist)
6635 6632 pairslist1.append((0,1))
6636 6633 pairslist1.append((3,4))
6637 6634
6638 6635 listMeteors1 = []
6639 6636 listPowerSeries = []
6640 6637 listVoltageSeries = []
6641 6638 #volts holds the raw data
6642 6639
6643 6640 if frequency == 30e6:
6644 6641 timeLag = 45*10**-3
6645 6642 else:
6646 6643 timeLag = 15*10**-3
6647 6644 lag = int(numpy.ceil(timeLag/timeInterval))
6648 6645
6649 6646 for i in range(len(listMeteors)):
6650 6647
6651 6648 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
6652 6649 meteorAux = numpy.zeros(16)
6653 6650
6654 6651 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
6655 6652 mHeight = listMeteors[i][0]
6656 6653 mStart = listMeteors[i][1]
6657 6654 mPeak = listMeteors[i][2]
6658 6655 mEnd = listMeteors[i][3]
6659 6656
6660 6657 #get the volt data between the start and end times of the meteor
6661 6658 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
6662 6659 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
6663 6660
6664 6661 #3.6. Phase Difference estimation
6665 6662 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
6666 6663
6667 6664 #3.7. Phase difference removal & meteor start, peak and end times reestimated
6668 6665 #meteorVolts0.- all Channels, all Profiles
6669 6666 meteorVolts0 = volts[:,:,mHeight]
6670 6667 meteorThresh = noise[:,mHeight]*thresholdNoise
6671 6668 meteorNoise = noise[:,mHeight]
6672 6669 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
6673 6670 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
6674 6671
6675 6672 #Times reestimation
6676 6673 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
6677 6674 if mStart1.size > 0:
6678 6675 mStart1 = mStart1[-1] + 1
6679 6676
6680 6677 else:
6681 6678 mStart1 = mPeak
6682 6679
6683 6680 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
6684 6681 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
6685 6682 if mEndDecayTime1.size == 0:
6686 6683 mEndDecayTime1 = powerNet0.size
6687 6684 else:
6688 6685 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
6689 6686
6690 6687 #meteorVolts1.- all Channels, from start to end
6691 6688 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
6692 6689 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
6693 6690 if meteorVolts2.shape[1] == 0:
6694 6691 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
6695 6692 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
6696 6693 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
6697 6694 ##################### END PARAMETERS REESTIMATION #########################
6698 6695
6699 6696 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
6700 6697 if meteorVolts2.shape[1] > 0:
6701 6698 #Phase Difference re-estimation
6702 6699 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
6703 6700 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
6704 6701 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
6705 6702 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
6706 6703
6707 6704 #Phase Difference RMS
6708 6705 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
6709 6706 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
6710 6707 #Data from Meteor
6711 6708 mPeak1 = powerNet1.argmax() + mStart1
6712 6709 mPeakPower1 = powerNet1.max()
6713 6710 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
6714 6711 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
6715 6712 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
6716 6713 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
6717 6714 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
6718 6715 #Vectorize
6719 6716 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
6720 6717 meteorAux[7:11] = phaseDiffint[0:4]
6721 6718
6722 6719 #Rejection Criterions
6723 6720 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
6724 6721 meteorAux[-1] = 17
6725 6722 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
6726 6723 meteorAux[-1] = 1
6727 6724
6728 6725
6729 6726 else:
6730 6727 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
6731 6728 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
6732 6729 PowerSeries = 0
6733 6730
6734 6731 listMeteors1.append(meteorAux)
6735 6732 listPowerSeries.append(PowerSeries)
6736 6733 listVoltageSeries.append(meteorVolts1)
6737 6734
6738 6735 return listMeteors1, listPowerSeries, listVoltageSeries
6739 6736
6740 6737 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
6741 6738
6742 6739 threshError = 10
6743 6740 #Depending if it is 30 or 50 MHz
6744 6741 if frequency == 30e6:
6745 6742 timeLag = 45*10**-3
6746 6743 else:
6747 6744 timeLag = 15*10**-3
6748 6745 lag = int(numpy.ceil(timeLag/timeInterval))
6749 6746
6750 6747 listMeteors1 = []
6751 6748
6752 6749 for i in range(len(listMeteors)):
6753 6750 meteorPower = listPower[i]
6754 6751 meteorAux = listMeteors[i]
6755 6752
6756 6753 if meteorAux[-1] == 0:
6757 6754
6758 6755 try:
6759 6756 indmax = meteorPower.argmax()
6760 6757 indlag = indmax + lag
6761 6758
6762 6759 y = meteorPower[indlag:]
6763 6760 x = numpy.arange(0, y.size)*timeLag
6764 6761
6765 6762 #first guess
6766 6763 a = y[0]
6767 6764 tau = timeLag
6768 6765 #exponential fit
6769 6766 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
6770 6767 y1 = self.__exponential_function(x, *popt)
6771 6768 #error estimation
6772 6769 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
6773 6770
6774 6771 decayTime = popt[1]
6775 6772 riseTime = indmax*timeInterval
6776 6773 meteorAux[11:13] = [decayTime, error]
6777 6774
6778 6775 #Table items 7, 8 and 11
6779 6776 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
6780 6777 meteorAux[-1] = 7
6781 6778 elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than twice rise time
6782 6779 meteorAux[-1] = 8
6783 6780 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
6784 6781 meteorAux[-1] = 11
6785 6782
6786 6783
6787 6784 except:
6788 6785 meteorAux[-1] = 11
6789 6786
6790 6787
6791 6788 listMeteors1.append(meteorAux)
6792 6789
6793 6790 return listMeteors1
6794 6791
6795 6792 #Exponential Function
6796 6793
6797 6794 def __exponential_function(self, x, a, tau):
6798 6795 y = a*numpy.exp(-x/tau)
6799 6796 return y
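    # Sketch of the decay-time fit performed in __estimateDecayTime: an
    # exponential a*exp(-x/tau) is fitted to the post-peak power and popt[1]
    # is kept as the decay time.  The data below are hypothetical:
    #
    #   x = numpy.arange(30)*0.015                  # hypothetical time axis (s)
    #   y = 5.0*numpy.exp(-x/0.12)                  # synthetic underdense decay
    #   popt, pcov = optimize.curve_fit(lambda t, a, tau: a*numpy.exp(-t/tau),
    #                                   x, y, p0=[y[0], 0.015])
    #   decayTime = popt[1]                         # -> ~0.12 s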
6800 6797
6801 6798 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
6802 6799
6803 6800 pairslist1 = list(pairslist)
6804 6801 pairslist1.append((0,1))
6805 6802 pairslist1.append((3,4))
6806 6803 numPairs = len(pairslist1)
6807 6804 #Time Lag
6808 6805 timeLag = 45*10**-3
6809 6806 c = 3e8
6810 6807 lag = numpy.ceil(timeLag/timeInterval)
6811 6808 freq = 30e6
6812 6809
6813 6810 listMeteors1 = []
6814 6811
6815 6812 for i in range(len(listMeteors)):
6816 6813 meteorAux = listMeteors[i]
6817 6814 if meteorAux[-1] == 0:
6818 6815 mStart = listMeteors[i][1]
6819 6816 mPeak = listMeteors[i][2]
6820 6817 mLag = mPeak - mStart + lag
6821 6818
6822 6819 #get the volt data between the start and end times of the meteor
6823 6820 meteorVolts = listVolts[i]
6824 6821 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
6825 6822
6826 6823 #Get CCF
6827 6824 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
6828 6825
6829 6826 #Method 2
6830 6827 slopes = numpy.zeros(numPairs)
6831 6828 time = numpy.array([-2,-1,1,2])*timeInterval
6832 6829 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
6833 6830
6834 6831 #Correct phases
6835 6832 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
6836 6833 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
6837 6834
6838 6835 if indDer[0].shape[0] > 0:
6839 6836 for i in range(indDer[0].shape[0]):
6840 6837 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i]])
6841 6838 angAllCCF[indDer[0][i],indDer[1][i]+1:] += signo*2*numpy.pi
6842 6839
6843 6840 for j in range(numPairs):
6844 6841 fit = stats.linregress(time, angAllCCF[j,:])
6845 6842 slopes[j] = fit[0]
6846 6843
6847 6844 #Remove Outlier
6848 6845 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
6849 6846 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
6850 6847 meteorAux[-2] = radialError
6851 6848 meteorAux[-3] = radialVelocity
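                # Note: (0.25/numpy.pi)*(c/freq) equals lambda/(4*pi), so the
                # mean phase slope (rad/s) of the lagged CCFs maps to a radial
                # velocity exactly as in the pulse-pair relation, e.g.
                #   slope = 0.5                     # rad/s (hypothetical)
                #   -0.5*(0.25/numpy.pi)*(3e8/30e6) # -> ~-0.40 m/s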
6852 6849
6853 6850 #Setting Error
6854 6851 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
6855 6852 if numpy.abs(radialVelocity) > 200:
6856 6853 meteorAux[-1] = 15
6857 6854 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
6858 6855 elif radialError > radialStdThresh:
6859 6856 meteorAux[-1] = 12
6860 6857
6861 6858 listMeteors1.append(meteorAux)
6862 6859 return listMeteors1
6863 6860
6864 6861 def __setNewArrays(self, listMeteors, date, heiRang):
6865 6862
6866 6863 #New arrays
6867 6864 arrayMeteors = numpy.array(listMeteors)
6868 6865 arrayParameters = numpy.zeros((len(listMeteors), 13))
6869 6866
6870 6867 #Date inclusion
6871 6868 arrayDate = numpy.tile(date, (len(listMeteors)))
6872 6869
6873 6870 #Meteor array
6874 6871 #Parameters Array
6875 6872 arrayParameters[:,0] = arrayDate #Date
6876 6873 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
6877 6874 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
6878 6875 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
6879 6876 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
6880 6877
6881 6878
6882 6879 return arrayParameters
6883 6880
6884 6881 class CorrectSMPhases(Operation):
6885 6882
6886 6883 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
6887 6884
6888 6885 arrayParameters = dataOut.data_param
6889 6886 pairsList = []
6890 6887 pairx = (0,1)
6891 6888 pairy = (2,3)
6892 6889 pairsList.append(pairx)
6893 6890 pairsList.append(pairy)
6894 6891 jph = numpy.zeros(4)
6895 6892
6896 6893 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
6897 6894 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
6898 6895 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
6899 6896
6900 6897 meteorOps = SMOperations()
6901 6898 if channelPositions is None:
6902 6899 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
6903 6900 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
6904 6901
6905 6902 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
6906 6903 h = (hmin,hmax)
6907 6904
6908 6905 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
6909 6906
6910 6907 dataOut.data_param = arrayParameters
6911 6908 return
6912 6909
6913 6910 class SMPhaseCalibration(Operation):
6914 6911
6915 6912 __buffer = None
6916 6913
6917 6914 __initime = None
6918 6915
6919 6916 __dataReady = False
6920 6917
6921 6918 __isConfig = False
6922 6919
6923 6920 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
6924 6921
6925 6922 dataTime = currentTime + paramInterval
6926 6923 deltaTime = dataTime - initTime
6927 6924
6928 6925 if deltaTime >= outputInterval or deltaTime < 0:
6929 6926 return True
6930 6927
6931 6928 return False
6932 6929
6933 6930 def __getGammas(self, pairs, d, phases):
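        # For each axis, the phase offset (gamma) is estimated from the meteor phases:
        # candidate gammas (plus copies shifted by +/- pi/2) are histogrammed over
        # [-pi/2, pi/2] and the centre of a Gaussian fitted around the histogram peak
        # is taken as the offset.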
6934 6931 gammas = numpy.zeros(2)
6935 6932
6936 6933 for i in range(len(pairs)):
6937 6934
6938 6935 pairi = pairs[i]
6939 6936
6940 6937 phip3 = phases[:,pairi[0]]
6941 6938 d3 = d[pairi[0]]
6942 6939 phip2 = phases[:,pairi[1]]
6943 6940 d2 = d[pairi[1]]
6944 6941 #Calculating gamma
6945 6942 jgamma = -phip2*d3/d2 - phip3
6946 6943 jgamma = numpy.angle(numpy.exp(1j*jgamma))
6947 6944
6948 6945 #Revised distribution
6949 6946 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
6950 6947
6951 6948 #Histogram
6952 6949 nBins = 64
6953 6950 rmin = -0.5*numpy.pi
6954 6951 rmax = 0.5*numpy.pi
6955 6952 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
6956 6953
6957 6954 meteorsY = phaseHisto[0]
6958 6955 phasesX = phaseHisto[1][:-1]
6959 6956 width = phasesX[1] - phasesX[0]
6960 6957 phasesX += width/2
6961 6958
6962 6959 #Gaussian approximation
6963 6960 bpeak = meteorsY.argmax()
6964 6961 peak = meteorsY.max()
6965 6962 jmin = bpeak - 5
6966 6963 jmax = bpeak + 5 + 1
6967 6964
6968 6965 if jmin<0:
6969 6966 jmin = 0
6970 6967 jmax = 6
6971 6968 elif jmax > meteorsY.size:
6972 6969 jmin = meteorsY.size - 6
6973 6970 jmax = meteorsY.size
6974 6971
6975 6972 x0 = numpy.array([peak,bpeak,50])
6976 6973 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
6977 6974
6978 6975 #Gammas
6979 6976 gammas[i] = coeff[0][1]
6980 6977
6981 6978 return gammas
6982 6979
6983 6980 def __residualFunction(self, coeffs, y, t):
6984 6981
6985 6982 return y - self.__gauss_function(t, coeffs)
6986 6983
6987 6984 def __gauss_function(self, t, coeffs):
6988 6985
6989 6986 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
6990 6987
6991 6988 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
6992 6989 meteorOps = SMOperations()
6993 6990 nchan = 4
6994 6991 pairx = pairsList[0] #x is 0
6995 6992 pairy = pairsList[1] #y is 1
6996 6993 center_xangle = 0
6997 6994 center_yangle = 0
6998 6995 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
6999 6996 ntimes = len(range_angle)
7000 6997
7001 6998 nstepsx = 20
7002 6999 nstepsy = 20
7003 7000
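        # Coarse-to-fine grid search over candidate phase offsets: on each of the ntimes
        # passes the search window (range_angle) shrinks around the best offset found so
        # far, and "penalty" counts how many meteors come out with error == 0, so its
        # maximum marks the most self-consistent pair of offsets.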
7004 7001 for iz in range(ntimes):
7005 7002 min_xangle = -range_angle[iz]/2 + center_xangle
7006 7003 max_xangle = range_angle[iz]/2 + center_xangle
7007 7004 min_yangle = -range_angle[iz]/2 + center_yangle
7008 7005 max_yangle = range_angle[iz]/2 + center_yangle
7009 7006
7010 7007 inc_x = (max_xangle-min_xangle)/nstepsx
7011 7008 inc_y = (max_yangle-min_yangle)/nstepsy
7012 7009
7013 7010 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
7014 7011 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
7015 7012 penalty = numpy.zeros((nstepsx,nstepsy))
7016 7013 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
7017 7014 jph = numpy.zeros(nchan)
7018 7015
7019 7016 # Iterations looking for the offset
7020 7017 for iy in range(int(nstepsy)):
7021 7018 for ix in range(int(nstepsx)):
7022 7019 d3 = d[pairsList[1][0]]
7023 7020 d2 = d[pairsList[1][1]]
7024 7021 d5 = d[pairsList[0][0]]
7025 7022 d4 = d[pairsList[0][1]]
7026 7023
7027 7024 alp2 = alpha_y[iy] #gamma 1
7028 7025 alp4 = alpha_x[ix] #gamma 0
7029 7026
7030 7027 alp3 = -alp2*d3/d2 - gammas[1]
7031 7028 alp5 = -alp4*d5/d4 - gammas[0]
7032 7029 jph[pairsList[0][1]] = alp4
7033 7030 jph[pairsList[0][0]] = alp5
7034 7031 jph[pairsList[1][0]] = alp3
7035 7032 jph[pairsList[1][1]] = alp2
7036 7033 jph_array[:,ix,iy] = jph
7037 7034 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
7038 7035 error = meteorsArray1[:,-1]
7039 7036 ind1 = numpy.where(error==0)[0]
7040 7037 penalty[ix,iy] = ind1.size
7041 7038
7042 7039 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
7043 7040 phOffset = jph_array[:,i,j]
7044 7041
7045 7042 center_xangle = phOffset[pairx[1]]
7046 7043 center_yangle = phOffset[pairy[1]]
7047 7044
7048 7045 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
7049 7046 phOffset = phOffset*180/numpy.pi
7050 7047 return phOffset
7051 7048
7052 7049
7053 7050 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
7054 7051
7055 7052 dataOut.flagNoData = True
7056 7053 self.__dataReady = False
7057 7054 dataOut.outputInterval = nHours*3600
7058 7055
7059 7056 if self.__isConfig == False:
7060 7057 #Get Initial LTC time
7061 7058 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
7062 7059 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
7063 7060
7064 7061 self.__isConfig = True
7065 7062
7066 7063 if self.__buffer is None:
7067 7064 self.__buffer = dataOut.data_param.copy()
7068 7065
7069 7066 else:
7070 7067 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
7071 7068
7072 7069 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
7073 7070
7074 7071 if self.__dataReady:
7075 7072 dataOut.utctimeInit = self.__initime
7076 7073 self.__initime += dataOut.outputInterval #to erase time offset
7077 7074
7078 7075 freq = dataOut.frequency
7079 7076 c = dataOut.C #m/s
7080 7077 lamb = c/freq
7081 7078 k = 2*numpy.pi/lamb
7082 7079 azimuth = 0
7083 7080 h = (hmin, hmax)
7084 7081
7085 7082 if channelPositions is None:
7086 7083 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
7087 7084 meteorOps = SMOperations()
7088 7085 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
7089 7086
7090 7087 #Checking correct order of pairs
7091 7088 pairs = []
7092 7089 if distances[1] > distances[0]:
7093 7090 pairs.append((1,0))
7094 7091 else:
7095 7092 pairs.append((0,1))
7096 7093
7097 7094 if distances[3] > distances[2]:
7098 7095 pairs.append((3,2))
7099 7096 else:
7100 7097 pairs.append((2,3))
7101 7098
7102 7099 meteorsArray = self.__buffer
7103 7100 error = meteorsArray[:,-1]
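            # Keep meteors that are valid (0) or that only failed the AOA (3, 4) or
            # height (13, 14) checks, since those depend on the phase offsets estimated here.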
7104 7101 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
7105 7102 ind1 = numpy.where(boolError)[0]
7106 7103 meteorsArray = meteorsArray[ind1,:]
7107 7104 meteorsArray[:,-1] = 0
7108 7105 phases = meteorsArray[:,8:12]
7109 7106
7110 7107 #Calculate Gammas
7111 7108 gammas = self.__getGammas(pairs, distances, phases)
7112 7109 #Calculate Phases
7113 7110 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
7114 7111 phasesOff = phasesOff.reshape((1,phasesOff.size))
7115 7112 dataOut.data_output = -phasesOff
7116 7113 dataOut.flagNoData = False
7117 7114 self.__buffer = None
7118 7115
7119 7116
7120 7117 return
7121 7118
7122 7119 class SMOperations():
7123 7120
7124 7121 def __init__(self):
7125 7122
7126 7123 return
7127 7124
7128 7125 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
7129 7126
7130 7127 arrayParameters = arrayParameters0.copy()
7131 7128 hmin = h[0]
7132 7129 hmax = h[1]
7133 7130
7134 7131 #Calculate AOA (Error N 3, 4)
7135 7132 #JONES ET AL. 1998
7136 7133 AOAthresh = numpy.pi/8
7137 7134 error = arrayParameters[:,-1]
7138 7135 phases = -arrayParameters[:,8:12] + jph
7139 7136 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
7140 7137
7141 7138 #Calculate Heights (Error N 13 and 14)
7142 7139 error = arrayParameters[:,-1]
7143 7140 Ranges = arrayParameters[:,1]
7144 7141 zenith = arrayParameters[:,4]
7145 7142 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
7146 7143
7147 7144 #----------------------- Get Final data ------------------------------------
7148 7145
7149 7146 return arrayParameters
7150 7147
7151 7148 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
7152 7149
7153 7150 arrayAOA = numpy.zeros((phases.shape[0],3))
7154 7151 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
7155 7152
7156 7153 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
7157 7154 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
7158 7155 arrayAOA[:,2] = cosDirError
7159 7156
7160 7157 azimuthAngle = arrayAOA[:,0]
7161 7158 zenithAngle = arrayAOA[:,1]
7162 7159
7163 7160 #Setting Error
7164 7161 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
7165 7162 error[indError] = 0
7166 7163 #Number 3: AOA not feasible
7167 7164 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
7168 7165 error[indInvalid] = 3
7169 7166 #Number 4: Large difference in AOAs obtained from different antenna baselines
7170 7167 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
7171 7168 error[indInvalid] = 4
7172 7169 return arrayAOA, error
7173 7170
7174 7171 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
7175 7172
7176 7173 #Initializing some variables
7177 7174 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
7178 7175 ang_aux = ang_aux.reshape(1,ang_aux.size)
7179 7176
7180 7177 cosdir = numpy.zeros((arrayPhase.shape[0],2))
7181 7178 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
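        # Two estimates per axis: the phase sum over the pair gives a coarse, unambiguous
        # direction cosine (cosdir0, short effective baseline d0 - d1); the phase difference,
        # tried with every 2*pi*n candidate in ang_aux, gives the finer estimate (cosdir)
        # that lies closest to cosdir0.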
7182 7179
7183 7180
7184 7181 for i in range(2):
7185 7182 ph0 = arrayPhase[:,pairsList[i][0]]
7186 7183 ph1 = arrayPhase[:,pairsList[i][1]]
7187 7184 d0 = distances[pairsList[i][0]]
7188 7185 d1 = distances[pairsList[i][1]]
7189 7186
7190 7187 ph0_aux = ph0 + ph1
7191 7188 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
7192
7193 7189 #First Estimation
7194 7190 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
7195 7191
7196 7192 #Most-Accurate Second Estimation
7197 7193 phi1_aux = ph0 - ph1
7198 7194 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
7199 7195 #Direction Cosine 1
7200 7196 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
7201 7197
7202 7198 #Searching the correct Direction Cosine
7203 7199 cosdir0_aux = cosdir0[:,i]
7204 7200 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
7205 7201 #Minimum Distance
7206 7202 cosDiff = (cosdir1 - cosdir0_aux)**2
7207 7203 indcos = cosDiff.argmin(axis = 1)
7208 7204 #Saving Value obtained
7209 7205 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
7210 7206
7211 7207 return cosdir0, cosdir
7212 7208
7213 7209 def __calculateAOA(self, cosdir, azimuth):
7214 7210 cosdirX = cosdir[:,0]
7215 7211 cosdirY = cosdir[:,1]
7216 7212
7217 7213 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
7218 7214 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth#0 deg north, 90 deg east
7219 7215 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
7220 7216
7221 7217 return angles
7222 7218
7223 7219 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
7224 7220
7225 7221 Ramb = 375 #Range ambiguity in km: Ramb = c/(2*PRF)
7226 7222 Re = 6371 #Earth radius in km
7227 7223 heights = numpy.zeros(Ranges.shape)
7228 7224
7229 7225 R_aux = numpy.array([0,1,2])*Ramb
7230 7226 R_aux = R_aux.reshape(1,R_aux.size)
7231 7227
7232 7228 Ranges = Ranges.reshape(Ranges.size,1)
7233 7229
7234 7230 Ri = Ranges + R_aux
7235 7231 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
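        # Each measured range is de-aliased by adding 0, 1 or 2 range ambiguities (R_aux) and
        # converted to height with the law of cosines on the (Re, Ri) triangle:
        # h = sqrt(Re**2 + Ri**2 + 2*Re*Ri*cos(zenith)) - Re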
7236 7232
7237 7233 #Count how many candidate heights fall between minHeight and maxHeight
7238 7234 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
7239 7235 ind_h = numpy.where(h_bool == 1)[0]
7240 7236
7241 7237 hCorr = hi[ind_h, :]
7242 7238 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
7243 7239
7244 7240 hCorr = hi[ind_hCorr][:len(ind_h)]
7245 7241 heights[ind_h] = hCorr
7246 7242
7247 7243 #Setting Error
7248 7244 #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
7249 7245 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
7250 7246 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
7251 7247 error[indError] = 0
7252 7248 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
7253 7249 error[indInvalid2] = 14
7254 7250 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
7255 7251 error[indInvalid1] = 13
7256 7252
7257 7253 return heights, error
7258 7254
7259 7255 def getPhasePairs(self, channelPositions):
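        # From the five antenna positions, find for each axis (X, Y) the two shortest
        # colinear baselines that share a common (centre) channel, and return the ordered
        # (centre, outer) pairs together with their baseline lengths.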
7260 7256 chanPos = numpy.array(channelPositions)
7261 7257 listOper = list(itertools.combinations(list(range(5)),2))
7262 7258
7263 7259 distances = numpy.zeros(4)
7264 7260 axisX = []
7265 7261 axisY = []
7266 7262 distX = numpy.zeros(3)
7267 7263 distY = numpy.zeros(3)
7268 7264 ix = 0
7269 7265 iy = 0
7270 7266
7271 7267 pairX = numpy.zeros((2,2))
7272 7268 pairY = numpy.zeros((2,2))
7273 7269
7274 7270 for i in range(len(listOper)):
7275 7271 pairi = listOper[i]
7276 7272
7277 7273 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
7278 7274
7279 7275 if posDif[0] == 0:
7280 7276 axisY.append(pairi)
7281 7277 distY[iy] = posDif[1]
7282 7278 iy += 1
7283 7279 elif posDif[1] == 0:
7284 7280 axisX.append(pairi)
7285 7281 distX[ix] = posDif[0]
7286 7282 ix += 1
7287 7283
7288 7284 for i in range(2):
7289 7285 if i==0:
7290 7286 dist0 = distX
7291 7287 axis0 = axisX
7292 7288 else:
7293 7289 dist0 = distY
7294 7290 axis0 = axisY
7295 7291
7296 7292 side = numpy.argsort(dist0)[:-1]
7297 7293 axis0 = numpy.array(axis0)[side,:]
7298 7294 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
7299 7295 axis1 = numpy.unique(numpy.reshape(axis0,4))
7300 7296 side = axis1[axis1 != chanC]
7301 7297 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
7302 7298 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
7303 7299 if diff1<0:
7304 7300 chan2 = side[0]
7305 7301 d2 = numpy.abs(diff1)
7306 7302 chan1 = side[1]
7307 7303 d1 = numpy.abs(diff2)
7308 7304 else:
7309 7305 chan2 = side[1]
7310 7306 d2 = numpy.abs(diff2)
7311 7307 chan1 = side[0]
7312 7308 d1 = numpy.abs(diff1)
7313 7309
7314 7310 if i==0:
7315 7311 chanCX = chanC
7316 7312 chan1X = chan1
7317 7313 chan2X = chan2
7318 7314 distances[0:2] = numpy.array([d1,d2])
7319 7315 else:
7320 7316 chanCY = chanC
7321 7317 chan1Y = chan1
7322 7318 chan2Y = chan2
7323 7319 distances[2:4] = numpy.array([d1,d2])
7324 7320
7325 7321 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
7326 7322
7327 7323 return pairslist, distances
7328 7324
7329 7325 class IGRFModel(Operation):
7330 7326 '''
7331 7327 Written by R. Flores
7332 7328 '''
7333 7329 """Operation to calculate Geomagnetic parameters.
7334 7330
7335 7331 Parameters:
7336 7332 -----------
7337 7333 None
7338 7334
7339 7335 Example
7340 7336 --------
7341 7337
7342 7338 op = proc_unit.addOperation(name='IGRFModel', optype='other')
7343 7339
7344 7340 """
7345 7341
7346 7342 def __init__(self, **kwargs):
7347 7343
7348 7344 Operation.__init__(self, **kwargs)
7349 7345
7350 7346 self.aux=1
7351 7347
7352 7348 def run(self,dataOut):
7353 7349
7354 7350 try:
7355 7351 from schainpy.model.proc import mkfact_short_2020_2
7356 7352 except:
7357 7353 log.warning('You should install the "mkfact_short_2020_2" module to process the IGRF Model')
7358 7354
7359 7355 if self.aux==1:
7360 7356
7361 7357 #dataOut.TimeBlockSeconds_First_Time=time.mktime(time.strptime(dataOut.TimeBlockDate))
7362 7358 #### we do not use dataOut.datatime.ctime() because it's the time of the second (next) block
7363 7359 dataOut.TimeBlockSeconds_First_Time=dataOut.TimeBlockSeconds
7364 7360 dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_First_Time)
7365 7361 dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0
7366 7362 dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0
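            # year is expressed as a decimal year (whole year plus day-of-year fraction)
            # and ut as decimal hours of the first block time.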
7367 7363
7368 7364 self.aux=0
7369 7365 dh = dataOut.heightList[1]-dataOut.heightList[0]
7370 7366 #dataOut.h=numpy.arange(0.0,15.0*dataOut.MAXNRANGENDT,15.0,dtype='float32')
7371 7367 dataOut.h=numpy.arange(0.0,dh*dataOut.MAXNRANGENDT,dh,dtype='float32')
7372 7368 dataOut.bfm=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
7373 7369 dataOut.bfm=numpy.array(dataOut.bfm,order='F')
7374 7370 dataOut.thb=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
7375 7371 dataOut.thb=numpy.array(dataOut.thb,order='F')
7376 7372 dataOut.bki=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
7377 7373 dataOut.bki=numpy.array(dataOut.bki,order='F')
7378 7374
7379 7375 mkfact_short_2020_2.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT)
7380 7376
7381 7377 return dataOut
7382 7378
7383 7379 class MergeProc(ProcessingUnit):
7384 7380
7385 7381 def __init__(self):
7386 7382 ProcessingUnit.__init__(self)
7387 7383
7388 7384 def run(self, attr_data, attr_data_2 = None, attr_data_3 = None, attr_data_4 = None, attr_data_5 = None, mode=0):
7389 7385
7390 7386 self.dataOut = getattr(self, self.inputs[0])
7391 7387 data_inputs = [getattr(self, attr) for attr in self.inputs]
7392 7388
7393 7389 if mode==0:
7394 7390 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
7395 7391 setattr(self.dataOut, attr_data, data)
7396 7392
7397 7393 if mode==1: #Hybrid
7398 7394 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7399 7395 #setattr(self.dataOut, attr_data, data)
7400 7396 setattr(self.dataOut, 'dataLag_spc', [getattr(data, attr_data) for data in data_inputs][0])
7401 7397 setattr(self.dataOut, 'dataLag_spc_LP', [getattr(data, attr_data) for data in data_inputs][1])
7402 7398 setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
7403 7399 setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
7404 7400 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
7405 7401 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
7406 7402 '''
7407 7403 print(self.dataOut.dataLag_spc_LP.shape)
7408 7404 print(self.dataOut.dataLag_cspc_LP.shape)
7409 7405 exit(1)
7410 7406 '''
7411 7407
7412 7408 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
7413 7409 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
7414 7410 '''
7415 7411 print("Merge")
7416 7412 print(numpy.shape(self.dataOut.dataLag_spc))
7417 7413 print(numpy.shape(self.dataOut.dataLag_spc_LP))
7418 7414 print(numpy.shape(self.dataOut.dataLag_cspc))
7419 7415 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
7420 7416 exit(1)
7421 7417 '''
7422 7418 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
7423 7419 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
7424 7420 #exit(1)
7425 7421 #print(self.dataOut.NDP)
7426 7422 #print(self.dataOut.nNoiseProfiles)
7427 7423
7428 7424 #self.dataOut.nIncohInt_LP = 128
7429 7425 self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
7430 7426 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt
7431 7427 self.dataOut.NLAG = 16
7432 7428 self.dataOut.NRANGE = 200
7433 7429 self.dataOut.NSCAN = 128
7434 7430 #print(numpy.shape(self.dataOut.data_spc))
7435 7431
7436 7432 #exit(1)
7437 7433
7438 7434 if mode==2: #HAE 2022
7439 7435 data = numpy.sum([getattr(data, attr_data) for data in data_inputs],axis=0)
7440 7436 setattr(self.dataOut, attr_data, data)
7441 7437
7442 7438 self.dataOut.nIncohInt *= 2
7443 7439 #meta = self.dataOut.getFreqRange(1)/1000.
7444 7440 self.dataOut.freqRange = self.dataOut.getFreqRange(1)/1000.
7445 7441
7446 7442 #exit(1)
7447 7443
7448 7444 if mode==4: #Hybrid LP-SSheightProfiles
7449 7445 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7450 7446 #setattr(self.dataOut, attr_data, data)
7451 7447 setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[0], attr_data)) #DP
7452 7448 setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[0], attr_data_2)) #DP
7453 7449 setattr(self.dataOut, 'dataLag_spc_LP', getattr(data_inputs[1], attr_data_3)) #LP
7454 7450 #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP
7455 7451 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7456 7452 setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7457 7453 #print("Merge data_acf: ",self.dataOut.data_acf.shape)
7458 7454
7459 7455 #self.dataOut.nIncohInt_LP = 128
7460 7456 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
7461 7457 self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP
7462 7458 self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP
7463 7459 self.dataOut.NSCAN = 128
7464 7460 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN
7465 7461 #print("sahpi",self.dataOut.nIncohInt_LP)
7466 7462 #exit(1)
7467 7463 self.dataOut.NLAG = 16
7468 7464 self.dataOut.NLAG = self.dataOut.data_acf.shape[1]
7469
7470 7465 self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1]
7471 7466
7472 7467 #print(numpy.shape(self.dataOut.data_spc))
7473 7468
7474 7469 #exit(1)
7475 7470 if mode==5:
7476 7471 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
7477 7472 setattr(self.dataOut, attr_data, data)
7478 7473 data = numpy.concatenate([getattr(data, attr_data_2) for data in data_inputs])
7479 7474 setattr(self.dataOut, attr_data_2, data)
7480 7475
7481 7476 if mode==6: #Hybrid Spectra-Voltage
7482 7477 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7483 7478 #setattr(self.dataOut, attr_data, data)
7484 7479 setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[1], attr_data)) #DP
7485 7480 setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[1], attr_data_2)) #DP
7486 7481 setattr(self.dataOut, 'output_LP_integrated', getattr(data_inputs[0], attr_data_3)) #LP
7487 7482 #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP
7488 7483 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7489 7484 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
7490 7485 #print("Merge data_acf: ",self.dataOut.data_acf.shape)
7491 7486 #print(self.dataOut.NSCAN)
7492 7487 self.dataOut.nIncohInt = int(self.dataOut.NAVG * self.dataOut.nint)
7493 7488 #print(self.dataOut.dataLag_spc.shape)
7494 7489 self.dataOut.nProfiles = self.dataOut.nProfiles_DP = self.dataOut.dataLag_spc.shape[1]
7495 7490 '''
7496 7491 #self.dataOut.nIncohInt_LP = 128
7497 7492 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
7498 7493 self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP
7499 7494 self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP
7500 7495 self.dataOut.NSCAN = 128
7501 7496 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN
7502 7497 #print("sahpi",self.dataOut.nIncohInt_LP)
7503 7498 #exit(1)
7504 7499 self.dataOut.NLAG = 16
7505 7500 self.dataOut.NLAG = self.dataOut.data_acf.shape[1]
7506 7501 self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1]
7507 7502 '''
7508 7503 #print(numpy.shape(self.dataOut.data_spc))
7509 7504 #print("*************************GOOD*************************")
7510 7505 #exit(1)
7511 7506
7512 7507 if mode==11: #MST ISR
7513 7508 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
7514 7509 #setattr(self.dataOut, attr_data, data)
7515 7510 #setattr(self.dataOut, 'ph2', [getattr(data, attr_data) for data in data_inputs][1])
7516 7511 #setattr(self.dataOut, 'dphi', [getattr(data, attr_data_2) for data in data_inputs][1])
7517 7512 #setattr(self.dataOut, 'sdp2', [getattr(data, attr_data_3) for data in data_inputs][1])
7518 7513
7519 7514 setattr(self.dataOut, 'ph2', getattr(data_inputs[1], attr_data)) #DP
7520 7515 setattr(self.dataOut, 'dphi', getattr(data_inputs[1], attr_data_2)) #DP
7521 7516 setattr(self.dataOut, 'sdp2', getattr(data_inputs[1], attr_data_3)) #DP
7522 7517
7523 7518 print("MST Density", numpy.shape(self.dataOut.ph2))
7524 7519 print("cf MST: ", self.dataOut.cf)
7525 7520 #exit(1)
7526 7521 #print("MST Density", self.dataOut.ph2[116:283])
7527 7522 print("MST Density", self.dataOut.ph2[80:120])
7528 7523 print("MST dPhi", self.dataOut.dphi[80:120])
7529 7524 self.dataOut.ph2 *= self.dataOut.cf#0.0008136899
7530 7525 #print("MST Density", self.dataOut.ph2[116:283])
7531 7526 self.dataOut.sdp2 *= 0#self.dataOut.cf#0.0008136899
7532 7527 #print("MST Density", self.dataOut.ph2[116:283])
7533 7528 print("MST Density", self.dataOut.ph2[80:120])
7534 7529 self.dataOut.NSHTS = int(numpy.shape(self.dataOut.ph2)[0])
7535 7530 dH = self.dataOut.heightList[1]-self.dataOut.heightList[0]
7536 7531 dH /= self.dataOut.windowOfFilter
7537 7532 self.dataOut.heightList = numpy.arange(0,self.dataOut.NSHTS)*dH + dH
7538 7533 #print("heightList: ", self.dataOut.heightList)
7539 7534 self.dataOut.NDP = self.dataOut.NSHTS
7540 7535 #exit(1)
7541 7536 #print(self.dataOut.heightList)
7542 7537
7543 7538 class MST_Den_Conv(Operation):
7544 7539 '''
7545 7540 Written by R. Flores
7546 7541 '''
7547 7542 """Operation to calculate Geomagnetic parameters.
7548 7543
7549 7544 Parameters:
7550 7545 -----------
7551 7546 None
7552 7547
7553 7548 Example
7554 7549 --------
7555 7550
7556 7551 op = proc_unit.addOperation(name='MST_Den_Conv', optype='other')
7557 7552
7558 7553 """
7559 7554
7560 7555 def __init__(self, **kwargs):
7561 7556
7562 7557 Operation.__init__(self, **kwargs)
7563 7558
7564 7559 def run(self,dataOut):
7565 7560
7566 7561 dataOut.PowDen = numpy.zeros((1,dataOut.NDP))
7567 7562 dataOut.PowDen[0] = numpy.copy(dataOut.ph2[:dataOut.NDP])
7568 7563
7569 7564 dataOut.FarDen = numpy.zeros((1,dataOut.NDP))
7570 7565 dataOut.FarDen[0] = numpy.copy(dataOut.dphi[:dataOut.NDP])
7571 7566 print("pow den shape", numpy.shape(dataOut.PowDen))
7572 7567 print("far den shape", numpy.shape(dataOut.FarDen))
7573 7568 return dataOut
7569
7570 class addTxPower(Operation):
7571 '''
7572 Transmitted power level integrated into dataOut (AMISR)
7573 Resolution: 1 min
7574 The power files follow the pattern power_YYYYMMDD.csv
7575 '''
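    # Assumed CSV layout (illustrative sketch, not a spec from this file): one header row,
    # then one row per minute with a timestamp and the transmitted power, which setup()
    # divides by the duty cycle:
    #
    #   datetime,power
    #   2022-10-25 00:00:00,12500.0
    #   2022-10-25 00:01:00,12480.0
    #
    # Typical use (illustrative; parameter names follow run()):
    #   op = proc_unit.addOperation(name='addTxPower', optype='other')
    #   op.addParameter(name='path', value='/data/power/power_20221025.csv')
    #   op.addParameter(name='DS', value='0.05', format='float')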
7576 __slots__ =('isConfig','dataDatetimes','txPowers')
7577 def __init__(self):
7578
7579 Operation.__init__(self)
7580 self.isConfig = False
7581 self.dataDatetimes = []
7582 self.txPowers = []
7583
7584 def setup(self, powerFile, dutyCycle):
        import schainpy.admin  # local import: schainpy.admin is not imported at the top of this module
7585 if not os.path.isfile(powerFile):
7586 raise schainpy.admin.SchainError('There is no file named: {}'.format(powerFile))
7587 return
7588
7589 with open(powerFile, newline='') as pfile:
7590 reader = csv.reader(pfile, delimiter=',', quotechar='|')
7591 next(reader)
7592 for row in reader:
7593 #'2022-10-25 00:00:00'
7594 self.dataDatetimes.append(datetime.datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S"))
7595 self.txPowers.append(float(row[1])/dutyCycle)
7596 self.isConfig = True
7597
7598 def run(self, dataOut, path, DS=0.05):
7599
7600 #dataOut.flagNoData = True
7601
7602 if not(self.isConfig):
7603 self.setup(path, DS)
7604
7605 dataDate = datetime.datetime.utcfromtimestamp(dataOut.utctime).replace(second=0, microsecond=0)#no seconds
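        # truncated to the minute so it can be matched against the 1-minute entries of the power file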
7606 try:
7607 indx = self.dataDatetimes.index(dataDate)
7608 dataOut.txPower = self.txPowers[indx]
7609 except ValueError:
7610 log.warning("No power available for the datetime {}, setting power to 0 W".format(dataDate), self.name)
7611 dataOut.txPower = 0
7612
7613 return dataOut