Drifts tested
joabAM -
r1740:7f5b085e2124
@@ -1,1156 +1,1159
1 1 # Copyright (c) 2012-2020 Jicamarca Radio Observatory
2 2 # All rights reserved.
3 3 #
4 4 # Distributed under the terms of the BSD 3-clause license.
5 5 """Definition of diferent Data objects for different types of data
6 6
7 7 Here you will find the different data objects for the different types
8 8 of data; these data objects must be used as dataIn or dataOut objects in
9 9 processing units and operations. Currently the supported data objects are:
10 10 Voltage, Spectra, SpectraHeis, Fits, Correlation and Parameters
11 11 """
12 12
13 13 import copy
14 14 import numpy
15 15 import datetime
16 16 import json
17 17
18 18 import schainpy.admin
19 19 from schainpy.utils import log
20 20 from .jroheaderIO import SystemHeader, RadarControllerHeader,ProcessingHeader
21 21 from schainpy.model.data import _noise
22 22 SPEED_OF_LIGHT = 3e8
23 23
24 24
25 25 def getNumpyDtype(dataTypeCode):
26 26
27 27 if dataTypeCode == 0:
28 28 numpyDtype = numpy.dtype([('real', '<i1'), ('imag', '<i1')])
29 29 elif dataTypeCode == 1:
30 30 numpyDtype = numpy.dtype([('real', '<i2'), ('imag', '<i2')])
31 31 elif dataTypeCode == 2:
32 32 numpyDtype = numpy.dtype([('real', '<i4'), ('imag', '<i4')])
33 33 elif dataTypeCode == 3:
34 34 numpyDtype = numpy.dtype([('real', '<i8'), ('imag', '<i8')])
35 35 elif dataTypeCode == 4:
36 36 numpyDtype = numpy.dtype([('real', '<f4'), ('imag', '<f4')])
37 37 elif dataTypeCode == 5:
38 38 numpyDtype = numpy.dtype([('real', '<f8'), ('imag', '<f8')])
39 39 else:
40 40 raise ValueError('dataTypeCode was not defined')
41 41
42 42 return numpyDtype
43 43
44 44
45 45 def getDataTypeCode(numpyDtype):
46 46
47 47 if numpyDtype == numpy.dtype([('real', '<i1'), ('imag', '<i1')]):
48 48 datatype = 0
49 49 elif numpyDtype == numpy.dtype([('real', '<i2'), ('imag', '<i2')]):
50 50 datatype = 1
51 51 elif numpyDtype == numpy.dtype([('real', '<i4'), ('imag', '<i4')]):
52 52 datatype = 2
53 53 elif numpyDtype == numpy.dtype([('real', '<i8'), ('imag', '<i8')]):
54 54 datatype = 3
55 55 elif numpyDtype == numpy.dtype([('real', '<f4'), ('imag', '<f4')]):
56 56 datatype = 4
57 57 elif numpyDtype == numpy.dtype([('real', '<f8'), ('imag', '<f8')]):
58 58 datatype = 5
59 59 else:
60 60 datatype = None
61 61
62 62 return datatype
63 63
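getNumpyDtype and getDataTypeCode map between the on-disk data-type codes (0-5) and structured NumPy dtypes with 'real'/'imag' fields. A minimal round-trip sketch, assuming both helpers are in scope (imported from this module):

import numpy

dt = getNumpyDtype(2)                             # code 2 -> little-endian int32 real/imag pairs
assert dt == numpy.dtype([('real', '<i4'), ('imag', '<i4')])
assert getDataTypeCode(dt) == 2                   # the inverse mapping recovers the original code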
64 64
65 65 def hildebrand_sekhon(data, navg):
66 66 """
67 67 This method is for the objective determination of the noise level in Doppler spectra. This
68 68 implementation technique is based on the fact that the standard deviation of the spectral
69 69 densities is equal to the mean spectral density for white Gaussian noise
70 70
71 71 Inputs:
72 72 data : spectral density values (one per height)
73 73 navg : number of averages
74 74
75 75 Return:
76 76 mean : noise level
77 77 """
78 78
79 79 sortdata = numpy.sort(data, axis=None)
80 80 '''
81 81 lenOfData = len(sortdata)
82 82 nums_min = lenOfData*0.2
83 83
84 84 if nums_min <= 5:
85 85
86 86 nums_min = 5
87 87
88 88 sump = 0.
89 89 sumq = 0.
90 90
91 91 j = 0
92 92 cont = 1
93 93
94 94 while((cont == 1)and(j < lenOfData)):
95 95
96 96 sump += sortdata[j]
97 97 sumq += sortdata[j]**2
98 98
99 99 if j > nums_min:
100 100 rtest = float(j)/(j-1) + 1.0/navg
101 101 if ((sumq*j) > (rtest*sump**2)):
102 102 j = j - 1
103 103 sump = sump - sortdata[j]
104 104 sumq = sumq - sortdata[j]**2
105 105 cont = 0
106 106
107 107 j += 1
108 108
109 109 lnoise = sump / j
110 110 '''
111 111 return _noise.hildebrand_sekhon(sortdata, navg)
112 112
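The commented-out block above documents the Hildebrand-Sekhon criterion that the compiled _noise extension now implements: spectral densities are sorted in ascending order and accumulated until the ratio of second to first moments exceeds the threshold expected for navg-averaged white noise. A pure-Python sketch of that reference algorithm, for illustration only (the production path is _noise.hildebrand_sekhon):

import numpy

def hildebrand_sekhon_py(data, navg):
    # Follows the commented-out reference implementation above.
    sortdata = numpy.sort(data, axis=None)
    lenOfData = len(sortdata)
    nums_min = lenOfData * 0.2
    if nums_min <= 5:
        nums_min = 5
    sump = 0.
    sumq = 0.
    j = 0
    cont = 1
    while (cont == 1) and (j < lenOfData):
        sump += sortdata[j]
        sumq += sortdata[j] ** 2
        if j > nums_min:
            rtest = float(j) / (j - 1) + 1.0 / navg
            if (sumq * j) > (rtest * sump ** 2):
                j = j - 1
                sump = sump - sortdata[j]
                sumq = sumq - sortdata[j] ** 2
                cont = 0
        j += 1
    return sump / j

# Noise-only input: the estimate tracks the mean spectral density (~1.0 here).
noise = numpy.random.default_rng(0).exponential(1.0, size=(1000, 10)).mean(axis=1)
print(hildebrand_sekhon_py(noise, navg=10))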
113 113
114 114 class Beam:
115 115
116 116 def __init__(self):
117 117 self.codeList = []
118 118 self.azimuthList = []
119 119 self.zenithList = []
120 120
121 121
122 122 class GenericData(object):
123 123
124 124 flagNoData = True
125 125
126 126 def copy(self, inputObj=None):
127 127
128 128 if inputObj == None:
129 129 return copy.deepcopy(self)
130 130
131 131 for key in list(inputObj.__dict__.keys()):
132 132
133 133 attribute = inputObj.__dict__[key]
134 134
135 135 # If this attribute is a tuple or list
136 136 if type(inputObj.__dict__[key]) in (tuple, list):
137 137 self.__dict__[key] = attribute[:]
138 138 continue
139 139
140 140 # If this attribute is another object or instance
141 141 if hasattr(attribute, '__dict__'):
142 142 self.__dict__[key] = attribute.copy()
143 143 continue
144 144
145 145 self.__dict__[key] = inputObj.__dict__[key]
146 146
147 147 def deepcopy(self):
148 148
149 149 return copy.deepcopy(self)
150 150
151 151 def isEmpty(self):
152 152
153 153 return self.flagNoData
154 154
155 155 def isReady(self):
156 156
157 157 return not self.flagNoData
158 158
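A minimal usage sketch for GenericData.copy(): called without arguments it returns a deep copy of the object; called with an input object it copies attributes one by one (tuples and lists are sliced, nested objects are copied through their own copy()). The attribute name someList below is hypothetical, used only for illustration:

src = GenericData()
src.flagNoData = False
src.someList = [1, 2, 3]           # hypothetical attribute, for illustration only

dst = GenericData()
dst.copy(src)                      # attribute-by-attribute copy from src
assert dst.someList == [1, 2, 3] and dst.someList is not src.someList

clone = src.copy()                 # no argument: returns a full deep copy
assert clone.isReady()             # flagNoData was copied, so the clone reports data ready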
159 159
160 160 class JROData(GenericData):
161 161
162 162 systemHeaderObj = SystemHeader()
163 163 radarControllerHeaderObj = RadarControllerHeader()
164 164 type = None
165 165 datatype = None # dtype but in string
166 166 nProfiles = None
167 167 heightList = None
168 168 channelList = None
169 169 flagDiscontinuousBlock = False
170 170 useLocalTime = False
171 171 utctime = None
172 172 timeZone = None
173 173 dstFlag = None
174 174 errorCount = None
175 175 blocksize = None
176 176 flagDecodeData = False # assume the data is not decoded
177 177 flagDeflipData = False # assume the data has not been deflipped
178 178 flagShiftFFT = False
179 179 nCohInt = None
180 180 windowOfFilter = 1
181 181 C = 3e8
182 182 frequency = 49.92e6
183 183 realtime = False
184 184 beacon_heiIndexList = None
185 185 last_block = None
186 186 blocknow = None
187 187 azimuth = None
188 188 zenith = None
189 189 beam = Beam()
190 190 profileIndex = None
191 191 error = None
192 192 data = None
193 193 nmodes = None
194 194 metadata_list = ['heightList', 'timeZone', 'type']
195 195
196 196 ippFactor = 1 # Added to correct the freq and vel range for AMISR data
197 197 useInputBuffer = False
198 198 buffer_empty = True
199 199 codeList = []
200 200 azimuthList = []
201 201 elevationList = []
202 202 last_noise = None
203 203 __ipp = None
204 204 __ippSeconds = None
205 205 sampled_heightsFFT = None
206 206 pulseLength_TxA = None
207 207 deltaHeight = None
208 208 __code = None
209 209 __nCode = None
210 210 __nBaud = None
211 211 unitsDescription = "The units of the parameters follow the International System of Units (seconds, meters, hertz, ...), except \
212 212 the parameters related to distances, such as heightList or heightResolution, which are in km"
213 213
214 214
215 215
216 216 def __str__(self):
217 217
218 218 return '{} - {}'.format(self.type, self.datatime())
219 219
220 220 def getNoise(self):
221 221
222 222 raise NotImplementedError
223 223
224 224 @property
225 225 def nChannels(self):
226 226
227 227 return len(self.channelList)
228 228
229 229 @property
230 230 def channelIndexList(self):
231 231
232 232 return list(range(self.nChannels))
233 233
234 234 @property
235 235 def nHeights(self):
236 236
237 237 return len(self.heightList)
238 238
239 239 def getDeltaH(self):
240 240
241 241 return self.heightList[1] - self.heightList[0]
242 242
243 243 @property
244 244 def ltctime(self):
245 245
246 246 if self.useLocalTime:
247 247 return self.utctime - self.timeZone * 60
248 248
249 249 return self.utctime
250 250
251 251 @property
252 252 def datatime(self):
253 253
254 254 datatimeValue = datetime.datetime.utcfromtimestamp(self.ltctime)
255 255 return datatimeValue
256 256
257 257 def getTimeRange(self):
258 258
259 259 datatime = []
260 260
261 261 datatime.append(self.ltctime)
262 262 datatime.append(self.ltctime + self.timeInterval + 1)
263 263
264 264 datatime = numpy.array(datatime)
265 265
266 266 return datatime
267 267
268 268 def getFmaxTimeResponse(self):
269 269
270 270 period = (10**-6) * self.getDeltaH() / (0.15)
271 271
272 272 PRF = 1. / (period * self.nCohInt)
273 273
274 274 fmax = PRF
275 275
276 276 return fmax
277 277
278 278 def getFmax(self):
279 279 PRF = 1. / (self.__ippSeconds * self.nCohInt)
280 280
281 281 fmax = PRF
282 282 return fmax
283 283
284 284 def getVmax(self):
285 285
286 286 _lambda = self.C / self.frequency
287 287
288 288 vmax = self.getFmax() * _lambda / 2
289 289
290 290 return vmax
291 291
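getFmax() and getVmax() give the aliasing limits of the Doppler spectrum: the maximum unambiguous frequency span equals the effective PRF (reduced by coherent integration), and the corresponding radial velocity follows from the radar wavelength. A worked sketch with assumed values (only the 49.92 MHz operating frequency is taken from this class):

SPEED_OF_LIGHT = 3e8
ippSeconds = 1e-3                                    # assumed inter-pulse period [s]
nCohInt = 2                                          # assumed coherent integrations
frequency = 49.92e6                                  # operating frequency [Hz]

PRF = 1.0 / (ippSeconds * nCohInt)                   # effective pulse repetition frequency
fmax = PRF                                           # unambiguous frequency span -> 500 Hz
vmax = fmax * (SPEED_OF_LIGHT / frequency) / 2       # unambiguous radial velocity -> ~1502 m/s
print(fmax, vmax)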
292 292 ## Radar Controller Header must be immutable
293 293 @property
294 294 def ippSeconds(self):
295 295 '''
296 296 '''
297 297 #return self.radarControllerHeaderObj.ippSeconds
298 298 return self.__ippSeconds
299 299
300 300 @ippSeconds.setter
301 301 def ippSeconds(self, ippSeconds):
302 302 '''
303 303 '''
304 304 #self.radarControllerHeaderObj.ippSeconds = ippSeconds
305 305 self.__ippSeconds = ippSeconds
306 306 self.__ipp = ippSeconds*SPEED_OF_LIGHT/2000.0
307 307
308 308 @property
309 309 def code(self):
310 310 '''
311 311 '''
312 312 # return self.radarControllerHeaderObj.code
313 313 return self.__code
314 314
315 315 @code.setter
316 316 def code(self, code):
317 317 '''
318 318 '''
319 319 # self.radarControllerHeaderObj.code = code
320 320 self.__code = code
321 321
322 322 @property
323 323 def nCode(self):
324 324 '''
325 325 '''
326 326 # return self.radarControllerHeaderObj.nCode
327 327 return self.__nCode
328 328
329 329 @nCode.setter
330 330 def nCode(self, ncode):
331 331 '''
332 332 '''
333 333 # self.radarControllerHeaderObj.nCode = ncode
334 334 self.__nCode = ncode
335 335
336 336 @property
337 337 def nBaud(self):
338 338 '''
339 339 '''
340 340 # return self.radarControllerHeaderObj.nBaud
341 341 return self.__nBaud
342 342
343 343 @nBaud.setter
344 344 def nBaud(self, nbaud):
345 345 '''
346 346 '''
347 347 # self.radarControllerHeaderObj.nBaud = nbaud
348 348 self.__nBaud = nbaud
349 349
350 350 @property
351 351 def ipp(self):
352 352 '''
353 353 '''
354 354 # return self.radarControllerHeaderObj.ipp
355 355 return self.__ipp
356 356
357 357 @ipp.setter
358 358 def ipp(self, ipp):
359 359 '''
360 360 '''
361 361 # self.radarControllerHeaderObj.ipp = ipp
362 362 self.__ipp = ipp
363 363
364 364 @property
365 365 def metadata(self):
366 366 '''
367 367 '''
368 368
369 369 return {attr: getattr(self, attr) for attr in self.metadata_list}
370 370
371 371
372 372 class Voltage(JROData):
373 373
374 374 dataPP_POW = None
375 375 dataPP_DOP = None
376 376 dataPP_WIDTH = None
377 377 dataPP_SNR = None
378 378
379 379 # To use oper
380 380 flagProfilesByRange = False
381 381 nProfilesByRange = None
382 382 max_nIncohInt = 1
383 383
384 384 def __init__(self):
385 385 '''
386 386 Constructor
387 387 '''
388 388
389 389 self.useLocalTime = True
390 390 self.radarControllerHeaderObj = RadarControllerHeader()
391 391 self.systemHeaderObj = SystemHeader()
392 392 self.processingHeaderObj = ProcessingHeader()
393 393 self.type = "Voltage"
394 394 self.data = None
395 395 self.nProfiles = None
396 396 self.heightList = None
397 397 self.channelList = None
398 398 self.flagNoData = True
399 399 self.flagDiscontinuousBlock = False
400 400 self.utctime = None
401 401 self.timeZone = 0
402 402 self.dstFlag = None
403 403 self.errorCount = None
404 404 self.nCohInt = None
405 405 self.blocksize = None
406 406 self.flagCohInt = False
407 407 self.flagDecodeData = False # assume the data is not decoded
408 408 self.flagDeflipData = False # assume the data has not been deflipped
409 409 self.flagShiftFFT = False
410 410 self.flagDataAsBlock = False # assume the data is read profile by profile
411 411 self.profileIndex = 0
412 412 self.ippFactor=1
413 413 self.metadata_list = ['type', 'heightList', 'timeZone', 'nProfiles', 'channelList', 'nCohInt',
414 414 'code', 'nCode', 'nBaud', 'ippSeconds', 'ipp']
415 415
416 416 def getNoisebyHildebrand(self, channel=None, ymin_index=None, ymax_index=None):
417 417 """
418 418 Determine the noise level using the Hildebrand-Sekhon method
419 419
420 420 Return:
421 421 noiselevel
422 422 """
423 423
424 424 if channel != None:
425 425 data = self.data[channel,ymin_index:ymax_index]
426 426 nChannels = 1
427 427 else:
428 428 data = self.data[:,ymin_index:ymax_index]
429 429 nChannels = self.nChannels
430 430
431 431 noise = numpy.zeros(nChannels)
432 432 power = data * numpy.conjugate(data)
433 433
434 434 for thisChannel in range(nChannels):
435 435 if nChannels == 1:
436 436 daux = power[:].real
437 437 else:
438 438 daux = power[thisChannel, :].real
439 439 noise[thisChannel] = hildebrand_sekhon(daux, self.nCohInt)
440 440
441 441 return noise
442 442
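A sketch of the same recipe used by getNoisebyHildebrand() on synthetic voltages: form the per-sample power data * conj(data) and feed its real part, channel by channel, to the module-level hildebrand_sekhon() (this assumes the compiled _noise extension is available):

import numpy

nChannels, nHeights, nCohInt = 2, 500, 1
rng = numpy.random.default_rng(0)
data = (rng.normal(size=(nChannels, nHeights)) +
        1j * rng.normal(size=(nChannels, nHeights)))        # synthetic noise-only voltages

power = (data * numpy.conjugate(data)).real
noise = numpy.array([hildebrand_sekhon(power[ch], nCohInt) for ch in range(nChannels)])
print(noise)                                                 # roughly the mean noise power (~2 here)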
443 443 def getNoise(self, type=1, channel=None,ymin_index=None, ymax_index=None):
444 444
445 445 if type == 1:
446 446 noise = self.getNoisebyHildebrand(channel,ymin_index, ymax_index)
447 447
448 448 return noise
449 449
450 450 def getPower(self, channel=None):
451 451
452 452 if channel != None:
453 453 data = self.data[channel]
454 454 else:
455 455 data = self.data
456 456
457 457 power = data * numpy.conjugate(data)
458 458 powerdB = 10 * numpy.log10(power.real)
459 459 powerdB = numpy.squeeze(powerdB)
460 460
461 461 return powerdB
462 @property
463 def data_pow(self):
464 return self.getPower()
462 465
463 466 @property
464 467 def timeInterval(self):
465 468
466 469 return self.ippSeconds * self.nCohInt
467 470
468 471 noise = property(getNoise, doc="I'm the 'noise' property.")
469 472
470 473
471 474 class Spectra(JROData):
472 475
473 476 data_outlier = None
474 477 flagProfilesByRange = False
475 478 nProfilesByRange = None
476 479
477 480 def __init__(self):
478 481 '''
479 482 Constructor
480 483 '''
481 484
482 485 self.data_dc = None
483 486 self.data_spc = None
484 487 self.data_cspc = None
485 488 self.useLocalTime = True
486 489 self.radarControllerHeaderObj = RadarControllerHeader()
487 490 self.systemHeaderObj = SystemHeader()
488 491 self.processingHeaderObj = ProcessingHeader()
489 492 self.type = "Spectra"
490 493 self.timeZone = 0
491 494 self.nProfiles = None
492 495 self.heightList = None
493 496 self.channelList = None
494 497 self.pairsList = None
495 498 self.flagNoData = True
496 499 self.flagDiscontinuousBlock = False
497 500 self.utctime = None
498 501 self.nCohInt = None
499 502 self.nIncohInt = None
500 503 self.blocksize = None
501 504 self.nFFTPoints = None
502 505 self.wavelength = None
503 506 self.flagDecodeData = False # assume the data is not decoded
504 507 self.flagDeflipData = False # assume the data has not been deflipped
505 508 self.flagShiftFFT = False
506 509 self.ippFactor = 1
507 510 self.beacon_heiIndexList = []
508 511 self.noise_estimation = None
509 512 self.codeList = []
510 513 self.azimuthList = []
511 514 self.elevationList = []
512 515 self.metadata_list = ['type', 'heightList', 'timeZone', 'pairsList', 'channelList', 'nCohInt',
513 516 'code', 'nCode', 'nBaud', 'ippSeconds', 'ipp','nIncohInt', 'nFFTPoints', 'nProfiles']
514 517
515 518 def getNoisebyHildebrand(self, xmin_index=None, xmax_index=None, ymin_index=None, ymax_index=None):
516 519 """
517 520 Determine the noise level using the Hildebrand-Sekhon method
518 521
519 522 Return:
520 523 noiselevel
521 524 """
522 525
523 526 noise = numpy.zeros(self.nChannels)
524 527
525 528 for channel in range(self.nChannels):
526 529 daux = self.data_spc[channel,
527 530 xmin_index:xmax_index, ymin_index:ymax_index]
528 531 # noise[channel] = hildebrand_sekhon(daux, self.nIncohInt)
529 532 noise[channel] = hildebrand_sekhon(daux, self.max_nIncohInt[channel])
530 533
531 534 return noise
532 535
533 536 def getNoise(self, xmin_index=None, xmax_index=None, ymin_index=None, ymax_index=None):
534 537
535 538 if self.noise_estimation is not None:
536 539 # this was estimated by getNoise Operation defined in jroproc_spectra.py
537 540 return self.noise_estimation
538 541 else:
539 542 noise = self.getNoisebyHildebrand(
540 543 xmin_index, xmax_index, ymin_index, ymax_index)
541 544 return noise
542 545
543 546 def getFreqRangeTimeResponse(self, extrapoints=0):
544 547
545 548 deltafreq = self.getFmaxTimeResponse() / (self.nFFTPoints * self.ippFactor)
546 549 freqrange = deltafreq * (numpy.arange(self.nFFTPoints + extrapoints) - self.nFFTPoints / 2.) - deltafreq / 2
547 550
548 551 return freqrange
549 552
550 553 def getAcfRange(self, extrapoints=0):
551 554
552 555 deltafreq = 10. / (self.getFmax() / (self.nFFTPoints * self.ippFactor))
553 556 freqrange = deltafreq * (numpy.arange(self.nFFTPoints + extrapoints) -self.nFFTPoints / 2.) - deltafreq / 2
554 557
555 558 return freqrange
556 559
557 560 def getFreqRange(self, extrapoints=0):
558 561
559 562 deltafreq = self.getFmax() / (self.nFFTPoints * self.ippFactor)
560 563 freqrange = deltafreq * (numpy.arange(self.nFFTPoints + extrapoints) -self.nFFTPoints / 2.) - deltafreq / 2
561 564
562 565 return freqrange
563 566
564 567 def getVelRange(self, extrapoints=0):
565 568
566 569 deltav = self.getVmax() / (self.nFFTPoints * self.ippFactor)
567 570 velrange = deltav * (numpy.arange(self.nFFTPoints + extrapoints) - self.nFFTPoints / 2.)
568 571
569 572 if self.nmodes:
570 573 return velrange/self.nmodes
571 574 else:
572 575 return velrange
573 576
574 577 @property
575 578 def nPairs(self):
576 579
577 580 return len(self.pairsList)
578 581
579 582 @property
580 583 def pairsIndexList(self):
581 584
582 585 return list(range(self.nPairs))
583 586
584 587 @property
585 588 def normFactor(self):
586 589
587 590 pwcode = 1
588 591 if self.flagDecodeData:
589 592 try:
590 593 pwcode = numpy.sum(self.code[0]**2)
591 594 except Exception as e:
592 595 log.warning("Failed pwcode read, setting to 1")
593 596 pwcode = 1
594 597 #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
595 598 normFactor = self.nProfiles * self.nIncohInt * self.nCohInt * pwcode * self.windowOfFilter
596 599 if self.flagProfilesByRange:
597 600 normFactor *= (self.nProfilesByRange/self.nProfilesByRange.max())
598 601 return normFactor
599 602
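The normFactor property converts accumulated spectra into calibrated power: the product of the number of profiles, the incoherent and coherent integrations, the decoding code power and the filter window. A sketch of the arithmetic with assumed values (a 3-baud unit-amplitude code, 128 profiles, 10 incoherent and 2 coherent integrations):

import numpy

code = numpy.array([[1, -1, 1]])                  # assumed code, amplitude 1
pwcode = numpy.sum(code[0] ** 2)                  # = 3, code power applied when decoding
nProfiles, nIncohInt, nCohInt, windowOfFilter = 128, 10, 2, 1

normFactor = nProfiles * nIncohInt * nCohInt * pwcode * windowOfFilter
print(normFactor)                                 # 7680; data_spc / normFactor gives calibrated power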
600 603 @property
601 604 def flag_cspc(self):
602 605
603 606 if self.data_cspc is None:
604 607 return True
605 608
606 609 return False
607 610
608 611 @property
609 612 def flag_dc(self):
610 613
611 614 if self.data_dc is None:
612 615 return True
613 616
614 617 return False
615 618
616 619 @property
617 620 def timeInterval(self):
618 621
619 622 timeInterval = self.ippSeconds * self.nCohInt * self.nIncohInt * self.nProfiles * self.ippFactor
620 623 if self.nmodes:
621 624 return self.nmodes*timeInterval
622 625 else:
623 626 return timeInterval
624 627
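The timeInterval property is the span of data folded into one integrated spectrum (ippSeconds * nCohInt * nIncohInt * nProfiles * ippFactor). A short worked example with assumed values:

ippSeconds, nCohInt, nIncohInt, nProfiles, ippFactor = 1e-3, 2, 10, 128, 1
timeInterval = ippSeconds * nCohInt * nIncohInt * nProfiles * ippFactor
print(timeInterval)                               # 2.56 seconds per integrated spectrum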
625 628 def getPower(self):
626 629
627 630 factor = self.normFactor
628 631 power = numpy.zeros( (self.nChannels,self.nHeights) )
629 632 for ch in range(self.nChannels):
630 633 z = None
631 634 if hasattr(factor,'shape'):
632 635 if factor.ndim > 1:
633 636 z = self.data_spc[ch]/factor[ch]
634 637 else:
635 638 z = self.data_spc[ch]/factor
636 639 else:
637 640 z = self.data_spc[ch]/factor
638 641 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
639 642 avg = numpy.average(z, axis=0)
640 643 power[ch] = 10 * numpy.log10(avg)
641 644 return power
642 645
643 646 @property
644 647 def max_nIncohInt(self):
645 648
646 649 ints = numpy.zeros(self.nChannels)
647 650 for ch in range(self.nChannels):
648 651 if hasattr(self.nIncohInt,'shape'):
649 652 if self.nIncohInt.ndim > 1:
650 653 ints[ch,] = self.nIncohInt[ch].max()
651 654 else:
652 655 ints[ch,] = self.nIncohInt
653 656 self.nIncohInt = int(self.nIncohInt)
654 657 else:
655 658 ints[ch,] = self.nIncohInt
656 659
657 660 return ints
658 661
659 662 def getCoherence(self, pairsList=None, phase=False):
660 663
661 664 z = []
662 665 if pairsList is None:
663 666 pairsIndexList = self.pairsIndexList
664 667 else:
665 668 pairsIndexList = []
666 669 for pair in pairsList:
667 670 if pair not in self.pairsList:
668 671 raise ValueError("Pair %s is not in dataOut.pairsList" % (
669 672 pair))
670 673 pairsIndexList.append(self.pairsList.index(pair))
671 674 for i in range(len(pairsIndexList)):
672 675 pair = self.pairsList[pairsIndexList[i]]
673 676 ccf = numpy.average(self.data_cspc[pairsIndexList[i], :, :], axis=0)
674 677 powa = numpy.average(self.data_spc[pair[0], :, :], axis=0)
675 678 powb = numpy.average(self.data_spc[pair[1], :, :], axis=0)
676 679 avgcoherenceComplex = ccf / numpy.sqrt(powa * powb)
677 680 if phase:
678 681 data = numpy.arctan2(avgcoherenceComplex.imag,
679 682 avgcoherenceComplex.real) * 180 / numpy.pi
680 683 else:
681 684 data = numpy.abs(avgcoherenceComplex)
682 685
683 686 z.append(data)
684 687
685 688 return numpy.array(z)
686 689
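getCoherence() normalizes the frequency-averaged cross-spectrum of each pair by its auto-spectra, returning either the coherence magnitude or the phase in degrees. A self-contained sketch of that computation on synthetic arrays (shapes follow the data_spc[channel, nFFTPoints, nHeights] / data_cspc[pair, nFFTPoints, nHeights] convention):

import numpy

nFFT, nHeights = 16, 8
rng = numpy.random.default_rng(1)
spc0 = rng.random((nFFT, nHeights)) + 1.0
spc1 = rng.random((nFFT, nHeights)) + 1.0
cspc01 = numpy.sqrt(spc0 * spc1) * 0.7 * numpy.exp(1j * 0.3)   # ~70 % correlated, 0.3 rad phase

ccf = numpy.average(cspc01, axis=0)
powa = numpy.average(spc0, axis=0)
powb = numpy.average(spc1, axis=0)
coherence = numpy.abs(ccf / numpy.sqrt(powa * powb))           # ~0.7 at every height
phase = numpy.arctan2(ccf.imag, ccf.real) * 180 / numpy.pi     # ~17.2 degrees
print(coherence.round(2), phase.round(1))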
687 690 def setValue(self, value):
688 691
689 692 print("This property should not be initialized", value)
690 693
691 694 return
692 695
693 696 noise = property(getNoise, setValue, doc="I'm the 'noise' property.")
694 697
695 698
696 699 class SpectraHeis(Spectra):
697 700
698 701 def __init__(self):
699 702
700 703 self.radarControllerHeaderObj = RadarControllerHeader()
701 704 self.systemHeaderObj = SystemHeader()
702 705 self.type = "SpectraHeis"
703 706 self.nProfiles = None
704 707 self.heightList = None
705 708 self.channelList = None
706 709 self.flagNoData = True
707 710 self.flagDiscontinuousBlock = False
708 711 self.utctime = None
709 712 self.blocksize = None
710 713 self.profileIndex = 0
711 714 self.nCohInt = 1
712 715 self.nIncohInt = 1
713 716
714 717 @property
715 718 def normFactor(self):
716 719 pwcode = 1
717 720 if self.flagDecodeData:
718 721 pwcode = numpy.sum(self.code[0]**2)
719 722
720 723 normFactor = self.nIncohInt * self.nCohInt * pwcode
721 724
722 725 return normFactor
723 726
724 727 @property
725 728 def timeInterval(self):
726 729
727 730 return self.ippSeconds * self.nCohInt * self.nIncohInt
728 731
729 732
730 733 class Fits(JROData):
731 734
732 735 def __init__(self):
733 736
734 737 self.type = "Fits"
735 738 self.nProfiles = None
736 739 self.heightList = None
737 740 self.channelList = None
738 741 self.flagNoData = True
739 742 self.utctime = None
740 743 self.nCohInt = 1
741 744 self.nIncohInt = 1
742 745 self.useLocalTime = True
743 746 self.profileIndex = 0
744 747 self.timeZone = 0
745 748
746 749 def getTimeRange(self):
747 750
748 751 datatime = []
749 752
750 753 datatime.append(self.ltctime)
751 754 datatime.append(self.ltctime + self.timeInterval)
752 755
753 756 datatime = numpy.array(datatime)
754 757
755 758 return datatime
756 759
757 760 def getChannelIndexList(self):
758 761
759 762 return list(range(self.nChannels))
760 763
761 764 def getNoise(self, type=1):
762 765
763 766
764 767 if type == 1:
765 768 noise = self.getNoisebyHildebrand()
766 769
767 770 if type == 2:
768 771 noise = self.getNoisebySort()
769 772
770 773 if type == 3:
771 774 noise = self.getNoisebyWindow()
772 775
773 776 return noise
774 777
775 778 @property
776 779 def timeInterval(self):
777 780
778 781 timeInterval = self.ippSeconds * self.nCohInt * self.nIncohInt
779 782
780 783 return timeInterval
781 784
782 785 @property
783 786 def ippSeconds(self):
784 787 '''
785 788 '''
786 789 return self.ipp_sec
787 790
788 791 noise = property(getNoise, doc="I'm the 'noise' property.")
789 792
790 793
791 794 class Correlation(JROData):
792 795
793 796 def __init__(self):
794 797 '''
795 798 Constructor
796 799 '''
797 800 self.radarControllerHeaderObj = RadarControllerHeader()
798 801 self.systemHeaderObj = SystemHeader()
799 802 self.type = "Correlation"
800 803 self.data = None
801 804 self.dtype = None
802 805 self.nProfiles = None
803 806 self.heightList = None
804 807 self.channelList = None
805 808 self.flagNoData = True
806 809 self.flagDiscontinuousBlock = False
807 810 self.utctime = None
808 811 self.timeZone = 0
809 812 self.dstFlag = None
810 813 self.errorCount = None
811 814 self.blocksize = None
812 815 self.flagDecodeData = False # assume the data is not decoded
813 816 self.flagDeflipData = False # assume the data has not been deflipped
814 817 self.pairsList = None
815 818 self.nPoints = None
816 819
817 820 def getPairsList(self):
818 821
819 822 return self.pairsList
820 823
821 824 def getNoise(self, mode=2):
822 825
823 826 indR = numpy.where(self.lagR == 0)[0][0]
824 827 indT = numpy.where(self.lagT == 0)[0][0]
825 828
826 829 jspectra0 = self.data_corr[:, :, indR, :]
827 830 jspectra = copy.copy(jspectra0)
828 831
829 832 num_chan = jspectra.shape[0]
830 833 num_hei = jspectra.shape[2]
831 834
832 835 freq_dc = jspectra.shape[1] / 2
833 836 ind_vel = numpy.array([-2, -1, 1, 2]) + freq_dc
834 837
835 838 if ind_vel[0] < 0:
836 839 ind_vel[list(range(0, 1))] = ind_vel[list(
837 840 range(0, 1))] + self.num_prof
838 841
839 842 if mode == 1:
840 843 jspectra[:, freq_dc, :] = (
841 844 jspectra[:, ind_vel[1], :] + jspectra[:, ind_vel[2], :]) / 2 # CORRECTION
842 845
843 846 if mode == 2:
844 847
845 848 vel = numpy.array([-2, -1, 1, 2])
846 849 xx = numpy.zeros([4, 4])
847 850
848 851 for fil in range(4):
849 852 xx[fil, :] = vel[fil]**numpy.asarray(list(range(4)))
850 853
851 854 xx_inv = numpy.linalg.inv(xx)
852 855 xx_aux = xx_inv[0, :]
853 856
854 857 for ich in range(num_chan):
855 858 yy = jspectra[ich, ind_vel, :]
856 859 jspectra[ich, freq_dc, :] = numpy.dot(xx_aux, yy)
857 860
858 861 junkid = jspectra[ich, freq_dc, :] <= 0
859 862 cjunkid = sum(junkid)
860 863
861 864 if cjunkid.any():
862 865 jspectra[ich, freq_dc, junkid.nonzero()] = (
863 866 jspectra[ich, ind_vel[1], junkid] + jspectra[ich, ind_vel[2], junkid]) / 2
864 867
865 868 noise = jspectra0[:, freq_dc, :] - jspectra[:, freq_dc, :]
866 869
867 870 return noise
868 871
869 872 @property
870 873 def timeInterval(self):
871 874
872 875 return self.ippSeconds * self.nCohInt * self.nProfiles
873 876
874 877 def splitFunctions(self):
875 878
876 879 pairsList = self.pairsList
877 880 ccf_pairs = []
878 881 acf_pairs = []
879 882 ccf_ind = []
880 883 acf_ind = []
881 884 for l in range(len(pairsList)):
882 885 chan0 = pairsList[l][0]
883 886 chan1 = pairsList[l][1]
884 887
885 888 # Getting autocorrelation pairs
886 889 if chan0 == chan1:
887 890 acf_pairs.append(chan0)
888 891 acf_ind.append(l)
889 892 else:
890 893 ccf_pairs.append(pairsList[l])
891 894 ccf_ind.append(l)
892 895
893 896 data_acf = self.data_cf[acf_ind]
894 897 data_ccf = self.data_cf[ccf_ind]
895 898
896 899 return acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf
897 900
898 901 @property
899 902 def normFactor(self):
900 903 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.splitFunctions()
901 904 acf_pairs = numpy.array(acf_pairs)
902 905 normFactor = numpy.zeros((self.nPairs, self.nHeights))
903 906
904 907 for p in range(self.nPairs):
905 908 pair = self.pairsList[p]
906 909
907 910 ch0 = pair[0]
908 911 ch1 = pair[1]
909 912
910 913 ch0_max = numpy.max(data_acf[acf_pairs == ch0, :, :], axis=1)
911 914 ch1_max = numpy.max(data_acf[acf_pairs == ch1, :, :], axis=1)
912 915 normFactor[p, :] = numpy.sqrt(ch0_max * ch1_max)
913 916
914 917 return normFactor
915 918
916 919
917 920 class Parameters(Spectra):
918 921
919 922 groupList = None # List of Pairs, Groups, etc
920 923 data_param = None # Parameters obtained
921 924 data_pre = None # Data Pre Parametrization
922 925 data_SNR = None # Signal to Noise Ratio
923 926 abscissaList = None # Abscissa, can be velocities, lags or time
924 927 utctimeInit = None # Initial UTC time
925 928 paramInterval = None # Time interval to calculate Parameters in seconds
926 929 useLocalTime = True
927 930 # Fitting
928 931 data_error = None # Error of the estimation
929 932 constants = None
930 933 library = None
931 934 # Output signal
932 935 outputInterval = None # Time interval to calculate output signal in seconds
933 936 data_output = None # Out signal
934 937 nAvg = None
935 938 noise_estimation = None
936 939 GauSPC = None # Fit gaussian SPC
937 940
938 941 data_outlier = None
939 942 data_vdrift = None
940 943 radarControllerHeaderTxt = None # radar controller header as text
941 944 txPower = None
942 945 flagProfilesByRange = False
943 946 nProfilesByRange = None
944 947
945 948
946 949 def __init__(self):
947 950 '''
948 951 Constructor
949 952 '''
950 953 self.radarControllerHeaderObj = RadarControllerHeader()
951 954 self.systemHeaderObj = SystemHeader()
952 955 self.processingHeaderObj = ProcessingHeader()
953 956 self.type = "Parameters"
954 957 self.timeZone = 0
955 958
956 959 def getTimeRange1(self, interval):
957 960
958 961 datatime = []
959 962
960 963 if self.useLocalTime:
961 964 time1 = self.utctimeInit - self.timeZone * 60
962 965 else:
963 966 time1 = self.utctimeInit
964 967
965 968 datatime.append(time1)
966 969 datatime.append(time1 + interval)
967 970 datatime = numpy.array(datatime)
968 971
969 972 return datatime
970 973
971 974 @property
972 975 def timeInterval(self):
973 976
974 977 if hasattr(self, 'timeInterval1'):
975 978 return self.timeInterval1
976 979 else:
977 980 return self.paramInterval
978 981
979 982 def setValue(self, value):
980 983
981 984 print("This property should not be initialized")
982 985
983 986 return
984 987
985 988 def getNoise(self):
986 989
987 990 return self.spc_noise
988 991
989 992 noise = property(getNoise, setValue, doc="I'm the 'Noise' property.")
990 993
991 994
992 995 class PlotterData(object):
993 996 '''
994 997 Object to hold data to be plotted
995 998 '''
996 999
997 1000 MAXNUMX = 200
998 1001 MAXNUMY = 200
999 1002
1000 1003 def __init__(self, code, exp_code, localtime=True):
1001 1004
1002 1005 self.key = code
1003 1006 self.exp_code = exp_code
1004 1007 self.ready = False
1005 1008 self.flagNoData = False
1006 1009 self.localtime = localtime
1007 1010 self.data = {}
1008 1011 self.meta = {}
1009 1012 self.__heights = []
1010 1013
1011 1014 def __str__(self):
1012 1015 dum = ['{}{}'.format(key, self.shape(key)) for key in self.data]
1013 1016 return 'Data[{}][{}]'.format(';'.join(dum), len(self.times))
1014 1017
1015 1018 def __len__(self):
1016 1019 return len(self.data)
1017 1020
1018 1021 def __getitem__(self, key):
1019 1022 if isinstance(key, int):
1020 1023 return self.data[self.times[key]]
1021 1024 elif isinstance(key, str):
1022 1025 ret = numpy.array([self.data[x][key] for x in self.times])
1023 1026 if ret.ndim > 1:
1024 1027 ret = numpy.swapaxes(ret, 0, 1)
1025 1028 return ret
1026 1029
1027 1030 def __contains__(self, key):
1028 1031 return key in self.data[self.min_time]
1029 1032
1030 1033 def setup(self):
1031 1034 '''
1032 1035 Configure object
1033 1036 '''
1034 1037 self.type = ''
1035 1038 self.ready = False
1036 1039 del self.data
1037 1040 self.data = {}
1038 1041 self.__heights = []
1039 1042 self.__all_heights = set()
1040 1043
1041 1044 def shape(self, key):
1042 1045 '''
1043 1046 Get the shape of the one-element data for the given key
1044 1047 '''
1045 1048
1046 1049 if len(self.data[self.min_time][key]):
1047 1050 return self.data[self.min_time][key].shape
1048 1051 return (0,)
1049 1052
1050 1053 def update(self, data, tm, meta={}):
1051 1054 '''
1052 1055 Update data object with new dataOut
1053 1056 '''
1054 1057
1055 1058 self.data[tm] = data
1056 1059
1057 1060 for key, value in meta.items():
1058 1061 setattr(self, key, value)
1059 1062
1060 1063 def normalize_heights(self):
1061 1064 '''
1062 1065 Ensure the data has the same dimensions for different heightList values
1063 1066 '''
1064 1067
1065 1068 H = numpy.array(list(self.__all_heights))
1066 1069 H.sort()
1067 1070 for key in self.data:
1068 1071 shape = self.shape(key)[:-1] + H.shape
1069 1072 for tm, obj in list(self.data[key].items()):
1070 1073 h = self.__heights[self.times.tolist().index(tm)]
1071 1074 if H.size == h.size:
1072 1075 continue
1073 1076 index = numpy.where(numpy.in1d(H, h))[0]
1074 1077 dummy = numpy.zeros(shape) + numpy.nan
1075 1078 if len(shape) == 2:
1076 1079 dummy[:, index] = obj
1077 1080 else:
1078 1081 dummy[index] = obj
1079 1082 self.data[key][tm] = dummy
1080 1083
1081 1084 self.__heights = [H for tm in self.times]
1082 1085
1083 1086 def jsonify(self, tm, plot_name, plot_type, decimate=False):
1084 1087 '''
1085 1088 Convert data to json
1086 1089 '''
1087 1090
1088 1091 meta = {}
1089 1092 meta['xrange'] = []
1090 1093 dy = int(len(self.yrange)/self.MAXNUMY) + 1
1091 1094 tmp = self.data[tm][self.key]
1092 1095 shape = tmp.shape
1093 1096 if len(shape) == 2:
1094 1097 data = self.roundFloats(self.data[tm][self.key][::, ::dy].tolist())
1095 1098 elif len(shape) == 3:
1096 1099 dx = int(self.data[tm][self.key].shape[1]/self.MAXNUMX) + 1
1097 1100 data = self.roundFloats(
1098 1101 self.data[tm][self.key][::, ::dx, ::dy].tolist())
1099 1102 meta['xrange'] = self.roundFloats(self.xrange[2][::dx].tolist())
1100 1103 else:
1101 1104 data = self.roundFloats(self.data[tm][self.key].tolist())
1102 1105
1103 1106 ret = {
1104 1107 'plot': plot_name,
1105 1108 'code': self.exp_code,
1106 1109 'time': float(tm),
1107 1110 'data': data,
1108 1111 }
1109 1112 meta['type'] = plot_type
1110 1113 meta['interval'] = float(self.interval)
1111 1114 meta['localtime'] = self.localtime
1112 1115 meta['yrange'] = self.roundFloats(self.yrange[::dy].tolist())
1113 1116 meta.update(self.meta)
1114 1117 ret['metadata'] = meta
1115 1118 return json.dumps(ret)
1116 1119
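jsonify() decimates the arrays before serializing so that no more than roughly MAXNUMX x MAXNUMY points are sent per plot. A sketch of how the stride factors fall out for an assumed full-resolution shape:

MAXNUMX, MAXNUMY = 200, 200
nFFTPoints, nHeights = 1024, 600                  # assumed full-resolution shape

dx = int(nFFTPoints / MAXNUMX) + 1                # -> 6, keep every 6th frequency bin
dy = int(nHeights / MAXNUMY) + 1                  # -> 4, keep every 4th height
decimated = (len(range(0, nFFTPoints, dx)), len(range(0, nHeights, dy)))
print(dx, dy, decimated)                          # 6 4 (171, 150)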
1117 1120 @property
1118 1121 def times(self):
1119 1122 '''
1120 1123 Return the list of times of the current data
1121 1124 '''
1122 1125
1123 1126 ret = [t for t in self.data]
1124 1127 ret.sort()
1125 1128 return numpy.array(ret)
1126 1129
1127 1130 @property
1128 1131 def min_time(self):
1129 1132 '''
1130 1133 Return the minimum time value
1131 1134 '''
1132 1135
1133 1136 return self.times[0]
1134 1137
1135 1138 @property
1136 1139 def max_time(self):
1137 1140 '''
1138 1141 Return the maximum time value
1139 1142 '''
1140 1143
1141 1144 return self.times[-1]
1142 1145
1143 1146 # @property
1144 1147 # def heights(self):
1145 1148 # '''
1146 1149 # Return the list of heights of the current data
1147 1150 # '''
1148 1151
1149 1152 # return numpy.array(self.__heights[-1])
1150 1153
1151 1154 @staticmethod
1152 1155 def roundFloats(obj):
1153 1156 if isinstance(obj, list):
1154 1157 return list(map(PlotterData.roundFloats, obj))
1155 1158 elif isinstance(obj, float):
1156 1159 return round(obj, 2)
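roundFloats() walks nested lists recursively and rounds every float to two decimals; note that values which are neither lists nor floats (for example ints) fall through and come back as None, since the method has no fallback return. A minimal sketch:

print(PlotterData.roundFloats([[1.2345, 2.0], 3.14159]))   # [[1.23, 2.0], 3.14]
print(PlotterData.roundFloats(7))                          # None (no branch matches an int)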
@@ -1,437 +1,437
1 1 import os
2 2 import datetime
3 3 import numpy
4 4
5 5 from schainpy.model.graphics.jroplot_base import Plot, plt
6 6 from schainpy.model.graphics.jroplot_spectra import SpectraPlot, RTIPlot, CoherencePlot, SpectraCutPlot
7 7 from schainpy.utils import log
8 8
9 9 EARTH_RADIUS = 6.3710e3
10 10
11 11
12 12 def ll2xy(lat1, lon1, lat2, lon2):
13 13
14 14 p = 0.017453292519943295
15 15 a = 0.5 - numpy.cos((lat2 - lat1) * p)/2 + numpy.cos(lat1 * p) * \
16 16 numpy.cos(lat2 * p) * (1 - numpy.cos((lon2 - lon1) * p)) / 2
17 17 r = 12742 * numpy.arcsin(numpy.sqrt(a))
18 18 theta = numpy.arctan2(numpy.sin((lon2-lon1)*p)*numpy.cos(lat2*p), numpy.cos(lat1*p)
19 19 * numpy.sin(lat2*p)-numpy.sin(lat1*p)*numpy.cos(lat2*p)*numpy.cos((lon2-lon1)*p))
20 20 theta = -theta + numpy.pi/2
21 21 return r*numpy.cos(theta), r*numpy.sin(theta)
22 22
23 23
24 24 def km2deg(km):
25 25 '''
26 26 Convert distance in km to degrees
27 27 '''
28 28
29 29 return numpy.rad2deg(km/EARTH_RADIUS)
30 30
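ll2xy() converts a latitude/longitude pair into east/north offsets in km relative to a reference point (haversine distance plus bearing), and km2deg() maps a distance on the ground to degrees of arc. A small sketch, assuming both functions are in scope and using assumed coordinates (a Jicamarca-like origin and a point displaced ~0.09 deg in longitude):

lat1, lon1 = -11.95, -76.87                       # assumed reference location
lat2, lon2 = -11.95, -76.78                       # assumed point to the east

print(round(km2deg(10.0), 3))                     # ~0.09 deg of arc for 10 km
x, y = ll2xy(lat1, lon1, lat2, lon2)              # x ~ east offset [km], y ~ north offset [km]
print(round(x, 1), round(y, 1))                   # roughly (9.8, 0.0)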
31 31
32 32
33 33 class SpectralMomentsPlot(SpectraPlot):
34 34 '''
35 35 Plot for Spectral Moments
36 36 '''
37 37 CODE = 'spc_moments'
38 38 # colormap = 'jet'
39 39 # plot_type = 'pcolor'
40 40
41 41 class DobleGaussianPlot(SpectraPlot):
42 42 '''
43 43 Plot for Double Gaussian fit
44 44 '''
45 45 CODE = 'gaussian_fit'
46 46 # colormap = 'jet'
47 47 # plot_type = 'pcolor'
48 48
49 49 class DoubleGaussianSpectraCutPlot(SpectraCutPlot):
50 50 '''
51 51 Plot SpectraCut with Double Gaussian Fit
52 52 '''
53 53 CODE = 'cut_gaussian_fit'
54 54
55 55
56 56 class SpectralFitObliquePlot(SpectraPlot):
57 57 '''
58 58 Plot for Spectral Oblique
59 59 '''
60 60 CODE = 'spc_moments'
61 61 colormap = 'jet'
62 62 plot_type = 'pcolor'
63 63
64 64
65 65 class SnrPlot(RTIPlot):
66 66 '''
67 67 Plot for SNR Data
68 68 '''
69 69
70 70 CODE = 'snr'
71 71 colormap = 'jet'
72 72
73 73 def update(self, dataOut):
74 74 if len(self.channelList) == 0:
75 75 self.update_list(dataOut)
76 76
77 77 meta = {}
78 78 data = {
79 79 'snr': 10 * numpy.log10(dataOut.data_snr)
80 80 }
81 81 return data, meta
82 82
83 83 class DopplerPlot(RTIPlot):
84 84 '''
85 85 Plot for DOPPLER Data (1st moment)
86 86 '''
87 87
88 88 CODE = 'dop'
89 89 colormap = 'RdBu_r'
90 90
91 91 def update(self, dataOut):
92 92 self.update_list(dataOut)
93 93 data = {
94 94 'dop': dataOut.data_dop
95 95 }
96 96
97 97 return data, {}
98 98
99 99 class PowerPlot(RTIPlot):
100 100 '''
101 101 Plot for Power Data (0th moment)
102 102 '''
103 103
104 104 CODE = 'pow'
105 105 colormap = 'jet'
106 106
107 107 def update(self, dataOut):
108 108 self.update_list(dataOut)
109 109 data = {
110 110 'pow': 10*numpy.log10(dataOut.data_pow/dataOut.normFactor)
111 111 }
112 112 try:
113 113 data['noise'] = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor)
114 114 except:
115 115 pass
116 116 return data, {}
117 117
118 118 class SpectralWidthPlot(RTIPlot):
119 119 '''
120 120 Plot for Spectral Width Data (2nd moment)
121 121 '''
122 122
123 123 CODE = 'width'
124 124 colormap = 'jet'
125 125
126 126 def update(self, dataOut):
127 127 self.update_list(dataOut)
128 128 data = {
129 129 'width': dataOut.data_width
130 130 }
131 131 data['noise'] = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor)
132 132 return data, {}
133 133
134 134 class SkyMapPlot(Plot):
135 135 '''
136 136 Plot for meteor detection data
137 137 '''
138 138
139 139 CODE = 'param'
140 140
141 141 def setup(self):
142 142
143 143 self.ncols = 1
144 144 self.nrows = 1
145 145 self.width = 7.2
146 146 self.height = 7.2
147 147 self.nplots = 1
148 148 self.xlabel = 'Zonal Zenith Angle (deg)'
149 149 self.ylabel = 'Meridional Zenith Angle (deg)'
150 150 self.polar = True
151 151 self.ymin = -180
152 152 self.ymax = 180
153 153 self.colorbar = False
154 154
155 155 def plot(self):
156 156
157 157 arrayParameters = numpy.concatenate(self.data['param'])
158 158 error = arrayParameters[:, -1]
159 159 indValid = numpy.where(error == 0)[0]
160 160 finalMeteor = arrayParameters[indValid, :]
161 161 finalAzimuth = finalMeteor[:, 3]
162 162 finalZenith = finalMeteor[:, 4]
163 163
164 164 x = finalAzimuth * numpy.pi / 180
165 165 y = finalZenith
166 166
167 167 ax = self.axes[0]
168 168
169 169 if ax.firsttime:
170 170 ax.plot = ax.plot(x, y, 'bo', markersize=5)[0]
171 171 else:
172 172 ax.plot.set_data(x, y)
173 173
174 174 dt1 = self.getDateTime(self.data.min_time).strftime('%y/%m/%d %H:%M:%S')
175 175 dt2 = self.getDateTime(self.data.max_time).strftime('%y/%m/%d %H:%M:%S')
176 176 title = 'Meteor Detection Sky Map\n %s - %s \n Number of events: %5.0f\n' % (dt1,
177 177 dt2,
178 178 len(x))
179 179 self.titles[0] = title
180 180
181 181 class GenericRTIPlot(Plot):
182 182 '''
183 183 Plot for data_xxxx object
184 184 '''
185 185
186 186 CODE = 'param'
187 187 colormap = 'viridis'
188 188 plot_type = 'pcolorbuffer'
189 189
190 190 def setup(self):
191 191 self.xaxis = 'time'
192 192 self.ncols = 1
193 193 self.nrows = self.data.shape('param')[0]
194 194 self.nplots = self.nrows
195 195 self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.08, 'right':0.95, 'top': 0.95})
196 196
197 197 if not self.xlabel:
198 198 self.xlabel = 'Time'
199 199
200 200 self.ylabel = 'Range [km]'
201 201 if not self.titles:
202 202 self.titles = ['Param {}'.format(x) for x in range(self.nrows)]
203 203
204 204 def update(self, dataOut):
205 205
206 206 data = {
207 207 'param' : numpy.concatenate([getattr(dataOut, attr) for attr in self.attr_data], axis=0)
208 208 }
209
209
210 210 meta = {}
211 211
212 212 return data, meta
213 213
214 214 def plot(self):
215 215 # self.data.normalize_heights()
216 216 self.x = self.data.times
217 217 self.y = self.data.yrange
218 218 self.z = self.data['param']
219 219
220 220 self.z = numpy.ma.masked_invalid(self.z)
221 221
222 222 if self.decimation is None:
223 223 x, y, z = self.fill_gaps(self.x, self.y, self.z)
224 224 else:
225 225 x, y, z = self.fill_gaps(*self.decimate())
226 226
227 227 for n, ax in enumerate(self.axes):
228 228
229 229 self.zmax = self.zmax if self.zmax is not None else numpy.max(
230 230 self.z[n])
231 231 self.zmin = self.zmin if self.zmin is not None else numpy.min(
232 232 self.z[n])
233 233
234 234 if ax.firsttime:
235 235 if self.zlimits is not None:
236 236 self.zmin, self.zmax = self.zlimits[n]
237 237
238 238 ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n],
239 239 vmin=self.zmin,
240 240 vmax=self.zmax,
241 241 cmap=self.cmaps[n]
242 242 )
243 243 else:
244 244 if self.zlimits is not None:
245 245 self.zmin, self.zmax = self.zlimits[n]
246 246 try:
247 247 ax.collections.remove(ax.collections[0])
248 248 except:
249 249 pass
250 250 ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n],
251 251 vmin=self.zmin,
252 252 vmax=self.zmax,
253 253 cmap=self.cmaps[n]
254 254 )
255 255
256 256
257 257 class PolarMapPlot(Plot):
258 258 '''
259 259 Plot for weather radar
260 260 '''
261 261
262 262 CODE = 'param'
263 263 colormap = 'seismic'
264 264
265 265 def setup(self):
266 266 self.ncols = 1
267 267 self.nrows = 1
268 268 self.width = 9
269 269 self.height = 8
270 270 self.mode = self.data.meta['mode']
271 271 if self.channels is not None:
272 272 self.nplots = len(self.channels)
273 273 self.nrows = len(self.channels)
274 274 else:
275 275 self.nplots = self.data.shape(self.CODE)[0]
276 276 self.nrows = self.nplots
277 277 self.channels = list(range(self.nplots))
278 278 if self.mode == 'E':
279 279 self.xlabel = 'Longitude'
280 280 self.ylabel = 'Latitude'
281 281 else:
282 282 self.xlabel = 'Range (km)'
283 283 self.ylabel = 'Height (km)'
284 284 self.bgcolor = 'white'
285 285 self.cb_labels = self.data.meta['units']
286 286 self.lat = self.data.meta['latitude']
287 287 self.lon = self.data.meta['longitude']
288 288 self.xmin, self.xmax = float(
289 289 km2deg(self.xmin) + self.lon), float(km2deg(self.xmax) + self.lon)
290 290 self.ymin, self.ymax = float(
291 291 km2deg(self.ymin) + self.lat), float(km2deg(self.ymax) + self.lat)
292 292 # self.polar = True
293 293
294 294 def plot(self):
295 295
296 296 for n, ax in enumerate(self.axes):
297 297 data = self.data['param'][self.channels[n]]
298 298
299 299 zeniths = numpy.linspace(
300 300 0, self.data.meta['max_range'], data.shape[1])
301 301 if self.mode == 'E':
302 302 azimuths = -numpy.radians(self.data.yrange)+numpy.pi/2
303 303 r, theta = numpy.meshgrid(zeniths, azimuths)
304 304 x, y = r*numpy.cos(theta)*numpy.cos(numpy.radians(self.data.meta['elevation'])), r*numpy.sin(
305 305 theta)*numpy.cos(numpy.radians(self.data.meta['elevation']))
306 306 x = km2deg(x) + self.lon
307 307 y = km2deg(y) + self.lat
308 308 else:
309 309 azimuths = numpy.radians(self.data.yrange)
310 310 r, theta = numpy.meshgrid(zeniths, azimuths)
311 311 x, y = r*numpy.cos(theta), r*numpy.sin(theta)
312 312 self.y = zeniths
313 313
314 314 if ax.firsttime:
315 315 if self.zlimits is not None:
316 316 self.zmin, self.zmax = self.zlimits[n]
317 317 ax.plt = ax.pcolormesh(# r, theta, numpy.ma.array(data, mask=numpy.isnan(data)),
318 318 x, y, numpy.ma.array(data, mask=numpy.isnan(data)),
319 319 vmin=self.zmin,
320 320 vmax=self.zmax,
321 321 cmap=self.cmaps[n])
322 322 else:
323 323 if self.zlimits is not None:
324 324 self.zmin, self.zmax = self.zlimits[n]
325 325 ax.collections.remove(ax.collections[0])
326 326 ax.plt = ax.pcolormesh(# r, theta, numpy.ma.array(data, mask=numpy.isnan(data)),
327 327 x, y, numpy.ma.array(data, mask=numpy.isnan(data)),
328 328 vmin=self.zmin,
329 329 vmax=self.zmax,
330 330 cmap=self.cmaps[n])
331 331
332 332 if self.mode == 'A':
333 333 continue
334 334
335 335 # plot district names
336 336 f = open('/data/workspace/schain_scripts/distrito.csv')
337 337 for line in f:
338 338 label, lon, lat = [s.strip() for s in line.split(',') if s]
339 339 lat = float(lat)
340 340 lon = float(lon)
341 341 # ax.plot(lon, lat, '.b', ms=2)
342 342 ax.text(lon, lat, label.decode('utf8'), ha='center',
343 343 va='bottom', size='8', color='black')
344 344
345 345 # plot limites
346 346 limites = []
347 347 tmp = []
348 348 for line in open('/data/workspace/schain_scripts/lima.csv'):
349 349 if '#' in line:
350 350 if tmp:
351 351 limites.append(tmp)
352 352 tmp = []
353 353 continue
354 354 values = line.strip().split(',')
355 355 tmp.append((float(values[0]), float(values[1])))
356 356 for points in limites:
357 357 ax.add_patch(
358 358 Polygon(points, ec='k', fc='none', ls='--', lw=0.5))
359 359
360 360 # plot Cuencas
361 361 for cuenca in ('rimac', 'lurin', 'mala', 'chillon', 'chilca', 'chancay-huaral'):
362 362 f = open('/data/workspace/schain_scripts/{}.csv'.format(cuenca))
363 363 values = [line.strip().split(',') for line in f]
364 364 points = [(float(s[0]), float(s[1])) for s in values]
365 365 ax.add_patch(Polygon(points, ec='b', fc='none'))
366 366
367 367 # plot grid
368 368 for r in (15, 30, 45, 60):
369 369 ax.add_artist(plt.Circle((self.lon, self.lat),
370 370 km2deg(r), color='0.6', fill=False, lw=0.2))
371 371 ax.text(
372 372 self.lon + (km2deg(r))*numpy.cos(60*numpy.pi/180),
373 373 self.lat + (km2deg(r))*numpy.sin(60*numpy.pi/180),
374 374 '{}km'.format(r),
375 375 ha='center', va='bottom', size='8', color='0.6', weight='heavy')
376 376
377 377 if self.mode == 'E':
378 378 title = 'El={}\N{DEGREE SIGN}'.format(self.data.meta['elevation'])
379 379 label = 'E{:02d}'.format(int(self.data.meta['elevation']))
380 380 else:
381 381 title = 'Az={}\N{DEGREE SIGN}'.format(self.data.meta['azimuth'])
382 382 label = 'A{:02d}'.format(int(self.data.meta['azimuth']))
383 383
384 384 self.save_labels = ['{}-{}'.format(lbl, label) for lbl in self.labels]
385 385 self.titles = ['{} {}'.format(
386 386 self.data.parameters[x], title) for x in self.channels]
387 387
388 388
389 389
390 390 class TxPowerPlot(Plot):
391 391 '''
392 392 Plot for TX Power from external file
393 393 '''
394 394
395 395 CODE = 'tx_power'
396 396 plot_type = 'scatterbuffer'
397 397
398 398 def setup(self):
399 399 self.xaxis = 'time'
400 400 self.ncols = 1
401 401 self.nrows = 1
402 402 self.nplots = 1
403 403 self.ylabel = 'Power [kW]'
404 404 self.xlabel = 'Time'
405 405 self.titles = ['TX power']
406 406 self.colorbar = False
407 407 self.plots_adjust.update({'right': 0.85 })
408 408 #if not self.titles:
409 409 self.titles = ['TX Power Plot']
410 410
411 411 def update(self, dataOut):
412 412
413 413 data = {}
414 414 meta = {}
415 415
416 416 data['tx_power'] = dataOut.txPower/1000
417 417 meta['yrange'] = numpy.array([])
418 418 #print(dataOut.txPower/1000)
419 419 return data, meta
420 420
421 421 def plot(self):
422 422
423 423 x = self.data.times
424 424 xmin = self.data.min_time
425 425 xmax = xmin + self.xrange * 60 * 60
426 426 Y = self.data['tx_power']
427 427
428 428 if self.axes[0].firsttime:
429 429 if self.ymin is None: self.ymin = 0
430 430 if self.ymax is None: self.ymax = numpy.nanmax(Y) + 5
431 431 if self.ymax == 5:
432 432 self.ymax = 250
433 433 self.ymin = 100
434 434 self.axes[0].plot(x, Y, lw=1, label='Power')
435 435 plt.legend(bbox_to_anchor=(1.18, 1.0))
436 436 else:
437 437 self.axes[0].lines[0].set_data(x, Y)
\ No newline at end of file
@@ -1,1928 +1,1935
1 1 # Copyright (c) 2012-2021 Jicamarca Radio Observatory
2 2 # All rights reserved.
3 3 #
4 4 # Distributed under the terms of the BSD 3-clause license.
5 5 """Classes to plot Spectra data
6 6
7 7 """
8 8
9 9 import os
10 10 import numpy
11 11 import datetime
12 12
13 13 from schainpy.model.graphics.jroplot_base import Plot, plt, log
14 14 from itertools import combinations
15 15 from matplotlib.ticker import LinearLocator
16 16
17 17 from schainpy.model.utils.BField import BField
18 18 from scipy.interpolate import splrep
19 19 from scipy.interpolate import splev
20 20
21 21 from matplotlib import __version__ as plt_version
22 22
23 23 if plt_version >='3.3.4':
24 24 EXTRA_POINTS = 0
25 25 else:
26 26 EXTRA_POINTS = 1
27 27 class SpectraPlot(Plot):
28 28 '''
29 29 Plot for Spectra data
30 30 '''
31 31
32 32 CODE = 'spc'
33 33 colormap = 'jet'
34 34 plot_type = 'pcolor'
35 35 buffering = False
36 36 channelList = []
37 37 elevationList = []
38 38 azimuthList = []
39 39
40 40 def setup(self):
41 41
42 42 self.nplots = len(self.data.channels)
43 43 self.ncols = int(numpy.sqrt(self.nplots) + 0.9)
44 44 self.nrows = int((1.0 * self.nplots / self.ncols) + 0.9)
45 45 self.height = 3.4 * self.nrows
46 46 self.cb_label = 'dB'
47 47 if self.showprofile:
48 48 self.width = 5.2 * self.ncols
49 49 else:
50 50 self.width = 4.2* self.ncols
51 51 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.12})
52 52 self.ylabel = 'Range [km]'
53 53
54 54 def update_list(self,dataOut):
55 55
56 56 if len(self.channelList) == 0:
57 57 self.channelList = dataOut.channelList
58 58 if len(self.elevationList) == 0:
59 59 self.elevationList = dataOut.elevationList
60 60 if len(self.azimuthList) == 0:
61 61 self.azimuthList = dataOut.azimuthList
62 62
63 63 def update(self, dataOut):
64 64
65 65 self.update_list(dataOut)
66 66 data = {}
67 67 meta = {}
68
69 68 norm = dataOut.nProfiles * dataOut.max_nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter
70 noise = 10*numpy.log10(dataOut.getNoise()/norm)
71 z = numpy.zeros((dataOut.nChannels, dataOut.nFFTPoints, dataOut.nHeights))
72 for ch in range(dataOut.nChannels):
73 if hasattr(dataOut.normFactor,'ndim'):
74 if dataOut.normFactor.ndim > 1:
75 z[ch] = (numpy.divide(dataOut.data_spc[ch],dataOut.normFactor[ch]))
69 if dataOut.type == "Parameters":
70 noise = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor)
71 spc = 10*numpy.log10(dataOut.data_spc/(dataOut.nProfiles))
72 else:
73 noise = 10*numpy.log10(dataOut.getNoise()/norm)
74
75 z = numpy.zeros((dataOut.nChannels, dataOut.nFFTPoints, dataOut.nHeights))
76 for ch in range(dataOut.nChannels):
77 if hasattr(dataOut.normFactor,'ndim'):
78 if dataOut.normFactor.ndim > 1:
79 z[ch] = (numpy.divide(dataOut.data_spc[ch],dataOut.normFactor[ch]))
76 80
81 else:
82 z[ch] = (numpy.divide(dataOut.data_spc[ch],dataOut.normFactor))
77 83 else:
78 84 z[ch] = (numpy.divide(dataOut.data_spc[ch],dataOut.normFactor))
79 else:
80 z[ch] = (numpy.divide(dataOut.data_spc[ch],dataOut.normFactor))
81 85
82 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
83 spc = 10*numpy.log10(z)
86 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
87 spc = 10*numpy.log10(z)
84 88
85 89 data['spc'] = spc
86 90 data['rti'] = spc.mean(axis=1)
87 91 data['noise'] = noise
88 92 meta['xrange'] = (dataOut.getFreqRange(EXTRA_POINTS)/1000., dataOut.getAcfRange(EXTRA_POINTS), dataOut.getVelRange(EXTRA_POINTS))
89 93 if self.CODE == 'spc_moments':
90 94 data['moments'] = dataOut.moments
91 95
92 96 return data, meta
93 97
94 98 def plot(self):
95 99
96 100 if self.xaxis == "frequency":
97 101 x = self.data.xrange[0]
98 102 self.xlabel = "Frequency (kHz)"
99 103 elif self.xaxis == "time":
100 104 x = self.data.xrange[1]
101 105 self.xlabel = "Time (ms)"
102 106 else:
103 107 x = self.data.xrange[2]
104 108 self.xlabel = "Velocity (m/s)"
105 109
106 110 if (self.CODE == 'spc_moments') | (self.CODE == 'gaussian_fit'):
107 111 x = self.data.xrange[2]
108 112 self.xlabel = "Velocity (m/s)"
109 113
110 114 self.titles = []
111 115
112 116 y = self.data.yrange
113 117 self.y = y
114 118
115 119 data = self.data[-1]
116 120 z = data['spc']
117 121
118 122 for n, ax in enumerate(self.axes):
119 123 noise = self.data['noise'][n][0]
120 124 # noise = data['noise'][n]
121 125
122 126 if self.CODE == 'spc_moments':
123 127 mean = data['moments'][n, 1]
124 128 if self.CODE == 'gaussian_fit':
125 129 gau0 = data['gaussfit'][n][2,:,0]
126 130 gau1 = data['gaussfit'][n][2,:,1]
127 131 if ax.firsttime:
128 132 self.xmax = self.xmax if self.xmax else numpy.nanmax(x)
129 133 self.xmin = self.xmin if self.xmin else -self.xmax
130 134 self.zmin = self.zmin if self.zmin else numpy.nanmin(z)
131 135 self.zmax = self.zmax if self.zmax else numpy.nanmax(z)
132 136 ax.plt = ax.pcolormesh(x, y, z[n].T,
133 137 vmin=self.zmin,
134 138 vmax=self.zmax,
135 139 cmap=plt.get_cmap(self.colormap)
136 140 )
137 141
138 142 if self.showprofile:
139 143 ax.plt_profile = self.pf_axes[n].plot(
140 144 data['rti'][n], y)[0]
141 145 ax.plt_noise = self.pf_axes[n].plot(numpy.repeat(noise, len(y)), y,
142 146 color="k", linestyle="dashed", lw=1)[0]
143 147 if self.CODE == 'spc_moments':
144 148 ax.plt_mean = ax.plot(mean, y, color='k', lw=1)[0]
145 149 if self.CODE == 'gaussian_fit':
146 150 ax.plt_gau0 = ax.plot(gau0, y, color='r', lw=1)[0]
147 151 ax.plt_gau1 = ax.plot(gau1, y, color='y', lw=1)[0]
148 152 else:
149 153 ax.plt.set_array(z[n].T.ravel())
150 154 if self.showprofile:
151 155 ax.plt_profile.set_data(data['rti'][n], y)
152 156 ax.plt_noise.set_data(numpy.repeat(noise, len(y)), y)
153 157 if self.CODE == 'spc_moments':
154 158 ax.plt_mean.set_data(mean, y)
155 159 if self.CODE == 'gaussian_fit':
156 160 ax.plt_gau0.set_data(gau0, y)
157 161 ax.plt_gau1.set_data(gau1, y)
158 162 if len(self.azimuthList) > 0 and len(self.elevationList) > 0:
159 163 self.titles.append('CH {}: {:2.1f}elv {:2.1f}az {:3.2f}dB'.format(self.channelList[n], noise, self.elevationList[n], self.azimuthList[n]))
160 164 else:
161 165 self.titles.append('CH {}: {:3.2f}dB'.format(self.channelList[n], noise))
162 166
163 167 class SpectraObliquePlot(Plot):
164 168 '''
165 169 Plot for Spectra data
166 170 '''
167 171
168 172 CODE = 'spc_oblique'
169 173 colormap = 'jet'
170 174 plot_type = 'pcolor'
171 175
172 176 def setup(self):
173 177 self.xaxis = "oblique"
174 178 self.nplots = len(self.data.channels)
175 179 self.ncols = int(numpy.sqrt(self.nplots) + 0.9)
176 180 self.nrows = int((1.0 * self.nplots / self.ncols) + 0.9)
177 181 self.height = 2.6 * self.nrows
178 182 self.cb_label = 'dB'
179 183 if self.showprofile:
180 184 self.width = 4 * self.ncols
181 185 else:
182 186 self.width = 3.5 * self.ncols
183 187 self.plots_adjust.update({'wspace': 0.8, 'hspace':0.2, 'left': 0.2, 'right': 0.9, 'bottom': 0.18})
184 188 self.ylabel = 'Range [km]'
185 189
186 190 def update(self, dataOut):
187 191
188 192 data = {}
189 193 meta = {}
190 194
191 195 spc = 10*numpy.log10(dataOut.data_spc/dataOut.normFactor)
192 196 data['spc'] = spc
193 197 data['rti'] = dataOut.getPower()
194 198 data['noise'] = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor)
195 199 meta['xrange'] = (dataOut.getFreqRange(1)/1000., dataOut.getAcfRange(1), dataOut.getVelRange(1))
196 200
197 201 data['shift1'] = dataOut.Dop_EEJ_T1[0]
198 202 data['shift2'] = dataOut.Dop_EEJ_T2[0]
199 203 data['max_val_2'] = dataOut.Oblique_params[0,-1,:]
200 204 data['shift1_error'] = dataOut.Err_Dop_EEJ_T1[0]
201 205 data['shift2_error'] = dataOut.Err_Dop_EEJ_T2[0]
202 206
203 207 return data, meta
204 208
205 209 def plot(self):
206 210
207 211 if self.xaxis == "frequency":
208 212 x = self.data.xrange[0]
209 213 self.xlabel = "Frequency (kHz)"
210 214 elif self.xaxis == "time":
211 215 x = self.data.xrange[1]
212 216 self.xlabel = "Time (ms)"
213 217 else:
214 218 x = self.data.xrange[2]
215 219 self.xlabel = "Velocity (m/s)"
216 220
217 221 self.titles = []
218 222
219 223 y = self.data.yrange
220 224 self.y = y
221 225
222 226 data = self.data[-1]
223 227 z = data['spc']
224 228
225 229 for n, ax in enumerate(self.axes):
226 230 noise = self.data['noise'][n][-1]
227 231 shift1 = data['shift1']
228 232 shift2 = data['shift2']
229 233 max_val_2 = data['max_val_2']
230 234 err1 = data['shift1_error']
231 235 err2 = data['shift2_error']
232 236 if ax.firsttime:
233 237
234 238 self.xmax = self.xmax if self.xmax else numpy.nanmax(x)
235 239 self.xmin = self.xmin if self.xmin else -self.xmax
236 240 self.zmin = self.zmin if self.zmin else numpy.nanmin(z)
237 241 self.zmax = self.zmax if self.zmax else numpy.nanmax(z)
238 242 ax.plt = ax.pcolormesh(x, y, z[n].T,
239 243 vmin=self.zmin,
240 244 vmax=self.zmax,
241 245 cmap=plt.get_cmap(self.colormap)
242 246 )
243 247
244 248 if self.showprofile:
245 249 ax.plt_profile = self.pf_axes[n].plot(
246 250 self.data['rti'][n][-1], y)[0]
247 251 ax.plt_noise = self.pf_axes[n].plot(numpy.repeat(noise, len(y)), y,
248 252 color="k", linestyle="dashed", lw=1)[0]
249 253
250 254 self.ploterr1 = ax.errorbar(shift1, y, xerr=err1, fmt='k^', elinewidth=2.2, marker='o', linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2)
251 255 self.ploterr2 = ax.errorbar(shift2, y, xerr=err2, fmt='m^',elinewidth=2.2,marker='o',linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2)
252 256 self.ploterr3 = ax.errorbar(max_val_2, y, xerr=0, fmt='g^',elinewidth=2.2,marker='o',linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2)
253 257
254 258 else:
255 259 self.ploterr1.remove()
256 260 self.ploterr2.remove()
257 261 self.ploterr3.remove()
258 262 ax.plt.set_array(z[n].T.ravel())
259 263 if self.showprofile:
260 264 ax.plt_profile.set_data(self.data['rti'][n][-1], y)
261 265 ax.plt_noise.set_data(numpy.repeat(noise, len(y)), y)
262 266 self.ploterr1 = ax.errorbar(shift1, y, xerr=err1, fmt='k^', elinewidth=2.2, marker='o', linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2)
263 267 self.ploterr2 = ax.errorbar(shift2, y, xerr=err2, fmt='m^',elinewidth=2.2,marker='o',linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2)
264 268 self.ploterr3 = ax.errorbar(max_val_2, y, xerr=0, fmt='g^',elinewidth=2.2,marker='o',linestyle='None',markersize=2.5,capsize=0.3,markeredgewidth=0.2)
265 269
266 270 self.titles.append('CH {}: {:3.2f}dB'.format(n, noise))
267 271
268 272
269 273 class CrossSpectraPlot(Plot):
270 274
271 275 CODE = 'cspc'
272 276 colormap = 'jet'
273 277 plot_type = 'pcolor'
274 278 zmin_coh = None
275 279 zmax_coh = None
276 280 zmin_phase = None
277 281 zmax_phase = None
278 282 realChannels = None
279 283 crossPairs = None
280 284
281 285 def setup(self):
282 286
283 287 self.ncols = 4
284 288 self.nplots = len(self.data.pairs) * 2
285 289 self.nrows = int((1.0 * self.nplots / self.ncols) + 0.9)
286 290 self.width = 3.1 * self.ncols
287 291 self.height = 2.6 * self.nrows
288 292 self.ylabel = 'Range [km]'
289 293 self.showprofile = False
290 294 self.plots_adjust.update({'left': 0.08, 'right': 0.92, 'wspace': 0.5, 'hspace':0.4, 'top':0.95, 'bottom': 0.08})
291 295
292 296 def update(self, dataOut):
293 297
294 298 data = {}
295 299 meta = {}
296 300
297 301 spc = dataOut.data_spc
298 302 cspc = dataOut.data_cspc
299 303 meta['xrange'] = (dataOut.getFreqRange(EXTRA_POINTS)/1000., dataOut.getAcfRange(EXTRA_POINTS), dataOut.getVelRange(EXTRA_POINTS))
300 304 rawPairs = list(combinations(list(range(dataOut.nChannels)), 2))
301 305 meta['pairs'] = rawPairs
302 306 if self.crossPairs == None:
303 307 self.crossPairs = dataOut.pairsList
304 308 tmp = []
305 309
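# For each pair, normalize the cross-spectrum by the geometric mean of the two auto-spectra:
# the magnitude of the result is the coherence (0 to 1) and its argument is the phase
# difference, converted here to degrees.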
306 310 for n, pair in enumerate(meta['pairs']):
307 311 out = cspc[n] / numpy.sqrt(spc[pair[0]] * spc[pair[1]])
308 312 coh = numpy.abs(out)
309 313 phase = numpy.arctan2(out.imag, out.real) * 180 / numpy.pi
310 314 tmp.append(coh)
311 315 tmp.append(phase)
312 316
313 317 data['cspc'] = numpy.array(tmp)
314 318
315 319 return data, meta
316 320
317 321 def plot(self):
318 322
319 323 if self.xaxis == "frequency":
320 324 x = self.data.xrange[0]
321 325 self.xlabel = "Frequency (kHz)"
322 326 elif self.xaxis == "time":
323 327 x = self.data.xrange[1]
324 328 self.xlabel = "Time (ms)"
325 329 else:
326 330 x = self.data.xrange[2]
327 331 self.xlabel = "Velocity (m/s)"
328 332
329 333 self.titles = []
330 334
331 335 y = self.data.yrange
332 336 self.y = y
333 337
334 338 data = self.data[-1]
335 339 cspc = data['cspc']
336 340
337 341 for n in range(len(self.data.pairs)):
338 342 pair = self.crossPairs[n]
339 343 coh = cspc[n*2]
340 344 phase = cspc[n*2+1]
341 345 ax = self.axes[2 * n]
342 346 if ax.firsttime:
343 347 ax.plt = ax.pcolormesh(x, y, coh.T,
344 348 vmin=self.zmin_coh,
345 349 vmax=self.zmax_coh,
346 350 cmap=plt.get_cmap(self.colormap_coh)
347 351 )
348 352 else:
349 353 ax.plt.set_array(coh.T.ravel())
350 354 self.titles.append(
351 355 'Coherence Ch{} * Ch{}'.format(pair[0], pair[1]))
352 356
353 357 ax = self.axes[2 * n + 1]
354 358 if ax.firsttime:
355 359 ax.plt = ax.pcolormesh(x, y, phase.T,
356 360 vmin=-180,
357 361 vmax=180,
358 362 cmap=plt.get_cmap(self.colormap_phase)
359 363 )
360 364 else:
361 365 ax.plt.set_array(phase.T.ravel())
362 366 self.titles.append('Phase CH{} * CH{}'.format(pair[0], pair[1]))
363 367
364 368
365 369 class CrossSpectra4Plot(Plot):
366 370
367 371 CODE = 'cspc'
368 372 colormap = 'jet'
369 373 plot_type = 'pcolor'
370 374 zmin_coh = None
371 375 zmax_coh = None
372 376 zmin_phase = None
373 377 zmax_phase = None
374 378
375 379 def setup(self):
376 380
377 381 self.ncols = 4
378 382 self.nrows = len(self.data.pairs)
379 383 self.nplots = self.nrows * 4
380 384 self.width = 3.1 * self.ncols
381 385 self.height = 5 * self.nrows
382 386 self.ylabel = 'Range [km]'
383 387 self.showprofile = False
384 388 self.plots_adjust.update({'left': 0.08, 'right': 0.92, 'wspace': 0.5, 'hspace':0.4, 'top':0.95, 'bottom': 0.08})
385 389
386 390 def plot(self):
387 391
388 392 if self.xaxis == "frequency":
389 393 x = self.data.xrange[0]
390 394 self.xlabel = "Frequency (kHz)"
391 395 elif self.xaxis == "time":
392 396 x = self.data.xrange[1]
393 397 self.xlabel = "Time (ms)"
394 398 else:
395 399 x = self.data.xrange[2]
396 400 self.xlabel = "Velocity (m/s)"
397 401
398 402 self.titles = []
399 403
400 404
401 405 y = self.data.heights
402 406 self.y = y
403 407 nspc = self.data['spc']
404 408 spc = self.data['cspc'][0]
405 409 cspc = self.data['cspc'][1]
406 410
407 411 for n in range(self.nrows):
408 412 noise = self.data['noise'][:,-1]
409 413 pair = self.data.pairs[n]
410 414
411 415 ax = self.axes[4 * n]
412 416 if ax.firsttime:
413 417 self.xmax = self.xmax if self.xmax else numpy.nanmax(x)
414 418 self.xmin = self.xmin if self.xmin else -self.xmax
415 419 self.zmin = self.zmin if self.zmin else numpy.nanmin(nspc)
416 420 self.zmax = self.zmax if self.zmax else numpy.nanmax(nspc)
417 421 ax.plt = ax.pcolormesh(x , y , nspc[pair[0]].T,
418 422 vmin=self.zmin,
419 423 vmax=self.zmax,
420 424 cmap=plt.get_cmap(self.colormap)
421 425 )
422 426 else:
423 427
424 428 ax.plt.set_array(nspc[pair[0]].T.ravel())
425 429 self.titles.append('CH {}: {:3.2f}dB'.format(pair[0], noise[pair[0]]))
426 430
427 431 ax = self.axes[4 * n + 1]
428 432
429 433 if ax.firsttime:
430 434 ax.plt = ax.pcolormesh(x , y, numpy.flip(nspc[pair[1]],axis=0).T,
431 435 vmin=self.zmin,
432 436 vmax=self.zmax,
433 437 cmap=plt.get_cmap(self.colormap)
434 438 )
435 439 else:
436 440
437 441 ax.plt.set_array(numpy.flip(nspc[pair[1]],axis=0).T.ravel())
438 442 self.titles.append('CH {}: {:3.2f}dB'.format(pair[1], noise[pair[1]]))
439 443
440 444 out = cspc[n] / numpy.sqrt(spc[pair[0]] * spc[pair[1]])
441 445 coh = numpy.abs(out)
442 446 phase = numpy.arctan2(out.imag, out.real) * 180 / numpy.pi
443 447
444 448 ax = self.axes[4 * n + 2]
445 449 if ax.firsttime:
446 450 ax.plt = ax.pcolormesh(x, y, numpy.flip(coh,axis=0).T,
447 451 vmin=0,
448 452 vmax=1,
449 453 cmap=plt.get_cmap(self.colormap_coh)
450 454 )
451 455 else:
452 456 ax.plt.set_array(numpy.flip(coh,axis=0).T.ravel())
453 457 self.titles.append(
454 458 'Coherence Ch{} * Ch{}'.format(pair[0], pair[1]))
455 459
456 460 ax = self.axes[4 * n + 3]
457 461 if ax.firsttime:
458 462 ax.plt = ax.pcolormesh(x, y, numpy.flip(phase,axis=0).T,
459 463 vmin=-180,
460 464 vmax=180,
461 465 cmap=plt.get_cmap(self.colormap_phase)
462 466 )
463 467 else:
464 468 ax.plt.set_array(numpy.flip(phase,axis=0).T.ravel())
465 469 self.titles.append('Phase CH{} * CH{}'.format(pair[0], pair[1]))
466 470
467 471
468 472 class CrossSpectra2Plot(Plot):
469 473
470 474 CODE = 'cspc'
471 475 colormap = 'jet'
472 476 plot_type = 'pcolor'
473 477 zmin_coh = None
474 478 zmax_coh = None
475 479 zmin_phase = None
476 480 zmax_phase = None
477 481
478 482 def setup(self):
479 483
480 484 self.ncols = 1
481 485 self.nrows = len(self.data.pairs)
482 486 self.nplots = self.nrows * 1
483 487 self.width = 3.1 * self.ncols
484 488 self.height = 5 * self.nrows
485 489 self.ylabel = 'Range [km]'
486 490 self.showprofile = False
487 491 self.plots_adjust.update({'left': 0.22, 'right': .90, 'wspace': 0.5, 'hspace':0.4, 'top':0.95, 'bottom': 0.08})
488 492
489 493 def plot(self):
490 494
491 495 if self.xaxis == "frequency":
492 496 x = self.data.xrange[0]
493 497 self.xlabel = "Frequency (kHz)"
494 498 elif self.xaxis == "time":
495 499 x = self.data.xrange[1]
496 500 self.xlabel = "Time (ms)"
497 501 else:
498 502 x = self.data.xrange[2]
499 503 self.xlabel = "Velocity (m/s)"
500 504
501 505 self.titles = []
502 506
503 507
504 508 y = self.data.heights
505 509 self.y = y
506 510 cspc = self.data['cspc'][1]
507 511
508 512 for n in range(self.nrows):
509 513 noise = self.data['noise'][:,-1]
510 514 pair = self.data.pairs[n]
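# Cross-spectral power: the magnitude of the complex cross-spectrum, normalized by nFactor
# and expressed in dB.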
511 515 out = cspc[n]
512 516 cross = numpy.abs(out)
513 517 z = cross/self.data.nFactor
514 518 cross = 10*numpy.log10(z)
515 519
516 520 ax = self.axes[1 * n]
517 521 if ax.firsttime:
518 522 self.xmax = self.xmax if self.xmax else numpy.nanmax(x)
519 523 self.xmin = self.xmin if self.xmin else -self.xmax
520 524 self.zmin = self.zmin if self.zmin else numpy.nanmin(cross)
521 525 self.zmax = self.zmax if self.zmax else numpy.nanmax(cross)
522 526 ax.plt = ax.pcolormesh(x, y, cross.T,
523 527 vmin=self.zmin,
524 528 vmax=self.zmax,
525 529 cmap=plt.get_cmap(self.colormap)
526 530 )
527 531 else:
528 532 ax.plt.set_array(cross.T.ravel())
529 533 self.titles.append(
530 534 'Cross Spectra Power Ch{} * Ch{}'.format(pair[0], pair[1]))
531 535
532 536
533 537 class CrossSpectra3Plot(Plot):
534 538
535 539 CODE = 'cspc'
536 540 colormap = 'jet'
537 541 plot_type = 'pcolor'
538 542 zmin_coh = None
539 543 zmax_coh = None
540 544 zmin_phase = None
541 545 zmax_phase = None
542 546
543 547 def setup(self):
544 548
545 549 self.ncols = 3
546 550 self.nrows = len(self.data.pairs)
547 551 self.nplots = self.nrows * 3
548 552 self.width = 3.1 * self.ncols
549 553 self.height = 5 * self.nrows
550 554 self.ylabel = 'Range [km]'
551 555 self.showprofile = False
552 556 self.plots_adjust.update({'left': 0.22, 'right': .90, 'wspace': 0.5, 'hspace':0.4, 'top':0.95, 'bottom': 0.08})
553 557
554 558 def plot(self):
555 559
556 560 if self.xaxis == "frequency":
557 561 x = self.data.xrange[0]
558 562 self.xlabel = "Frequency (kHz)"
559 563 elif self.xaxis == "time":
560 564 x = self.data.xrange[1]
561 565 self.xlabel = "Time (ms)"
562 566 else:
563 567 x = self.data.xrange[2]
564 568 self.xlabel = "Velocity (m/s)"
565 569
566 570 self.titles = []
567 571
568 572
569 573 y = self.data.heights
570 574 self.y = y
571 575
572 576 cspc = self.data['cspc'][1]
573 577
574 578 for n in range(self.nrows):
575 579 noise = self.data['noise'][:,-1]
576 580 pair = self.data.pairs[n]
577 581 out = cspc[n]
578 582
579 583 cross = numpy.abs(out)
580 584 z = cross/self.data.nFactor
581 585 cross = 10*numpy.log10(z)
582 586
583 587 out_r= out.real/self.data.nFactor
584 588
585 589 out_i= out.imag/self.data.nFactor
586 590
587 591 ax = self.axes[3 * n]
588 592 if ax.firsttime:
589 593 self.xmax = self.xmax if self.xmax else numpy.nanmax(x)
590 594 self.xmin = self.xmin if self.xmin else -self.xmax
591 595 self.zmin = self.zmin if self.zmin else numpy.nanmin(cross)
592 596 self.zmax = self.zmax if self.zmax else numpy.nanmax(cross)
593 597 ax.plt = ax.pcolormesh(x, y, cross.T,
594 598 vmin=self.zmin,
595 599 vmax=self.zmax,
596 600 cmap=plt.get_cmap(self.colormap)
597 601 )
598 602 else:
599 603 ax.plt.set_array(cross.T.ravel())
600 604 self.titles.append(
601 605 'Cross Spectra Power Ch{} * Ch{}'.format(pair[0], pair[1]))
602 606
603 607 ax = self.axes[3 * n + 1]
604 608 if ax.firsttime:
605 609 self.xmax = self.xmax if self.xmax else numpy.nanmax(x)
606 610 self.xmin = self.xmin if self.xmin else -self.xmax
607 611 self.zmin = self.zmin if self.zmin else numpy.nanmin(cross)
608 612 self.zmax = self.zmax if self.zmax else numpy.nanmax(cross)
609 613 ax.plt = ax.pcolormesh(x, y, out_r.T,
610 614 vmin=-1.e6,
611 615 vmax=0,
612 616 cmap=plt.get_cmap(self.colormap)
613 617 )
614 618 else:
615 619 ax.plt.set_array(out_r.T.ravel())
616 620 self.titles.append(
617 621 'Cross Spectra Real Ch{} * Ch{}'.format(pair[0], pair[1]))
618 622
619 623 ax = self.axes[3 * n + 2]
620 624
621 625
622 626 if ax.firsttime:
623 627 self.xmax = self.xmax if self.xmax else numpy.nanmax(x)
624 628 self.xmin = self.xmin if self.xmin else -self.xmax
625 629 self.zmin = self.zmin if self.zmin else numpy.nanmin(cross)
626 630 self.zmax = self.zmax if self.zmax else numpy.nanmax(cross)
627 631 ax.plt = ax.pcolormesh(x, y, out_i.T,
628 632 vmin=-1.e6,
629 633 vmax=1.e6,
630 634 cmap=plt.get_cmap(self.colormap)
631 635 )
632 636 else:
633 637 ax.plt.set_array(out_i.T.ravel())
634 638 self.titles.append(
635 639 'Cross Spectra Imag Ch{} * Ch{}'.format(pair[0], pair[1]))
636 640
637 641 class RTIPlot(Plot):
638 642 '''
639 643 Plot for RTI data
640 644 '''
641 645
642 646 CODE = 'rti'
643 647 colormap = 'jet'
644 648 plot_type = 'pcolorbuffer'
645 649 titles = None
646 650 channelList = []
647 651 elevationList = []
648 652 azimuthList = []
649 653
650 654 def setup(self):
651 655 self.xaxis = 'time'
652 656 self.ncols = 1
653 657 self.nrows = len(self.data.channels)
654 658 self.nplots = len(self.data.channels)
655 659 self.ylabel = 'Range [km]'
656 660 #self.xlabel = 'Time'
657 661 self.cb_label = 'dB'
658 662 self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.1, 'right':0.95})
659 663 self.titles = ['{} Channel {}'.format(
660 664 self.CODE.upper(), x) for x in range(self.nplots)]
661 665
662 666 def update_list(self,dataOut):
663 667
664 668 if len(self.channelList) == 0:
665 669 self.channelList = dataOut.channelList
666 670 if len(self.elevationList) == 0:
667 671 self.elevationList = dataOut.elevationList
668 672 if len(self.azimuthList) == 0:
669 673 self.azimuthList = dataOut.azimuthList
670 674
671 675
672 676 def update(self, dataOut):
673 677
674 678 if len(self.channelList) == 0:
675 679 self.update_list(dataOut)
676 680 data = {}
677 681 meta = {}
678 682 data['rti'] = dataOut.getPower()
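# Scale the noise estimate by the total number of integrated samples (profiles, coherent and
# incoherent integrations, filter window) before converting to dB; these attributes are
# assumed to be set by the upstream processing units.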
679 683 norm = dataOut.nProfiles * dataOut.max_nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter
680 684 noise = 10*numpy.log10(dataOut.getNoise()/norm)
681 685 data['noise'] = noise
682 686
683 687 return data, meta
684 688
685 689 def plot(self):
686 690
687 691 self.x = self.data.times
688 692 self.y = self.data.yrange
689 693 self.z = self.data[self.CODE]
690 694 self.z = numpy.array(self.z, dtype=float)
691 695 self.z = numpy.ma.masked_invalid(self.z)
692 696
693 697 try:
694 698 if self.channelList != None:
695 699 if len(self.elevationList) > 0 and len(self.azimuthList) > 0:
696 700 self.titles = ['{} Channel {} ({:2.1f} Elev, {:2.1f} Azth)'.format(
697 701 self.CODE.upper(), x, self.elevationList[x], self.azimuthList[x]) for x in self.channelList]
698 702 else:
699 703 self.titles = ['{} Channel {}'.format(
700 704 self.CODE.upper(), x) for x in self.channelList]
701 705 except:
702 706 if self.channelList.any() != None:
703 707 if len(self.elevationList) > 0 and len(self.azimuthList) > 0:
704 708 self.titles = ['{} Channel {} ({:2.1f} Elev, {:2.1f} Azth)'.format(
705 709 self.CODE.upper(), x, self.elevationList[x], self.azimuthList[x]) for x in self.channelList]
706 710 else:
707 711 self.titles = ['{} Channel {}'.format(
708 712 self.CODE.upper(), x) for x in self.channelList]
709 713
710 714 if self.decimation is None:
711 715 x, y, z = self.fill_gaps(self.x, self.y, self.z)
712 716 else:
713 717 x, y, z = self.fill_gaps(*self.decimate())
714 718
715 719 for n, ax in enumerate(self.axes):
716 720
717 721 self.zmin = self.zmin if self.zmin else numpy.min(self.z)
718 722 self.zmax = self.zmax if self.zmax else numpy.max(self.z)
719 723 data = self.data[-1]
720 724 if ax.firsttime:
721 725 ax.plt = ax.pcolormesh(x, y, z[n].T,
722 726 vmin=self.zmin,
723 727 vmax=self.zmax,
724 728 cmap=plt.get_cmap(self.colormap)
725 729 )
726 730 if self.showprofile:
727 731 ax.plot_profile = self.pf_axes[n].plot(
728 data['rti'][n], self.y)[0]
732 data[self.CODE][n], self.y)[0]
729 733 if "noise" in self.data:
730 734 ax.plot_noise = self.pf_axes[n].plot(numpy.repeat(data['noise'][n], len(self.y)), self.y,
731 735 color="k", linestyle="dashed", lw=1)[0]
732 736 else:
733 737 # ax.collections.remove(ax.collections[0]) # error while running
734 738 ax.plt = ax.pcolormesh(x, y, z[n].T,
735 739 vmin=self.zmin,
736 740 vmax=self.zmax,
737 741 cmap=plt.get_cmap(self.colormap)
738 742 )
739 743 if self.showprofile:
740 ax.plot_profile.set_data(data['rti'][n], self.y)
744 ax.plot_profile.set_data(data[self.CODE][n], self.y)
741 745 if "noise" in self.data:
742 746 ax.plot_noise = self.pf_axes[n].plot(numpy.repeat(data['noise'][n], len(self.y)), self.y,
743 747 color="k", linestyle="dashed", lw=1)[0]
744 748
745 749 class SpectrogramPlot(Plot):
746 750 '''
747 751 Plot for Spectrogram data
748 752 '''
749 753
750 754 CODE = 'Spectrogram_Profile'
751 755 colormap = 'binary'
752 756 plot_type = 'pcolorbuffer'
753 757
754 758 def setup(self):
755 759 self.xaxis = 'time'
756 760 self.ncols = 1
757 761 self.nrows = len(self.data.channels)
758 762 self.nplots = len(self.data.channels)
759 763 self.xlabel = 'Time'
760 764 self.plots_adjust.update({'hspace':1.2, 'left': 0.1, 'bottom': 0.12, 'right':0.95})
761 765 self.titles = []
762 766
763 767 self.titles = ['{} Channel {}'.format(
764 768 self.CODE.upper(), x) for x in range(self.nrows)]
765 769
766 770
767 771 def update(self, dataOut):
768 772 data = {}
769 773 meta = {}
770 774
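# Use the highest range gate at or below maxHei (km) and build the spectrogram from the
# spectrum at that single height, normalized by the number of incoherent integrations.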
771 775 maxHei = 1620#+12000
772 776 indb = numpy.where(dataOut.heightList <= maxHei)
773 777 hei = indb[0][-1]
774 778
775 779 factor = dataOut.nIncohInt
776 780 z = dataOut.data_spc[:,:,hei] / factor
777 781 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
778 782
779 783 meta['xrange'] = (dataOut.getFreqRange(1)/1000., dataOut.getAcfRange(1), dataOut.getVelRange(1))
780 784 data['Spectrogram_Profile'] = 10 * numpy.log10(z)
781 785
782 786 data['hei'] = hei
783 787 data['DH'] = (dataOut.heightList[1] - dataOut.heightList[0])/dataOut.step
784 788 data['nProfiles'] = dataOut.nProfiles
785 789
786 790 return data, meta
787 791
788 792 def plot(self):
789 793
790 794 self.x = self.data.times
791 795 self.z = self.data[self.CODE]
792 796 self.y = self.data.xrange[0]
793 797
794 798 hei = self.data['hei'][-1]
795 799 DH = self.data['DH'][-1]
796 800 nProfiles = self.data['nProfiles'][-1]
797 801
798 802 self.ylabel = "Frequency (kHz)"
799 803
800 804 self.z = numpy.ma.masked_invalid(self.z)
801 805
802 806 if self.decimation is None:
803 807 x, y, z = self.fill_gaps(self.x, self.y, self.z)
804 808 else:
805 809 x, y, z = self.fill_gaps(*self.decimate())
806 810
807 811 for n, ax in enumerate(self.axes):
808 812 self.zmin = self.zmin if self.zmin else numpy.min(self.z)
809 813 self.zmax = self.zmax if self.zmax else numpy.max(self.z)
810 814 data = self.data[-1]
811 815 if ax.firsttime:
812 816 ax.plt = ax.pcolormesh(x, y, z[n].T,
813 817 vmin=self.zmin,
814 818 vmax=self.zmax,
815 819 cmap=plt.get_cmap(self.colormap)
816 820 )
817 821 else:
818 822 # ax.collections.remove(ax.collections[0]) # error while running
819 823 ax.plt = ax.pcolormesh(x, y, z[n].T,
820 824 vmin=self.zmin,
821 825 vmax=self.zmax,
822 826 cmap=plt.get_cmap(self.colormap)
823 827 )
824 828
825 829
826 830
827 831 class CoherencePlot(RTIPlot):
828 832 '''
829 833 Plot for Coherence data
830 834 '''
831 835
832 836 CODE = 'coh'
833 837 titles = None
834 838
835 839 def setup(self):
836 840 self.xaxis = 'time'
837 841 self.ncols = 1
838 842 self.nrows = len(self.data.pairs)
839 843 self.nplots = len(self.data.pairs)
840 844 self.ylabel = 'Range [km]'
841 845 self.xlabel = 'Time'
842 846 self.plots_adjust.update({'hspace':0.6, 'left': 0.1, 'bottom': 0.1,'right':0.95})
843 847 if self.CODE == 'coh':
844 848 self.cb_label = ''
845 849 self.titles = [
846 850 'Coherence Map Ch{} * Ch{}'.format(x[0], x[1]) for x in self.data.pairs]
847 851 else:
848 852 self.cb_label = 'Degrees'
849 853 self.titles = [
850 854 'Phase Map Ch{} * Ch{}'.format(x[0], x[1]) for x in self.data.pairs]
851 855
852 856 def update(self, dataOut):
853 857
854 858 data = {}
855 859 meta = {}
856 860 data['coh'] = dataOut.getCoherence()
857 861 meta['pairs'] = dataOut.pairsList
858 862
859 863 return data, meta
860 864
861 865 class PhasePlot(CoherencePlot):
862 866 '''
863 867 Plot for Phase map data
864 868 '''
865 869
866 870 CODE = 'phase'
867 871 colormap = 'seismic'
868 872
869 873 def update(self, dataOut):
870 874
871 875 data = {}
872 876 meta = {}
873 877 data['phase'] = dataOut.getCoherence(phase=True)
874 878 meta['pairs'] = dataOut.pairsList
875 879
876 880 return data, meta
877 881
878 882 class NoisePlot(Plot):
879 883 '''
880 884 Plot for noise
881 885 '''
882 886
883 887 CODE = 'noise'
884 888 plot_type = 'scatterbuffer'
885 889
886 890 def setup(self):
887 891 self.xaxis = 'time'
888 892 self.ncols = 1
889 893 self.nrows = 1
890 894 self.nplots = 1
891 895 self.ylabel = 'Intensity [dB]'
892 896 self.xlabel = 'Time'
893 897 self.titles = ['Noise']
894 898 self.colorbar = False
895 899 self.plots_adjust.update({'right': 0.85 })
900 self.titles = ['Noise Plot']
896 901
897 902 def update(self, dataOut):
898 903
899 904 data = {}
900 905 meta = {}
901 data['noise'] = 10*numpy.log10(dataOut.getNoise()/dataOut.normFactor).reshape(dataOut.nChannels, 1)
906 noise = 10*numpy.log10(dataOut.getNoise())
907 noise = noise.reshape(dataOut.nChannels, 1)
908 data['noise'] = noise
902 909 meta['yrange'] = numpy.array([])
903 910
904 911 return data, meta
905 912
906 913 def plot(self):
907 914
908 915 x = self.data.times
909 916 xmin = self.data.min_time
910 917 xmax = xmin + self.xrange * 60 * 60
911 918 Y = self.data['noise']
912 919
913 920 if self.axes[0].firsttime:
914 921 self.ymin = numpy.nanmin(Y) - 5
915 922 self.ymax = numpy.nanmax(Y) + 5
916 923 for ch in self.data.channels:
917 924 y = Y[ch]
918 925 self.axes[0].plot(x, y, lw=1, label='Ch{}'.format(ch))
919 926 plt.legend(bbox_to_anchor=(1.18, 1.0))
920 927 else:
921 928 for ch in self.data.channels:
922 929 y = Y[ch]
923 930 self.axes[0].lines[ch].set_data(x, y)
924 931
925 932 class PowerProfilePlot(Plot):
926 933
927 934 CODE = 'pow_profile'
928 935 plot_type = 'scatter'
929 936
930 937 def setup(self):
931 938
932 939 self.ncols = 1
933 940 self.nrows = 1
934 941 self.nplots = 1
935 942 self.height = 4
936 943 self.width = 3
937 944 self.ylabel = 'Range [km]'
938 945 self.xlabel = 'Intensity [dB]'
939 946 self.titles = ['Power Profile']
940 947 self.colorbar = False
941 948
942 949 def update(self, dataOut):
943 950
944 951 data = {}
945 952 meta = {}
946 953 data[self.CODE] = dataOut.getPower()
947 954
948 955 return data, meta
949 956
950 957 def plot(self):
951 958
952 959 y = self.data.yrange
953 960 self.y = y
954 961
955 962 x = self.data[-1][self.CODE]
956 963
957 964 if self.xmin is None: self.xmin = numpy.nanmin(x)*0.9
958 965 if self.xmax is None: self.xmax = numpy.nanmax(x)*1.1
959 966
960 967 if self.axes[0].firsttime:
961 968 for ch in self.data.channels:
962 969 self.axes[0].plot(x[ch], y, lw=1, label='Ch{}'.format(ch))
963 970 plt.legend()
964 971 else:
965 972 for ch in self.data.channels:
966 973 self.axes[0].lines[ch].set_data(x[ch], y)
967 974
968 975
969 976 class SpectraCutPlot(Plot):
970 977
971 978 CODE = 'spc_cut'
972 979 plot_type = 'scatter'
973 980 buffering = False
974 981 heights = []
975 982 channelList = []
976 983 maintitle = "Spectra Cuts"
977 984 flag_setIndex = False
978 985
979 986 def setup(self):
980 987
981 988 self.nplots = len(self.data.channels)
982 989 self.ncols = int(numpy.sqrt(self.nplots) + 0.9)
983 990 self.nrows = int((1.0 * self.nplots / self.ncols) + 0.9)
984 991 self.width = 4.5 * self.ncols + 2.5
985 992 self.height = 4.8 * self.nrows
986 993 self.ylabel = 'Power [dB]'
987 994 self.colorbar = False
988 995 self.plots_adjust.update({'left':0.1, 'hspace':0.3, 'right': 0.9, 'bottom':0.08})
989 996
990 997 if len(self.selectedHeightsList) > 0:
991 998 self.maintitle = "Spectra Cut"# for %d km " %(int(self.selectedHeight))
992 999
993 1000
994 1001
995 1002 def update(self, dataOut):
996 1003 if len(self.channelList) == 0:
997 1004 self.channelList = dataOut.channelList
998 1005
999 1006 self.heights = dataOut.heightList
1000 1007 #print("sels: ",self.selectedHeightsList)
1001 1008 if len(self.selectedHeightsList)>0 and not self.flag_setIndex:
1002 1009
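# Map each requested height (km) to the first index of heightList at or above it; this is
# done only once (flag_setIndex), assuming the height list does not change between blocks.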
1003 1010 for sel_height in self.selectedHeightsList:
1004 1011 index_list = numpy.where(self.heights >= sel_height)
1005 1012 index_list = index_list[0]
1006 1013 self.height_index.append(index_list[0])
1007 1014 #print("sels i:"", self.height_index)
1008 1015 self.flag_setIndex = True
1009 1016 #print(self.height_index)
1010 1017 data = {}
1011 1018 meta = {}
1012 1019
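# The per-channel noise level (in dB, after scaling by the number of integrated samples) is
# broadcast over frequency and height so it can later be subtracted from the spectrum in dB.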
1013 1020 norm = dataOut.nProfiles * dataOut.max_nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter#*dataOut.nFFTPoints
1014 1021 n0 = 10*numpy.log10(dataOut.getNoise()/norm)
1015 1022 noise = numpy.repeat(n0,(dataOut.nFFTPoints*dataOut.nHeights)).reshape(dataOut.nChannels,dataOut.nFFTPoints,dataOut.nHeights)
1016 1023
1017 1024
1018 1025 z = []
1019 1026 for ch in range(dataOut.nChannels):
1020 1027 if hasattr(dataOut.normFactor,'shape'):
1021 1028 z.append(numpy.divide(dataOut.data_spc[ch],dataOut.normFactor[ch]))
1022 1029 else:
1023 1030 z.append(numpy.divide(dataOut.data_spc[ch],dataOut.normFactor))
1024 1031
1025 1032 z = numpy.asarray(z)
1026 1033 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
1027 1034 spc = 10*numpy.log10(z)
1028 1035
1029 1036
1030 1037 data['spc'] = spc - noise
1031 1038 meta['xrange'] = (dataOut.getFreqRange(EXTRA_POINTS)/1000., dataOut.getAcfRange(EXTRA_POINTS), dataOut.getVelRange(EXTRA_POINTS))
1032 1039
1033 1040 return data, meta
1034 1041
1035 1042 def plot(self):
1036 1043 if self.xaxis == "frequency":
1037 1044 x = self.data.xrange[0][0:]
1038 1045 self.xlabel = "Frequency (kHz)"
1039 1046 elif self.xaxis == "time":
1040 1047 x = self.data.xrange[1]
1041 1048 self.xlabel = "Time (ms)"
1042 1049 else:
1043 1050 x = self.data.xrange[2]
1044 1051 self.xlabel = "Velocity (m/s)"
1045 1052
1046 1053 self.titles = []
1047 1054
1048 1055 y = self.data.yrange
1049 1056 z = self.data[-1]['spc']
1050 1057 #print(z.shape)
1051 1058 if len(self.height_index) > 0:
1052 1059 index = self.height_index
1053 1060 else:
1054 1061 index = numpy.arange(0, len(y), int((len(y))/9))
1055 1062 #print("inde x ", index, self.axes)
1056 1063
1057 1064 for n, ax in enumerate(self.axes):
1058 1065
1059 1066 if ax.firsttime:
1060 1067
1061 1068
1062 1069 self.xmax = self.xmax if self.xmax else numpy.nanmax(x)
1063 1070 self.xmin = self.xmin if self.xmin else -self.xmax
1064 1071 self.ymin = self.ymin if self.ymin else numpy.nanmin(z)
1065 1072 self.ymax = self.ymax if self.ymax else numpy.nanmax(z)
1066 1073
1067 1074
1068 1075 ax.plt = ax.plot(x, z[n, :, index].T)
1069 1076 labels = ['Range = {:2.1f}km'.format(y[i]) for i in index]
1070 1077 self.figures[0].legend(ax.plt, labels, loc='center right', prop={'size': 8})
1071 1078 ax.minorticks_on()
1072 1079 ax.grid(which='major', axis='both')
1073 1080 ax.grid(which='minor', axis='x')
1074 1081 else:
1075 1082 for i, line in enumerate(ax.plt):
1076 1083 line.set_data(x, z[n, :, index[i]])
1077 1084
1078 1085
1079 1086 self.titles.append('CH {}'.format(self.channelList[n]))
1080 1087 plt.suptitle(self.maintitle, fontsize=10)
1081 1088
1082 1089
1083 1090 class BeaconPhase(Plot):
1084 1091
1085 1092 __isConfig = None
1086 1093 __nsubplots = None
1087 1094
1088 1095 PREFIX = 'beacon_phase'
1089 1096
1090 1097 def __init__(self):
1091 1098 Plot.__init__(self)
1092 1099 self.timerange = 24*60*60
1093 1100 self.isConfig = False
1094 1101 self.__nsubplots = 1
1095 1102 self.counter_imagwr = 0
1096 1103 self.WIDTH = 800
1097 1104 self.HEIGHT = 400
1098 1105 self.WIDTHPROF = 120
1099 1106 self.HEIGHTPROF = 0
1100 1107 self.xdata = None
1101 1108 self.ydata = None
1102 1109
1103 1110 self.PLOT_CODE = BEACON_CODE
1104 1111
1105 1112 self.FTP_WEI = None
1106 1113 self.EXP_CODE = None
1107 1114 self.SUB_EXP_CODE = None
1108 1115 self.PLOT_POS = None
1109 1116
1110 1117 self.filename_phase = None
1111 1118
1112 1119 self.figfile = None
1113 1120
1114 1121 self.xmin = None
1115 1122 self.xmax = None
1116 1123
1117 1124 def getSubplots(self):
1118 1125
1119 1126 ncol = 1
1120 1127 nrow = 1
1121 1128
1122 1129 return nrow, ncol
1123 1130
1124 1131 def setup(self, id, nplots, wintitle, showprofile=True, show=True):
1125 1132
1126 1133 self.__showprofile = showprofile
1127 1134 self.nplots = nplots
1128 1135
1129 1136 ncolspan = 7
1130 1137 colspan = 6
1131 1138 self.__nsubplots = 2
1132 1139
1133 1140 self.createFigure(id = id,
1134 1141 wintitle = wintitle,
1135 1142 widthplot = self.WIDTH+self.WIDTHPROF,
1136 1143 heightplot = self.HEIGHT+self.HEIGHTPROF,
1137 1144 show=show)
1138 1145
1139 1146 nrow, ncol = self.getSubplots()
1140 1147
1141 1148 self.addAxes(nrow, ncol*ncolspan, 0, 0, colspan, 1)
1142 1149
1143 1150 def save_phase(self, filename_phase):
1144 1151 f = open(filename_phase,'w+')
1145 1152 f.write('\n\n')
1146 1153 f.write('JICAMARCA RADIO OBSERVATORY - Beacon Phase \n')
1147 1154 f.write('DD MM YYYY HH MM SS pair(2,0) pair(2,1) pair(2,3) pair(2,4)\n\n' )
1148 1155 f.close()
1149 1156
1150 1157 def save_data(self, filename_phase, data, data_datetime):
1151 1158 f=open(filename_phase,'a')
1152 1159 timetuple_data = data_datetime.timetuple()
1153 1160 day = str(timetuple_data.tm_mday)
1154 1161 month = str(timetuple_data.tm_mon)
1155 1162 year = str(timetuple_data.tm_year)
1156 1163 hour = str(timetuple_data.tm_hour)
1157 1164 minute = str(timetuple_data.tm_min)
1158 1165 second = str(timetuple_data.tm_sec)
1159 1166 f.write(day+' '+month+' '+year+' '+hour+' '+minute+' '+second+' '+str(data[0])+' '+str(data[1])+' '+str(data[2])+' '+str(data[3])+'\n')
1160 1167 f.close()
1161 1168
1162 1169 def plot(self):
1163 1170 log.warning('TODO: Not yet implemented...')
1164 1171
1165 1172 def run(self, dataOut, id, wintitle="", pairsList=None, showprofile='True',
1166 1173 xmin=None, xmax=None, ymin=None, ymax=None, hmin=None, hmax=None,
1167 1174 timerange=None,
1168 1175 save=False, figpath='./', figfile=None, show=True, ftp=False, wr_period=1,
1169 1176 server=None, folder=None, username=None, password=None,
1170 1177 ftp_wei=0, exp_code=0, sub_exp_code=0, plot_pos=0):
1171 1178
1172 1179 if dataOut.flagNoData:
1173 1180 return dataOut
1174 1181
1175 1182 if not isTimeInHourRange(dataOut.datatime, xmin, xmax):
1176 1183 return
1177 1184
1178 1185 if pairsList == None:
1179 1186 pairsIndexList = dataOut.pairsIndexList[:10]
1180 1187 else:
1181 1188 pairsIndexList = []
1182 1189 for pair in pairsList:
1183 1190 if pair not in dataOut.pairsList:
1184 1191 raise ValueError("Pair %s is not in dataOut.pairsList" %(pair))
1185 1192 pairsIndexList.append(dataOut.pairsList.index(pair))
1186 1193
1187 1194 if pairsIndexList == []:
1188 1195 return
1189 1196
1190 1197 # if len(pairsIndexList) > 4:
1191 1198 # pairsIndexList = pairsIndexList[0:4]
1192 1199
1193 1200 hmin_index = None
1194 1201 hmax_index = None
1195 1202
1196 1203 if hmin != None and hmax != None:
1197 1204 indexes = numpy.arange(dataOut.nHeights)
1198 1205 hmin_list = indexes[dataOut.heightList >= hmin]
1199 1206 hmax_list = indexes[dataOut.heightList <= hmax]
1200 1207
1201 1208 if hmin_list.any():
1202 1209 hmin_index = hmin_list[0]
1203 1210
1204 1211 if hmax_list.any():
1205 1212 hmax_index = hmax_list[-1]+1
1206 1213
1207 1214 x = dataOut.getTimeRange()
1208 1215
1209 1216 thisDatetime = dataOut.datatime
1210 1217
1211 1218 title = wintitle + " Signal Phase" # : %s" %(thisDatetime.strftime("%d-%b-%Y"))
1212 1219 xlabel = "Local Time"
1213 1220 ylabel = "Phase (degrees)"
1214 1221
1215 1222 update_figfile = False
1216 1223
1217 1224 nplots = len(pairsIndexList)
1218 1225 phase_beacon = numpy.zeros(len(pairsIndexList))
1219 1226 for i in range(nplots):
1220 1227 pair = dataOut.pairsList[pairsIndexList[i]]
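# Average the cross- and auto-spectra over the spectral bins, form the normalized complex
# coherence, and take its phase in degrees; the beacon phase is then averaged over the
# beacon height indexes when they are available.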
1221 1228 ccf = numpy.average(dataOut.data_cspc[pairsIndexList[i], :, hmin_index:hmax_index], axis=0)
1222 1229 powa = numpy.average(dataOut.data_spc[pair[0], :, hmin_index:hmax_index], axis=0)
1223 1230 powb = numpy.average(dataOut.data_spc[pair[1], :, hmin_index:hmax_index], axis=0)
1224 1231 avgcoherenceComplex = ccf/numpy.sqrt(powa*powb)
1225 1232 phase = numpy.arctan2(avgcoherenceComplex.imag, avgcoherenceComplex.real)*180/numpy.pi
1226 1233
1227 1234 if dataOut.beacon_heiIndexList:
1228 1235 phase_beacon[i] = numpy.average(phase[dataOut.beacon_heiIndexList])
1229 1236 else:
1230 1237 phase_beacon[i] = numpy.average(phase)
1231 1238
1232 1239 if not self.isConfig:
1233 1240
1234 1241 nplots = len(pairsIndexList)
1235 1242
1236 1243 self.setup(id=id,
1237 1244 nplots=nplots,
1238 1245 wintitle=wintitle,
1239 1246 showprofile=showprofile,
1240 1247 show=show)
1241 1248
1242 1249 if timerange != None:
1243 1250 self.timerange = timerange
1244 1251
1245 1252 self.xmin, self.xmax = self.getTimeLim(x, xmin, xmax, timerange)
1246 1253
1247 1254 if ymin == None: ymin = 0
1248 1255 if ymax == None: ymax = 360
1249 1256
1250 1257 self.FTP_WEI = ftp_wei
1251 1258 self.EXP_CODE = exp_code
1252 1259 self.SUB_EXP_CODE = sub_exp_code
1253 1260 self.PLOT_POS = plot_pos
1254 1261
1255 1262 self.name = thisDatetime.strftime("%Y%m%d_%H%M%S")
1256 1263 self.isConfig = True
1257 1264 self.figfile = figfile
1258 1265 self.xdata = numpy.array([])
1259 1266 self.ydata = numpy.array([])
1260 1267
1261 1268 update_figfile = True
1262 1269
1263 1270 #open file beacon phase
1264 1271 path = '%s%03d' %(self.PREFIX, self.id)
1265 1272 beacon_file = os.path.join(path,'%s.txt'%self.name)
1266 1273 self.filename_phase = os.path.join(figpath,beacon_file)
1267 1274
1268 1275 self.setWinTitle(title)
1269 1276
1270 1277
1271 1278 title = "Phase Plot %s" %(thisDatetime.strftime("%Y/%m/%d %H:%M:%S"))
1272 1279
1273 1280 legendlabels = ["Pair (%d,%d)"%(pair[0], pair[1]) for pair in dataOut.pairsList]
1274 1281
1275 1282 axes = self.axesList[0]
1276 1283
1277 1284 self.xdata = numpy.hstack((self.xdata, x[0:1]))
1278 1285
1279 1286 if len(self.ydata)==0:
1280 1287 self.ydata = phase_beacon.reshape(-1,1)
1281 1288 else:
1282 1289 self.ydata = numpy.hstack((self.ydata, phase_beacon.reshape(-1,1)))
1283 1290
1284 1291
1285 1292 axes.pmultilineyaxis(x=self.xdata, y=self.ydata,
1286 1293 xmin=self.xmin, xmax=self.xmax, ymin=ymin, ymax=ymax,
1287 1294 xlabel=xlabel, ylabel=ylabel, title=title, legendlabels=legendlabels, marker='x', markersize=8, linestyle="solid",
1288 1295 XAxisAsTime=True, grid='both'
1289 1296 )
1290 1297
1291 1298 self.draw()
1292 1299
1293 1300 if dataOut.ltctime >= self.xmax:
1294 1301 self.counter_imagwr = wr_period
1295 1302 self.isConfig = False
1296 1303 update_figfile = True
1297 1304
1298 1305 self.save(figpath=figpath,
1299 1306 figfile=figfile,
1300 1307 save=save,
1301 1308 ftp=ftp,
1302 1309 wr_period=wr_period,
1303 1310 thisDatetime=thisDatetime,
1304 1311 update_figfile=update_figfile)
1305 1312
1306 1313 return dataOut
1307 1314
1308 1315 #####################################
1309 1316 class NoiselessSpectraPlot(Plot):
1310 1317 '''
1311 1318 Plot for Spectra data, subtracting
1312 1319 the noise in all channels, used for
1313 1320 AMISR-14 data
1314 1321 '''
1315 1322
1316 1323 CODE = 'noiseless_spc'
1317 1324 colormap = 'jet'
1318 1325 plot_type = 'pcolor'
1319 1326 buffering = False
1320 1327 channelList = []
1321 1328 last_noise = None
1322 1329
1323 1330 def setup(self):
1324 1331
1325 1332 self.nplots = len(self.data.channels)
1326 1333 self.ncols = int(numpy.sqrt(self.nplots) + 0.9)
1327 1334 self.nrows = int((1.0 * self.nplots / self.ncols) + 0.9)
1328 1335 self.height = 3.5 * self.nrows
1329 1336
1330 1337 self.cb_label = 'dB'
1331 1338 if self.showprofile:
1332 1339 self.width = 5.8 * self.ncols
1333 1340 else:
1334 1341 self.width = 4.8* self.ncols
1335 1342 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.92, 'bottom': 0.12})
1336 1343
1337 1344 self.ylabel = 'Range [km]'
1338 1345
1339 1346
1340 1347 def update_list(self,dataOut):
1341 1348 if len(self.channelList) == 0:
1342 1349 self.channelList = dataOut.channelList
1343 1350
1344 1351 def update(self, dataOut):
1345 1352
1346 1353 self.update_list(dataOut)
1347 1354 data = {}
1348 1355 meta = {}
1349 1356
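# Estimate the per-channel noise, scale it by the total number of integrated samples, expand
# it to the (channel, FFT point, height) shape, and convert it to dB so it can be subtracted
# from the spectrum in dB below.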
1350 1357 norm = dataOut.nProfiles * dataOut.max_nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter
1351 1358 n0 = (dataOut.getNoise()/norm)
1352 1359 noise = numpy.repeat(n0,(dataOut.nFFTPoints*dataOut.nHeights)).reshape(dataOut.nChannels,dataOut.nFFTPoints,dataOut.nHeights)
1353 1360 noise = 10*numpy.log10(noise)
1354 1361
1355 1362 z = numpy.zeros((dataOut.nChannels, dataOut.nFFTPoints, dataOut.nHeights))
1356 1363 for ch in range(dataOut.nChannels):
1357 1364 if hasattr(dataOut.normFactor,'ndim'):
1358 1365 if dataOut.normFactor.ndim > 1:
1359 1366 z[ch] = (numpy.divide(dataOut.data_spc[ch],dataOut.normFactor[ch]))
1360 1367 else:
1361 1368 z[ch] = (numpy.divide(dataOut.data_spc[ch],dataOut.normFactor))
1362 1369 else:
1363 1370 z[ch] = (numpy.divide(dataOut.data_spc[ch],dataOut.normFactor))
1364 1371
1365 1372 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
1366 1373 spc = 10*numpy.log10(z)
1367 1374
1368 1375
1369 1376 data['spc'] = spc - noise
1370 1377 #print(spc.shape)
1371 1378 data['rti'] = spc.mean(axis=1)
1372 1379 data['noise'] = noise
1373 1380
1374 1381
1375 1382
1376 1383 # data['noise'] = noise
1377 1384 meta['xrange'] = (dataOut.getFreqRange(EXTRA_POINTS)/1000., dataOut.getAcfRange(EXTRA_POINTS), dataOut.getVelRange(EXTRA_POINTS))
1378 1385
1379 1386 return data, meta
1380 1387
1381 1388 def plot(self):
1382 1389 if self.xaxis == "frequency":
1383 1390 x = self.data.xrange[0]
1384 1391 self.xlabel = "Frequency (kHz)"
1385 1392 elif self.xaxis == "time":
1386 1393 x = self.data.xrange[1]
1387 1394 self.xlabel = "Time (ms)"
1388 1395 else:
1389 1396 x = self.data.xrange[2]
1390 1397 self.xlabel = "Velocity (m/s)"
1391 1398
1392 1399 self.titles = []
1393 1400 y = self.data.yrange
1394 1401 self.y = y
1395 1402
1396 1403 data = self.data[-1]
1397 1404 z = data['spc']
1398 1405
1399 1406 for n, ax in enumerate(self.axes):
1400 1407 #noise = data['noise'][n]
1401 1408
1402 1409 if ax.firsttime:
1403 1410 self.xmax = self.xmax if self.xmax else numpy.nanmax(x)
1404 1411 self.xmin = self.xmin if self.xmin else -self.xmax
1405 1412 self.zmin = self.zmin if self.zmin else numpy.nanmin(z)
1406 1413 self.zmax = self.zmax if self.zmax else numpy.nanmax(z)
1407 1414 ax.plt = ax.pcolormesh(x, y, z[n].T,
1408 1415 vmin=self.zmin,
1409 1416 vmax=self.zmax,
1410 1417 cmap=plt.get_cmap(self.colormap)
1411 1418 )
1412 1419
1413 1420 if self.showprofile:
1414 1421 ax.plt_profile = self.pf_axes[n].plot(
1415 1422 data['rti'][n], y)[0]
1416 1423
1417 1424
1418 1425 else:
1419 1426 ax.plt.set_array(z[n].T.ravel())
1420 1427 if self.showprofile:
1421 1428 ax.plt_profile.set_data(data['rti'][n], y)
1422 1429
1423 1430
1424 1431 self.titles.append('CH {}'.format(self.channelList[n]))
1425 1432
1426 1433
1427 1434 class NoiselessRTIPlot(RTIPlot):
1428 1435 '''
1429 1436 Plot for RTI data
1430 1437 '''
1431 1438
1432 1439 CODE = 'noiseless_rti'
1433 1440 colormap = 'jet'
1434 1441 plot_type = 'pcolorbuffer'
1435 1442 titles = None
1436 1443 channelList = []
1437 1444 elevationList = []
1438 1445 azimuthList = []
1439 1446 last_noise = None
1440 1447
1441 1448 def setup(self):
1442 1449 self.xaxis = 'time'
1443 1450 self.ncols = 1
1444 1451 #print("dataChannels ",self.data.channels)
1445 1452 self.nrows = len(self.data.channels)
1446 1453 self.nplots = len(self.data.channels)
1447 1454 self.ylabel = 'Range [km]'
1448 1455 #self.xlabel = 'Time'
1449 1456 self.cb_label = 'dB'
1450 1457 self.plots_adjust.update({'hspace':0.8, 'left': 0.08, 'bottom': 0.2, 'right':0.94})
1451 1458 self.titles = ['{} Channel {}'.format(
1452 1459 self.CODE.upper(), x) for x in range(self.nplots)]
1453 1460
1454 1461 def update_list(self,dataOut):
1455 1462 if len(self.channelList) == 0:
1456 1463 self.channelList = dataOut.channelList
1457 1464 if len(self.elevationList) == 0:
1458 1465 self.elevationList = dataOut.elevationList
1459 1466 if len(self.azimuthList) == 0:
1460 1467 self.azimuthList = dataOut.azimuthList
1461 1468
1462 1469 def update(self, dataOut):
1463 1470 if len(self.channelList) == 0:
1464 1471 self.update_list(dataOut)
1465 1472
1466 1473 data = {}
1467 1474 meta = {}
1468 1475 #print(dataOut.max_nIncohInt, dataOut.nIncohInt)
1469 1476 #print(dataOut.windowOfFilter,dataOut.nCohInt,dataOut.nProfiles,dataOut.max_nIncohInt,dataOut.nIncohInt
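# The scaled noise level (in dB) is subtracted from the power profile of every channel, so
# the plotted RTI is noise-corrected.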
1470 1477 norm = dataOut.nProfiles * dataOut.max_nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter
1471 1478 n0 = 10*numpy.log10(dataOut.getNoise()/norm)
1472 1479 data['noise'] = n0
1473 1480 noise = numpy.repeat(n0,dataOut.nHeights).reshape(dataOut.nChannels,dataOut.nHeights)
1474 1481 noiseless_data = dataOut.getPower() - noise
1475 1482
1476 1483 #print("power, noise:", dataOut.getPower(), n0)
1477 1484 #print(noise)
1478 1485 #print(noiseless_data)
1479 1486
1480 1487 data['noiseless_rti'] = noiseless_data
1481 1488
1482 1489 return data, meta
1483 1490
1484 1491 def plot(self):
1485 1492 from matplotlib import pyplot as plt
1486 1493 self.x = self.data.times
1487 1494 self.y = self.data.yrange
1488 1495 self.z = self.data['noiseless_rti']
1489 1496 self.z = numpy.array(self.z, dtype=float)
1490 1497 self.z = numpy.ma.masked_invalid(self.z)
1491 1498
1492 1499
1493 1500 try:
1494 1501 if self.channelList != None:
1495 1502 if len(self.elevationList) > 0 and len(self.azimuthList) > 0:
1496 1503 self.titles = ['{} Channel {} ({:2.1f} Elev, {:2.1f} Azth)'.format(
1497 1504 self.CODE.upper(), x, self.elevationList[x], self.azimuthList[x]) for x in self.channelList]
1498 1505 else:
1499 1506 self.titles = ['{} Channel {}'.format(
1500 1507 self.CODE.upper(), x) for x in self.channelList]
1501 1508 except:
1502 1509 if self.channelList.any() != None:
1503 1510 if len(self.elevationList) > 0 and len(self.azimuthList) > 0:
1504 1511 self.titles = ['{} Channel {} ({:2.1f} Elev, {:2.1f} Azth)'.format(
1505 1512 self.CODE.upper(), x, self.elevationList[x], self.azimuthList[x]) for x in self.channelList]
1506 1513 else:
1507 1514 self.titles = ['{} Channel {}'.format(
1508 1515 self.CODE.upper(), x) for x in self.channelList]
1509 1516
1510 1517
1511 1518 if self.decimation is None:
1512 1519 x, y, z = self.fill_gaps(self.x, self.y, self.z)
1513 1520 else:
1514 1521 x, y, z = self.fill_gaps(*self.decimate())
1515 1522
1516 1523 dummy_var = self.axes # Strangely, accessing self.axes here updates the axes value
1517 1524 #print("plot shapes ", z.shape, x.shape, y.shape)
1518 1525 #print(self.axes)
1519 1526 for n, ax in enumerate(self.axes):
1520 1527
1521 1528
1522 1529 self.zmin = self.zmin if self.zmin else numpy.min(self.z)
1523 1530 self.zmax = self.zmax if self.zmax else numpy.max(self.z)
1524 1531 data = self.data[-1]
1525 1532 if ax.firsttime:
1526 1533 if (n+1) == len(self.channelList):
1527 1534 ax.set_xlabel('Time')
1528 1535 ax.plt = ax.pcolormesh(x, y, z[n].T,
1529 1536 vmin=self.zmin,
1530 1537 vmax=self.zmax,
1531 1538 cmap=plt.get_cmap(self.colormap)
1532 1539 )
1533 1540 if self.showprofile:
1534 1541 ax.plot_profile = self.pf_axes[n].plot(data['noiseless_rti'][n], self.y)[0]
1535 1542
1536 1543 else:
1537 1544 # ax.collections.remove(ax.collections[0]) # error while running
1538 1545 ax.plt = ax.pcolormesh(x, y, z[n].T,
1539 1546 vmin=self.zmin,
1540 1547 vmax=self.zmax,
1541 1548 cmap=plt.get_cmap(self.colormap)
1542 1549 )
1543 1550 if self.showprofile:
1544 1551 ax.plot_profile.set_data(data['noiseless_rti'][n], self.y)
1545 1552 # if "noise" in self.data:
1546 1553 # #ax.plot_noise.set_data(numpy.repeat(data['noise'][n], len(self.y)), self.y)
1547 1554 # ax.plot_noise.set_data(data['noise'][n], self.y)
1548 1555
1549 1556
1550 1557 class OutliersRTIPlot(Plot):
1551 1558 '''
1552 1559 Plot for data_xxxx object
1553 1560 '''
1554 1561
1555 1562 CODE = 'outlier_rtc' # Range Time Counts
1556 1563 colormap = 'cool'
1557 1564 plot_type = 'pcolorbuffer'
1558 1565
1559 1566 def setup(self):
1560 1567 self.xaxis = 'time'
1561 1568 self.ncols = 1
1562 1569 self.nrows = self.data.shape('outlier_rtc')[0]
1563 1570 self.nplots = self.nrows
1564 1571 self.plots_adjust.update({'hspace':0.8, 'left': 0.08, 'bottom': 0.2, 'right':0.94})
1565 1572
1566 1573
1567 1574 if not self.xlabel:
1568 1575 self.xlabel = 'Time'
1569 1576
1570 1577 self.ylabel = 'Height [km]'
1571 1578 if not self.titles:
1572 1579 self.titles = ['Outliers Ch:{}'.format(x) for x in range(self.nrows)]
1573 1580
1574 1581 def update(self, dataOut):
1575 1582
1576 1583 data = {}
1577 1584 data['outlier_rtc'] = dataOut.data_outlier
1578 1585
1579 1586 meta = {}
1580 1587
1581 1588 return data, meta
1582 1589
1583 1590 def plot(self):
1584 1591 # self.data.normalize_heights()
1585 1592 self.x = self.data.times
1586 1593 self.y = self.data.yrange
1587 1594 self.z = self.data['outlier_rtc']
1588 1595
1589 1596 #self.z = numpy.ma.masked_invalid(self.z)
1590 1597
1591 1598 if self.decimation is None:
1592 1599 x, y, z = self.fill_gaps(self.x, self.y, self.z)
1593 1600 else:
1594 1601 x, y, z = self.fill_gaps(*self.decimate())
1595 1602
1596 1603 for n, ax in enumerate(self.axes):
1597 1604
1598 1605 self.zmax = self.zmax if self.zmax is not None else numpy.max(
1599 1606 self.z[n])
1600 1607 self.zmin = self.zmin if self.zmin is not None else numpy.min(
1601 1608 self.z[n])
1602 1609 data = self.data[-1]
1603 1610 if ax.firsttime:
1604 1611 if self.zlimits is not None:
1605 1612 self.zmin, self.zmax = self.zlimits[n]
1606 1613
1607 1614 ax.plt = ax.pcolormesh(x, y, z[n].T,
1608 1615 vmin=self.zmin,
1609 1616 vmax=self.zmax,
1610 1617 cmap=self.cmaps[n]
1611 1618 )
1612 1619 if self.showprofile:
1613 1620 ax.plot_profile = self.pf_axes[n].plot(data['outlier_rtc'][n], self.y)[0]
1614 1621 self.pf_axes[n].set_xlabel('')
1615 1622 else:
1616 1623 if self.zlimits is not None:
1617 1624 self.zmin, self.zmax = self.zlimits[n]
1618 1625 # ax.collections.remove(ax.collections[0]) # error while running
1619 1626 ax.plt = ax.pcolormesh(x, y, z[n].T ,
1620 1627 vmin=self.zmin,
1621 1628 vmax=self.zmax,
1622 1629 cmap=self.cmaps[n]
1623 1630 )
1624 1631 if self.showprofile:
1625 1632 ax.plot_profile.set_data(data['outlier_rtc'][n], self.y)
1626 1633 self.pf_axes[n].set_xlabel('')
1627 1634
1628 1635 class NIncohIntRTIPlot(Plot):
1629 1636 '''
1630 1637 Plot for data_xxxx object
1631 1638 '''
1632 1639
1633 1640 CODE = 'integrations_rtc' # Range Time Counts
1634 1641 colormap = 'BuGn'
1635 1642 plot_type = 'pcolorbuffer'
1636 1643
1637 1644 def setup(self):
1638 1645 self.xaxis = 'time'
1639 1646 self.ncols = 1
1640 1647 self.nrows = self.data.shape('integrations_rtc')[0]
1641 1648 self.nplots = self.nrows
1642 1649 self.plots_adjust.update({'hspace':0.8, 'left': 0.08, 'bottom': 0.2, 'right':0.94})
1643 1650
1644 1651
1645 1652 if not self.xlabel:
1646 1653 self.xlabel = 'Time'
1647 1654
1648 1655 self.ylabel = 'Height [km]'
1649 1656 if not self.titles:
1650 1657 self.titles = ['Integration Ch:{}'.format(x) for x in range(self.nrows)]
1651 1658
1652 1659 def update(self, dataOut):
1653 1660
1654 1661 data = {}
1655 1662 data['integrations_rtc'] = dataOut.nIncohInt
1656 1663
1657 1664 meta = {}
1658 1665
1659 1666 return data, meta
1660 1667
1661 1668 def plot(self):
1662 1669 # self.data.normalize_heights()
1663 1670 self.x = self.data.times
1664 1671 self.y = self.data.yrange
1665 1672 self.z = self.data['integrations_rtc']
1666 1673
1667 1674 #self.z = numpy.ma.masked_invalid(self.z)
1668 1675
1669 1676 if self.decimation is None:
1670 1677 x, y, z = self.fill_gaps(self.x, self.y, self.z)
1671 1678 else:
1672 1679 x, y, z = self.fill_gaps(*self.decimate())
1673 1680
1674 1681 for n, ax in enumerate(self.axes):
1675 1682
1676 1683 self.zmax = self.zmax if self.zmax is not None else numpy.max(
1677 1684 self.z[n])
1678 1685 self.zmin = self.zmin if self.zmin is not None else numpy.min(
1679 1686 self.z[n])
1680 1687 data = self.data[-1]
1681 1688 if ax.firsttime:
1682 1689 if self.zlimits is not None:
1683 1690 self.zmin, self.zmax = self.zlimits[n]
1684 1691
1685 1692 ax.plt = ax.pcolormesh(x, y, z[n].T,
1686 1693 vmin=self.zmin,
1687 1694 vmax=self.zmax,
1688 1695 cmap=self.cmaps[n]
1689 1696 )
1690 1697 if self.showprofile:
1691 1698 ax.plot_profile = self.pf_axes[n].plot(data['integrations_rtc'][n], self.y)[0]
1692 1699 self.pf_axes[n].set_xlabel('')
1693 1700 else:
1694 1701 if self.zlimits is not None:
1695 1702 self.zmin, self.zmax = self.zlimits[n]
1696 1703 # ax.collections.remove(ax.collections[0]) # error while running
1697 1704 ax.plt = ax.pcolormesh(x, y, z[n].T ,
1698 1705 vmin=self.zmin,
1699 1706 vmax=self.zmax,
1700 1707 cmap=self.cmaps[n]
1701 1708 )
1702 1709 if self.showprofile:
1703 1710 ax.plot_profile.set_data(data['integrations_rtc'][n], self.y)
1704 1711 self.pf_axes[n].set_xlabel('')
1705 1712
1706 1713
1707 1714
1708 1715 class RTIMapPlot(Plot):
1709 1716 '''
1710 1717 Plot for RTI data
1711 1718
1712 1719 Example:
1713 1720
1714 1721 controllerObj = Project()
1715 1722 controllerObj.setup(id = '11', name='eej_proc', description=desc)
1716 1723 ##.......................................................................................
1717 1724 ##.......................................................................................
1718 1725 readUnitConfObj = controllerObj.addReadUnit(datatype='AMISRReader', path=inPath, startDate='2023/05/24',endDate='2023/05/24',
1719 1726 startTime='12:00:00',endTime='12:45:59',walk=1,timezone='lt',margin_days=1,code = code,nCode = nCode,
1720 1727 nBaud = nBaud,nOsamp = nosamp,nChannels=nChannels,nFFT=NFFT,
1721 1728 syncronization=False,shiftChannels=0)
1722 1729
1723 1730 volts_proc = controllerObj.addProcUnit(datatype='VoltageProc', inputId=readUnitConfObj.getId())
1724 1731
1725 1732 opObj01 = volts_proc.addOperation(name='Decoder', optype='other')
1726 1733 opObj01.addParameter(name='code', value=code, format='floatlist')
1727 1734 opObj01.addParameter(name='nCode', value=1, format='int')
1728 1735 opObj01.addParameter(name='nBaud', value=nBaud, format='int')
1729 1736 opObj01.addParameter(name='osamp', value=nosamp, format='int')
1730 1737
1731 1738 opObj12 = volts_proc.addOperation(name='selectHeights', optype='self')
1732 1739 opObj12.addParameter(name='minHei', value='90', format='float')
1733 1740 opObj12.addParameter(name='maxHei', value='150', format='float')
1734 1741
1735 1742 proc_spc = controllerObj.addProcUnit(datatype='SpectraProc', inputId=volts_proc.getId())
1736 1743 proc_spc.addParameter(name='nFFTPoints', value='8', format='int')
1737 1744
1738 1745 opObj11 = proc_spc.addOperation(name='IncohInt', optype='other')
1739 1746 opObj11.addParameter(name='n', value='1', format='int')
1740 1747
1741 1748 beamMapFile = "/home/japaza/Documents/AMISR_sky_mapper/UMET_beamcodes.csv"
1742 1749
1743 1750 opObj12 = proc_spc.addOperation(name='RTIMapPlot', optype='external')
1744 1751 opObj12.addParameter(name='selectedHeightsList', value='95, 100, 105, 110 ', format='int')
1745 1752 opObj12.addParameter(name='bField', value='100', format='int')
1746 1753 opObj12.addParameter(name='filename', value=beamMapFile, format='str')
1747 1754
1748 1755 '''
1749 1756
1750 1757 CODE = 'rti_skymap'
1751 1758
1752 1759 plot_type = 'scatter'
1753 1760 titles = None
1754 1761 colormap = 'jet'
1755 1762 channelList = []
1756 1763 elevationList = []
1757 1764 azimuthList = []
1758 1765 last_noise = None
1759 1766 flag_setIndex = False
1760 1767 heights = []
1761 1768 dcosx = []
1762 1769 dcosy = []
1763 1770 fullDcosx = None
1764 1771 fullDcosy = None
1765 1772 hindex = []
1766 1773 mapFile = False
1767 1774 ##### BField ####
1768 1775 flagBField = False
1769 1776 dcosxB = []
1770 1777 dcosyB = []
1771 1778 Bmarker = ['+','*','D','x','s','>','o','^']
1772 1779
1773 1780
1774 1781 def setup(self):
1775 1782
1776 1783 self.xaxis = 'Range (Km)'
1777 1784 if len(self.selectedHeightsList) > 0:
1778 1785 self.nplots = len(self.selectedHeightsList)
1779 1786 else:
1780 1787 self.nplots = 4
1781 1788 self.ncols = int(numpy.ceil(self.nplots/2))
1782 1789 self.nrows = int(numpy.ceil(self.nplots/self.ncols))
1783 1790 self.ylabel = 'dcosy'
1784 1791 self.xlabel = 'dcosx'
1785 1792 self.colorbar = True
1786 1793 self.width = 6 + 4.1*self.nrows
1787 1794 self.height = 3 + 3.5*self.ncols
1788 1795
1789 1796
1790 1797 if self.extFile!=None:
1791 1798 try:
1792 1799 pointings = numpy.genfromtxt(self.extFile, delimiter=',')
1793 1800 full_azi = pointings[:,1]
1794 1801 full_elev = pointings[:,2]
1795 1802 self.fullDcosx = numpy.cos(numpy.radians(full_elev))*numpy.sin(numpy.radians(full_azi))
1796 1803 self.fullDcosy = numpy.cos(numpy.radians(full_elev))*numpy.cos(numpy.radians(full_azi))
1797 1804 self.mapFile = True
1798 1805 except Exception as e:
1799 1806 self.extFile = None
1800 1807 print(e)
1801 1808
1802 1809
1803 1810 def update_list(self,dataOut):
1804 1811 if len(self.channelList) == 0:
1805 1812 self.channelList = dataOut.channelList
1806 1813 if len(self.elevationList) == 0:
1807 1814 self.elevationList = dataOut.elevationList
1808 1815 if len(self.azimuthList) == 0:
1809 1816 self.azimuthList = dataOut.azimuthList
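# Convert each beam's (azimuth, elevation) pointing into direction cosines:
# dcosx = cos(el)*sin(az), dcosy = cos(el)*cos(az); the sky map is drawn in this plane.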
1810 1817 a = numpy.radians(numpy.asarray(self.azimuthList))
1811 1818 e = numpy.radians(numpy.asarray(self.elevationList))
1812 1819 self.heights = dataOut.heightList
1813 1820 self.dcosx = numpy.cos(e)*numpy.sin(a)
1814 1821 self.dcosy = numpy.cos(e)*numpy.cos(a)
1815 1822
1816 1823 if len(self.bFieldList)>0:
1817 1824 datetObj = datetime.datetime.fromtimestamp(dataOut.utctime)
1818 1825 doy = datetObj.timetuple().tm_yday
1819 1826 year = datetObj.year
1820 1827 # self.dcosxB, self.dcosyB
1821 1828 ObjB = BField(year=year,doy=doy,site=2,heights=self.bFieldList)
1822 1829 [dcos, alpha, nlon, nlat] = ObjB.getBField()
1823 1830
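# For each requested height, interpolate (cubic splines, splrep/splev assumed to be imported
# at module level along with BField) the locus where the line of sight is perpendicular to
# the geomagnetic field, expressed as direction cosines, so it can be overlaid on the sky map.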
1824 1831 alpha_location = numpy.zeros((nlon,2,len(self.bFieldList)))
1825 1832 for ih in range(len(self.bFieldList)):
1826 1833 alpha_location[:,0,ih] = dcos[:,0,ih,0]
1827 1834 for ilon in numpy.arange(nlon):
1828 1835 myx = (alpha[ilon,:,ih])[::-1]
1829 1836 myy = (dcos[ilon,:,ih,0])[::-1]
1830 1837 tck = splrep(myx,myy,s=0)
1831 1838 mydcosx = splev(ObjB.alpha_i,tck,der=0)
1832 1839
1833 1840 myx = (alpha[ilon,:,ih])[::-1]
1834 1841 myy = (dcos[ilon,:,ih,1])[::-1]
1835 1842 tck = splrep(myx,myy,s=0)
1836 1843 mydcosy = splev(ObjB.alpha_i,tck,der=0)
1837 1844 alpha_location[ilon,:,ih] = numpy.array([mydcosx, mydcosy])
1838 1845 self.dcosxB.append(alpha_location[:,0,ih])
1839 1846 self.dcosyB.append(alpha_location[:,1,ih])
1840 1847 self.flagBField = True
1841 1848
1842 1849 if len(self.celestialList)>0:
1843 1850 #getBField(self.bFieldList, date)
1844 1851 #pass = kwargs.get('celestial', [])
1845 1852 pass
1846 1853
1847 1854
1848 1855 def update(self, dataOut):
1849 1856
1850 1857 if len(self.channelList) == 0:
1851 1858 self.update_list(dataOut)
1852 1859
1853 1860 if not self.flag_setIndex:
1854 1861 if len(self.selectedHeightsList)>0:
1855 1862 for sel_height in self.selectedHeightsList:
1856 1863 index_list = numpy.where(self.heights >= sel_height)
1857 1864 index_list = index_list[0]
1858 1865 self.hindex.append(index_list[0])
1859 1866 self.flag_setIndex = True
1860 1867
1861 1868 data = {}
1862 1869 meta = {}
1863 1870
1864 1871 data['rti_skymap'] = dataOut.getPower()
1865 1872 norm = dataOut.nProfiles * dataOut.max_nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter
1866 1873 noise = 10*numpy.log10(dataOut.getNoise()/norm)
1867 1874 data['noise'] = noise
1868 1875
1869 1876 return data, meta
1870 1877
1871 1878 def plot(self):
1872 1879
1873 1880 self.x = self.dcosx
1874 1881 self.y = self.dcosy
1875 1882 self.z = self.data[-1]['rti_skymap']
1876 1883 self.z = numpy.array(self.z, dtype=float)
1877 1884
1878 1885 if len(self.hindex) > 0:
1879 1886 index = self.hindex
1880 1887 else:
1881 1888 index = numpy.arange(0, len(self.heights), int((len(self.heights))/4.2))
1882 1889
1883 1890 self.titles = ['Height {:.2f} km '.format(self.heights[i])+" " for i in index]
1884 1891 for n, ax in enumerate(self.axes):
1885 1892
1886 1893 if ax.firsttime:
1887 1894
1888 1895 self.xmax = self.xmax if self.xmax else numpy.nanmax(self.x)
1889 1896 self.xmin = self.xmin if self.xmin else numpy.nanmin(self.x)
1890 1897 self.ymax = self.ymax if self.ymax else numpy.nanmax(self.y)
1891 1898 self.ymin = self.ymin if self.ymin else numpy.nanmin(self.y)
1892 1899 self.zmax = self.zmax if self.zmax else numpy.nanmax(self.z)
1893 1900 self.zmin = self.zmin if self.zmin else numpy.nanmin(self.z)
1894 1901
1895 1902 if self.extFile!=None:
1896 1903 ax.scatter(self.fullDcosx, self.fullDcosy, marker="+", s=20)
1897 1904
1898 1905 ax.plt = ax.scatter(self.x, self.y, c=self.z[:,index[n]], cmap = 'jet',vmin = self.zmin,
1899 1906 s=60, marker="s", vmax = self.zmax)
1900 1907
1901 1908
1902 1909 ax.minorticks_on()
1903 1910 ax.grid(which='major', axis='both')
1904 1911 ax.grid(which='minor', axis='x')
1905 1912
1906 1913 if self.flagBField :
1907 1914
1908 1915 for ih in range(len(self.bFieldList)):
1909 1916 label = str(self.bFieldList[ih]) + ' km'
1910 1917 ax.plot(self.dcosxB[ih], self.dcosyB[ih], color='k', marker=self.Bmarker[ih % 8],
1911 1918 label=label, linestyle='--', ms=4.0,lw=0.5)
1912 1919 handles, labels = ax.get_legend_handles_labels()
1913 1920 a = -0.05
1914 1921 b = 1.15 - 1.19*(self.nrows)
1915 1922 self.axes[0].legend(handles,labels, bbox_to_anchor=(a,b), prop={'size': (5.8+ 1.1*self.nplots)}, title='B Field ⊥')
1916 1923
1917 1924 else:
1918 1925
1919 1926 ax.plt = ax.scatter(self.x, self.y, c=self.z[:,index[n]], cmap = 'jet',vmin = self.zmin,
1920 1927 s=80, marker="s", vmax = self.zmax)
1921 1928
1922 1929 if self.flagBField :
1923 1930 for ih in range(len(self.bFieldList)):
1924 1931 ax.plot (self.dcosxB[ih], self.dcosyB[ih], color='k', marker=self.Bmarker[ih % 8],
1925 1932 linestyle='--', ms=4.0,lw=0.5)
1926 1933
1927 1934
1928 1935
@@ -1,815 +1,819
1 1 import os
2 2 import time
3 3 import datetime
4 4
5 5 import numpy
6 6 import h5py
7 7
8 8 import schainpy.admin
9 9 from schainpy.model.data.jrodata import *
10 10 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
11 11 from schainpy.model.io.jroIO_base import *
12 12 from schainpy.utils import log
13 13
14 14
15 15 class HDFReader(Reader, ProcessingUnit):
16 16 """Processing unit to read HDF5 format files
17 17
18 18 This unit reads HDF5 files created with the `HDFWriter` operation. By default the files
19 19 contain two groups, Data and Metadata, and every variable is loaded as a `dataOut`
20 20 attribute.
21 21 It is possible to read any HDF5 file by giving its structure in the `description`
22 22 parameter; you can also add extra values to the metadata with the `extras` parameter.
23 23
24 24 Parameters:
25 25 -----------
26 26 path : str
27 27 Path where files are located.
28 28 startDate : date
29 29 Start date of the files
30 30 endDate : date
31 31 End date of the files
32 32 startTime : time
33 33 Start time of the files
34 34 endTime : time
35 35 End time of the files
36 36 description : dict, optional
37 37 Dictionary with the description of the HDF5 file
38 38 extras : dict, optional
39 39 Dictionary with extra metadata to be added to `dataOut`
40 40
41 41 Attention: be careful to add the `utcoffset` attribute in the last part of the reader parameters in order to work in local time without timing problems, e.g.:
42 42
43 43 -----------
44 44 utcoffset='-18000'
45 45
46 46
47 47 Examples
48 48 --------
49 49
50 50 desc = {
51 51 'Data': {
52 52 'data_output': ['u', 'v', 'w'],
53 53 'utctime': 'timestamps',
54 54 } ,
55 55 'Metadata': {
56 56 'heightList': 'heights'
57 57 }
58 58 }
59 59
60 60 desc = {
61 61 'Data': {
62 62 'data_output': 'winds',
63 63 'utctime': 'timestamps'
64 64 },
65 65 'Metadata': {
66 66 'heightList': 'heights'
67 67 }
68 68 }
69 69
70 70 extras = {
71 71 'timeZone': 300
72 72 }
73 73
74 74 reader = project.addReadUnit(
75 75 name='HDFReader',
76 76 path='/path/to/files',
77 77 startDate='2019/01/01',
78 78 endDate='2019/01/31',
79 79 startTime='00:00:00',
80 80 endTime='23:59:59',
81 81 utcoffset='-18000'
82 82 # description=json.dumps(desc),
83 83 # extras=json.dumps(extras),
84 84 )
85 85
86 86 """
87 87
88 88 __attrs__ = ['path', 'startDate', 'endDate', 'startTime', 'endTime', 'description', 'extras']
89 89
90 90 def __init__(self):
91 91
92 92 ProcessingUnit.__init__(self)
93 93 self.ext = ".hdf5"
94 94 self.optchar = "D"
95 95 self.meta = {}
96 96 self.data = {}
97 97 self.open_file = h5py.File
98 98 self.open_mode = 'r'
99 99 self.description = {}
100 100 self.extras = {}
101 101 self.filefmt = "*%Y%j***"
102 102 self.folderfmt = "*%Y%j"
103 103 self.utcoffset = 0
104 104 self.flagUpdateDataOut = False
105 105 self.dataOut = Parameters()
106 106 self.dataOut.error=False ## NOTE: important to define this before starting
107 107 self.dataOut.flagNoData = True
108 108
109 109 def setup(self, **kwargs):
110 110
111 111 self.set_kwargs(**kwargs)
112 112 if not self.ext.startswith('.'):
113 113 self.ext = '.{}'.format(self.ext)
114 114
115 115 if self.online:
116 116 log.log("Searching files in online mode...", self.name)
117 117
118 118 for nTries in range(self.nTries):
119 119 fullpath = self.searchFilesOnLine(self.path, self.startDate,
120 120 self.endDate, self.expLabel, self.ext, self.walk,
121 121 self.filefmt, self.folderfmt)
122 122 pathname, filename = os.path.split(fullpath)
123 123 try:
124 124 fullpath = next(fullpath)
125 125 except:
126 126 fullpath = None
127 127
128 128 if fullpath:
129 129 break
130 130
131 131 log.warning(
132 132 'Waiting {} sec for a valid file in {}: try {} ...'.format(
133 133 self.delay, self.path, nTries + 1),
134 134 self.name)
135 135 time.sleep(self.delay)
136 136
137 137 if not(fullpath):
138 138 raise schainpy.admin.SchainError(
139 139 'There isn\'t any valid file in {}'.format(self.path))
140 140
141 141 pathname, filename = os.path.split(fullpath)
142 142 self.year = int(filename[1:5])
143 143 self.doy = int(filename[5:8])
144 144 self.set = int(filename[8:11]) - 1
145 145 else:
146 146 log.log("Searching files in {}".format(self.path), self.name)
147 147 self.filenameList = self.searchFilesOffLine(self.path, self.startDate,
148 148 self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt)
149 149
150 150 self.setNextFile()
151 151
152 152 return
153 153
154 154 # def readFirstHeader(self):
155 155 # '''Read metadata and data'''
156 156
157 157 # self.__readMetadata()
158 158 # self.__readData()
159 159 # self.__setBlockList()
160 160
161 161 # if 'type' in self.meta:
162 162 # self.dataOut = eval(self.meta['type'])()
163 163
164 164 # for attr in self.meta:
165 165 # setattr(self.dataOut, attr, self.meta[attr])
166 166
167 167 # self.blockIndex = 0
168 168
169 169 # return
170 170
171 171 def readFirstHeader(self):
172 172 '''Read metadata and data'''
173 173
174 174 self.__readMetadata2()
175 175 self.__readData()
176 176 self.__setBlockList()
177 177 if 'type' in self.meta:
178 178 self.dataOut = eval(self.meta['type'])()
179 179
180 180 for attr in self.meta:
181 181 if "processingHeaderObj" in attr:
182 182 self.flagUpdateDataOut=True
183 183 at = attr.split('.')
184 184 if len(at) > 1:
185 185 setattr(eval("self.dataOut."+at[0]),at[1], self.meta[attr])
186 186 else:
187 187 setattr(self.dataOut, attr, self.meta[attr])
188 188 self.blockIndex = 0
189 189
190 190 if self.flagUpdateDataOut:
191 191 self.updateDataOut()
192 192
193 193 return
194 194
195 195 def updateDataOut(self):
196 196
197 197 self.dataOut.azimuthList = self.dataOut.processingHeaderObj.azimuthList
198 198 self.dataOut.elevationList = self.dataOut.processingHeaderObj.elevationList
199 199 self.dataOut.heightList = self.dataOut.processingHeaderObj.heightList
200 200 self.dataOut.ippSeconds = self.dataOut.processingHeaderObj.ipp
201 201 self.dataOut.elevationList = self.dataOut.processingHeaderObj.elevationList
202 202 self.dataOut.channelList = self.dataOut.processingHeaderObj.channelList
203 203 self.dataOut.nCohInt = self.dataOut.processingHeaderObj.nCohInt
204 204 self.dataOut.nFFTPoints = self.dataOut.processingHeaderObj.nFFTPoints
205 205 self.flagUpdateDataOut = False
206 206 self.dataOut.frequency = self.dataOut.radarControllerHeaderObj.frequency
207 207 #self.dataOut.heightList = self.dataOut.processingHeaderObj.heightList
208 208
209 209 def __setBlockList(self):
210 210 '''
211 211 Selects the data blocks within the configured start and end times
212 212
213 213 Uses: self.fp
214 214 self.startTime
215 215 self.endTime
216 216 Sets: self.blockList
217 217 self.blocksPerFile
218 218
219 219 '''
220 220
221 221 startTime = self.startTime
222 222 endTime = self.endTime
223 223 thisUtcTime = self.data['utctime'] + self.utcoffset
224 224 # self.interval = numpy.min(thisUtcTime[1:] - thisUtcTime[:-1])
225 225 thisDatetime = datetime.datetime.utcfromtimestamp(thisUtcTime[0])
226 226 self.startFileDatetime = thisDatetime
227 227 thisDate = thisDatetime.date()
228 228 thisTime = thisDatetime.time()
229 229 startUtcTime = (datetime.datetime.combine(thisDate, startTime) - datetime.datetime(1970, 1, 1)).total_seconds()
230 230 endUtcTime = (datetime.datetime.combine(thisDate, endTime) - datetime.datetime(1970, 1, 1)).total_seconds()
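# keep only the blocks whose (offset-corrected) timestamps fall inside
# [startTime, endTime) on the date of the first block in the file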
231 231 ind = numpy.where(numpy.logical_and(thisUtcTime >= startUtcTime, thisUtcTime < endUtcTime))[0]
232 232
233 233 self.blockList = ind
234 234 self.blocksPerFile = len(ind)
235 235 # self.blocksPerFile = len(thisUtcTime)
236 236 if len(ind)==0:
237 237 print("[Reading] Block No. %d/%d -> %s [Skipping]" % (self.blockIndex,
238 238 self.blocksPerFile,
239 239 thisDatetime))
240 240 self.setNextFile()
241 241
242 242 return
243 243
244 244 def __readMetadata(self):
245 245 '''
246 246 Reads Metadata
247 247 '''
248 248
249 249 meta = {}
250 250
251 251 if self.description:
252 252 for key, value in self.description['Metadata'].items():
253 253 meta[key] = self.fp[value][()]
254 254 else:
255 255 grp = self.fp['Metadata']
256 256 for name in grp:
257 257 meta[name] = grp[name][()]
258 258
259 259 if self.extras:
260 260 for key, value in self.extras.items():
261 261 meta[key] = value
262 262 self.meta = meta
263 263
264 264 return
265 265
266 266 def __readMetadata2(self):
267 267 '''
268 268 Reads Metadata
269 269 '''
270 270 meta = {}
271 271 if self.description:
272 272 for key, value in self.description['Metadata'].items():
273 273 meta[key] = self.fp[value][()]
274 274 else:
275 275 grp = self.fp['Metadata']
276 276 for item in grp.values():
277 277 name = item.name
278 278 if isinstance(item, h5py.Dataset):
279 279 name = name.split("/")[-1]
280 280 meta[name] = item[()]
281 281 else:
282 282 grp2 = self.fp[name]
283 283 Obj = name.split("/")[-1]
284 284
285 285 for item2 in grp2.values():
286 286 name2 = Obj+"."+item2.name.split("/")[-1]
287 287 meta[name2] = item2[()]
288 288
289 289 if self.extras:
290 290 for key, value in self.extras.items():
291 291 meta[key] = value
292 292 self.meta = meta
293 293
294 294 return
295 295
296 296 def __readData(self):
297 297
298 298 data = {}
299 299
300 300 if self.description:
301 301 for key, value in self.description['Data'].items():
302 302 if isinstance(value, str):
303 303 if isinstance(self.fp[value], h5py.Dataset):
304 304 data[key] = self.fp[value][()]
305 305 elif isinstance(self.fp[value], h5py.Group):
306 306 array = []
307 307 for ch in self.fp[value]:
308 308 array.append(self.fp[value][ch][()])
309 309 data[key] = numpy.array(array)
310 310 elif isinstance(value, list):
311 311 array = []
312 312 for ch in value:
313 313 array.append(self.fp[ch][()])
314 314 data[key] = numpy.array(array)
315 315 else:
316 316 grp = self.fp['Data']
317 317 for name in grp:
318 318 if isinstance(grp[name], h5py.Dataset):
319 319 array = grp[name][()]
320 320 elif isinstance(grp[name], h5py.Group):
321 321 array = []
322 322 for ch in grp[name]:
323 323 array.append(grp[name][ch][()])
324 324 array = numpy.array(array)
325 325 else:
326 326 log.warning('Unknown type: {}'.format(name))
327 327
328 328 if name in self.description:
329 329 key = self.description[name]
330 330 else:
331 331 key = name
332 332 data[key] = array
333 333
334 334 self.data = data
335 335 return
336 336
337 337 def getData(self):
338 338
339 339 if not self.isDateTimeInRange(self.startFileDatetime, self.startDate, self.endDate, self.startTime, self.endTime):
340 340 self.dataOut.flagNoData = True
341 341 self.blockIndex = self.blocksPerFile
342 342 self.dataOut.error = True # terminates the program
343 343 return
344 344 for attr in self.data:
345 345
346 346 if self.data[attr].ndim == 1:
347 347 setattr(self.dataOut, attr, self.data[attr][self.blockIndex])
348 348 else:
349 349 setattr(self.dataOut, attr, self.data[attr][:, self.blockIndex])
350 350
351 351
352 352 self.blockIndex += 1
353 353
354 354 if self.blockIndex == 1:
355 355 log.log("Block No. {}/{} -> {}".format(
356 356 self.blockIndex,
357 357 self.blocksPerFile,
358 358 self.dataOut.datatime.ctime()), self.name)
359 359 else:
360 360 log.log("Block No. {}/{} ".format(
361 361 self.blockIndex,
362 362 self.blocksPerFile),self.name)
363 363
364 364 if self.blockIndex == self.blocksPerFile:
365 365 self.setNextFile()
366 366
367 367 self.dataOut.flagNoData = False
368 368
369 369 return
370 370
371 371 def run(self, **kwargs):
372 372
373 373 if not(self.isConfig):
374 374 self.setup(**kwargs)
375 375 self.isConfig = True
376 376
377 377 if self.blockIndex == self.blocksPerFile:
378 378 self.setNextFile()
379 379
380 380 self.getData()
381 381
382 382 return
383 383
384 384 @MPDecorator
385 385 class HDFWriter(Operation):
386 386 """Operation to write HDF5 files.
387 387
388 388 By default the HDF5 file contains two groups, Data and Metadata, where
389 389 you can save any `dataOut` attribute specified by the `dataList` and `metadataList`
390 390 parameters; data attributes are normally time dependent whereas the metadata
391 391 are not.
392 392 It is possible to customize the structure of the HDF5 file with the
393 393 optional `description` parameter; see the examples.
394 394
395 395 Parameters:
396 396 -----------
397 397 path : str
398 398 Path where files will be saved.
399 399 blocksPerFile : int
400 400 Number of blocks per file
401 401 metadataList : list
402 402 List of the dataOut attributes that will be saved as metadata
403 403 dataList : list
404 404 List of the dataOut attributes that will be saved as data
405 405 setType : str, optional
406 406 If given, the file set number is taken from the data timestamp (hour*60 + minute) instead of a sequential counter
407 407 description : dict, optional
408 408 Dictionary with the desired description of the HDF5 file
409 409
410 410 Examples
411 411 --------
412 412
413 413 desc = {
414 414 'data_output': {'winds': ['z', 'w', 'v']},
415 415 'utctime': 'timestamps',
416 416 'heightList': 'heights'
417 417 }
418 418 desc = {
419 419 'data_output': ['z', 'w', 'v'],
420 420 'utctime': 'timestamps',
421 421 'heightList': 'heights'
422 422 }
423 423 desc = {
424 424 'Data': {
425 425 'data_output': 'winds',
426 426 'utctime': 'timestamps'
427 427 },
428 428 'Metadata': {
429 429 'heightList': 'heights'
430 430 }
431 431 }
432 432
433 433 writer = proc_unit.addOperation(name='HDFWriter')
434 434 writer.addParameter(name='path', value='/path/to/file')
435 435 writer.addParameter(name='blocksPerFile', value='32')
436 436 writer.addParameter(name='metadataList', value='heightList,timeZone')
437 437 writer.addParameter(name='dataList',value='data_output,utctime')
438 438 # writer.addParameter(name='description',value=json.dumps(desc))
439 439
440 440 """
441 441
442 442 ext = ".hdf5"
443 443 optchar = "D"
444 444 filename = None
445 445 path = None
446 446 setFile = None
447 447 fp = None
448 ds = None
448 449 firsttime = True
449 450 #Configurations
450 451 blocksPerFile = None
451 452 blockIndex = None
452 453 dataOut = None #eval ??????
453 454 #Data Arrays
454 455 dataList = None
455 456 metadataList = None
456 457 currentDay = None
457 458 lastTime = None
458 459 timeZone = "ut"
459 460 hourLimit = 3
460 461 breakDays = True
461 462
462 463 def __init__(self):
463 464
464 465 Operation.__init__(self)
465 466 return
466 467
467 468 def set_kwargs(self, **kwargs):
468 469
469 470 for key, value in kwargs.items():
470 471 setattr(self, key, value)
471 472
472 473 def set_kwargs_obj(self, obj, **kwargs):
473 474
474 475 for key, value in kwargs.items():
475 476 setattr(obj, key, value)
476 477
477 478 def setup(self, path=None, blocksPerFile=10, metadataList=None, dataList=None, setType=None,
478 479 description={},timeZone = "ut",hourLimit = 3, breakDays=True, **kwargs):
479 480 self.path = path
480 481 self.blocksPerFile = blocksPerFile
481 482 self.metadataList = metadataList
482 483 self.dataList = [s.strip() for s in dataList]
483 484 self.setType = setType
484 485 self.description = description
485 486 self.timeZone = timeZone
486 487 self.hourLimit = hourLimit
487 488 self.breakDays = breakDays
488 489 self.set_kwargs(**kwargs)
489 490
490 491 if self.metadataList is None:
491 492 self.metadataList = self.dataOut.metadata_list
492 493
494 self.metadataList = list(set(self.metadataList))
495
493 496 tableList = []
494 497 dsList = []
495 498
496 499 for i in range(len(self.dataList)):
497 500 dsDict = {}
498 501 if hasattr(self.dataOut, self.dataList[i]):
499 502 dataAux = getattr(self.dataOut, self.dataList[i])
500 503 dsDict['variable'] = self.dataList[i]
501 504 else:
502 505 log.warning('Attribute {} not found in dataOut'.format(self.dataList[i]),self.name)
503 506 continue
504 507
505 508 if dataAux is None:
506 509 continue
507 elif isinstance(dataAux, (int, float, numpy.integer, numpy.float)):
510 elif isinstance(dataAux, (int, float, numpy.integer, numpy.float_)):
508 511 dsDict['nDim'] = 0
509 512 else:
510 513 dsDict['nDim'] = len(dataAux.shape)
511 514 dsDict['shape'] = dataAux.shape
512 515 dsDict['dsNumber'] = dataAux.shape[0]
513 516 dsDict['dtype'] = dataAux.dtype
514 517
515 518 dsList.append(dsDict)
516 519
520 self.blockIndex = 0
517 521 self.dsList = dsList
518 522 self.currentDay = self.dataOut.datatime.date()
519 523
520 524 def timeFlag(self):
521 525 currentTime = self.dataOut.utctime
522 526 timeTuple = None
523 527 if self.timeZone == "lt":
524 528 timeTuple = time.localtime(currentTime)
525 529 else :
526 530 timeTuple = time.gmtime(currentTime)
527 531 dataDay = timeTuple.tm_yday
528 532
529 533 if self.lastTime is None:
530 534 self.lastTime = currentTime
531 535 self.currentDay = dataDay
532 536 return False
533 537
534 538 timeDiff = currentTime - self.lastTime
535 539
536 540 # Start a new file if the day changed or if the gap between one
537 541 # sample and the next exceeds self.hourLimit (in hours)
538 542 if (dataDay != self.currentDay) and self.breakDays:
539 543 self.currentDay = dataDay
540 544 return True
541 545 elif timeDiff > self.hourLimit*60*60:
542 546 self.lastTime = currentTime
543 547 return True
544 548 else:
545 549 self.lastTime = currentTime
546 550 return False
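# Example (hypothetical values): with breakDays=True a new file starts at the first
# block of a new day (tm_yday changes), and with hourLimit=3 a gap of more than
# 3*3600 s between consecutive blocks also forces a new file.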
547 551
548 552 def run(self, dataOut, path, blocksPerFile=10, metadataList=None,
549 553 dataList=[], setType=None, description={}, **kwargs):
550 554
551 555 self.dataOut = dataOut
552 556 self.set_kwargs_obj(self.dataOut, **kwargs)
553 557 if not(self.isConfig):
554 558 self.setup(path=path, blocksPerFile=blocksPerFile,
555 559 metadataList=metadataList, dataList=dataList,
556 560 setType=setType, description=description, **kwargs)
557 561
558 562 self.isConfig = True
559 563 self.setNextFile()
560 564
561 565 self.putData()
562 566 return
563 567
564 568 def setNextFile(self):
565 569
566 570 ext = self.ext
567 571 path = self.path
568 572 setFile = self.setFile
569 573 timeTuple = None
570 574 if self.timeZone == "lt":
571 575 timeTuple = time.localtime(self.dataOut.utctime)
572 576 elif self.timeZone == "ut":
573 577 timeTuple = time.gmtime(self.dataOut.utctime)
574 578 subfolder = 'd%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday)
575 579 fullpath = os.path.join(path, subfolder)
576 580
577 581 if os.path.exists(fullpath):
578 582 filesList = os.listdir(fullpath)
579 583 filesList = [k for k in filesList if k.startswith(self.optchar)]
580 584 if len(filesList) > 0:
581 585 filesList = sorted(filesList, key=str.lower)
582 586 filen = filesList[-1]
583 587 # the filename must have the following format
584 588 # 0 1234 567 89A BCDE (hex)
585 589 # x YYYY DDD SSS .ext
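# e.g. with optchar='D' and ext='.hdf5': D2019001000.hdf5 -> year 2019, DOY 001, set 000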
586 590 if isNumber(filen[8:11]):
587 591 setFile = int(filen[8:11]) # initialize the set counter with the set number of the last file
588 592 else:
589 593 setFile = -1
590 594 else:
591 595 setFile = -1 # initialize the set counter
592 596 else:
593 597 os.makedirs(fullpath)
594 598 setFile = -1 # initialize the set counter
595 599
596 600 if self.setType is None:
597 601 setFile += 1
598 602 file = '%s%4.4d%3.3d%03d%s' % (self.optchar,
599 603 timeTuple.tm_year,
600 604 timeTuple.tm_yday,
601 605 setFile,
602 606 ext)
603 607 else:
604 608 setFile = timeTuple.tm_hour*60+timeTuple.tm_min
605 609 file = '%s%4.4d%3.3d%04d%s' % (self.optchar,
606 610 timeTuple.tm_year,
607 611 timeTuple.tm_yday,
608 612 setFile,
609 613 ext)
610 614
611 615 self.filename = os.path.join(path, subfolder, file)
612 616
613 617
614 618
615 619 def getLabel(self, name, x=None):
616 620
617 621 if x is None:
618 622 if 'Data' in self.description:
619 623 data = self.description['Data']
620 624 if 'Metadata' in self.description:
621 625 data.update(self.description['Metadata'])
622 626 else:
623 627 data = self.description
624 628 if name in data:
625 629 if isinstance(data[name], str):
626 630 return data[name]
627 631 elif isinstance(data[name], list):
628 632 return None
629 633 elif isinstance(data[name], dict):
630 634 for key, value in data[name].items():
631 635 return key
632 636 return name
633 637 else:
634 638 if 'Metadata' in self.description:
635 639 meta = self.description['Metadata']
636 640 else:
637 641 meta = self.description
638 642 if name in meta:
639 643 if isinstance(meta[name], list):
640 644 return meta[name][x]
641 645 elif isinstance(meta[name], dict):
642 646 for key, value in meta[name].items():
643 647 return value[x]
644 648 if 'cspc' in name:
645 649 return 'pair{:02d}'.format(x)
646 650 else:
647 651 return 'channel{:02d}'.format(x)
648 652
649 653 def writeMetadata(self, fp):
650 654
651 655 if self.description:
652 656 if 'Metadata' in self.description:
653 657 grp = fp.create_group('Metadata')
654 658 else:
655 659 grp = fp
656 660 else:
657 661 grp = fp.create_group('Metadata')
658 662
659 663 for i in range(len(self.metadataList)):
660 664 if not hasattr(self.dataOut, self.metadataList[i]):
661 665 log.warning('Metadata: `{}` not found'.format(self.metadataList[i]), self.name)
662 666 continue
663 667 value = getattr(self.dataOut, self.metadataList[i])
664 668 if isinstance(value, bool):
665 669 if value is True:
666 670 value = 1
667 671 else:
668 672 value = 0
669 673 grp.create_dataset(self.getLabel(self.metadataList[i]), data=value)
670 674 return
671 675
672 676 def writeMetadata2(self, fp):
673 677
674 678 if self.description:
675 679 if 'Metadata' in self.description:
676 680 grp = fp.create_group('Metadata')
677 681 else:
678 682 grp = fp
679 683 else:
680 684 grp = fp.create_group('Metadata')
681 685
682 686 for i in range(len(self.metadataList)):
683 687
684 688 attribute = self.metadataList[i]
685 689 attr = attribute.split('.')
686 690 if len(attr) > 1:
687 691 if not hasattr(eval("self.dataOut."+attr[0]),attr[1]):
688 692 log.warning('Metadata: {}.{} not found'.format(attr[0],attr[1]), self.name)
689 693 continue
690 694 value = getattr(eval("self.dataOut."+attr[0]),attr[1])
691 695 if isinstance(value, bool):
692 696 if value is True:
693 697 value = 1
694 698 else:
695 699 value = 0
696 700 if isinstance(value,type(None)):
697 701 log.warning("Invalid value detected, {} is None".format(attribute), self.name)
698 702 value = 0
699 703 grp2 = None
700 704 if not 'Metadata/'+attr[0] in fp:
701 705 grp2 = fp.create_group('Metadata/'+attr[0])
702 706 else:
703 707 grp2 = fp['Metadata/'+attr[0]]
704 708 grp2.create_dataset(attr[1], data=value)
705 709
706 710 else:
707 711 if not hasattr(self.dataOut, attr[0] ):
708 712 log.warning('Metadata: `{}` not found'.format(attribute), self.name)
709 713 continue
710 714 value = getattr(self.dataOut, attr[0])
711 715 if isinstance(value, bool):
712 716 if value is True:
713 717 value = 1
714 718 else:
715 719 value = 0
716 720 if isinstance(value, type(None)):
717 721 log.error("Value {} is None".format(attribute),self.name)
718 722
719 723 grp.create_dataset(self.getLabel(attribute), data=value)
720 724
721 725 return
722 726
723 727 def writeData(self, fp):
724 728
725 729 if self.description:
726 730 if 'Data' in self.description:
727 731 grp = fp.create_group('Data')
728 732 else:
729 733 grp = fp
730 734 else:
731 735 grp = fp.create_group('Data')
732 736
733 737 dtsets = []
734 738 data = []
735 739
736 740 for dsInfo in self.dsList:
737 741 if dsInfo['nDim'] == 0:
738 742 ds = grp.create_dataset(
739 743 self.getLabel(dsInfo['variable']),
740 744 (self.blocksPerFile,),
741 745 chunks=True,
742 746 dtype=numpy.float64)
743 747 dtsets.append(ds)
744 748 data.append((dsInfo['variable'], -1))
745 749 else:
746 750 label = self.getLabel(dsInfo['variable'])
747 751 if label is not None:
748 752 sgrp = grp.create_group(label)
749 753 else:
750 754 sgrp = grp
751 755 for i in range(dsInfo['dsNumber']):
752 756 ds = sgrp.create_dataset(
753 757 self.getLabel(dsInfo['variable'], i),
754 758 (self.blocksPerFile,) + dsInfo['shape'][1:],
755 759 chunks=True,
756 760 dtype=dsInfo['dtype'])
757 761 dtsets.append(ds)
758 762 data.append((dsInfo['variable'], i))
759 763 fp.flush()
760 764
761 765 log.log('Creating file: {}'.format(fp.filename), self.name)
762 766
763 767 self.ds = dtsets
764 768 self.data = data
765 769 self.firsttime = True
766 self.blockIndex = 0
770
767 771 return
768 772
769 773 def putData(self):
770 774
771 775 if (self.blockIndex == self.blocksPerFile) or self.timeFlag():
772 776 self.closeFile()
773 777 self.setNextFile()
774 778 self.dataOut.flagNoData = False
775 779 self.blockIndex = 0
776 780
777 781 if self.blockIndex == 0:
778 782 #Setting HDF5 File
779 783 self.fp = h5py.File(self.filename, 'w')
780 784 #write metadata
781 785 self.writeMetadata2(self.fp)
782 786 #Write data
783 787 self.writeData(self.fp)
784 788 log.log('Block No. {}/{} --> {}'.format(self.blockIndex+1, self.blocksPerFile,self.dataOut.datatime.ctime()), self.name)
785 789 elif (self.blockIndex % 10 ==0):
786 790 log.log('Block No. {}/{} --> {}'.format(self.blockIndex+1, self.blocksPerFile,self.dataOut.datatime.ctime()), self.name)
787 791 else:
788 792
789 793 log.log('Block No. {}/{}'.format(self.blockIndex+1, self.blocksPerFile), self.name)
790 794
791 795 for i, ds in enumerate(self.ds):
792 796 attr, ch = self.data[i]
793 797 if ch == -1:
794 798 ds[self.blockIndex] = getattr(self.dataOut, attr)
795 799 else:
796 800 ds[self.blockIndex] = getattr(self.dataOut, attr)[ch]
797 801
798 802 self.blockIndex += 1
799 803
800 804 self.fp.flush()
801 805 self.dataOut.flagNoData = True
802 806
803 807 def closeFile(self):
804 808
805 809 if self.blockIndex != self.blocksPerFile:
806 810 for ds in self.ds:
807 811 ds.resize(self.blockIndex, axis=0)
808 812
809 813 if self.fp:
810 814 self.fp.flush()
811 815 self.fp.close()
812 816
813 817 def close(self):
814 818
815 819 self.closeFile()
@@ -1,6328 +1,6674
1 1 import numpy
2 2 import math
3 3 from scipy import optimize, interpolate, signal, stats, ndimage
4 4 from scipy.fftpack import fft
5 5 import scipy
6 6 import re
7 7 import datetime
8 8 import copy
9 9 import sys
10 10 import importlib
11 11 import itertools
12 12 from multiprocessing import Pool, TimeoutError
13 13 from multiprocessing.pool import ThreadPool
14 14 import time
15 15
16 16 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state parameters
17 17 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
18 18 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
19 19 from schainpy.model.data.jrodata import Spectra
20 20 from numpy import asarray as ar,exp
21 21 from scipy.optimize import fmin, curve_fit
22 22 from schainpy.utils import log
23 23 import warnings
24 24 from numpy import NaN
25 25 from scipy.optimize.optimize import OptimizeWarning
26 26 warnings.filterwarnings('ignore')
27
27 import json
28 28 import os
29 29 import csv
30 30 from scipy import signal
31 31 import matplotlib.pyplot as plt
32 32
33 33 SPEED_OF_LIGHT = 299792458
34 34
35 35 '''solving pickling issue'''
36 36
37 37 def _pickle_method(method):
38 38 func_name = method.__func__.__name__
39 39 obj = method.__self__
40 40 cls = method.__self__.__class__
41 41 return _unpickle_method, (func_name, obj, cls)
42 42
43 43 def _unpickle_method(func_name, obj, cls):
44 44 for cls in cls.mro():
45 45 try:
46 46 func = cls.__dict__[func_name]
47 47 except KeyError:
48 48 pass
49 49 else:
50 50 break
51 51 return func.__get__(obj, cls)
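# Note on this recipe: pickling bound methods normally also requires registering the
# pair with copyreg, e.g. copyreg.pickle(types.MethodType, _pickle_method); whether and
# where that registration happens is not shown here (assumed elsewhere in the package).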
52 52
53 53
54 54 class ParametersProc(ProcessingUnit):
55 55
56 56 METHODS = {}
57 57 nSeconds = None
58 58
59 59 def __init__(self):
60 60 ProcessingUnit.__init__(self)
61 61
62 62 self.buffer = None
63 63 self.firstdatatime = None
64 64 self.profIndex = 0
65 65 self.dataOut = Parameters()
66 66 self.setupReq = False # add this to every processing unit
67 67
68 68 def __updateObjFromInput(self):
69 69
70 70 self.dataOut.inputUnit = self.dataIn.type
71 71
72 72 self.dataOut.timeZone = self.dataIn.timeZone
73 73 self.dataOut.dstFlag = self.dataIn.dstFlag
74 74 self.dataOut.errorCount = self.dataIn.errorCount
75 75 self.dataOut.useLocalTime = self.dataIn.useLocalTime
76 76
77 77 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
78 78 self.dataOut.processingHeaderObj = self.dataIn.processingHeaderObj.copy()
79 79 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
80 80 self.dataOut.channelList = self.dataIn.channelList
81 81 self.dataOut.heightList = self.dataIn.heightList
82 82 self.dataOut.ipp = self.dataIn.ipp
83 83 self.dataOut.ippSeconds = self.dataIn.ippSeconds
84 84 self.dataOut.deltaHeight = self.dataIn.deltaHeight
85 85 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
86 86
87 87 self.dataOut.nBaud = self.dataIn.nBaud
88 88 self.dataOut.nCode = self.dataIn.nCode
89 89 self.dataOut.code = self.dataIn.code
90 90 self.dataOut.nProfiles = self.dataIn.nProfiles
91 91 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
92 92 self.dataOut.utctime = self.dataIn.utctime
93 93 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData # assume the data is already decoded
94 94 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData # assume the data has no flip applied
95 95 self.dataOut.nCohInt = self.dataIn.nCohInt
96 96 self.dataOut.nIncohInt = self.dataIn.nIncohInt
97 97 self.dataOut.ippSeconds = self.dataIn.ippSeconds
98 98 self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
99 99 self.dataOut.timeInterval1 = self.dataIn.timeInterval
100 100 self.dataOut.heightList = self.dataIn.heightList
101 101 self.dataOut.frequency = self.dataIn.frequency
102 102 self.dataOut.codeList = self.dataIn.codeList
103 103 self.dataOut.azimuthList = self.dataIn.azimuthList
104 104 self.dataOut.elevationList = self.dataIn.elevationList
105 105 self.dataOut.runNextUnit = self.dataIn.runNextUnit
106 106
107 107 def run(self, runNextUnit=0):
108 108
109 109 self.dataIn.runNextUnit = runNextUnit
110 110 #---------------------- Voltage Data ---------------------------
111 111 try:
112 112 intype = self.dataIn.type.decode("utf-8")
113 113 self.dataIn.type = intype
114 114 except:
115 115 pass
116 116
117 117 if self.dataIn.type == "Voltage":
118 118
119 119 self.__updateObjFromInput()
120 120 self.dataOut.data_pre = self.dataIn.data.copy()
121 121 self.dataOut.flagNoData = False
122 122 self.dataOut.utctimeInit = self.dataIn.utctime
123 123 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
124 124 if hasattr(self.dataIn, 'dataPP_POW'):
125 125 self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
126 126
127 127 if hasattr(self.dataIn, 'dataPP_POWER'):
128 128 self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
129 129
130 130 if hasattr(self.dataIn, 'dataPP_DOP'):
131 131 self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
132 132
133 133 if hasattr(self.dataIn, 'dataPP_SNR'):
134 134 self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
135 135
136 136 if hasattr(self.dataIn, 'dataPP_WIDTH'):
137 137 self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
138 138 return
139 139
140 140 #---------------------- Spectra Data ---------------------------
141 141
142 142 if self.dataIn.type == "Spectra":
143 143
144 144 self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
145 145 self.dataOut.data_spc = self.dataIn.data_spc
146 146 self.dataOut.data_cspc = self.dataIn.data_cspc
147 147 self.dataOut.data_outlier = self.dataIn.data_outlier
148 148 self.dataOut.nProfiles = self.dataIn.nProfiles
149 149 self.dataOut.nIncohInt = self.dataIn.nIncohInt
150 150 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
151 151 self.dataOut.ippFactor = self.dataIn.ippFactor
152 152 self.dataOut.flagProfilesByRange = self.dataIn.flagProfilesByRange
153 153 self.dataOut.nProfilesByRange = self.dataIn.nProfilesByRange
154 154 self.dataOut.deltaHeight = self.dataIn.deltaHeight
155 155 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
156 156 self.dataOut.spc_noise = self.dataIn.getNoise()
157 157 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
158 158 # self.dataOut.normFactor = self.dataIn.normFactor
159 159 if hasattr(self.dataIn, 'channelList'):
160 160 self.dataOut.channelList = self.dataIn.channelList
161 161 if hasattr(self.dataIn, 'pairsList'):
162 162 self.dataOut.pairsList = self.dataIn.pairsList
163 163 self.dataOut.groupList = self.dataIn.pairsList
164 164
165 165 self.dataOut.flagNoData = False
166 166
167 167 self.dataOut.noise_estimation = self.dataIn.noise_estimation
168 168 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
169 169 self.dataOut.ChanDist = self.dataIn.ChanDist
170 170 else: self.dataOut.ChanDist = None
171 171
172 172 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
173 173 # self.dataOut.VelRange = self.dataIn.VelRange
174 174 #else: self.dataOut.VelRange = None
175 175
176 176 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
177 177 self.dataOut.RadarConst = self.dataIn.RadarConst
178 178
179 179 if hasattr(self.dataIn, 'NPW'): #NPW
180 180 self.dataOut.NPW = self.dataIn.NPW
181 181
182 182 if hasattr(self.dataIn, 'COFA'): #COFA
183 183 self.dataOut.COFA = self.dataIn.COFA
184 184
185 185
186 186
187 187 #---------------------- Correlation Data ---------------------------
188 188
189 189 if self.dataIn.type == "Correlation":
190 190 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
191 191
192 192 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
193 193 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
194 194 self.dataOut.groupList = (acf_pairs, ccf_pairs)
195 195
196 196 self.dataOut.abscissaList = self.dataIn.lagRange
197 197 self.dataOut.noise = self.dataIn.noise
198 198 self.dataOut.data_snr = self.dataIn.SNR
199 199 self.dataOut.flagNoData = False
200 200 self.dataOut.nAvg = self.dataIn.nAvg
201 201
202 202 #---------------------- Parameters Data ---------------------------
203 203
204 204 if self.dataIn.type == "Parameters":
205 205 self.dataOut.copy(self.dataIn)
206 206 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
207 207 self.dataOut.processingHeaderObj = self.dataIn.processingHeaderObj.copy()
208 208 self.dataOut.flagNoData = False
209 209 if isinstance(self.dataIn.nIncohInt,numpy.ndarray):
210 210 nch, nheis = self.dataIn.nIncohInt.shape
211 211 if nch != self.dataIn.nChannels:
212 212 aux = numpy.repeat(self.dataIn.nIncohInt, self.dataIn.nChannels, axis=0)
213 213 self.dataOut.nIncohInt = aux
214 214 return True
215 215
216 216 self.__updateObjFromInput()
217 217 self.dataOut.utctimeInit = self.dataIn.utctime
218 218 self.dataOut.paramInterval = self.dataIn.timeInterval
219
219
220 220 return
221 221
222 222
223 223 def target(tups):
224 224
225 225 obj, args = tups
226 226
227 227 return obj.FitGau(args)
228 228
229 229 class RemoveWideGC(Operation):
230 230 ''' This class removes the wide clutter and replaces it with simple interpolated points
231 231 This mainly applies to the CLAIRE radar
232 232
233 233 ClutterWidth : Width to look for the clutter peak
234 234
235 235 Input:
236 236
237 237 self.dataOut.data_pre : SPC and CSPC
238 238 self.dataOut.spc_range : To select wind and rainfall velocities
239 239
240 240 Affected:
241 241
242 242 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
243 243
244 244 Written by D. Scipión 25.02.2021
245 245 '''
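# A minimal, hypothetical sketch of the core idea (not the implementation below):
# flag the velocity bins inside ClutterWidth around zero and bridge them by linear
# interpolation between the samples just outside the clutter region.
#
#   vel = numpy.linspace(-10., 10., 64)                      # velocity axis (m/s)
#   spc = 1. + 50.*numpy.exp(-(vel/1.)**2)                   # toy spectrum with clutter at 0
#   gc = numpy.where(numpy.abs(vel) <= 2.5)[0]               # bins inside ClutterWidth
#   edges = numpy.array([gc[0] - 1, gc[-1] + 1])             # samples just outside the clutter
#   spc[gc] = numpy.interp(vel[gc], vel[edges], spc[edges])  # replace clutter by interpolation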
246 246 def __init__(self):
247 247 Operation.__init__(self)
248 248 self.i = 0
249 249 self.ich = 0
250 250 self.ir = 0
251 251
252 252 def run(self, dataOut, ClutterWidth=2.5):
253 253
254 254 self.spc = dataOut.data_pre[0].copy()
255 255 self.spc_out = dataOut.data_pre[0].copy()
256 256 self.Num_Chn = self.spc.shape[0]
257 257 self.Num_Hei = self.spc.shape[2]
258 258 VelRange = dataOut.spc_range[2][:-1]
259 259 dv = VelRange[1]-VelRange[0]
260 260
261 261 # Find the velocity bins within ClutterWidth of zero
262 262 gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))
263 263
264 264 # Removing novalid data from the spectra
265 265 for ich in range(self.Num_Chn) :
266 266 for ir in range(self.Num_Hei) :
267 267 # Estimate the noise at each range
268 268 HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)
269 269
270 270 # Removing the noise floor at each range
271 271 novalid = numpy.where(self.spc[ich,:,ir] < HSn)
272 272 self.spc[ich,novalid,ir] = HSn
273 273
274 274 junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
275 275 j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
276 276 j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
277 277 if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
278 278 continue
279 279 junk3 = numpy.squeeze(numpy.diff(j1index))
280 280 junk4 = numpy.squeeze(numpy.diff(j2index))
281 281
282 282 valleyindex = j2index[numpy.where(junk4>1)]
283 283 peakindex = j1index[numpy.where(junk3>1)]
284 284
285 285 isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
286 286 if numpy.size(isvalid) == 0 :
287 287 continue
288 288 if numpy.size(isvalid) >1 :
289 289 vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
290 290 isvalid = isvalid[vindex]
291 291
292 292 # clutter peak
293 293 gcpeak = peakindex[isvalid]
294 294 vl = numpy.where(valleyindex < gcpeak)
295 295 if numpy.size(vl) == 0:
296 296 continue
297 297 gcvl = valleyindex[vl[0][-1]]
298 298 vr = numpy.where(valleyindex > gcpeak)
299 299 if numpy.size(vr) == 0:
300 300 continue
301 301 gcvr = valleyindex[vr[0][0]]
302 302
303 303 # Removing the clutter
304 304 interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
305 305 gcindex = gc_values[gcvl+1:gcvr-1]
306 306 self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])
307 307
308 308 dataOut.data_pre[0] = self.spc_out
309 309
310 310 return dataOut
311 311
312 312 class SpectralFilters(Operation):
313 313 ''' This class replaces the novalid values with noise for each channel
314 314 This applies to the CLAIRE radar
315 315
316 316 PositiveLimit : RightLimit of novalid data
317 317 NegativeLimit : LeftLimit of novalid data
318 318
319 319 Input:
320 320
321 321 self.dataOut.data_pre : SPC and CSPC
322 322 self.dataOut.spc_range : To select wind and rainfall velocities
323 323
324 324 Affected:
325 325
326 326 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
327 327
328 328 Written by D. Scipión 29.01.2021
329 329 '''
330 330 def __init__(self):
331 331 Operation.__init__(self)
332 332 self.i = 0
333 333
334 334 def run(self, dataOut, NegativeLimit, PositiveLimit):
335 335
336 336 self.spc = dataOut.data_pre[0].copy()
337 337 self.Num_Chn = self.spc.shape[0]
338 338 VelRange = dataOut.spc_range[2]
339 339
340 340 # novalid corresponds to the velocity bins between NegativeLimit and PositiveLimit (limits reintroduced from the docstring)
341 341 novalid = numpy.where(numpy.logical_and(VelRange >= NegativeLimit, VelRange <= PositiveLimit))[0]
342 342
343 343 # Removing novalid data from the spectra
344 344 for i in range(self.Num_Chn):
345 345 self.spc[i,novalid,:] = dataOut.noise[i]
346 346 dataOut.data_pre[0] = self.spc
347 347 return dataOut
348 348
349 349
350 350 class GaussianFit(Operation):
351 351
352 352 '''
353 353 Function that fits one or two generalized Gaussians (gg) based
354 354 on the PSD shape across a "power band" identified from a cumsum of
355 355 the measured spectrum minus noise.
356 356
357 357 Input:
358 358 self.dataOut.data_pre : SelfSpectra
359 359
360 360 Output:
361 361 self.dataOut.SPCparam : SPC_ch1, SPC_ch2
362 362
363 363 '''
364 364 def __init__(self):
365 365 Operation.__init__(self)
366 366 self.i=0
367 367
368 368 def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
369 369 """This routine will find a couple of generalized Gaussians to a power spectrum
370 370 methods: generalized, squared
371 371 input: spc
372 372 output:
373 373 noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1
374 374 """
375 375 print ('Entering ',method,' double Gaussian fit')
376 376 self.spc = dataOut.data_pre[0].copy()
377 377 self.Num_Hei = self.spc.shape[2]
378 378 self.Num_Bin = self.spc.shape[1]
379 379 self.Num_Chn = self.spc.shape[0]
380 380
381 381 start_time = time.time()
382 382
383 383 pool = Pool(processes=self.Num_Chn)
384 384 args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
385 385 objs = [self for __ in range(self.Num_Chn)]
386 386 attrs = list(zip(objs, args))
387 387 DGauFitParam = pool.map(target, attrs)
388 388 # Parameters:
389 389 # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
390 390 dataOut.DGauFitParams = numpy.asarray(DGauFitParam)
391 391
392 392 # Double Gaussian Curves
393 393 gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
394 394 gau0[:] = numpy.NaN
395 395 gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
396 396 gau1[:] = numpy.NaN
397 397 x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
398 398 for iCh in range(self.Num_Chn):
399 399 N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
400 400 N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
401 401 A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
402 402 A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
403 403 v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
404 404 v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
405 405 s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
406 406 s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
407 407 if method == 'generalized':
408 408 p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
409 409 p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
410 410 elif method == 'squared':
411 411 p0 = 2.
412 412 p1 = 2.
413 413 gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
414 414 gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
415 415 dataOut.GaussFit0 = gau0
416 416 dataOut.GaussFit1 = gau1
417 417
418 418 print('Leaving ',method ,' double Gaussian fit')
419 419 return dataOut
420 420
421 421 def FitGau(self, X):
422 422 # print('Entering FitGau')
423 423 # Assigning the variables
424 424 Vrange, ch, wnoise, num_intg, SNRlimit = X
425 425 # Noise Limits
426 426 noisebl = wnoise * 0.9
427 427 noisebh = wnoise * 1.1
428 428 # Radar Velocity
429 429 Va = max(Vrange)
430 430 deltav = Vrange[1] - Vrange[0]
431 431 x = numpy.arange(self.Num_Bin)
432 432
433 433 # print ('stop 0')
434 434
435 435 # 5 parameters, 2 Gaussians
436 436 DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
437 437 DGauFitParam[:] = numpy.NaN
438 438
439 439 # SPCparam = []
440 440 # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei])
441 441 # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei])
442 442 # SPC_ch1[:] = 0 #numpy.NaN
443 443 # SPC_ch2[:] = 0 #numpy.NaN
444 444 # print ('stop 1')
445 445 for ht in range(self.Num_Hei):
446 446 # print (ht)
447 447 # print ('stop 2')
448 448 # Spectra at each range
449 449 spc = numpy.asarray(self.spc)[ch,:,ht]
450 450 snr = ( spc.mean() - wnoise ) / wnoise
451 451 snrdB = 10.*numpy.log10(snr)
452 452
453 453 #print ('stop 3')
454 454 if snrdB < SNRlimit :
455 455 # snr = numpy.NaN
456 456 # SPC_ch1[:,ht] = 0#numpy.NaN
457 457 # SPC_ch1[:,ht] = 0#numpy.NaN
458 458 # SPCparam = (SPC_ch1,SPC_ch2)
459 459 # print ('SNR less than SNRth')
460 460 continue
461 461 # wnoise = hildebrand_sekhon(spc,num_intg)
462 462 # print ('stop 2.01')
463 463 #############################################
464 464 # normalizing spc and noise
465 465 # This part differs from gg1
466 466 # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
467 467 #spc = spc / spc_norm_max
468 468 # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021
469 469 #############################################
470 470
471 471 # print ('stop 2.1')
472 472 fatspectra=1.0
473 473 # noise per channel.... we might want to use the noise at each range
474 474
475 475 # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021
476 476 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
477 477 #if wnoise>1.1*pnoise: # to be tested later
478 478 # wnoise=pnoise
479 479 # noisebl = wnoise*0.9
480 480 # noisebh = wnoise*1.1
481 481 spc = spc - wnoise # signal
482 482
483 483 # print ('stop 2.2')
484 484 minx = numpy.argmin(spc)
485 485 #spcs=spc.copy()
486 486 spcs = numpy.roll(spc,-minx)
487 487 cum = numpy.cumsum(spcs)
488 488 # tot_noise = wnoise * self.Num_Bin #64;
489 489
490 490 # print ('stop 2.3')
491 491 # snr = sum(spcs) / tot_noise
492 492 # snrdB = 10.*numpy.log10(snr)
493 493 #print ('stop 3')
494 494 # if snrdB < SNRlimit :
495 495 # snr = numpy.NaN
496 496 # SPC_ch1[:,ht] = 0#numpy.NaN
497 497 # SPC_ch1[:,ht] = 0#numpy.NaN
498 498 # SPCparam = (SPC_ch1,SPC_ch2)
499 499 # print ('SNR less than SNRth')
500 500 # continue
501 501
502 502
503 503 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
504 504 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
505 505 # print ('stop 4')
506 506 cummax = max(cum)
507 507 epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
508 508 cumlo = cummax * epsi
509 509 cumhi = cummax * (1-epsi)
510 510 powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
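# powerindex now spans the bins holding the central (1 - 2*epsi) fraction of the
# cumulative power; this "power band" seeds the initial peak locations and widths below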
511 511
512 512 # print ('stop 5')
513 513 if len(powerindex) < 1:# case for powerindex 0
514 514 # print ('powerindex < 1')
515 515 continue
516 516 powerlo = powerindex[0]
517 517 powerhi = powerindex[-1]
518 518 powerwidth = powerhi-powerlo
519 519 if powerwidth <= 1:
520 520 # print('powerwidth <= 1')
521 521 continue
522 522
523 523 # print ('stop 6')
524 524 firstpeak = powerlo + powerwidth/10.# first gaussian energy location
525 525 secondpeak = powerhi - powerwidth/10. #second gaussian energy location
526 526 midpeak = (firstpeak + secondpeak)/2.
527 527 firstamp = spcs[int(firstpeak)]
528 528 secondamp = spcs[int(secondpeak)]
529 529 midamp = spcs[int(midpeak)]
530 530
531 531 y_data = spc + wnoise
532 532
533 533 ''' single Gaussian '''
534 534 shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
535 535 width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
536 536 power0 = 2.
537 537 amplitude0 = midamp
538 538 state0 = [shift0,width0,amplitude0,power0,wnoise]
539 539 bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
540 540 lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)
541 541 # print ('stop 7.1')
542 542 # print (bnds)
543 543
544 544 chiSq1=lsq1[1]
545 545
546 546 # print ('stop 8')
547 547 if fatspectra<1.0 and powerwidth<4:
548 548 choice=0
549 549 Amplitude0=lsq1[0][2]
550 550 shift0=lsq1[0][0]
551 551 width0=lsq1[0][1]
552 552 p0=lsq1[0][3]
553 553 Amplitude1=0.
554 554 shift1=0.
555 555 width1=0.
556 556 p1=0.
557 557 noise=lsq1[0][4]
558 558 #return (numpy.array([shift0,width0,Amplitude0,p0]),
559 559 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
560 560 # print ('stop 9')
561 561 ''' two Gaussians '''
562 562 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
563 563 shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
564 564 shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
565 565 width0 = powerwidth/6.
566 566 width1 = width0
567 567 power0 = 2.
568 568 power1 = power0
569 569 amplitude0 = firstamp
570 570 amplitude1 = secondamp
571 571 state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
572 572 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
573 573 bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
574 574 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
575 575
576 576 # print ('stop 10')
577 577 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
578 578
579 579 # print ('stop 11')
580 580 chiSq2 = lsq2[1]
581 581
582 582 # print ('stop 12')
583 583
584 584 oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
585 585
586 586 # print ('stop 13')
587 587 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
588 588 if oneG:
589 589 choice = 0
590 590 else:
591 591 w1 = lsq2[0][1]; w2 = lsq2[0][5]
592 592 a1 = lsq2[0][2]; a2 = lsq2[0][6]
593 593 p1 = lsq2[0][3]; p2 = lsq2[0][7]
594 594 s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
595 595 s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
596 596 gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each generalized Gaussian with proper p scaling
597 597
598 598 if gp1>gp2:
599 599 if a1>0.7*a2:
600 600 choice = 1
601 601 else:
602 602 choice = 2
603 603 elif gp2>gp1:
604 604 if a2>0.7*a1:
605 605 choice = 2
606 606 else:
607 607 choice = 1
608 608 else:
609 609 choice = numpy.argmax([a1,a2])+1
610 610 #else:
611 611 #choice=argmin([std2a,std2b])+1
612 612
613 613 else: # with low SNR go to the most energetic peak
614 614 choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
615 615
616 616 # print ('stop 14')
617 617 shift0 = lsq2[0][0]
618 618 vel0 = Vrange[0] + shift0 * deltav
619 619 shift1 = lsq2[0][4]
620 620 # vel1=Vrange[0] + shift1 * deltav
621 621
622 622 # max_vel = 1.0
623 623 # Va = max(Vrange)
624 624 # deltav = Vrange[1]-Vrange[0]
625 625 # print ('stop 15')
626 626 #first peak will be 0, second peak will be 1
627 627 # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D. Scipión 19.03.2021
628 628 if vel0 > -Va and vel0 < Va : #first peak is in the correct range
629 629 shift0 = lsq2[0][0]
630 630 width0 = lsq2[0][1]
631 631 Amplitude0 = lsq2[0][2]
632 632 p0 = lsq2[0][3]
633 633
634 634 shift1 = lsq2[0][4]
635 635 width1 = lsq2[0][5]
636 636 Amplitude1 = lsq2[0][6]
637 637 p1 = lsq2[0][7]
638 638 noise = lsq2[0][8]
639 639 else:
640 640 shift1 = lsq2[0][0]
641 641 width1 = lsq2[0][1]
642 642 Amplitude1 = lsq2[0][2]
643 643 p1 = lsq2[0][3]
644 644
645 645 shift0 = lsq2[0][4]
646 646 width0 = lsq2[0][5]
647 647 Amplitude0 = lsq2[0][6]
648 648 p0 = lsq2[0][7]
649 649 noise = lsq2[0][8]
650 650
651 651 if Amplitude0<0.05: # in case the peak is noise
652 652 shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
653 653 if Amplitude1<0.05:
654 654 shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]
655 655
656 656 # print ('stop 16 ')
657 657 # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0)
658 658 # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1)
659 659 # SPCparam = (SPC_ch1,SPC_ch2)
660 660
661 661 DGauFitParam[0,ht,0] = noise
662 662 DGauFitParam[0,ht,1] = noise
663 663 DGauFitParam[1,ht,0] = Amplitude0
664 664 DGauFitParam[1,ht,1] = Amplitude1
665 665 DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
666 666 DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
667 667 DGauFitParam[3,ht,0] = width0 * deltav
668 668 DGauFitParam[3,ht,1] = width1 * deltav
669 669 DGauFitParam[4,ht,0] = p0
670 670 DGauFitParam[4,ht,1] = p1
671 671
672 672 return DGauFitParam
673 673
674 674 def y_model1(self,x,state):
675 675 shift0, width0, amplitude0, power0, noise = state
676 676 model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
677 677 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
678 678 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
679 679 return model0 + model0u + model0d + noise
680 680
681 681 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
682 682 shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
683 683 model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
684 684 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
685 685 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
686 686
687 687 model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
688 688 model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
689 689 model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
690 690 return model0 + model0u + model0d + model1 + model1u + model1d + noise
691 691
692 692 def misfit1(self,state,y_data,x,num_intg): # This function measures how close the real data is to the model data; the closer it is, the better.
693 693
694 694 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
695 695
696 696 def misfit2(self,state,y_data,x,num_intg):
697 697 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
698 698
699 699 class Oblique_Gauss_Fit(Operation):
700 700 '''
701 701 Written by R. Flores
702 702 '''
703 703 def __init__(self):
704 704 Operation.__init__(self)
705 705
706 706 def Gauss_fit(self,spc,x,nGauss):
707 707
708 708
709 709 def gaussian(x, a, b, c, d):
710 710 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
711 711 return val
712 712
713 713 if nGauss == 'first':
714 714 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
715 715 spc_2_aux = numpy.flip(spc_1_aux)
716 716 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
717 717
718 718 len_dif = len(x)-len(spc_3_aux)
719 719
720 720 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
721 721
722 722 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
723 723
724 724 y = spc_new
725 725
726 726 elif nGauss == 'second':
727 727 y = spc
728 728
729 729
730 730 # estimate starting values from the data
731 731 a = y.max()
732 732 b = x[numpy.argmax(y)]
733 733 if nGauss == 'first':
734 734 c = 1.#b#b#numpy.std(spc)
735 735 elif nGauss == 'second':
736 736 c = b
737 737 else:
738 738 print("ERROR")
739 739
740 740 d = numpy.mean(y[-100:])
741 741
742 742 # define a least squares function to optimize
743 743 def minfunc(params):
744 744 return sum((y-gaussian(x,params[0],params[1],params[2],params[3]))**2)
745 745
746 746 # fit
747 747 popt = fmin(minfunc,[a,b,c,d],disp=False)
748 748 #popt,fopt,niter,funcalls = fmin(minfunc,[a,b,c,d])
749 749
750 750
751 751 return gaussian(x, popt[0], popt[1], popt[2], popt[3]), popt[0], popt[1], popt[2], popt[3]
752 752
753 753
754 754 def Gauss_fit_2(self,spc,x,nGauss):
755 755
756 756
757 757 def gaussian(x, a, b, c, d):
758 758 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
759 759 return val
760 760
761 761 if nGauss == 'first':
762 762 spc_1_aux = numpy.copy(spc[:numpy.argmax(spc)+1])
763 763 spc_2_aux = numpy.flip(spc_1_aux)
764 764 spc_3_aux = numpy.concatenate((spc_1_aux,spc_2_aux[1:]))
765 765
766 766 len_dif = len(x)-len(spc_3_aux)
767 767
768 768 spc_zeros = numpy.ones(len_dif)*spc_1_aux[0]
769 769
770 770 spc_new = numpy.concatenate((spc_3_aux,spc_zeros))
771 771
772 772 y = spc_new
773 773
774 774 elif nGauss == 'second':
775 775 y = spc
776 776
777 777
778 778 # estimate starting values from the data
779 779 a = y.max()
780 780 b = x[numpy.argmax(y)]
781 781 if nGauss == 'first':
782 782 c = 1.#b#b#numpy.std(spc)
783 783 elif nGauss == 'second':
784 784 c = b
785 785 else:
786 786 print("ERROR")
787 787
788 788 d = numpy.mean(y[-100:])
789 789 popt,pcov = curve_fit(gaussian,x,y,p0=[a,b,c,d])
790 790 return gaussian(x, popt[0], popt[1], popt[2], popt[3]),popt[0], popt[1], popt[2], popt[3]
791 791
792 792 def Double_Gauss_fit(self,spc,x,A1,B1,C1,A2,B2,C2,D):
793 793
794 794 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
795 795 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
796 796 return val
797 797
798 798
799 799 y = spc
800 800
801 801 # estimate starting values from the data
802 802 a1 = A1
803 803 b1 = B1
804 804 c1 = C1#numpy.std(spc)
805 805
806 806 a2 = A2#y.max()
807 807 b2 = B2#x[numpy.argmax(y)]
808 808 c2 = C2#numpy.std(spc)
809 809 d = D
810 810
811 811 # define a least squares function to optimize
812 812 def minfunc(params):
813 813 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2)
814 814
815 815 # fit
816 816 popt = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],disp=False)
817 817
818 818 return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
819 819
820 820 def Double_Gauss_fit_2(self,spc,x,A1,B1,C1,A2,B2,C2,D):
821 821
822 822 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
823 823 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
824 824 return val
825 825
826 826
827 827 y = spc
828 828
829 829 # estimate starting values from the data
830 830 a1 = A1
831 831 b1 = B1
832 832 c1 = C1#numpy.std(spc)
833 833
834 834 a2 = A2#y.max()
835 835 b2 = B2#x[numpy.argmax(y)]
836 836 c2 = C2#numpy.std(spc)
837 837 d = D
838 838
839 839 # fit
840 840 popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
841 841 error = numpy.sqrt(numpy.diag(pcov))
842 842
843 843 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
844 844
845 845 def windowing_double(self,spc,x,A1,B1,C1,A2,B2,C2,D):
846 846 from scipy.optimize import curve_fit,fmin
847 847
848 848 def R_gaussian(x, a, b, c):
849 849 N = int(numpy.shape(x)[0])
850 850 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
851 851 return val
852 852
853 853 def T(x,N):
854 854 T = 1-abs(x)/N
855 855 return T
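# Model built in the lag domain: R_gaussian is a complex Gaussian autocorrelation (amplitude a,
# Doppler b, width c), T is the triangular (Bartlett) window accounting for the finite record
# length, and R_T_spc_fun windows each component's ACF, FFTs it back to the frequency domain
# and adds a windowed impulse for the noise floor d.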
856 856
857 857 def R_T_spc_fun(x, a1, b1, c1, a2, b2, c2, d):
858 858
859 859 N = int(numpy.shape(x)[0])
860 860
861 861 x_max = x[-1]
862 862
863 863 x_pos = x[1600:]
864 864 x_neg = x[:1600]
865 865
866 866 R_T_neg_1 = R_gaussian(x, a1, b1, c1)[:1600]*T(x_neg,-x[0])
867 867 R_T_pos_1 = R_gaussian(x, a1, b1, c1)[1600:]*T(x_pos,x[-1])
868 868 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
869 869 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
870 870 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
871 871 max_val_1 = numpy.max(R_T_spc_1)
872 872 R_T_spc_1 = R_T_spc_1*a1/max_val_1
873 873
874 874 R_T_neg_2 = R_gaussian(x, a2, b2, c2)[:1600]*T(x_neg,-x[0])
875 875 R_T_pos_2 = R_gaussian(x, a2, b2, c2)[1600:]*T(x_pos,x[-1])
876 876 R_T_sum_2 = R_T_pos_2 + R_T_neg_2
877 877 R_T_spc_2 = numpy.fft.fft(R_T_sum_2).real
878 878 R_T_spc_2 = numpy.fft.fftshift(R_T_spc_2)
879 879 max_val_2 = numpy.max(R_T_spc_2)
880 880 R_T_spc_2 = R_T_spc_2*a2/max_val_2
881 881
882 882 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
883 883 R_T_d_neg = R_T_d[:1600]*T(x_neg,-x[0])
884 884 R_T_d_pos = R_T_d[1600:]*T(x_pos,x[-1])
885 885 R_T_d_sum = R_T_d_pos + R_T_d_neg
886 886 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
887 887 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
888 888
889 889 R_T_final = R_T_spc_1 + R_T_spc_2 + R_T_spc_3
890 890
891 891 return R_T_final
892 892
893 893 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
894 894
895 895 from scipy.stats import norm
896 896 mean,std=norm.fit(spc)
897 897
898 898 # estimate starting values from the data
899 899 a1 = A1
900 900 b1 = B1
901 901 c1 = C1#numpy.std(spc)
902 902
903 903 a2 = A2#y.max()
904 904 b2 = B2#x[numpy.argmax(y)]
905 905 c2 = C2#numpy.std(spc)
906 906 d = D
907 907
908 908 ippSeconds = 250*20*1.e-6/3
909 909
910 910 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
911 911
912 912 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
913 913
914 914 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
915 915 x_freq = numpy.fft.fftshift(x_freq)
916 916
917 917 # define a least squares function to optimize
918 918 def minfunc(params):
919 919 return sum((y-R_T_spc_fun(x_t,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/1)#y**2)
920 920
921 921 # fit
922 922 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d],full_output=True)
923 923 popt = popt_full[0]
924 924
925 925 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
926 926
927 927 def Double_Gauss_fit_weight(self,spc,x,A1,B1,C1,A2,B2,C2,D):
928 928 from scipy.optimize import curve_fit,fmin
929 929
930 930 def double_gaussian(x, a1, b1, c1, a2, b2, c2, d):
931 931 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
932 932 return val
933 933
934 934 y = spc
935 935
936 936 from scipy.stats import norm
937 937 mean,std=norm.fit(spc)
938 938
939 939 # estimate starting values from the data
940 940 a1 = A1
941 941 b1 = B1
942 942 c1 = C1#numpy.std(spc)
943 943
944 944 a2 = A2#y.max()
945 945 b2 = B2#x[numpy.argmax(y)]
946 946 c2 = C2#numpy.std(spc)
947 947 d = D
948 948
949 949 y_clean = signal.medfilt(y)
950 950 # define a least squares function to optimize
951 951 def minfunc(params):
952 952 return sum((y-double_gaussian(x,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))**2/(y_clean**2/1))
953 953
954 954 # fit
955 955 popt_full = fmin(minfunc,[a1,b1,c1,a2,b2,c2,d], disp =False, full_output=True)
956 956 #print("nIter", popt_full[2])
957 957 popt = popt_full[0]
958 958 #popt,pcov = curve_fit(double_gaussian,x,y,p0=[a1,b1,c1,a2,b2,c2,d])
959 959
960 960 #return double_gaussian(x, popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
961 961 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
962 962
963 963 def DH_mode(self,spectra,VelRange):
964 964
965 965 from scipy.optimize import curve_fit
966 966
967 967 def double_gauss(x, a1,b1,c1, a2,b2,c2, d):
968 968 val = a1 * numpy.exp(-(x - b1)**2 / (2*c1**2)) + a2 * numpy.exp(-(x - b2)**2 / (2*c2**2)) + d
969 969 return val
970 970
971 971 spec = (spectra.copy()).flatten()
972 972 amp=spec.max()
973 973 params=numpy.array([amp,-400,30,amp/4,-200,150,1.0e7])
974 974 #try:
975 975 popt,pcov=curve_fit(double_gauss, VelRange, spec, p0=params,bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf]))
976 976
977 977 error = numpy.sqrt(numpy.diag(pcov))
978 978 #doppler_2=popt[4]
979 979 #err_2 = numpy.sqrt(pcov[4][4])
980 980
981 981 #except:
982 982 #pass
983 983 #doppler_2=numpy.NAN
984 984 #err_2 = numpy.NAN
985 985
986 986 #return doppler_2, err_2
987 987
988 988 return popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6], error[0], error[1], error[2], error[3], error[4], error[5], error[6]
989 989
990 990 def Tri_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d):
991 991
992 992 from scipy.optimize import least_squares
993 993
994 994 freq_max = numpy.max(numpy.abs(freq))
995 995 spc_max = numpy.max(spc)
996 996
997 997 def tri_gaussian(x, a1, b1, c1, a2, b2, c2, a3, b3, c3, d):
998 998 z1 = (x-b1)/c1
999 999 z2 = (x-b2)/c2
1000 1000 z3 = (x-b3)/c3
1001 1001 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + a3 * numpy.exp(-z3**2/2) + d
1002 1002 return val
1003 1003
1004 1004 from scipy.signal import medfilt
1005 1005 Nincoh = 20
1006 1006 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1007 1007 c1 = abs(c1)
1008 1008 c2 = abs(c2)
1009 1009
1010 1010 # define a least squares function to optimize
1011 1011 def lsq_func(params):
1012 1012 return (spc-tri_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9]))/spcm
1013 1013
1014 1014 # fit
1015 1015 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0,0,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,600,numpy.inf,numpy.inf])
1016 1016
1017 1017 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1018 1018 #print(a1,b1,c1,a2,b2,c2,d)
1019 1019 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,a2/4,-b1,c1,d],x_scale=params_scale,bounds=bounds)
1020 1020
1021 1021 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1022 1022 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1023 1023 A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
1024 1024 Df = popt.x[9]
1025 1025
1026 1026 return A1f, B1f, C1f, A2f, B2f, C2f, Df
1027 1027
1028 1028 def duo_Marco(self,spc,freq,a1,b1,c1,a2,b2,c2,d):
1029 1029
1030 1030 from scipy.optimize import least_squares
1031 1031
1032 1032 freq_max = numpy.max(numpy.abs(freq))
1033 1033 spc_max = numpy.max(spc)
1034 1034
1035 1035 def duo_gaussian(x, a1, b1, c1, a2, b2, c2, d):
1036 1036 z1 = (x-b1)/c1
1037 1037 z2 = (x-b2)/c2
1038 1038 #z3 = (x-b3)/c3
1039 1039 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
1040 1040 return val
1041 1041
1042 1042 from scipy.signal import medfilt
1043 1043 Nincoh = 20
1044 1044 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1045 1045 c1 = abs(c1)
1046 1046 c2 = abs(c2)
1047 1047
1048 1048 # define a least squares function to optimize
1049 1049 def lsq_func(params):
1050 1050 return (spc-duo_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm
1051 1051
1052 1052 # fit
1053 1053 bounds=([0,-numpy.inf,0,0,-numpy.inf,0,0],[numpy.inf,-100,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1054 1054
1055 1055 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1056 1056 popt = least_squares(lsq_func,[a1,b1,c1,a2,b2,c2,d],x_scale=params_scale,bounds=bounds)
1057 1057
1058 1058 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1059 1059 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1060 1060 #A3f = popt.x[6]; B3f = popt.x[7]; C3f = popt.x[8]
1061 1061 Df = popt.x[6]
1062 1062
1063 1063 return A1f, B1f, C1f, A2f, B2f, C2f, Df
1064 1064
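# Spectral model library used by the fits below. The 'skew' variants replace the Gaussian
# argument z by y = -ln(1 - k*z)/k and divide by (1 - k*z); as k -> 0 this reduces to the
# symmetric Gaussian, while a nonzero k skews the line shape toward one side of the peak.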
1065 1065 def double_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, d):
1066 1066 z1 = (x-b1)/c1
1067 1067 z2 = (x-b2)/c2
1068 1068 h2 = 1-k2*z2
1069 1069 h2[h2<0] = 0
1070 1070 y2 = -1/k2*numpy.log(h2)
1071 1071 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1072 1072 return val
1073 1073
1074 1074 def gaussian(self, x, a, b, c, d):
1075 1075 z = (x-b)/c
1076 1076 val = a * numpy.exp(-z**2/2) + d
1077 1077 return val
1078 1078
1079 1079 def double_gaussian(self, x, a1, b1, c1, a2, b2, c2, d):
1080 1080 z1 = (x-b1)/c1
1081 1081 z2 = (x-b2)/c2
1082 1082 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-z2**2/2) + d
1083 1083 return val
1084 1084
1085 1085 def double_gaussian_double_skew(self,x, a1, b1, c1, k1, a2, b2, c2, k2, d):
1086 1086
1087 1087 z1 = (x-b1)/c1
1088 1088 h1 = 1-k1*z1
1089 1089 h1[h1<0] = 0
1090 1090 y1 = -1/k1*numpy.log(h1)
1091 1091
1092 1092 z2 = (x-b2)/c2
1093 1093 h2 = 1-k2*z2
1094 1094 h2[h2<0] = 0
1095 1095 y2 = -1/k2*numpy.log(h2)
1096 1096
1097 1097 val = a1 * numpy.exp(-y1**2/2)/(1-k1*z1) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1098 1098 return val
1099 1099
1100 1100 def gaussian_skew(self,x, a2, b2, c2, k2, d):
1101 1101 z2 = (x-b2)/c2
1102 1102 h2 = 1-k2*z2
1103 1103 h2[h2<0] = 0
1104 1104 y2 = -1/k2*numpy.log(h2)
1105 1105 val = a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + d
1106 1106 return val
1107 1107
1108 1108 def triple_gaussian_skew(self,x, a1, b1, c1, a2, b2, c2, k2, a3, b3, c3, k3, d):
1109 1109 z1 = (x-b1)/c1
1110 1110 z2 = (x-b2)/c2
1111 1111 z3 = (x-b3)/c3
1112 1112 h2 = 1-k2*z2
1113 1113 h2[h2<0] = 0
1114 1114 y2 = -1/k2*numpy.log(h2)
1115 1115 h3 = 1-k3*z3
1116 1116 h3[h3<0] = 0
1117 1117 y3 = -1/k3*numpy.log(h3)
1118 1118 val = a1 * numpy.exp(-z1**2/2) + a2 * numpy.exp(-y2**2/2)/(1-k2*z2) + a3 * numpy.exp(-y3**2/2)/(1-k3*z3) + d
1119 1119 return val
1120 1120
1121 1121 def Double_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1122 1122
1123 1123 from scipy.optimize import least_squares
1124 1124
1125 1125 freq_max = numpy.max(numpy.abs(freq))
1126 1126 spc_max = numpy.max(spc)
1127 1127
1128 1128 from scipy.signal import medfilt
1129 1129 Nincoh = 20
1130 1130 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1131 1131
1132 1132 # define a least squares function to optimize
1133 1133 def lsq_func(params):
1134 1134 return (spc-self.double_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7]))/spcm
1135 1135
1136 1136 # fit
1137 1137 bounds=([0,-numpy.inf,0,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1138 1138
1139 1139 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max]
1140 1140 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,1.0e7])
1141 1141 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1142 1142 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1143 1143 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1144 1144 Df = popt.x[7]
1145 1145
1146 1146 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1147 1147 doppler = freq[numpy.argmax(aux)]
1148 1148
1149 1149 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, Df, doppler
1150 1150
1151 1151 def Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh,hei):
1152 1152
1153 1153 from scipy.optimize import least_squares
1154 1154
1155 1155 freq_max = numpy.max(numpy.abs(freq))
1156 1156 spc_max = numpy.max(spc)
1157 1157
1158 1158 #from scipy.signal import medfilt
1159 1159 #Nincoh = 20
1160 1160 #Nincoh = 80
1161 1161 Nincoh = Nincoh
1162 1162 #spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1163 1163 spcm = spc/numpy.sqrt(Nincoh)
1164 1164
1165 1165 # define a least squares function to optimize
1166 1166 def lsq_func(params):
1167 1167 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1168 1168
1169 1169 # fit
1170 1170 bounds=([0,-numpy.inf,0,-5,0,-400,0,0,0],[numpy.inf,-200,numpy.inf,5,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1171 1171
1172 1172 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1173 1173
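# Seed the second (skewed) Gaussian about 100 x-axis units from the main spectral peak,
# shifted toward zero, so the optimizer starts from two separated components.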
1174 1174 dop1_x0 = freq[numpy.argmax(spc)]
1175 1175 if dop1_x0 < 0:
1176 1176 dop2_x0 = dop1_x0 + 100
1177 1177 else:
1178 1178 dop2_x0 = dop1_x0 - 100
1179 1179
1180 1180 x0_value = numpy.array([spc_max,dop1_x0,30,-.1,spc_max/4, dop2_x0,150,1,1.0e7])
1181 1181 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1182 1182 J = popt.jac
1183 1183
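# Approximate the parameter covariance from the Jacobian of the weighted residuals,
# cov ~ (J^T J)^-1, and report the 1-sigma errors as the square roots of its diagonal;
# if J^T J is singular the errors are set to NaN.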
1184 1184 try:
1185 1185 cov = numpy.linalg.inv(J.T.dot(J))
1186 1186 error = numpy.sqrt(numpy.diagonal(cov))
1187 1187 except:
1188 1188 error = numpy.ones((9))*numpy.NAN
1189 1189
1190 1190 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1191 1191 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1192 1192 Df = popt.x[8]
1193 1193 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1194 1194 doppler1 = freq[numpy.argmax(aux1)]
1195 1195
1196 1196 aux2 = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1197 1197 doppler2 = freq[numpy.argmax(aux2)]
1198 1198 #print("error",error)
1199 1199 #exit(1)
1200 1200
1201 1201
1202 1202 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler1, doppler2, error
1203 1203
1204 1204 def Double_Gauss_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1205 1205
1206 1206 from scipy.optimize import least_squares
1207 1207
1208 1208 freq_max = numpy.max(numpy.abs(freq))
1209 1209 spc_max = numpy.max(spc)
1210 1210
1211 1211 from scipy.signal import medfilt
1212 1212 Nincoh = 20
1213 1213 Nincoh = 80
1214 1214 Nincoh = Nincoh
1215 1215 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1216 1216
1217 1217 # define a least squares function to optimize
1218 1218 def lsq_func(params):
1219 1219 return (spc-self.double_gaussian(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6]))/spcm
1220 1220
1221 1221 # fit
1222 1222 # bounds=([0,-460,0,0,-400,120,0],[numpy.inf,-340,50,numpy.inf,0,250,numpy.inf])
1223 1223 # bounds=([0,-numpy.inf,0,0,-numpy.inf,0,-numpy.inf,0],[numpy.inf,-200,numpy.inf,numpy.inf,0,numpy.inf,0,numpy.inf])
1224 1224 #print(a1,b1,c1,a2,b2,c2,k2,d)
1225 1225
1226 1226 dop1_x0 = freq[numpy.argmax(spcm)]
1227 1227
1228 1228 bounds=([0,-numpy.inf,0,0,dop1_x0-50,0,0],[numpy.inf,-300,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf])
1229 1229 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,spc_max]
1230 1230 x0_value = numpy.array([spc_max,-400.5,30,spc_max/4,dop1_x0,150,1.0e7])
1231 1231 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1232 1232 J = popt.jac
1233 1233
1234 1234 try:
1235 1235 cov = numpy.linalg.inv(J.T.dot(J))
1236 1236 error = numpy.sqrt(numpy.diagonal(cov))
1237 1237 except:
1238 1238 error = numpy.ones((7))*numpy.NAN
1239 1239
1240 1240 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1241 1241 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]
1242 1242 Df = popt.x[6]
1243 1243 return A1f, B1f, C1f, A2f, B2f, C2f, Df, error
1244 1244
1245 1245 def Double_Gauss_Double_Skew_fit_weight_bound_with_inputs(self, spc, freq, a1, b1, c1, a2, b2, c2, k2, d):
1246 1246
1247 1247 from scipy.optimize import least_squares
1248 1248
1249 1249 freq_max = numpy.max(numpy.abs(freq))
1250 1250 spc_max = numpy.max(spc)
1251 1251
1252 1252 from scipy.signal import medfilt
1253 1253 Nincoh = 20 # NOTE: dataOut is not available in this scope; fixed value assumed, matching the sibling fit methods
1254 1254 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1255 1255
1256 1256 # define a least squares function to optimize
1257 1257 def lsq_func(params):
1258 1258 return (spc-self.double_gaussian_double_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8]))/spcm
1259 1259
1260 1260
1261 1261 bounds=([0,-numpy.inf,0,-numpy.inf,0,-400,0,0,0],[numpy.inf,-340,numpy.inf,0,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf])
1262 1262
1263 1263 params_scale = [spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1264 1264
1265 1265 x0_value = numpy.array([a1,b1,c1,-.1,a2,b2,c2,k2,d])
1266 1266
1267 1267 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1268 1268
1269 1269 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1270 1270 A2f = popt.x[4]; B2f = popt.x[5]; C2f = popt.x[6]; K2f = popt.x[7]
1271 1271 Df = popt.x[8]
1272 1272
1273 1273 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1274 1274 doppler = freq[numpy.argmax(aux)]
1275 1275
1276 1276 return A1f, B1f, C1f, K1f, A2f, B2f, C2f, K2f, Df, doppler
1277 1277
1278 1278 def Triple_Gauss_Skew_fit_weight_bound_no_inputs(self,spc,freq):
1279 1279
1280 1280 from scipy.optimize import least_squares
1281 1281
1282 1282 freq_max = numpy.max(numpy.abs(freq))
1283 1283 spc_max = numpy.max(spc)
1284 1284
1285 1285 from scipy.signal import medfilt
1286 1286 Nincoh = 20
1287 1287 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1288 1288
1289 1289 # define a least squares function to optimize
1290 1290 def lsq_func(params):
1291 1291 return (spc-self.triple_gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4],params[5],params[6],params[7],params[8],params[9],params[10],params[11]))/spcm
1292 1292
1293 1293 # fit
1294 1294 bounds=([0,-numpy.inf,0,0,-400,0,0,0,0,0,0,0],[numpy.inf,-340,numpy.inf,numpy.inf,0,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1295 1295
1296 1296 params_scale = [spc_max,freq_max,freq_max,spc_max,freq_max,freq_max,1,spc_max,freq_max,freq_max,1,spc_max]
1297 1297 x0_value = numpy.array([spc_max,-400,30,spc_max/4,-200,150,1,spc_max/4,400,150,1,1.0e7])
1298 1298 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1299 1299
1300 1300 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1301 1301 A2f = popt.x[3]; B2f = popt.x[4]; C2f = popt.x[5]; K2f = popt.x[6]
1302 1302 A3f = popt.x[7]; B3f = popt.x[8]; C3f = popt.x[9]; K3f = popt.x[10]
1303 1303 Df = popt.x[11]
1304 1304
1305 1305 aux = self.gaussian_skew(freq, A2f, B2f, C2f, K2f, Df)
1306 1306 doppler = freq[numpy.argmax(aux)]
1307 1307
1308 1308 return A1f, B1f, C1f, A2f, B2f, C2f, K2f, A3f, B3f, C3f, K3f, Df, doppler
1309 1309
1310 1310 def CEEJ_Skew_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1311 1311
1312 1312 from scipy.optimize import least_squares
1313 1313
1314 1314 freq_max = numpy.max(numpy.abs(freq))
1315 1315 spc_max = numpy.max(spc)
1316 1316
1317 1317 from scipy.signal import medfilt
1318 1318 Nincoh = 20
1319 1319 Nincoh = 80
1320 1320 Nincoh = Nincoh
1321 1321 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1322 1322
1323 1323 # define a least squares function to optimize
1324 1324 def lsq_func(params):
1325 1325 return (spc-self.gaussian_skew(freq,params[0],params[1],params[2],params[3],params[4]))#/spcm
1326 1326
1327 1327
1328 1328 bounds=([0,0,0,-numpy.inf,0],[numpy.inf,numpy.inf,numpy.inf,0,numpy.inf])
1329 1329
1330 1330 params_scale = [spc_max,freq_max,freq_max,1,spc_max]
1331 1331
1332 1332 x0_value = numpy.array([spc_max,freq[numpy.argmax(spc)],30,-.1,numpy.mean(spc[:50])])
1333 1333
1334 1334 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1335 1335
1336 1336 J = popt.jac
1337 1337
1338 1338 try:
1339 1339 error = numpy.ones((9))*numpy.NAN
1340 1340 cov = numpy.linalg.inv(J.T.dot(J))
1341 1341 error[:4] = numpy.sqrt(numpy.diagonal(cov))[:4]
1342 1342 error[-1] = numpy.sqrt(numpy.diagonal(cov))[-1]
1343 1343 except:
1344 1344 error = numpy.ones((9))*numpy.NAN
1345 1345
1346 1346 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]; K1f = popt.x[3]
1347 1347 Df = popt.x[4]
1348 1348
1349 1349 aux1 = self.gaussian_skew(freq, A1f, B1f, C1f, K1f, Df)
1350 1350 doppler1 = freq[numpy.argmax(aux1)]
1351 1351 #print("CEEJ ERROR:",error)
1352 1352
1353 1353 return A1f, B1f, C1f, K1f, numpy.NAN, numpy.NAN, numpy.NAN, numpy.NAN, Df, doppler1, numpy.NAN, error
1354 1354
1355 1355 def CEEJ_fit_weight_bound_no_inputs(self,spc,freq,Nincoh):
1356 1356
1357 1357 from scipy.optimize import least_squares
1358 1358
1359 1359 freq_max = numpy.max(numpy.abs(freq))
1360 1360 spc_max = numpy.max(spc)
1361 1361
1362 1362 from scipy.signal import medfilt
1363 1363 Nincoh = 20
1364 1364 Nincoh = 80
1365 1365 Nincoh = Nincoh
1366 1366 spcm = medfilt(spc,11)/numpy.sqrt(Nincoh)
1367 1367
1368 1368 # define a least squares function to optimize
1369 1369 def lsq_func(params):
1370 1370 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))#/spcm
1371 1371
1372 1372
1373 1373 bounds=([0,0,0,0],[numpy.inf,numpy.inf,numpy.inf,numpy.inf])
1374 1374
1375 1375 params_scale = [spc_max,freq_max,freq_max,spc_max]
1376 1376
1377 1377 x0_value = numpy.array([spc_max,freq[numpy.argmax(spcm)],30,numpy.mean(spc[:50])])
1378 1378
1379 1379 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1380 1380
1381 1381 J = popt.jac
1382 1382
1383 1383 try:
1384 1384 error = numpy.ones((4))*numpy.NAN
1385 1385 cov = numpy.linalg.inv(J.T.dot(J))
1386 1386 error = numpy.sqrt(numpy.diagonal(cov))
1387 1387 except:
1388 1388 error = numpy.ones((4))*numpy.NAN
1389 1389
1390 1390 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1391 1391 Df = popt.x[3]
1392 1392
1393 1393 return A1f, B1f, C1f, Df, error
1394 1394
1395 1395 def Simple_fit_bound(self,spc,freq,Nincoh):
1396 1396
1397 1397 freq_max = numpy.max(numpy.abs(freq))
1398 1398 spc_max = numpy.max(spc)
1399 1399
1400 1400 Nincoh = Nincoh
1401 1401
1402 1402 def lsq_func(params):
1403 1403 return (spc-self.gaussian(freq,params[0],params[1],params[2],params[3]))
1404 1404
1405 1405 bounds=([0,-50,0,0],[numpy.inf,+50,numpy.inf,numpy.inf])
1406 1406
1407 1407 params_scale = [spc_max,freq_max,freq_max,spc_max]
1408 1408
1409 1409 x0_value = numpy.array([spc_max,-20.5,5,1.0e7])
1410 1410
1411 1411 popt = least_squares(lsq_func,x0=x0_value,x_scale=params_scale,bounds=bounds,verbose=0)
1412 1412
1413 1413 J = popt.jac
1414 1414
1415 1415 try:
1416 1416 cov = numpy.linalg.inv(J.T.dot(J))
1417 1417 error = numpy.sqrt(numpy.diagonal(cov))
1418 1418 except:
1419 1419 error = numpy.ones((4))*numpy.NAN
1420 1420
1421 1421 A1f = popt.x[0]; B1f = popt.x[1]; C1f = popt.x[2]
1422 1422 Df = popt.x[3]
1423 1423
1424 1424 return A1f, B1f, C1f, Df, error
1425 1425
1426 1426 def clean_outliers(self,param):
1427 1427
1428 1428 threshold = 700
1429 1429
1430 1430 param = numpy.where(param < -threshold, numpy.nan, param)
1431 1431 param = numpy.where(param > +threshold, numpy.nan, param)
1432 1432
1433 1433 return param
1434 1434
1435 1435 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1436 1436 from scipy.optimize import curve_fit,fmin
1437 1437
1438 1438 def R_gaussian(x, a, b, c):
1439 1439 N = int(numpy.shape(x)[0])
1440 1440 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1441 1441 return val
1442 1442
1443 1443 def T(x,N):
1444 1444 T = 1-abs(x)/N
1445 1445 return T
1446 1446
1447 1447 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
1448 1448
1449 1449 N = int(numpy.shape(x)[0])
1450 1450
1451 1451 x_max = x[-1]
1452 1452
1453 1453 x_pos = x[int(nFFTPoints/2):]
1454 1454 x_neg = x[:int(nFFTPoints/2)]
1455 1455
1456 1456 R_T_neg_1 = R_gaussian(x, a, b, c)[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1457 1457 R_T_pos_1 = R_gaussian(x, a, b, c)[int(nFFTPoints/2):]*T(x_pos,x[-1])
1458 1458 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1459 1459 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1460 1460 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1461 1461 max_val_1 = numpy.max(R_T_spc_1)
1462 1462 R_T_spc_1 = R_T_spc_1*a/max_val_1
1463 1463
1464 1464 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1465 1465 R_T_d_neg = R_T_d[:int(nFFTPoints/2)]*T(x_neg,-x[0])
1466 1466 R_T_d_pos = R_T_d[int(nFFTPoints/2):]*T(x_pos,x[-1])
1467 1467 R_T_d_sum = R_T_d_pos + R_T_d_neg
1468 1468 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1469 1469 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1470 1470
1471 1471 R_T_final = R_T_spc_1 + R_T_spc_3
1472 1472
1473 1473 return R_T_final
1474 1474
1475 1475 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1476 1476
1477 1477 from scipy.stats import norm
1478 1478 mean,std=norm.fit(spc)
1479 1479
1480 1480 # estimate starting values from the data
1481 1481 a = A
1482 1482 b = B
1483 1483 c = C#numpy.std(spc)
1484 1484 d = D
1485 1485 '''
1486 1486 ippSeconds = 250*20*1.e-6/3
1487 1487
1488 1488 x_t = ippSeconds * (numpy.arange(1600) -1600 / 2.)
1489 1489
1490 1490 x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1491 1491
1492 1492 x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1493 1493 x_freq = numpy.fft.fftshift(x_freq)
1494 1494 '''
1495 1495 # define a least squares function to optimize
1496 1496 def minfunc(params):
1497 1497 return sum((y-R_T_spc_fun(x,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
1498 1498
1499 1499 # fit
1500 1500
1501 1501 popt_full = fmin(minfunc,[a,b,c,d],full_output=True)
1502 1502 #print("nIter", popt_full[2])
1503 1503 popt = popt_full[0]
1504 1504
1505 1505 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1506 1506 return popt[0], popt[1], popt[2], popt[3]
1507 1507
1508 1508 def run(self, dataOut, mode = 0, Hmin1 = None, Hmax1 = None, Hmin2 = None, Hmax2 = None, Dop = 'Shift'):
1509 1509
1510 1510 pwcode = 1
1511 1511
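# Normalize the raw spectra by the overall processing gain (profiles, coherent and incoherent
# integrations, decoding power and filter window) so the fitted amplitudes and the derived
# power/powerdB are in consistent units.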
1512 1512 if dataOut.flagDecodeData:
1513 1513 pwcode = numpy.sum(dataOut.code[0]**2)
1514 1514 #normFactor = min(self.nFFTPoints,self.nProfiles)*self.nIncohInt*self.nCohInt*pwcode*self.windowOfFilter
1515 1515 normFactor = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * pwcode * dataOut.windowOfFilter
1516 1516 factor = normFactor
1517 1517 z = dataOut.data_spc / factor
1518 1518 z = numpy.where(numpy.isfinite(z), z, numpy.NAN)
1519 1519 dataOut.power = numpy.average(z, axis=1)
1520 1520 dataOut.powerdB = 10 * numpy.log10(dataOut.power)
1521 1521
1522 1522 x = dataOut.getVelRange(0)
1523 1523
1524 1524 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1525 1525 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1526 1526 dataOut.dplr_2_u = numpy.ones((1,1,dataOut.nHeights))*numpy.NAN
1527 1527
1528 1528 if mode == 6:
1529 1529 dataOut.Oblique_params = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1530 1530 elif mode == 7:
1531 1531 dataOut.Oblique_params = numpy.ones((1,13,dataOut.nHeights))*numpy.NAN
1532 1532 elif mode == 8:
1533 1533 dataOut.Oblique_params = numpy.ones((1,10,dataOut.nHeights))*numpy.NAN
1534 1534 elif mode == 9:
1535 1535 dataOut.Oblique_params = numpy.ones((1,11,dataOut.nHeights))*numpy.NAN
1536 1536 dataOut.Oblique_param_errors = numpy.ones((1,9,dataOut.nHeights))*numpy.NAN
1537 1537 elif mode == 11:
1538 1538 dataOut.Oblique_params = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1539 1539 dataOut.Oblique_param_errors = numpy.ones((1,7,dataOut.nHeights))*numpy.NAN
1540 1540 elif mode == 10: #150 km
1541 1541 dataOut.Oblique_params = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1542 1542 dataOut.Oblique_param_errors = numpy.ones((1,4,dataOut.nHeights))*numpy.NAN
1543 1543 dataOut.snr_log10 = numpy.ones((1,dataOut.nHeights))*numpy.NAN
1544 1544
1545 1545 dataOut.VelRange = x
1546 1546
1547 1547
1548 1548
1549 1549 #l1=range(22,36) #+62
1550 1550 #l1=range(32,36)
1551 1551 #l2=range(58,99) #+62
1552 1552
1553 1553 #if Hmin1 == None or Hmax1 == None or Hmin2 == None or Hmax2 == None:
1554 1554
1555 1555 minHei1 = 105.
1556 1556 maxHei1 = 122.5
1557 1557 maxHei1 = 130.5
1558 1558
1559 1559 if mode == 10: #150 km
1560 1560 minHei1 = 100
1561 1561 maxHei1 = 100
1562 1562
1563 1563 inda1 = numpy.where(dataOut.heightList >= minHei1)
1564 1564 indb1 = numpy.where(dataOut.heightList <= maxHei1)
1565 1565
1566 1566 minIndex1 = inda1[0][0]
1567 1567 maxIndex1 = indb1[0][-1]
1568 1568
1569 1569 minHei2 = 150.
1570 1570 maxHei2 = 201.25
1571 1571 maxHei2 = 225.3
1572 1572
1573 1573 if mode == 10: #150 km
1574 1574 minHei2 = 110
1575 1575 maxHei2 = 165
1576 1576
1577 1577 inda2 = numpy.where(dataOut.heightList >= minHei2)
1578 1578 indb2 = numpy.where(dataOut.heightList <= maxHei2)
1579 1579
1580 1580 minIndex2 = inda2[0][0]
1581 1581 maxIndex2 = indb2[0][-1]
1582 1582
1583 1583 l1=range(minIndex1,maxIndex1)
1584 1584 l2=range(minIndex2,maxIndex2)
1585 1585
1586 1586 if mode == 4:
1587 1587 '''
1588 1588 for ind in range(dataOut.nHeights):
1589 1589 if(dataOut.heightList[ind]>=168 and dataOut.heightList[ind]<188):
1590 1590 try:
1591 1591 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
1592 1592 except:
1593 1593 pass
1594 1594 '''
1595 1595 for ind in itertools.chain(l1, l2):
1596 1596
1597 1597 try:
1598 1598 dataOut.Oblique_params[0,0,ind],dataOut.Oblique_params[0,1,ind],dataOut.Oblique_params[0,2,ind],dataOut.Oblique_params[0,3,ind],dataOut.Oblique_params[0,4,ind],dataOut.Oblique_params[0,5,ind],dataOut.Oblique_params[0,6,ind],dataOut.Oblique_param_errors[0,0,ind],dataOut.Oblique_param_errors[0,1,ind],dataOut.Oblique_param_errors[0,2,ind],dataOut.Oblique_param_errors[0,3,ind],dataOut.Oblique_param_errors[0,4,ind],dataOut.Oblique_param_errors[0,5,ind],dataOut.Oblique_param_errors[0,6,ind] = self.DH_mode(dataOut.data_spc[0,:,ind],dataOut.VelRange)
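# The sin(arccos(102/h)) factor appears to project the fitted line-of-sight Doppler into a
# horizontal drift, assuming the scattering layer sits near 102 km altitude (other modes below
# use 100 km); this geometry is taken as an assumption here.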
1599 1599 dataOut.dplr_2_u[0,0,ind] = dataOut.Oblique_params[0,4,ind]/numpy.sin(numpy.arccos(102/dataOut.heightList[ind]))
1600 1600 except:
1601 1601 pass
1602 1602
1603 1603 else:
1604 1604 for hei in itertools.chain(l1, l2):
1605 1605 if numpy.isnan(dataOut.snl[0,hei]) or dataOut.snl[0,hei] < 0.0:
1606 1606
1607 1607 continue #Avoids the analysis when there is only noise
1608 1608
1609 1609 try:
1610 1610 spc = dataOut.data_spc[0,:,hei]
1611 1611
1612 1612 if mode == 6: #Skew Weighted Bounded
1613 1613 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei] = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1614 1614 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,8,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1615 1615
1616 1616 elif mode == 7: #Triple Skew Weighted Bounded
1617 1617 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_params[0,11,hei],dataOut.Oblique_params[0,12,hei] = self.Triple_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1618 1618 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,12,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1619 1619
1620 1620 elif mode == 8: #Double Skewed Weighted Bounded with inputs
1621 1621 a1, b1, c1, a2, b2, c2, k2, d, dopp = self.Double_Gauss_Skew_fit_weight_bound_no_inputs(spc,x)
1622 1622 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_with_inputs(spc,x, a1, b1, c1, a2, b2, c2, k2, d)
1623 1623 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,9,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1624 1624
1625 1625 elif mode == 9: #Double Skewed Weighted Bounded no inputs
1626 1626 #if numpy.max(spc) <= 0:
1627 1627 from scipy.signal import medfilt
1628 1628 spcm = medfilt(spc,11)
1629 1629 if x[numpy.argmax(spcm)] <= 0:
1630 1630 #print("EEJ", dataOut.heightList[hei], hei)
1631 1631 #if hei != 70:
1632 1632 #continue
1633 1633 #else:
1634 1634 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_Double_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt,dataOut.heightList[hei])
1635 1635 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1636 1636 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1637 1637 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1638 1638
1639 1639 else:
1640 1640 #print("CEEJ")
1641 1641 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_params[0,7,hei],dataOut.Oblique_params[0,8,hei],dataOut.Oblique_params[0,9,hei],dataOut.Oblique_params[0,10,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_Skew_fit_weight_bound_no_inputs(spcm,x,dataOut.nIncohInt)
1642 1642 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1643 1643 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1644 1644 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,10,hei]/numpy.sin(numpy.arccos(100./dataOut.heightList[hei]))
1645 1645 elif mode == 11: #Double Weighted Bounded no inputs
1646 1646 #if numpy.max(spc) <= 0:
1647 1647 from scipy.signal import medfilt
1648 1648 spcm = medfilt(spc,11)
1649 1649
1650 1650 if x[numpy.argmax(spcm)] <= 0:
1651 1651 #print("EEJ")
1652 1652 #print("EEJ",dataOut.heightList[hei])
1653 1653 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Double_Gauss_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1654 1654 #if dataOut.Oblique_params[0,-2,hei] < -500 or dataOut.Oblique_params[0,-2,hei] > 500 or dataOut.Oblique_params[0,-1,hei] < -500 or dataOut.Oblique_params[0,-1,hei] > 500:
1655 1655 # dataOut.Oblique_params[0,:,hei] *= numpy.NAN
1656 1656 else:
1657 1657 #print("CEEJ",dataOut.heightList[hei])
1658 1658 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.CEEJ_fit_weight_bound_no_inputs(spc,x,dataOut.nIncohInt)
1659 1659
1660 1660 elif mode == 10: #150km
1661 1661 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_param_errors[0,:,hei] = self.Simple_fit_bound(spc,x,dataOut.nIncohInt)
1662 1662 snr = (dataOut.power[0,hei]*factor - dataOut.Oblique_params[0,3,hei])/dataOut.Oblique_params[0,3,hei]
1663 1663 dataOut.snr_log10[0,hei] = numpy.log10(snr)
1664 1664
1665 1665 else:
1666 1666 spc_fit, A1, B1, C1, D1 = self.Gauss_fit_2(spc,x,'first')
1667 1667
1668 1668 spc_diff = spc - spc_fit
1669 1669 spc_diff[spc_diff < 0] = 0
1670 1670
1671 1671 spc_fit_diff, A2, B2, C2, D2 = self.Gauss_fit_2(spc_diff,x,'second')
1672 1672
1673 1673 D = (D1+D2)
1674 1674
1675 1675 if mode == 0: #Double Fit
1676 1676 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei],dataOut.Oblique_param_errors[0,0,hei],dataOut.Oblique_param_errors[0,1,hei],dataOut.Oblique_param_errors[0,2,hei],dataOut.Oblique_param_errors[0,3,hei],dataOut.Oblique_param_errors[0,4,hei],dataOut.Oblique_param_errors[0,5,hei],dataOut.Oblique_param_errors[0,6,hei] = self.Double_Gauss_fit_2(spc,x,A1,B1,C1,A2,B2,C2,D)
1677 1677 #spc_double_fit,dataOut.Oblique_params = self.Double_Gauss_fit(spc,x,A1,B1,C1,A2,B2,C2,D)
1678 1678
1679 1679 elif mode == 1: #Double Fit Windowed
1680 1680 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.windowing_double(spc,dataOut.getFreqRange(0),A1,B1,C1,A2,B2,C2,D)
1681 1681
1682 1682 elif mode == 2: #Double Fit Weight
1683 1683 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1684 1684
1685 1685 elif mode == 3: #Simple Fit
1686 1686 dataOut.Oblique_params[0,0,hei] = A1
1687 1687 dataOut.Oblique_params[0,1,hei] = B1
1688 1688 dataOut.Oblique_params[0,2,hei] = C1
1689 1689 dataOut.Oblique_params[0,3,hei] = A2
1690 1690 dataOut.Oblique_params[0,4,hei] = B2
1691 1691 dataOut.Oblique_params[0,5,hei] = C2
1692 1692 dataOut.Oblique_params[0,6,hei] = D
1693 1693
1694 1694 elif mode == 5: #Triple Fit Weight
1695 1695 if hei in l1:
1696 1696 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.duo_Marco(spc,x,A1,B1,C1,A2,B2,C2,D)
1697 1697 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1698 1698 #print(dataOut.Oblique_params[0,0,hei])
1699 1699 #print(dataOut.dplr_2_u[0,0,hei])
1700 1700 else:
1701 1701 dataOut.Oblique_params[0,0,hei],dataOut.Oblique_params[0,1,hei],dataOut.Oblique_params[0,2,hei],dataOut.Oblique_params[0,3,hei],dataOut.Oblique_params[0,4,hei],dataOut.Oblique_params[0,5,hei],dataOut.Oblique_params[0,6,hei] = self.Double_Gauss_fit_weight(spc,x,A1,B1,C1,A2,B2,C2,D)
1702 1702 dataOut.dplr_2_u[0,0,hei] = dataOut.Oblique_params[0,4,hei]/numpy.sin(numpy.arccos(102/dataOut.heightList[hei]))
1703 1703
1704 1704
1705 1705 except:
1706 1706 ###dataOut.Oblique_params[0,:,hei] = dataOut.Oblique_params[0,:,hei]*numpy.NAN
1707 1707 pass
1708 1708
1709 1709 #exit(1)
1710 1710 dataOut.paramInterval = dataOut.nProfiles*dataOut.nCohInt*dataOut.ippSeconds
1711 1711 dataOut.lat=-11.95
1712 1712 dataOut.lon=-76.87
1713 1713 '''
1714 1714 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params<-700, numpy.nan, dop_t1)
1715 1715 dataOut.Oblique_params = numpy.where(dataOut.Oblique_params<+700, numpy.nan, dop_t1)
1716 1716 The amplitudes should be exempted here
1717 1717 '''
1718 1718 if mode == 9: #Double Skew Gaussian
1719 1719 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1720 1720 #dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1721 1721 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1722 1722 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1723 1723 #dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1724 1724 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,6,:]
1725 1725 if Dop == 'Shift':
1726 1726 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:] #Shift
1727 1727 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,5,:] #Shift
1728 1728 elif Dop == 'Max':
1729 1729 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,-2,:] #Pos[Max_value]
1730 1730 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,-1,:] #Pos[Max_value]
1731 1731
1732 1732 dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:] #Is this actually the error?
1733 1733 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1734 1734 dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,5,:] #Is this actually the error?
1735 1735 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,6,:]
1736 1736
1737 1737 elif mode == 11: #Double Gaussian
1738 1738 dataOut.Dop_EEJ_T1 = dataOut.Oblique_params[:,1,:]
1739 1739 dataOut.Spec_W_T1 = dataOut.Oblique_params[:,2,:]
1740 1740 dataOut.Dop_EEJ_T2 = dataOut.Oblique_params[:,4,:]
1741 1741 dataOut.Spec_W_T2 = dataOut.Oblique_params[:,5,:]
1742 1742
1743 1743 dataOut.Err_Dop_EEJ_T1 = dataOut.Oblique_param_errors[:,1,:]
1744 1744 dataOut.Err_Spec_W_T1 = dataOut.Oblique_param_errors[:,2,:]
1745 1745 dataOut.Err_Dop_EEJ_T2 = dataOut.Oblique_param_errors[:,4,:]
1746 1746 dataOut.Err_Spec_W_T2 = dataOut.Oblique_param_errors[:,5,:]
1747 1747
1748 1748 #print("Before: ", dataOut.Dop_EEJ_T2)
1749 1749 dataOut.Spec_W_T1 = self.clean_outliers(dataOut.Spec_W_T1)
1750 1750 dataOut.Spec_W_T2 = self.clean_outliers(dataOut.Spec_W_T2)
1751 1751 dataOut.Dop_EEJ_T1 = self.clean_outliers(dataOut.Dop_EEJ_T1)
1752 1752 dataOut.Dop_EEJ_T2 = self.clean_outliers(dataOut.Dop_EEJ_T2)
1753 1753 #print("After: ", dataOut.Dop_EEJ_T2)
1754 1754 dataOut.Err_Spec_W_T1 = self.clean_outliers(dataOut.Err_Spec_W_T1)
1755 1755 dataOut.Err_Spec_W_T2 = self.clean_outliers(dataOut.Err_Spec_W_T2)
1756 1756 dataOut.Err_Dop_EEJ_T1 = self.clean_outliers(dataOut.Err_Dop_EEJ_T1)
1757 1757 dataOut.Err_Dop_EEJ_T2 = self.clean_outliers(dataOut.Err_Dop_EEJ_T2)
1758 1758 #print("Before data_snr: ", dataOut.data_snr)
1759 1759 #dataOut.data_snr = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.data_snr)
1760 1760 dataOut.snl = numpy.where(numpy.isnan(dataOut.Dop_EEJ_T1), numpy.nan, dataOut.snl)
1761 1761
1762 1762 #print("After data_snr: ", dataOut.data_snr)
1763 1763 dataOut.mode = mode
1764 1764 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.Dop_EEJ_T1)) #If all values are NaN, do not proceed
1765 1765 ###dataOut.flagNoData = False #Uncomment only for plotting; otherwise keep it commented (for saving)
1766 1766
1767 1767 return dataOut
1768 1768
1769 1769 class Gaussian_Windowed(Operation):
1770 1770 '''
1771 1771 Written by R. Flores
1772 1772 '''
1773 1773 def __init__(self):
1774 1774 Operation.__init__(self)
1775 1775
1776 1776 def windowing_single(self,spc,x,A,B,C,D,nFFTPoints):
1777 1777 from scipy.optimize import curve_fit,fmin
1778 1778
1779 1779 def gaussian(x, a, b, c, d):
1780 1780 val = a * numpy.exp(-(x - b)**2 / (2*c**2)) + d
1781 1781 return val
1782 1782
1783 1783 def R_gaussian(x, a, b, c):
1784 1784 N = int(numpy.shape(x)[0])
1785 1785 val = a * numpy.exp(-((x)*c*2*2*numpy.pi)**2 / (2))* numpy.exp(1.j*b*x*4*numpy.pi)
1786 1786 return val
1787 1787
1788 1788 def T(x,N):
1789 1789 T = 1-abs(x)/N
1790 1790 return T
1791 1791
1792 1792 def R_T_spc_fun(x, a, b, c, d, nFFTPoints):
1793 1793
1794 1794 N = int(numpy.shape(x)[0])
1795 1795
1796 1796 x_max = x[-1]
1797 1797
1798 1798 x_pos = x[nFFTPoints:]
1799 1799 x_neg = x[:nFFTPoints]
1800 1800 #print([int(nFFTPoints/2))
1801 1801 #print("x: ", x)
1802 1802 #print("x_neg: ", x_neg)
1803 1803 #print("x_pos: ", x_pos)
1804 1804
1805 1805
1806 1806 R_T_neg_1 = R_gaussian(x, a, b, c)[:nFFTPoints]*T(x_neg,-x[0])
1807 1807 R_T_pos_1 = R_gaussian(x, a, b, c)[nFFTPoints:]*T(x_pos,x[-1])
1808 1808 #print(T(x_pos,x[-1]),x_pos,x[-1])
1809 1809 #print(R_T_neg_1.shape,R_T_pos_1.shape)
1810 1810 R_T_sum_1 = R_T_pos_1 + R_T_neg_1
1811 1811 R_T_spc_1 = numpy.fft.fft(R_T_sum_1).real
1812 1812 R_T_spc_1 = numpy.fft.fftshift(R_T_spc_1)
1813 1813 max_val_1 = numpy.max(R_T_spc_1)
1814 1814 R_T_spc_1 = R_T_spc_1*a/max_val_1
1815 1815
1816 1816 R_T_d = d*numpy.fft.fftshift(signal.unit_impulse(N))
1817 1817 R_T_d_neg = R_T_d[:nFFTPoints]*T(x_neg,-x[0])
1818 1818 R_T_d_pos = R_T_d[nFFTPoints:]*T(x_pos,x[-1])
1819 1819 R_T_d_sum = R_T_d_pos + R_T_d_neg
1820 1820 R_T_spc_3 = numpy.fft.fft(R_T_d_sum).real
1821 1821 R_T_spc_3 = numpy.fft.fftshift(R_T_spc_3)
1822 1822
1823 1823 R_T_final = R_T_spc_1 + R_T_spc_3
1824 1824
1825 1825 return R_T_final
1826 1826
1827 1827 y = spc#gaussian(x, a, meanY, sigmaY) + a*0.1*numpy.random.normal(0, 1, size=len(x))
1828 1828
1829 1829 from scipy.stats import norm
1830 1830 mean,std=norm.fit(spc)
1831 1831
1832 1832 # estimate starting values from the data
1833 1833 a = A
1834 1834 b = B
1835 1835 c = C#numpy.std(spc)
1836 1836 d = D
1837 1837 #'''
1838 1838 #ippSeconds = 250*20*1.e-6/3
1839 1839
1840 1840 #x_t = ippSeconds * (numpy.arange(nFFTPoints) - nFFTPoints / 2.)
1841 1841
1842 1842 #x_t = numpy.linspace(x_t[0],x_t[-1],3200)
1843 1843 #print("x_t: ", x_t)
1844 1844 #print("nFFTPoints: ", nFFTPoints)
1845 1845 x_vel = numpy.linspace(x[0],x[-1],int(2*nFFTPoints))
1846 1846 #print("x_vel: ", x_vel)
1847 1847 #x_freq = numpy.fft.fftfreq(1600,d=ippSeconds)
1848 1848 #x_freq = numpy.fft.fftshift(x_freq)
1849 1849 #'''
1850 1850 # define a least squares function to optimize
1851 1851 def minfunc(params):
1852 1852 #print("y.shape: ", numpy.shape(y))
1853 1853 return sum((y-R_T_spc_fun(x_vel,params[0],params[1],params[2],params[3],nFFTPoints))**2/1)#y**2)
1854 1854
1855 1855 # fit
1856 1856
1857 1857 popt_full = fmin(minfunc,[a,b,c,d], disp=False)
1858 1858 #print("nIter", popt_full[2])
1859 1859 popt = popt_full#[0]
1860 1860
1861 1861 fun = gaussian(x, popt[0], popt[1], popt[2], popt[3])
1862 1862
1863 1863 #return R_T_spc_fun(x_t,popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]), popt[0], popt[1], popt[2], popt[3], popt[4], popt[5], popt[6]
1864 1864 return fun, popt[0], popt[1], popt[2], popt[3]
1865 1865
1866 1866 def run(self, dataOut):
1867 1867
1868 1868 from scipy.signal import medfilt
1869 1869 import matplotlib.pyplot as plt
1870 1870 dataOut.moments = numpy.ones((dataOut.nChannels,4,dataOut.nHeights))*numpy.NAN
1871 1871 dataOut.VelRange = dataOut.getVelRange(0)
1872 1872 for nChannel in range(dataOut.nChannels):
1873 1873 for hei in range(dataOut.heightList.shape[0]):
1874 1874 #print("ipp: ", dataOut.ippSeconds)
1875 1875 spc = numpy.copy(dataOut.data_spc[nChannel,:,hei])
1876 1876
1877 1877 #print(VelRange)
1878 1878 #print(dataOut.getFreqRange(64))
1879 1879 spcm = medfilt(spc,11)
1880 1880 spc_max = numpy.max(spcm)
1881 1881 dop1_x0 = dataOut.VelRange[numpy.argmax(spcm)]
1882 1882 D = numpy.min(spcm)
1883 1883
1884 1884 fun, A, B, C, D = self.windowing_single(spc,dataOut.VelRange,spc_max,dop1_x0,abs(dop1_x0),D,dataOut.nFFTPoints)
1885 1885 dataOut.moments[nChannel,0,hei] = A
1886 1886 dataOut.moments[nChannel,1,hei] = B
1887 1887 dataOut.moments[nChannel,2,hei] = C
1888 1888 dataOut.moments[nChannel,3,hei] = D
1889 1889 '''
1890 1890 plt.figure()
1891 1891 plt.plot(VelRange,spc,marker='*',linestyle='')
1892 1892 plt.plot(VelRange,fun)
1893 1893 plt.title(dataOut.heightList[hei])
1894 1894 plt.show()
1895 1895 '''
1896 1896
1897 1897 return dataOut
1898 1898
1899 1899 class PrecipitationProc(Operation):
1900 1900
1901 1901 '''
1902 1902 Operator that estimates the reflectivity factor (Z) and the rainfall rate (R)
1903 1903
1904 1904 Input:
1905 1905 self.dataOut.data_pre : SelfSpectra
1906 1906
1907 1907 Output:
1908 1908
1909 1909 self.dataOut.data_output : Reflectivity factor, rainfall Rate
1910 1910
1911 1911
1912 1912 Parameters affected:
1913 1913 '''
1914 1914
1915 1915 def __init__(self):
1916 1916 Operation.__init__(self)
1917 1917 self.i=0
1918 1918
1919 1919 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
1920 1920 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30,
1921 1921 channel=None):
1922 1922
1923 1923 # print ('Entering PrecepitationProc ... ')
1924 1924
1925 1925 if radar == "MIRA35C" :
1926 1926
1927 1927 self.spc = dataOut.data_pre[0].copy()
1928 1928 self.Num_Hei = self.spc.shape[2]
1929 1929 self.Num_Bin = self.spc.shape[1]
1930 1930 self.Num_Chn = self.spc.shape[0]
1931 1931 Ze = self.dBZeMODE2(dataOut)
1932 1932
1933 1933 else:
1934 1934
1935 1935 self.spc = dataOut.data_pre[0].copy()
1936 1936
1937 1937 #NOTE: the TX pulse range must be removed
1938 1938 self.spc[:,:,0:7]= numpy.NaN
1939 1939
1940 1940 self.Num_Hei = self.spc.shape[2]
1941 1941 self.Num_Bin = self.spc.shape[1]
1942 1942 self.Num_Chn = self.spc.shape[0]
1943 1943
1944 1944 VelRange = dataOut.spc_range[2]
1945 1945
1946 1946 ''' Obtain the radar constant '''
1947 1947
1948 1948 self.Pt = Pt
1949 1949 self.Gt = Gt
1950 1950 self.Gr = Gr
1951 1951 self.Lambda = Lambda
1952 1952 self.aL = aL
1953 1953 self.tauW = tauW
1954 1954 self.ThetaT = ThetaT
1955 1955 self.ThetaR = ThetaR
1956 1956 self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
1957 1957 self.lt = 10**(1.67/10) # Tx cable loss 1.67 dB
1958 1958 self.lr = 10**(5.73/10) # Rx cable loss 5.73 dB
1959 1959
1960 1960 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
1961 1961 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
1962 1962 RadarConstant = 10e-26 * Numerator / Denominator #
1963 1963 ExpConstant = 10**(40/10) #Experimental constant
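# RadarConstant follows from the meteorological radar equation; the commented GetRadarConstant()
# block at the end of this class documents the meaning and default values of Pt, Gt, Gr, Lambda,
# aL, tauW, ThetaT and ThetaR.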
1964 1964
1965 1965 SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
1966 1966 for i in range(self.Num_Chn):
1967 1967 SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
1968 1968 SignalPower[numpy.where(SignalPower < 0)] = 1e-20
1969 1969
1970 1970 if channel is None:
1971 1971 SPCmean = numpy.mean(SignalPower, 0)
1972 1972 else:
1973 1973 SPCmean = SignalPower[channel]
1974 1974 Pr = SPCmean[:,:]/dataOut.normFactor
1975 1975
1976 1976 # Declaring auxiliary variables
1977 1977 Range = dataOut.heightList*1000. #Range in m
1978 1978 # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
1979 1979 rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
1980 1980 zMtrx = rMtrx+Altitude
1981 1981 # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
1982 1982 VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))
1983 1983
1984 1984 # height dependence of air density, Foote and du Toit (1969)
1985 1985 delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
1986 1986 VMtrx = VelMtrx / delv_z #Normalized velocity
1987 1987 VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
1988 1988 # Diameter is related to the fall speed of falling drops
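# The expression below inverts the drop fall-speed relation v(D) = 9.65 - 10.3*exp(-0.6*D)
# (v in m/s, D in mm, commonly attributed to Atlas et al., 1973):
# D = -(1/0.6)*ln((9.65 - v)/10.3) = -1.667*ln(0.9369 - 0.097087*v).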
1989 1989 D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
1990 1990 # Only valid for D>= 0.16 mm
1991 1991 D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN
1992 1992
1993 1993 #Calculate Radar Reflectivity ETAn
1994 1994 ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
1995 1995 ETAd = ETAn * 6.18 * numpy.exp( -0.6 * D_Vz ) * delv_z
1996 1996 # Radar Cross Section
1997 1997 sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
1998 1998 # Drop Size Distribution
1999 1999 DSD = ETAn / sigmaD
2000 2000 # Equivalent reflectivity
2001 2001 Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
2002 2002 Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
2003 2003 # RainFall Rate
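# Rain rate as drop volume flux, R = (pi/6) * sum(D^3 * DSD * v) with D in mm and v in m/s;
# the factor 0.0006*pi = (pi/6)*3.6e-3 converts the result to mm/hr.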
2004 2004 RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr
2005 2005
2006 2006 # Censoring the data
2007 2007 # Removing data below the SNR threshold; the per-channel SNR should be considered
2008 2008 SNRth = 10**(SNRdBlimit/10) #-30dB
2009 2009 novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition (all channels must pass); maybe an OR condition would be better
2010 2010 W = numpy.nanmean(dataOut.data_dop,0)
2011 2011 W[novalid] = numpy.NaN
2012 2012 Ze_org[novalid] = numpy.NaN
2013 2013 RR[novalid] = numpy.NaN
2014 2014
2015 2015 dataOut.data_output = RR[8]
2016 2016 dataOut.data_param = numpy.ones([3,self.Num_Hei])
2017 2017 dataOut.channelList = [0,1,2]
2018 2018
2019 2019 dataOut.data_param[0]=10*numpy.log10(Ze_org)
2020 2020 dataOut.data_param[1]=-W
2021 2021 dataOut.data_param[2]=RR
2022 2022
2023 2023 # print ('Leaving PrecepitationProc ... ')
2024 2024 return dataOut
2025 2025
2026 2026 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
2027 2027
2028 2028 NPW = dataOut.NPW
2029 2029 COFA = dataOut.COFA
2030 2030
2031 2031 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
2032 2032 RadarConst = dataOut.RadarConst
2033 2033 #frequency = 34.85*10**9
2034 2034
2035 2035 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
2036 2036 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
2037 2037
2038 2038 ETA = numpy.sum(SNR,1)
2039 2039
2040 2040 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
2041 2041
2042 2042 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
2043 2043
2044 2044 for r in range(self.Num_Hei):
2045 2045
2046 2046 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
2047 2047 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
2048 2048
2049 2049 return Ze
2050 2050
2051 2051 # def GetRadarConstant(self):
2052 2052 #
2053 2053 # """
2054 2054 # Constants:
2055 2055 #
2056 2056 # Pt: Transmission Power dB 5kW 5000
2057 2057 # Gt: Transmission Gain dB 24.7 dB 295.1209
2058 2058 # Gr: Reception Gain dB 18.5 dB 70.7945
2059 2059 # Lambda: Wavelength m 0.6741 m 0.6741
2060 2060 # aL: Attenuation losses dB 4dB 2.5118
2061 2061 # tauW: Width of transmission pulse s 4us 4e-6
2062 2062 # ThetaT: Transmission antenna beam angle rad 0.1656317 rad 0.1656317
2063 2063 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
2064 2064 #
2065 2065 # """
2066 2066 #
2067 2067 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
2068 2068 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
2069 2069 # RadarConstant = Numerator / Denominator
2070 2070 #
2071 2071 # return RadarConstant
2072 2072
2073 2073
2074 2074 class FullSpectralAnalysis(Operation):
2075 2075
2076 2076 """
2077 2077 Function that implements the Full Spectral Analysis technique.
2078 2078
2079 2079 Input:
2080 2080 self.dataOut.data_pre : SelfSpectra and CrossSpectra data
2081 2081 self.dataOut.groupList : Pairlist of channels
2082 2082 self.dataOut.ChanDist : Physical distance between receivers
2083 2083
2084 2084
2085 2085 Output:
2086 2086
2087 2087 self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind
2088 2088
2089 2089
2090 2090 Parameters affected: Winds, height range, SNR
2091 2091
2092 2092 """
2093 2093 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,
2094 2094 minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
2095 2095
2096 2096 spc = dataOut.data_pre[0].copy()
2097 2097 cspc = dataOut.data_pre[1]
2098 2098 nHeights = spc.shape[2]
2099 2099
2100 2100 # first_height = 0.75 #km (ref: data header 20170822)
2101 2101 # resolution_height = 0.075 #km
2102 2102 '''
2103 2103 finding height range. check this when radar parameters are changed!
2104 2104 '''
2105 2105 if maxheight is not None:
2106 2106 # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
2107 2107 range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
2108 2108 else:
2109 2109 range_max = nHeights
2110 2110 if minheight is not None:
2111 2111 # range_min = int((minheight - first_height) / resolution_height) # theoretical
2112 2112 range_min = int(13.26 * minheight - 5) # empirical, works better
2113 2113 if range_min < 0:
2114 2114 range_min = 0
2115 2115 else:
2116 2116 range_min = 0
2117 2117
2118 2118 pairsList = dataOut.groupList
2119 2119 if dataOut.ChanDist is not None :
2120 2120 ChanDist = dataOut.ChanDist
2121 2121 else:
2122 2122 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
2123 2123
2124 2124 # 4 variables: zonal, meridional, vertical, and average SNR
2125 2125 data_param = numpy.zeros([4,nHeights]) * numpy.NaN
2126 2126 velocityX = numpy.zeros([nHeights]) * numpy.NaN
2127 2127 velocityY = numpy.zeros([nHeights]) * numpy.NaN
2128 2128 velocityZ = numpy.zeros([nHeights]) * numpy.NaN
2129 2129
2130 2130 dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))
2131 2131
2132 2132 '''***********************************************WIND ESTIMATION**************************************'''
2133 2133 for Height in range(nHeights):
2134 2134
2135 2135 if Height >= range_min and Height < range_max:
2136 2136 # error_code will be useful in future analysis
2137 2137 [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
2138 2138 ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)
2139 2139
2140 2140 if abs(Vzon) < 100. and abs(Vmer) < 100.:
2141 2141 velocityX[Height] = Vzon
2142 2142 velocityY[Height] = -Vmer
2143 2143 velocityZ[Height] = Vver
2144 2144
2145 2145 # Censoring data with SNR threshold
2146 2146 dbSNR [dbSNR < SNRdBlimit] = numpy.NaN
2147 2147
2148 2148 data_param[0] = velocityX
2149 2149 data_param[1] = velocityY
2150 2150 data_param[2] = velocityZ
2151 2151 data_param[3] = dbSNR
2152 2152 dataOut.data_param = data_param
2153 2153 return dataOut
2154 2154
2155 2155 def moving_average(self,x, N=2):
2156 2156 """ convolution for smoothenig data. note that last N-1 values are convolution with zeroes """
2157 2157 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
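# Example (added): moving_average([1, 2, 3, 4], N=2) -> [1.5, 2.5, 3.5, 2.0];
# the trailing value averages the last sample with the zero padding.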
2158 2158
2159 2159 def gaus(self,xSamples,Amp,Mu,Sigma):
2160 2160 return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)
2161 2161
2162 2162 def Moments(self, ySamples, xSamples):
2163 2163 Power = numpy.nanmean(ySamples) # Power, 0th Moment
2164 2164 yNorm = ySamples / numpy.nansum(ySamples)
2165 2165 RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
2166 2166 Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
2167 2167 StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Standard deviation, spectral width
2168 2168 return numpy.array([Power,RadVel,StdDev])
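# Example (added): for ySamples = [1, 2, 4, 2, 1] over xSamples = [-2, -1, 0, 1, 2],
# Power = 2.0, RadVel = 0.0 and StdDev = sqrt(1.2) ~ 1.095.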
2169 2169
2170 2170 def StopWindEstimation(self, error_code):
2171 2171 Vzon = numpy.NaN
2172 2172 Vmer = numpy.NaN
2173 2173 Vver = numpy.NaN
2174 2174 return Vzon, Vmer, Vver, error_code
2175 2175
2176 2176 def AntiAliasing(self, interval, maxstep):
2177 2177 """
2178 2178 function to prevent errors from aliased values when computing phaseslope
2179 2179 """
2180 2180 antialiased = numpy.zeros(len(interval))
2181 2181 copyinterval = interval.copy()
2182 2182
2183 2183 antialiased[0] = copyinterval[0]
2184 2184
2185 2185 for i in range(1,len(antialiased)):
2186 2186 step = interval[i] - interval[i-1]
2187 2187 if step > maxstep:
2188 2188 copyinterval -= 2*numpy.pi
2189 2189 antialiased[i] = copyinterval[i]
2190 2190 elif step < maxstep*(-1):
2191 2191 copyinterval += 2*numpy.pi
2192 2192 antialiased[i] = copyinterval[i]
2193 2193 else:
2194 2194 antialiased[i] = copyinterval[i].copy()
2195 2195
2196 2196 return antialiased
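# Example (added): AntiAliasing(numpy.array([3.0, -3.0]), 4.5) -> [3.0, 3.283...],
# since the -6.0 rad jump between the two samples is unwrapped by adding 2*pi.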
2197 2197
2198 2198 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
2199 2199 """
2200 2200 Function that Calculates Zonal, Meridional and Vertical wind velocities.
2201 2201 Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.
2202 2202
2203 2203 Input:
2204 2204 spc, cspc : self spectra and cross spectra data. In Briggs notation something like S_i*(S_i)_conj, (S_j)_conj respectively.
2205 2205 pairsList : Pairlist of channels
2206 2206 ChanDist : array of xi_ij and eta_ij
2207 2207 Height : height at which data is processed
2208 2208 noise : noise in [channels] format for specific height
2209 2209 AbbsisaRange : range of the frequencies or velocities
2210 2210 dbSNR, SNRlimit : signal to noise ratio in db, lower limit
2211 2211
2212 2212 Output:
2213 2213 Vzon, Vmer, Vver : wind velocities
2214 2214 error_code : int that states where code is terminated
2215 2215
2216 2216 0 : no error detected
2217 2217 1 : Gaussian of mean spc exceeds widthlimit
2218 2218 2 : no Gaussian of mean spc found
2219 2219 3 : SNR too low or velocity too high -> e.g. precipitation
2220 2220 4 : at least one Gaussian of cspc exceeds widthlimit
2221 2221 5 : zero out of three cspc Gaussian fits converged
2222 2222 6 : phase slope fit could not be found
2223 2223 7 : arrays used to fit phase have different length
2224 2224 8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
2225 2225
2226 2226 """
2227 2227
2228 2228 error_code = 0
2229 2229
2230 2230 nChan = spc.shape[0]
2231 2231 nProf = spc.shape[1]
2232 2232 nPair = cspc.shape[0]
2233 2233
2234 2234 SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
2235 2235 CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
2236 2236 phase = numpy.zeros([nPair, nProf]) # phase between channels
2237 2237 PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
2238 2238 PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
2239 2239 xFrec = AbbsisaRange[0][:-1] # frequency range
2240 2240 xVel = AbbsisaRange[2][:-1] # velocity range
2241 2241 xSamples = xFrec # the frequency range is taken
2242 2242 delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x
2243 2243
2244 2244 # only consider velocities within NegativeLimit and PositiveLimit
2245 2245 if (NegativeLimit is None):
2246 2246 NegativeLimit = numpy.min(xVel)
2247 2247 if (PositiveLimit is None):
2248 2248 PositiveLimit = numpy.max(xVel)
2249 2249 xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
2250 2250 xSamples_zoom = xSamples[xvalid]
2251 2251
2252 2252 '''Getting Eij and Nij'''
2253 2253 Xi01, Xi02, Xi12 = ChanDist[:,0]
2254 2254 Eta01, Eta02, Eta12 = ChanDist[:,1]
2255 2255
2256 2256 # spwd limit - updated by D. Scipión 30.03.2021
2257 2257 widthlimit = 10
2258 2258 '''************************* SPC is normalized ********************************'''
2259 2259 spc_norm = spc.copy()
2260 2260 # For each channel
2261 2261 for i in range(nChan):
2262 2262 spc_sub = spc_norm[i,:] - noise[i] # only the signal power
2263 2263 SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)
2264 2264
2265 2265 '''********************** FITTING MEAN SPC GAUSSIAN **********************'''
2266 2266
2267 2267 """ the gaussian of the mean: first subtract noise, then normalize. this is legal because
2268 2268 you only fit the curve and don't need the absolute value of height for calculation,
2269 2269 only for estimation of width. for normalization of cross spectra, you need initial,
2270 2270 unnormalized self-spectra With noise.
2271 2271
2272 2272 Technically, you don't even need to normalize the self-spectra, as you only need the
2273 2273 width of the peak. However, it was left this way. Note that the normalization has a flaw:
2274 2274 due to subtraction of the noise, some values are below zero. Raw "spc" values should be
2275 2275 >= 0, as it is the modulus squared of the signals (complex * it's conjugate)
2276 2276 """
2277 2277 # initial conditions
2278 2278 popt = [1e-10,0,1e-10]
2279 2279 # Spectra average
2280 2280 SPCMean = numpy.average(SPC_Samples,0)
2281 2281 # Moments in frequency
2282 2282 SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)
2283 2283
2284 2284 # Gauss Fit SPC in frequency domain
2285 2285 if dbSNR > SNRlimit: # only if SNR > SNRth
2286 2286 try:
2287 2287 popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
2288 2288 if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
2289 2289 return self.StopWindEstimation(error_code = 1)
2290 2290 FitGauss = self.gaus(xSamples_zoom,*popt)
2291 2291 except :#RuntimeError:
2292 2292 return self.StopWindEstimation(error_code = 2)
2293 2293 else:
2294 2294 return self.StopWindEstimation(error_code = 3)
2295 2295
2296 2296 '''***************************** CSPC Normalization *************************
2297 2297 The self-spectra (Spc) are used to normalize the cross-spectra. Peaks from precipitation
2298 2298 would bias the norm, which is not desired. First, a range is identified where the
2299 2299 wind peak is estimated -> sum_wind is the sum over those frequencies. Next, the area
2300 2300 around it is cut off and the values are replaced by the mean determined by the boundary
2301 2301 data -> sum_noise (spc is not normalized here, that's why the noise matters)
2302 2302
2303 2303 The sums are then added and multiplied by range/datapoints, because you need
2304 2304 an integral and not a sum for normalization.
2305 2305
2306 2306 A norm is found according to Briggs 92.
2307 2307 '''
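# Note (added): in the implementation below the normalization simply uses the full
# nansum of each self-spectrum; the wind-peak / boundary-noise split described above
# is not applied in this version.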
2308 2308 # for each pair
2309 2309 for i in range(nPair):
2310 2310 cspc_norm = cspc[i,:].copy()
2311 2311 chan_index0 = pairsList[i][0]
2312 2312 chan_index1 = pairsList[i][1]
2313 2313 CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
2314 2314 phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
2315 2315
2316 2316 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
2317 2317 self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
2318 2318 self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])
2319 2319
2320 2320 popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
2321 2321 FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))
2322 2322
2323 2323 '''*******************************FIT GAUSS CSPC************************************'''
2324 2324 try:
2325 2325 popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
2326 2326 if popt01[2] > widthlimit: # CONDITION
2327 2327 return self.StopWindEstimation(error_code = 4)
2328 2328 popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
2329 2329 if popt02[2] > widthlimit: # CONDITION
2330 2330 return self.StopWindEstimation(error_code = 4)
2331 2331 popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
2332 2332 if popt12[2] > widthlimit: # CONDITION
2333 2333 return self.StopWindEstimation(error_code = 4)
2334 2334
2335 2335 FitGauss01 = self.gaus(xSamples_zoom, *popt01)
2336 2336 FitGauss02 = self.gaus(xSamples_zoom, *popt02)
2337 2337 FitGauss12 = self.gaus(xSamples_zoom, *popt12)
2338 2338 except:
2339 2339 return self.StopWindEstimation(error_code = 5)
2340 2340
2341 2341
2342 2342 '''************* Getting Fij ***************'''
2343 2343 # x-axis point of the gaussian where the center is located from GaussFit of spectra
2344 2344 GaussCenter = popt[1]
2345 2345 ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
2346 2346 PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]
2347 2347
2348 2348 # Point where e^-1 is located in the gaussian
2349 2349 PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
2350 2350 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to"Peminus1" in "FitGauss"
2351 2351 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
2352 2352 Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])
2353 2353
2354 2354 '''********** Taking frequency ranges from mean SPCs **********'''
2355 2355 GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
2356 2356 Range = numpy.empty(2)
2357 2357 Range[0] = GaussCenter - GauWidth
2358 2358 Range[1] = GaussCenter + GauWidth
2359 2359 # Point in x-axis where the bandwidth is located (min:max)
2360 2360 ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
2361 2361 ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
2362 2362 PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
2363 2363 PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
2364 2364 Range = numpy.array([ PointRangeMin, PointRangeMax ])
2365 2365 FrecRange = xSamples_zoom[ Range[0] : Range[1] ]
2366 2366
2367 2367 '''************************** Getting Phase Slope ***************************'''
2368 2368 for i in range(nPair):
2369 2369 if len(FrecRange) > 5:
2370 2370 PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
2371 2371 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
2372 2372 if len(FrecRange) == len(PhaseRange):
2373 2373 try:
2374 2374 slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
2375 2375 PhaseSlope[i] = slope
2376 2376 PhaseInter[i] = intercept
2377 2377 except:
2378 2378 return self.StopWindEstimation(error_code = 6)
2379 2379 else:
2380 2380 return self.StopWindEstimation(error_code = 7)
2381 2381 else:
2382 2382 return self.StopWindEstimation(error_code = 8)
2383 2383
2384 2384 '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
2385 2385
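# Summary (added) of the solver below: C = (Fij*pi)**2, with Fij the e^-1 half-width of the
# fitted mean-spectrum gaussian; F and G solve the 2x2 system
#   Xi_ij*F + Eta_ij*G = -PhaseSlope_ij*C/(2*pi)   for pairs 02 and 12;
# A, B and H solve, over the three pairs,
#   Xi_ij**2*A + Eta_ij**2*B + 2*Xi_ij*Eta_ij*H = (F*Xi_ij + G*Eta_ij)**2/C - ln(Wij/sqrt(pi/C)),
# where Wij is the peak of the fitted cross-spectrum gaussian; finally
# [[A, H], [H, B]] . [Vmer, Vzon] = [-F, -G] gives the horizontal winds.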
2386 2386 '''Getting constant C'''
2387 2387 cC=(Fij*numpy.pi)**2
2388 2388
2389 2389 '''****** Getting constants F and G ******'''
2390 2390 MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
2391 2391 # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
2392 2392 # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
2393 2393 MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
2394 2394 MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
2395 2395 # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
2396 2396 MijResults = numpy.array([MijResult1, MijResult2])
2397 2397 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
2398 2398
2399 2399 '''****** Getting constants A, B and H ******'''
2400 2400 W01 = numpy.nanmax( FitGauss01 )
2401 2401 W02 = numpy.nanmax( FitGauss02 )
2402 2402 W12 = numpy.nanmax( FitGauss12 )
2403 2403
2404 2404 WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
2405 2405 WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
2406 2406 WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
2407 2407 WijResults = numpy.array([WijResult01, WijResult02, WijResult12])
2408 2408
2409 2409 WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
2410 2410 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
2411 2411
2412 2412 VxVy = numpy.array([[cA,cH],[cH,cB]])
2413 2413 VxVyResults = numpy.array([-cF,-cG])
2414 2414 (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
2415 2415 Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
2416 2416 error_code = 0
2417 2417
2418 2418 return Vzon, Vmer, Vver, error_code
2419 2419
2420 2420 class SpectralMoments(Operation):
2421 2421
2422 2422 '''
2423 2423 Function SpectralMoments()
2424 2424
2425 2425 Calculates moments (power, mean, standard deviation) and SNR of the signal
2426 2426
2427 2427 Type of dataIn: Spectra
2428 2428
2429 2429 Configuration Parameters:
2430 2430
2431 2431 dirCosx : Cosine director in X axis
2432 2432 dirCosy : Cosine director in Y axis
2433 2433
2434 2434 elevation :
2435 2435 azimuth :
2436 2436
2437 2437 Input:
2438 2438 channelList : simple channel list to select e.g. [2,3,7]
2439 2439 self.dataOut.data_pre : Spectral data
2440 2440 self.dataOut.abscissaList : List of frequencies
2441 2441 self.dataOut.noise : Noise level per channel
2442 2442
2443 2443 Affected:
2444 2444 self.dataOut.moments : Parameters per channel
2445 2445 self.dataOut.data_snr : SNR per channel
2446 2446
2447 2447 '''
2448 2448
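# Note (added): proc_type=0 computes the classical moments (SNR, power, Doppler shift,
# spectral width) directly from the spectra; proc_type=1 additionally restricts the fit
# window, estimates the noise per channel and performs a windowed gaussian fit
# (see __calculateMoments below).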
2449 2449 def run(self, dataOut, proc_type=0):
2450 2450
2451 2451 absc = dataOut.abscissaList[:-1]
2452 2452 nChannel = dataOut.data_pre[0].shape[0]
2453 2453 nHei = dataOut.data_pre[0].shape[2]
2454 2454 data_param = numpy.zeros((nChannel, 4 + proc_type*3, nHei))
2455 2455
2456 2456 if proc_type == 1:
2457 2457 fwindow = numpy.zeros(absc.size) + 1
2458 2458 b=64
2459 2459 #b=16
2460 2460 fwindow[0:absc.size//2 - b] = 0
2461 2461 fwindow[absc.size//2 + b:] = 0
2462 2462 type1 = 1 # moments calculation & gaussian fitting
2463 2463 nProfiles = dataOut.nProfiles
2464 2464 nCohInt = dataOut.nCohInt
2465 2465 nIncohInt = dataOut.nIncohInt
2466 2466 M = numpy.power(numpy.array(1/(nProfiles * nCohInt) ,dtype='float32'),2)
2467 2467 N = numpy.array(M / nIncohInt,dtype='float32')
2468 2468 data = dataOut.data_pre[0] * N
2469 2469 #noise = dataOut.noise * N
2470 2470 noise = numpy.zeros(nChannel)
2471 2471 for ind in range(nChannel):
2472 2472 noise[ind] = self.__NoiseByChannel(nProfiles, nIncohInt, data[ind,:,:])
2473 2473 smooth=3
2474 2474 else:
2475 2475 data = dataOut.data_pre[0]
2476 2476 noise = dataOut.noise
2477 2477 fwindow = None
2478 2478 type1 = 0
2479 2479 nIncohInt = None
2480 2480 smooth=None
2481 2481
2482 2482 for ind in range(nChannel):
2483 2483 data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind], nicoh=nIncohInt, smooth=smooth, type1=type1, fwindow=fwindow, id_ch=ind)
2484 2484
2485 2485 if proc_type == 1:
2486 2486 dataOut.moments = data_param[:,1:,:]
2487 2487 dataOut.data_dop = data_param[:,2]
2488 2488 dataOut.data_width = data_param[:,1]
2489 2489 dataOut.data_snr = data_param[:,0]
2490 2490 dataOut.data_pow = data_param[:,6] # to compare with type 0 processing
2491 2491 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, data_param[:,3], data_param[:,4],data_param[:,5]),axis=2)
2492 2492 else:
2493 2493 dataOut.moments = data_param[:,1:,:]
2494 2494 dataOut.data_snr = data_param[:,0]
2495 2495 dataOut.data_pow = data_param[:,1]
2496 2496 dataOut.data_dop = data_param[:,2]
2497 2497 dataOut.data_width = data_param[:,3]
2498 2498 dataOut.spcpar=numpy.stack((dataOut.data_dop,dataOut.data_width,dataOut.data_snr, dataOut.data_pow),axis=2)
2499 2499
2500 2500 return dataOut
2501 2501
2502 2502 def __calculateMoments(self, oldspec, oldfreq, n0, normFactor = 1,
2503 2503 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None,id_ch=0):
2504 2504
2505 2505 def __GAUSSWINFIT1(A, flagPDER=0):
2506 2506 nonlocal truex, xvalid
2507 2507 nparams = 4
2508 2508 M=truex.size
2509 2509 mm=numpy.arange(M,dtype='f4')
2510 2510 delta = numpy.zeros(M,dtype='f4')
2511 2511 delta[0] = 1.0
2512 2512 Ts = numpy.array([1.0/(2*truex[0])],dtype='f4')[0]
2513 2513 jj = -1j
2514 2514 #if self.winauto is None: self.winauto = (1.0 - mm/M)
2515 2515 winauto = (1.0 - mm/M)
2516 2516 winauto = winauto/winauto.max() # Normalized to 1
2517 2517 #ON_ERROR,2 # IDL sentence: Return to caller if an error occurs
2518 2518 A[0] = numpy.abs(A[0])
2519 2519 A[2] = numpy.abs(A[2])
2520 2520 A[3] = numpy.abs(A[3])
2521 2521 pi=numpy.array([numpy.pi],dtype='f4')[0]
2522 2522 if A[2] != 0:
2523 2523 Z = numpy.exp(-2*numpy.power((pi*A[2]*mm*Ts),2,dtype='f4')+jj*2*pi*A[1]*mm*Ts, dtype='c8') # Get Z
2524 2524 else:
2525 2525 Z = mm*0.0
2526 2526 A[0] = 0.0
2527 2527 junkF = numpy.roll(2*fft(winauto*(A[0]*Z+A[3]*delta)).real - \
2528 2528 winauto[0]*(A[0]+A[3]), M//2) # *M scale for fft not needed in python
2529 2529 F = junkF[xvalid]
2530 2530 if flagPDER == 0: #NEED PARTIAL?
2531 2531 return F
2532 2532 PDER = numpy.zeros((M,nparams)) #YES, MAKE ARRAY.
2533 2533 PDER[:,0] = numpy.roll(2*(fft(winauto*Z)*M) - winauto[0], M//2)
2534 2534 PDER[:,1] = numpy.roll(2*(fft(winauto*jj*2*numpy.pi*mm*Ts*A[0]*Z)*M), M//2)
2535 2535 PDER[:,2] = numpy.roll(2*(fft(winauto*(-4*numpy.power(numpy.pi*mm*Ts,2)*A[2]*A[0]*Z))*M), M//2)
2536 2536 PDER[:,3] = numpy.roll(2*(fft(winauto*delta)*M) - winauto[0], M//2) # numpy.roll replaces the nonexistent numpy.shift (IDL-style shift)
2537 2537 PDER = PDER[xvalid,:]
2538 2538 return F, PDER
2539 2539
2540 2540 def __curvefit_koki(y, a, Weights, FlagNoDerivative=1,
2541 2541 itmax=20, tol=None):
2542 2542 #ON_ERROR,2 IDL SENTENCE: RETURN TO THE CALLER IF ERROR
2543 2543 if tol == None:
2544 2544 tol = numpy.array([1.e-3],dtype='f4')[0]
2545 2545 typ=a.dtype
2546 2546 double = 1 if typ == numpy.float64 else 0
2547 2547 if typ != numpy.float32:
2548 2548 a=a.astype(numpy.float32) #Make params floating
2549 2549 # if we will be estimating partial derivates then compute machine precision
2550 2550 if FlagNoDerivative == 1:
2551 2551 res=numpy.MachAr(float_conv=numpy.float32)
2552 2552 eps=numpy.sqrt(res.eps)
2553 2553
2554 2554 nterms = a.size # Number of parameters
2555 2555 nfree=numpy.array([numpy.size(y) - nterms],dtype='f4')[0] # Degrees of freedom
2556 2556 if nfree <= 0: print('Curvefit - not enough data points.')
2557 2557 flambda= numpy.array([0.001],dtype='f4')[0] # Initial lambda
2558 2558 #diag=numpy.arange(nterms)*(nterms+1) # Subscripta of diagonal elements
2559 2559 # Use diag method in python
2560 2560 converge=1
2561 2561
2562 2562 #Define the partial derivative array
2563 2563 PDER = numpy.zeros((nterms,numpy.size(y)),dtype='f8') if double == 1 else numpy.zeros((nterms,numpy.size(y)),dtype='f4')
2564 2564
2565 2565 for Niter in range(itmax): #Iteration loop
2566 2566
2567 2567 if FlagNoDerivative == 1:
2568 2568 #Evaluate function and estimate partial derivatives
2569 2569 yfit = __GAUSSWINFIT1(a)
2570 2570 for term in range(nterms):
2571 2571 p=a.copy() # Copy current parameters
2572 2572 #Increment size for forward difference derivative
2573 2573 inc = eps * abs(p[term])
2574 2574 if inc == 0: inc = eps
2575 2575 p[term] = p[term] + inc
2576 2576 yfit1 = __GAUSSWINFIT1(p)
2577 2577 PDER[term,:] = (yfit1-yfit)/inc
2578 2578 else:
2579 2579 #The user's procedure will return partial derivatives
2580 2580 yfit,PDER=__GAUSSWINFIT1(a, flagPDER=1)
2581 2581
2582 2582 beta = numpy.dot(PDER,(y-yfit)*Weights)
2583 2583 alpha = numpy.dot(PDER * numpy.tile(Weights,(nterms,1)), numpy.transpose(PDER))
2584 2584 # save current values of return parameters
2585 2585 sigma1 = numpy.sqrt( 1.0 / numpy.diag(alpha) ) # Current sigma.
2586 2586 sigma = sigma1
2587 2587
2588 2588 chisq1 = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # Current chi squared.
2589 2589 chisq = chisq1
2590 2590 yfit1 = yfit
2591 2591 elev7=numpy.array([1.0e7],dtype='f4')[0]
2592 2592 compara =numpy.sum(abs(y))/elev7/nfree
2593 2593 done_early = chisq1 < compara
2594 2594
2595 2595 if done_early:
2596 2596 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2597 2597 if done_early: Niter -= 1
2598 2598 #save_tp(chisq,Niter,yfit)
2599 2599 return yfit, a, converge, sigma, chisq # return result
2600 2600 #c = numpy.dot(c, c) # this operator implemented at the next lines
2601 2601 c_tmp = numpy.sqrt(numpy.diag(alpha))
2602 2602 siz=len(c_tmp)
2603 2603 c=numpy.dot(c_tmp.reshape(siz,1),c_tmp.reshape(1,siz))
2604 2604 lambdaCount = 0
2605 2605 while True:
2606 2606 lambdaCount += 1
2607 2607 # Normalize alpha to have unit diagonal.
2608 2608 array = alpha / c
2609 2609 # Augment the diagonal.
2610 2610 one=numpy.array([1.],dtype='f4')[0]
2611 2611 numpy.fill_diagonal(array,numpy.diag(array)*(one+flambda))
2612 2612 # Invert modified curvature matrix to find new parameters.
2613 2613 try:
2614 2614 array = (1.0/array) if array.size == 1 else numpy.linalg.inv(array)
2615 2615 except Exception as e:
2616 2616 print(e)
2617 2617 array[:]=numpy.NaN
2618 2618
2619 2619 b = a + numpy.dot(numpy.transpose(beta),array/c) # New params
2620 2620 yfit = __GAUSSWINFIT1(b) # Evaluate function
2621 2621 chisq = numpy.sum(Weights*numpy.power(y-yfit,2,dtype='f4'),dtype='f4')/nfree # New chisq
2622 2622 sigma = numpy.sqrt(numpy.diag(array)/numpy.diag(alpha)) # New sigma
2623 2623 if (numpy.isfinite(chisq) == 0) or \
2624 2624 (lambdaCount > 30 and chisq >= chisq1):
2625 2625 # Reject changes made this iteration, use old values.
2626 2626 yfit = yfit1
2627 2627 sigma = sigma1
2628 2628 chisq = chisq1
2629 2629 converge = 0
2630 2630 #print('Failed to converge.')
2631 2631 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2632 2632 if done_early: Niter -= 1
2633 2633 #save_tp(chisq,Niter,yfit)
2634 2634 return yfit, a, converge, sigma, chisq, chi2 # return result
2635 2635 ten=numpy.array([10.0],dtype='f4')[0]
2636 2636 flambda *= ten # Assume fit got worse
2637 2637 if chisq <= chisq1:
2638 2638 break
2639 2639 hundred=numpy.array([100.0],dtype='f4')[0]
2640 2640 flambda /= hundred
2641 2641
2642 2642 a=b # Save new parameter estimate.
2643 2643 if ((chisq1-chisq)/chisq1) <= tol: # Finished?
2644 2644 chi2 = chisq # Return chi-squared (chi2 obsolete-still works)
2645 2645 if done_early: Niter -= 1
2646 2646 #save_tp(chisq,Niter,yfit)
2647 2647 return yfit, a, converge, sigma, chisq, chi2 # return result
2648 2648 converge = 0
2649 2649 chi2 = chisq
2650 2650 #print('Failed to converge.')
2651 2651 #save_tp(chisq,Niter,yfit)
2652 2652 return yfit, a, converge, sigma, chisq, chi2
2653 2653
2654 2654 if (nicoh is None): nicoh = 1
2655 2655 if (graph is None): graph = 0
2656 2656 if (smooth is None): smooth = 0
2657 2657 elif (smooth < 3): smooth = 0
2658 2658
2659 2659 if (type1 is None): type1 = 0
2660 2660 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
2661 2661 if (snrth is None): snrth = -20.0
2662 2662 if (dc is None): dc = 0
2663 2663 if (aliasing is None): aliasing = 0
2664 2664 if (oldfd is None): oldfd = 0
2665 2665 if (wwauto is None): wwauto = 0
2666 2666
2667 2667 if (n0 < 1.e-20): n0 = 1.e-20
2668 2668
2669 2669 xvalid = numpy.where(fwindow == 1)[0]
2670 2670 freq = oldfreq
2671 2671 truex = oldfreq
2672 2672 vec_power = numpy.zeros(oldspec.shape[1])
2673 2673 vec_fd = numpy.zeros(oldspec.shape[1])
2674 2674 vec_w = numpy.zeros(oldspec.shape[1])
2675 2675 vec_snr = numpy.zeros(oldspec.shape[1])
2676 2676 vec_n1 = numpy.empty(oldspec.shape[1])
2677 2677 vec_fp = numpy.empty(oldspec.shape[1])
2678 2678 vec_sigma_fd = numpy.empty(oldspec.shape[1])
2679 2679
2680 2680 norm = 1
2681 2681
2682 2682 for ind in range(oldspec.shape[1]):
2683 2683
2684 2684 spec = oldspec[:,ind]
2685 2685 if (smooth == 0):
2686 2686 spec2 = spec
2687 2687 else:
2688 2688 spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
2689 2689
2690 2690 aux = spec2*fwindow
2691 2691 max_spec = aux.max()
2692 2692 m = aux.tolist().index(max_spec)
2693 2693
2694 2694 if hasattr(normFactor, "ndim"):
2695 2695 if normFactor.ndim >= 1:
2696 2696 norm = normFactor[ind]
2697 2697
2698 2698 if m > 2 and m < oldfreq.size - 3:
2699 2699 newindex = m + numpy.array([-2,-1,0,1,2])
2700 2700 newfreq = numpy.arange(20)/20.0*(numpy.max(freq[newindex])-numpy.min(freq[newindex]))+numpy.min(freq[newindex])
2701 2701 #peakspec = SPLINE(,)
2702 2702 tck = interpolate.splrep(freq[newindex], spec2[newindex])
2703 2703 peakspec = interpolate.splev(newfreq, tck)
2704 2704 # max_spec = MAX(peakspec,)
2705 2705 max_spec = numpy.max(peakspec)
2706 2706 mnew = numpy.argmax(peakspec)
2707 2707 #fp = newfreq(mnew)
2708 2708 fp = newfreq[mnew]
2709 2709 else:
2710 2710 fp = freq[m]
2711 2711
2712 2712 if type1==0:
2713 2713
2714 2714 # Moments Estimation
2715 2715 bb = spec2[numpy.arange(m,spec2.size)]
2716 2716 bb = (bb<n0).nonzero()
2717 2717 bb = bb[0]
2718 2718
2719 2719 ss = spec2[numpy.arange(0,m + 1)]
2720 2720 ss = (ss<n0).nonzero()
2721 2721 ss = ss[0]
2722 2722
2723 2723 if (bb.size == 0):
2724 2724 bb0 = spec.size - 1 - m
2725 2725 else:
2726 2726 bb0 = bb[0] - 1
2727 2727 if (bb0 < 0):
2728 2728 bb0 = 0
2729 2729
2730 2730 if (ss.size == 0):
2731 2731 ss1 = 1
2732 2732 else:
2733 2733 ss1 = max(ss) + 1
2734 2734
2735 2735 if (ss1 > m):
2736 2736 ss1 = m
2737 2737
2738 2738 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
2739 2739
2740 2740 signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipión added with correct definition
2741 2741 total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipión added with correct definition
2742 2742 power = ((spec2[valid] - n0) * fwindow[valid]).sum()
2743 2743 fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
2744 2744 w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
2745 2745 spec2 /=(norm) #compensation for sats remove
2746 2746 snr = (spec2.mean()-n0)/n0
2747 2747 if (snr < 1.e-20): snr = 1.e-20
2748 2748
2749 2749 vec_power[ind] = total_power
2750 2750 vec_fd[ind] = fd
2751 2751 vec_w[ind] = w
2752 2752 vec_snr[ind] = snr
2753 2753 else:
2754 2754 # Noise by heights
2755 2755 n1, stdv = self.__get_noise2(spec, nicoh)
2756 2756 # Moments Estimation
2757 2757 bb = spec2[numpy.arange(m,spec2.size)]
2758 2758 bb = (bb<n1).nonzero()
2759 2759 bb = bb[0]
2760 2760
2761 2761 ss = spec2[numpy.arange(0,m + 1)]
2762 2762 ss = (ss<n1).nonzero()
2763 2763 ss = ss[0]
2764 2764
2765 2765 if (bb.size == 0):
2766 2766 bb0 = spec.size - 1 - m
2767 2767 else:
2768 2768 bb0 = bb[0] - 1
2769 2769 if (bb0 < 0):
2770 2770 bb0 = 0
2771 2771
2772 2772 if (ss.size == 0):
2773 2773 ss1 = 1
2774 2774 else:
2775 2775 ss1 = max(ss) + 1
2776 2776
2777 2777 if (ss1 > m):
2778 2778 ss1 = m
2779 2779
2780 2780 valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
2781 2781 power = ((spec[valid] - n1)*fwindow[valid]).sum()
2782 2782 fd = ((spec[valid]- n1)*freq[valid]*fwindow[valid]).sum()/power
2783 2783 try:
2784 2784 w = numpy.sqrt(((spec[valid] - n1)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
2785 2785 except:
2786 2786 w = float("NaN")
2787 2787 snr = power/(n0*fwindow.sum())
2788 2788 if snr < 1.e-20: snr = 1.e-20
2789 2789
2790 2790 # Here starts the gaussian adjustment
2791 2791
2792 2792 if snr > numpy.power(10,0.1*snrth):
2793 2793
2794 2794 a = numpy.zeros(4,dtype='f4')
2795 2795 a[0] = snr * n0
2796 2796 a[1] = fd
2797 2797 a[2] = w
2798 2798 a[3] = n0
2799 2799
2800 2800 np = spec.size
2801 2801 aold = a.copy()
2802 2802 spec2 = spec.copy()
2803 2803 oldxvalid = xvalid.copy()
2804 2804
2805 2805 for i in range(2):
2806 2806
2807 2807 ww = 1.0/(numpy.power(spec2,2)/nicoh)
2808 2808 ww[np//2] = 0.0
2809 2809
2810 2810 a = aold.copy()
2811 2811 xvalid = oldxvalid.copy()
2812 2812 #self.show_var(xvalid)
2813 2813
2814 2814 gaussfn = __curvefit_koki(spec[xvalid], a, ww[xvalid])
2815 2815 a = gaussfn[1]
2816 2816 converge = gaussfn[2]
2817 2817
2818 2818 xvalid = numpy.arange(np)
2819 2819 spec2 = __GAUSSWINFIT1(a)
2820 2820
2821 2821 xvalid = oldxvalid.copy()
2822 2822 power = a[0] * np
2823 2823 fd = a[1]
2824 2824 sigma_fd = gaussfn[3][1]
2825 2825 snr = max(power/ (max(a[3],n0) * len(oldxvalid)) * converge, 1e-20)
2826 2826 w = numpy.abs(a[2])
2827 2827 n1 = max(a[3], n0)
2828 2828
2829 2829 #gauss_adj=[fd,w,snr,n1,fp,sigma_fd]
2830 2830 else:
2831 2831 sigma_fd=numpy.nan # to avoid UnboundLocalError: local variable 'sigma_fd' referenced before assignment
2832 2832
2833 2833 vec_fd[ind] = fd
2834 2834 vec_w[ind] = w
2835 2835 vec_snr[ind] = snr
2836 2836 vec_n1[ind] = n1
2837 2837 vec_fp[ind] = fp
2838 2838 vec_sigma_fd[ind] = sigma_fd
2839 2839 vec_power[ind] = power # to compare with type 0 processing
2840 2840
2841 2841 if type1==1:
2842 2842 return numpy.vstack((vec_snr, vec_w, vec_fd, vec_n1, vec_fp, vec_sigma_fd, vec_power)) # snr and fd exchanged to compare doppler of both types
2843 2843 else:
2844 2844 return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
2845 2845
2846 2846 def __get_noise2(self,POWER, fft_avg, TALK=0):
2847 2847 '''
2848 2848 Routine to compute the noise per height (n1). Similar to the IDL version.
2849 2849 '''
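# Note (added): this follows a Hildebrand & Sekhon style criterion: sorted spectral
# points are accumulated while J*SUMSQ <= (1 + 1/fft_avg)*SUM**2, i.e. while the sample
# variance is still consistent with white noise; the running mean at that point (RNOISE)
# is taken as the noise level.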
2850 2850 SPECT_PTS = len(POWER)
2851 2851 fft_avg = fft_avg*1.0
2852 2852 NOMIT = 0
2853 2853 NN = SPECT_PTS - NOMIT
2854 2854 N = NN//2
2855 2855 ARR = numpy.concatenate((POWER[0:N+1],POWER[N+NOMIT+1:SPECT_PTS]))
2856 2856 ARR = numpy.sort(ARR)
2857 2857 NUMS_MIN = (SPECT_PTS+7)//8
2858 2858 RTEST = (1.0+1.0/fft_avg)
2859 2859 SUM = 0.0
2860 2860 SUMSQ = 0.0
2861 2861 J = 0
2862 2862 for I in range(NN):
2863 2863 J = J + 1
2864 2864 SUM = SUM + ARR[I]
2865 2865 SUMSQ = SUMSQ + ARR[I]*ARR[I]
2866 2866 AVE = SUM*1.0/J
2867 2867 if J > NUMS_MIN:
2868 2868 if (SUMSQ*J <= RTEST*SUM*SUM): RNOISE = AVE
2869 2869 else:
2870 2870 if J == NUMS_MIN: RNOISE = AVE
2871 2871 if TALK == 1: print('Noise Power (2):%4.4f' %RNOISE)
2872 2872 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
2873 2873 return RNOISE, stdv
2874 2874
2875 2875 def __get_noise1(self, power, fft_avg, TALK=0):
2876 2876 '''
2877 2877 Routine to compute the noise per height (n0). Similar to the IDL version.
2878 2878 '''
2879 2879 num_pts = numpy.size(power)
2880 2880 #print('num_pts',num_pts)
2881 2881 #print('power',power.shape)
2882 2882 #print(power[256:267,0:2])
2883 2883 fft_avg = fft_avg*1.0
2884 2884
2885 2885 ind = numpy.argsort(power, axis=None, kind='stable')
2886 2886 #ind = numpy.argsort(numpy.reshape(power,-1))
2887 2887 #print(ind.shape)
2888 2888 #print(ind[0:11])
2889 2889 #print(numpy.reshape(power,-1)[ind[0:11]])
2890 2890 ARR = numpy.reshape(power,-1)[ind]
2891 2891 #print('ARR',len(ARR))
2892 2892 #print('ARR',ARR.shape)
2893 2893 NUMS_MIN = num_pts//10
2894 2894 RTEST = (1.0+1.0/fft_avg)
2895 2895 SUM = 0.0
2896 2896 SUMSQ = 0.0
2897 2897 J = 0
2898 2898 cont = 1
2899 2899 while cont == 1 and J < num_pts:
2900 2900
2901 2901 SUM = SUM + ARR[J]
2902 2902 SUMSQ = SUMSQ + ARR[J]*ARR[J]
2903 2903 J = J + 1
2904 2904
2905 2905 if J > NUMS_MIN:
2906 2906 if (SUMSQ*J <= RTEST*SUM*SUM):
2907 2907 LNOISE = SUM*1.0/J
2908 2908 else:
2909 2909 J = J - 1
2910 2910 SUM = SUM - ARR[J]
2911 2911 SUMSQ = SUMSQ - ARR[J]*ARR[J]
2912 2912 cont = 0
2913 2913 else:
2914 2914 if J == NUMS_MIN: LNOISE = SUM*1.0/J
2915 2915 if TALK == 1: print('Noise Power (1):%8.8f' %LNOISE)
2916 2916 stdv = numpy.sqrt(SUMSQ/J - numpy.power(SUM/J,2))
2917 2917 return LNOISE, stdv
2918 2918
2919 2919 def __NoiseByChannel(self, num_prof, num_incoh, spectra,talk=0):
2920 2920
2921 2921 val_frq = numpy.arange(num_prof-2)+1
2922 2922 val_frq[(num_prof-2)//2:] = val_frq[(num_prof-2)//2:] + 1
2923 2923 junkspc = numpy.sum(spectra[val_frq,:], axis=1)
2924 2924 junkid = numpy.argsort(junkspc)
2925 2925 noisezone = val_frq[junkid[0:num_prof//2]]
2926 2926 specnoise = spectra[noisezone,:]
2927 2927 noise, stdvnoise = self.__get_noise1(specnoise,num_incoh)
2928 2928
2929 2929 if talk:
2930 2930 print('noise =', noise)
2931 2931 return noise
2932 2932 #------------------ Get SA Parameters --------------------------
2933 2933
2934 2934 def GetSAParameters(self):
2935 2935 #SA in frequency
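# Note (added): this method appears to be unused/incomplete as written: num_chan and
# cspc_par are not defined in this scope and __calculateMoments is referenced without self.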
2936 2936 pairslist = self.dataOut.groupList
2937 2937 num_pairs = len(pairslist)
2938 2938
2939 2939 vel = self.dataOut.abscissaList
2940 2940 spectra = self.dataOut.data_pre
2941 2941 cspectra = self.dataIn.data_cspc
2942 2942 delta_v = vel[1] - vel[0]
2943 2943
2944 2944 #Calculating the power spectrum
2945 2945 spc_pow = numpy.sum(spectra, 3)*delta_v
2946 2946 #Normalizing Spectra
2947 2947 norm_spectra = spectra/spc_pow
2948 2948 #Calculating the norm_spectra at peak
2949 2949 max_spectra = numpy.max(norm_spectra, 3)
2950 2950
2951 2951 #Normalizing Cross Spectra
2952 2952 norm_cspectra = numpy.zeros(cspectra.shape)
2953 2953
2954 2954 for i in range(num_chan):
2955 2955 norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])
2956 2956
2957 2957 max_cspectra = numpy.max(norm_cspectra,2)
2958 2958 max_cspectra_index = numpy.argmax(norm_cspectra, 2)
2959 2959
2960 2960 for i in range(num_pairs):
2961 2961 cspc_par[i,:,:] = __calculateMoments(norm_cspectra)
2962 2962 #------------------- Get Lags ----------------------------------
2963 2963
2964 2964 class JULIADriftsEstimation(Operation):
2965 2965
2966 2966 def __init__(self):
2967 2967 Operation.__init__(self)
2968 2968
2969 2969 def newtotal(self, data):
2970 2970 return numpy.nansum(data)
2971 2971
2972 2972 def data_filter(self, parm, snrth=-20, swth=20, wErrth=500):
2973 2973
2974 2974 Sz0 = parm.shape # Sz0: h,p
2975 2975 drift = parm[:,0]
2976 2976 sw = 2*parm[:,1]
2977 2977 snr = 10*numpy.log10(parm[:,2])
2978 2978 Sz = drift.shape # Sz: h
2979 2979 mask = numpy.ones((Sz[0]))
2980 2980 th=0
2981 2981 valid=numpy.where(numpy.isfinite(snr))
2982 2982 cvalid = len(valid[0])
2983 2983 if cvalid >= 1:
2984 2984 # Computation of the average snr noise for the i-th group of heights
2985 2985 nbins = int(numpy.max(snr)-numpy.min(snr))+1 # bin size = 1, similar to IDL
2986 2986 h = numpy.histogram(snr,bins=nbins)
2987 2987 hist = h[0]
2988 2988 values = numpy.round_(h[1])
2989 2989 moda = values[numpy.where(hist == numpy.max(hist))]
2990 2990 indNoise = numpy.where(numpy.abs(snr - numpy.min(moda)) < 3)[0]
2991 2991
2992 2992 noise = snr[indNoise]
2993 2993 noise_mean = numpy.sum(noise)/len(noise)
2994 2994 # Median of the snr
2995 2995 med = numpy.median(snr)
2996 2996 # Set the snr threshold
2997 2997 if noise_mean > med + 3:
2998 2998 th = med
2999 2999 else:
3000 3000 th = noise_mean + 3
3001 3001 # Set the mask
3002 3002 novalid = numpy.where(snr <= th)[0]
3003 3003 mask[novalid] = numpy.nan
3004 3004 # Remove data that do not exceed the threshold: PARAMETER
3005 3005 novalid = numpy.where(snr <= snrth)
3006 3006 cnovalid = len(novalid[0])
3007 3007 if cnovalid > 0:
3008 3008 mask[novalid] = numpy.nan
3009 3009 novalid = numpy.where(numpy.isnan(snr))
3010 3010 cnovalid = len(novalid[0])
3011 3011 if cnovalid > 0:
3012 3012 mask[novalid] = numpy.nan
3013 3013 new_parm = numpy.zeros((Sz0[0],Sz0[1]))
3014 3014 for h in range(Sz0[0]):
3015 3015 for p in range(Sz0[1]):
3016 3016 if numpy.isnan(mask[h]):
3017 3017 new_parm[h,p]=numpy.nan
3018 3018 else:
3019 3019 new_parm[h,p]=parm[h,p]
3020 3020
3021 3021 return new_parm, th
3022 3022
3023 3023 def run(self, dataOut, zenith, zenithCorrection,heights=None, statistics=0, otype=0):
3024 3024
3025 3025 dataOut.lat=-11.95
3026 3026 dataOut.lon=-76.87
3027 3027 nCh=dataOut.spcpar.shape[0]
3028 3028 nHei=dataOut.spcpar.shape[1]
3029 3029 nParam=dataOut.spcpar.shape[2]
3030 3030 # Height selection
3031 3031
3032 3032 if not heights:
3033 3033 parm = numpy.zeros((nCh,nHei,nParam))
3034 3034 parm[:] = dataOut.spcpar[:]
3035 3035 else:
3036 3036 hei=dataOut.heightList
3037 3037 hvalid=numpy.where([hei >= heights[0]][0] & [hei <= heights[1]][0])[0]
3038 3038 nhvalid=len(hvalid)
3039 3039 dataOut.heightList = hei[hvalid]
3040 3040 parm = numpy.zeros((nCh,nhvalid,nParam))
3041 3041 parm[:] = dataOut.spcpar[:,hvalid,:]
3042 3042
3043 3043
3044 3044 # First filtering: SNR threshold
3045 3045 for i in range(nCh):
3046 3046 parm[i,:,:] = self.data_filter(parm[i,:,:])[0]
3047 3047
3048 3048 zenith = numpy.array(zenith)
3049 3049 zenith -= zenithCorrection
3050 3050 zenith *= numpy.pi/180
3051 3051 alpha = zenith[0]
3052 3052 beta = zenith[1]
3053 3053 dopplerCH0 = parm[0,:,0]
3054 3054 dopplerCH1 = parm[1,:,0]
3055 3055 swCH0 = parm[0,:,1]
3056 3056 swCH1 = parm[1,:,1]
3057 3057 snrCH0 = 10*numpy.log10(parm[0,:,2])
3058 3058 snrCH1 = 10*numpy.log10(parm[1,:,2])
3059 3059 noiseCH0 = parm[0,:,3]
3060 3060 noiseCH1 = parm[1,:,3]
3061 3061 wErrCH0 = parm[0,:,5]
3062 3062 wErrCH1 = parm[1,:,5]
3063 3063
3064 3064 # Vertical and zonal calculation according to geometry
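# Note (added): sinB_A = sin(beta)*cos(alpha) - sin(alpha)*cos(beta) = sin(beta - alpha),
# the determinant of the 2x2 system that maps the (vertical, zonal) wind onto the two
# oblique radial Dopplers; drift and zonal below are its closed-form solution under the
# sign convention used here.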
3065 3065 sinB_A = numpy.sin(beta)*numpy.cos(alpha) - numpy.sin(alpha)* numpy.cos(beta)
3066 3066 drift = -(dopplerCH0 * numpy.sin(beta) - dopplerCH1 * numpy.sin(alpha))/ sinB_A
3067 3067 zonal = (dopplerCH0 * numpy.cos(beta) - dopplerCH1 * numpy.cos(alpha))/ sinB_A
3068 3068 snr = (snrCH0 + snrCH1)/2
3069 3069 noise = (noiseCH0 + noiseCH1)/2
3070 3070 sw = (swCH0 + swCH1)/2
3071 3071 w_w_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.sin(beta)/numpy.abs(sinB_A),2) + numpy.power(wErrCH1 * numpy.sin(alpha)/numpy.abs(sinB_A),2))
3072 3072 w_e_err= numpy.sqrt(numpy.power(wErrCH0 * numpy.cos(beta)/numpy.abs(-1*sinB_A),2) + numpy.power(wErrCH1 * numpy.cos(alpha)/numpy.abs(-1*sinB_A),2))
3073 3073
3074 3074 # for statistics150km
3075 3075 if statistics:
3076 3076 print('Implemented offline.')
3077 3077
3078 3078 if otype == 0:
3079 3079 winds = numpy.vstack((snr, drift, zonal, noise, sw, w_w_err, w_e_err)) # to process statistics drifts
3080 3080 elif otype == 3:
3081 3081 winds = numpy.vstack((snr, drift, zonal)) # to generic plot: 3 RTI's
3082 3082 elif otype == 4:
3083 3083 winds = numpy.vstack((snrCH0, drift, snrCH1, zonal)) # to generic plot: 4 RTI's
3084 3084
3085 3085 snr1 = numpy.vstack((snrCH0, snrCH1))
3086 3086 dataOut.data_output = winds
3087 3087 dataOut.data_snr = snr1
3088 3088
3089 3089 dataOut.utctimeInit = dataOut.utctime
3090 3090 dataOut.outputInterval = dataOut.timeInterval
3091 3091 try:
3092 3092 dataOut.flagNoData = numpy.all(numpy.isnan(dataOut.data_output[0])) # NAN vectors are not written MADRIGAL CASE
3093 3093 except:
3094 3094 print("Check there is no Data")
3095 3095
3096 3096 return dataOut
3097 3097
3098 3098 class SALags(Operation):
3099 3099 '''
3100 3100 Operation SALags()
3101 3101
3102 3102 Input:
3103 3103 self.dataOut.data_pre
3104 3104 self.dataOut.abscissaList
3105 3105 self.dataOut.noise
3106 3106 self.dataOut.normFactor
3107 3107 self.dataOut.data_snr
3108 3108 self.dataOut.groupList
3109 3109 self.dataOut.nChannels
3110 3110
3111 3111 Affected:
3112 3112 self.dataOut.data_param
3113 3113
3114 3114 '''
3115 3115 def run(self, dataOut):
3116 3116 data_acf = dataOut.data_pre[0]
3117 3117 data_ccf = dataOut.data_pre[1]
3118 3118 normFactor_acf = dataOut.normFactor[0]
3119 3119 normFactor_ccf = dataOut.normFactor[1]
3120 3120 pairs_acf = dataOut.groupList[0]
3121 3121 pairs_ccf = dataOut.groupList[1]
3122 3122
3123 3123 nHeights = dataOut.nHeights
3124 3124 absc = dataOut.abscissaList
3125 3125 noise = dataOut.noise
3126 3126 SNR = dataOut.data_snr
3127 3127 nChannels = dataOut.nChannels
3128 3128 for l in range(len(pairs_acf)):
3129 3129 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
3130 3130
3131 3131 for l in range(len(pairs_ccf)):
3132 3132 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
3133 3133
3134 3134 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
3135 3135 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
3136 3136 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
3137 3137 return
3138 3138
3139 3139 def __calculateTaus(self, data_acf, data_ccf, lagRange):
3140 3140
3141 3141 lag0 = data_acf.shape[1]//2 # integer division, lag0 is used as an index
3142 3142 # Autocorrelation function
3143 3143 mean_acf = stats.nanmean(data_acf, axis = 0)
3144 3144
3145 3145 # Getting the TauCross index
3146 3146 ind_ccf = data_ccf.argmax(axis = 1)
3147 3147 # Getting the TauAuto index
3148 3148 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
3149 3149 ccf_lag0 = data_ccf[:,lag0,:]
3150 3150
3151 3151 for i in range(ccf_lag0.shape[0]):
3152 3152 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
3153 3153
3154 3154 # Getting TauCross and TauAuto
3155 3155 tau_ccf = lagRange[ind_ccf]
3156 3156 tau_acf = lagRange[ind_acf]
3157 3157
3158 3158 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
3159 3159
3160 3160 tau_ccf[Nan1,Nan2] = numpy.nan
3161 3161 tau_acf[Nan1,Nan2] = numpy.nan
3162 3162 tau = numpy.vstack((tau_ccf,tau_acf))
3163 3163
3164 3164 return tau
3165 3165
3166 3166 def __calculateLag1Phase(self, data, lagTRange):
3167 3167 data1 = stats.nanmean(data, axis = 0)
3168 3168 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
3169 3169
3170 3170 phase = numpy.angle(data1[lag1,:])
3171 3171
3172 3172 return phase
3173 3173
3174 3174 def fit_func( x, a0, a1, a2): #, a3, a4, a5):
3175 3175 z = (x - a1) / a2
3176 3176 y = a0 * numpy.exp(-z**2 / a2) #+ a3 + a4 * x + a5 * x**2
3177 3177 return y
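# Note (added): with z already normalized by a2, a standard gaussian would use
# numpy.exp(-z**2 / 2); the extra division by a2 is kept as-is here since CleanRayleigh
# fits the histograms with this exact form.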
3178 3178
3179
3180 3179 class SpectralFitting(Operation):
3181 3180 '''
3182 3181 Operation SpectralFitting()
3183 3182
3184 3183 Input:
3185 3184 Output:
3186 3185 Variables modified:
3187 3186 '''
3188 3187 isConfig = False
3189 3188 __dataReady = False
3190 3189 bloques = None
3191 3190 bloque0 = None
3192 index = 0
3193 fint = 0
3194 buffer = 0
3195 buffer2 = 0
3196 buffer3 = 0
3197 3191
3198 3192 def __init__(self):
3199 3193 Operation.__init__(self)
3200 3194 self.i=0
3201 3195 self.isConfig = False
3202 3196
3203
3204 def setup(self,dataOut,groupList,path,file,filec):
3197 def setup(self,nChan,nProf,nHei,nBlocks):
3205 3198 self.__dataReady = False
3206 # new
3207 self.nChannels = dataOut.nChannels
3208 self.channels = dataOut.channelList
3209 self.nHeights = dataOut.heightList.size
3210 self.heights = dataOut.heightList
3211 self.nProf = dataOut.nProfiles
3212 self.nIncohInt = dataOut.nIncohInt
3213 self.absc = dataOut.abscissaList[:-1]
3214
3215
3216 #To be inserted as a parameter
3217 try:
3218 self.groupArray = numpy.array(groupList)#groupArray = numpy.array([[0,1],[2,3]])
3219 except:
3220 print("Please insert groupList. Example (0,1),(2,3) format multilist")
3221 dataOut.groupList = self.groupArray
3222 self.crosspairs = dataOut.groupList
3223 self.nPairs = len(self.crosspairs)
3224 self.nGroups = self.groupArray.shape[0]
3225
3226 #List of possible combinations
3227
3228 self.listComb = itertools.combinations(numpy.arange(self.groupArray.shape[1]),2)
3229 self.indCross = numpy.zeros(len(list(self.listComb)), dtype = 'int')
3230
3231 #Parameters Array
3232 dataOut.data_param = None
3233 dataOut.data_paramC = None
3234 dataOut.clean_num_aver = None
3235 dataOut.coh_num_aver = None
3236 dataOut.tmp_spectra_i = None
3237 dataOut.tmp_cspectra_i = None
3238 dataOut.tmp_spectra_c = None
3239 dataOut.tmp_cspectra_c = None
3240 dataOut.index = None
3241
3242 if path != None:
3243 sys.path.append(path)
3244 self.library = importlib.import_module(file)
3245 if filec != None:
3246 self.weightf = importlib.import_module(filec)
3247
3248 #Set constants
3249 self.constants = self.library.setConstants(dataOut)
3250 dataOut.constants = self.constants
3251 self.M = dataOut.normFactor
3252 self.N = dataOut.nFFTPoints
3253 self.ippSeconds = dataOut.ippSeconds
3254 self.K = dataOut.nIncohInt
3255 self.pairsArray = numpy.array(dataOut.pairsList)
3256 self.snrth = 20
3257
3199 self.bloques = numpy.zeros([2, nProf, nHei,nBlocks], dtype= complex)
3200 self.bloque0 = numpy.zeros([nChan, nProf, nHei, nBlocks])
3258 3201 def __calculateMoments(self,oldspec, oldfreq, n0, nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
3259 3202
3260 3203 if (nicoh is None): nicoh = 1
3261 3204 if (graph is None): graph = 0
3262 3205 if (smooth is None): smooth = 0
3263 3206 elif (smooth < 3): smooth = 0
3264 3207
3265 3208 if (type1 is None): type1 = 0
3266 3209 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
3267 3210 if (snrth is None): snrth = -3
3268 3211 if (dc is None): dc = 0
3269 3212 if (aliasing is None): aliasing = 0
3270 3213 if (oldfd is None): oldfd = 0
3271 3214 if (wwauto is None): wwauto = 0
3272 3215
3273 3216 if (n0 < 1.e-20): n0 = 1.e-20
3274 3217
3275 3218 freq = oldfreq
3276 3219 vec_power = numpy.zeros(oldspec.shape[1])
3277 3220 vec_fd = numpy.zeros(oldspec.shape[1])
3278 3221 vec_w = numpy.zeros(oldspec.shape[1])
3279 3222 vec_snr = numpy.zeros(oldspec.shape[1])
3280 3223
3281 3224 oldspec = numpy.ma.masked_invalid(oldspec)
3282 3225
3283 3226 for ind in range(oldspec.shape[1]):
3284 3227
3285 3228 spec = oldspec[:,ind]
3286 3229 aux = spec*fwindow
3287 3230 max_spec = aux.max()
3288 3231 m = list(aux).index(max_spec)
3289 3232
3290 3233 #Smooth
3291 3234 if (smooth == 0): spec2 = spec
3292 3235 else: spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
3293 3236
3294 3237 # Moments calculation
3295 3238 bb = spec2[list(range(m,spec2.size))]
3296 3239 bb = (bb<n0).nonzero()
3297 3240 bb = bb[0]
3298 3241
3299 3242 ss = spec2[list(range(0,m + 1))]
3300 3243 ss = (ss<n0).nonzero()
3301 3244 ss = ss[0]
3302 3245
3303 3246 if (bb.size == 0):
3304 3247 bb0 = spec.size - 1 - m
3305 3248 else:
3306 3249 bb0 = bb[0] - 1
3307 3250 if (bb0 < 0):
3308 3251 bb0 = 0
3309 3252
3310 3253 if (ss.size == 0): ss1 = 1
3311 3254 else: ss1 = max(ss) + 1
3312 3255
3313 3256 if (ss1 > m): ss1 = m
3314 3257
3315 3258 valid = numpy.asarray(list(range(int(m + bb0 - ss1 + 1)))) + ss1
3316 3259 power = ((spec2[valid] - n0)*fwindow[valid]).sum()
3317 3260 fd = ((spec2[valid]- n0)*freq[valid]*fwindow[valid]).sum()/power
3318 3261 w = math.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum()/power)
3319 3262 snr = (spec2.mean()-n0)/n0
3320 3263
3321 if (snr < 1.e-20) :
3322 snr = 1.e-20
3264 # if (snr < 1.e-20) :
3265 # snr = 1.e-20
3323 3266
3324 3267 vec_power[ind] = power
3325 3268 vec_fd[ind] = fd
3326 3269 vec_w[ind] = w
3327 3270 vec_snr[ind] = snr
3328
3271
3329 3272 moments = numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
3330 3273 return moments
3331 3274
3275 #def __DiffCoherent(self,snrth, spectra, cspectra, nProf, heights,nChan, nHei, nPairs, channels, noise, crosspairs):
3332 3276 def __DiffCoherent(self, spectra, cspectra, dataOut, noise, snrth, coh_th, hei_th):
3333
3277
3278 #import matplotlib.pyplot as plt
3334 3279 nProf = dataOut.nProfiles
3335 3280 heights = dataOut.heightList
3336 3281 nHei = len(heights)
3337 3282 channels = dataOut.channelList
3338 3283 nChan = len(channels)
3339 3284 crosspairs = dataOut.groupList
3340 3285 nPairs = len(crosspairs)
3341 3286 # Separate incoherent from coherent spectra, SNR > 20 dB
3342 3287 snr_th = 10**(snrth/10.0)
3343 3288 my_incoh_spectra = numpy.zeros([nChan, nProf,nHei], dtype='float')
3344 3289 my_incoh_cspectra = numpy.zeros([nPairs,nProf, nHei], dtype='complex')
3345 3290 my_incoh_aver = numpy.zeros([nChan, nHei])
3346 3291 my_coh_aver = numpy.zeros([nChan, nHei])
3347 3292
3348 3293 coh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
3349 3294 coh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
3350 3295 coh_aver = numpy.zeros([nChan, nHei])
3351 3296
3352 3297 incoh_spectra = numpy.zeros([nChan, nProf, nHei], dtype='float')
3353 3298 incoh_cspectra = numpy.zeros([nPairs, nProf, nHei], dtype='complex')
3354 3299 incoh_aver = numpy.zeros([nChan, nHei])
3355 3300 power = numpy.sum(spectra, axis=1)
3356 3301
3357 3302 if coh_th == None : coh_th = numpy.array([0.75,0.65,0.15]) # 0.65
3358 3303 if hei_th == None : hei_th = numpy.array([60,300,650])
3359 3304 for ic in range(nPairs):
3360 3305 pair = crosspairs[ic]
3361 3306 # if the SNR is greater than the SNR threshold, the data are taken as coherent
3362 3307 s_n0 = power[pair[0],:]/noise[pair[0]]
3363 3308 s_n1 = power[pair[1],:]/noise[pair[1]]
3364 3309 valid1 =(s_n0>=snr_th).nonzero()
3365 3310 valid2 = (s_n1>=snr_th).nonzero()
3311
3366 3312 valid1 = numpy.array(valid1[0])
3367 3313 valid2 = numpy.array(valid2[0])
3368 3314 valid = valid1
3369 3315 for iv in range(len(valid2)):
3316
3370 3317 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3371 3318 if len(indv[0]) == 0 :
3372 3319 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
3373 3320 if len(valid)>0:
3374 3321 my_coh_aver[pair[0],valid]=1
3375 3322 my_coh_aver[pair[1],valid]=1
3376 3323 # if the coherence is greater than the coherence threshold, the data are taken as coherent
3324
3377 3325 coh = numpy.squeeze(numpy.nansum(cspectra[ic,:,:], axis=0)/numpy.sqrt(numpy.nansum(spectra[pair[0],:,:], axis=0)*numpy.nansum(spectra[pair[1],:,:], axis=0)))
3326
3378 3327 for ih in range(len(hei_th)):
3379 3328 hvalid = (heights>hei_th[ih]).nonzero()
3380 3329 hvalid = hvalid[0]
3381 3330 if len(hvalid)>0:
3382 3331 valid = (numpy.absolute(coh[hvalid])>coh_th[ih]).nonzero()
3383 3332 valid = valid[0]
3333
3384 3334 if len(valid)>0:
3385 3335 my_coh_aver[pair[0],hvalid[valid]] =1
3386 3336 my_coh_aver[pair[1],hvalid[valid]] =1
3387
3337
3388 3338 coh_echoes = (my_coh_aver[pair[0],:] == 1).nonzero()
3389 3339 incoh_echoes = (my_coh_aver[pair[0],:] != 1).nonzero()
3390 3340 incoh_echoes = incoh_echoes[0]
3391 3341 if len(incoh_echoes) > 0:
3392 3342 my_incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
3393 3343 my_incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
3394 3344 my_incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
3395 3345 my_incoh_aver[pair[0],incoh_echoes] = 1
3396 3346 my_incoh_aver[pair[1],incoh_echoes] = 1
3397 3347
3398 3348
3399 3349 for ic in range(nPairs):
3400 3350 pair = crosspairs[ic]
3401 3351
3402 3352 valid1 =(my_coh_aver[pair[0],:]==1 ).nonzero()
3403 3353 valid2 = (my_coh_aver[pair[1],:]==1).nonzero()
3404 3354 valid1 = numpy.array(valid1[0])
3405 3355 valid2 = numpy.array(valid2[0])
3406 3356 valid = valid1
3407
3357
3408 3358 for iv in range(len(valid2)):
3409
3359
3410 3360 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3411 3361 if len(indv[0]) == 0 :
3412 3362 valid = numpy.concatenate((valid,valid2[iv]), axis=None)
3413 3363 valid1 =(my_coh_aver[pair[0],:] !=1 ).nonzero()
3414 3364 valid2 = (my_coh_aver[pair[1],:] !=1).nonzero()
3415 3365 valid1 = numpy.array(valid1[0])
3416 3366 valid2 = numpy.array(valid2[0])
3417 3367 incoh_echoes = valid1
3368
3418 3369 for iv in range(len(valid2)):
3419
3370
3420 3371 indv = numpy.array((valid1 == valid2[iv]).nonzero())
3421 3372 if len(indv[0]) == 0 :
3422 3373 incoh_echoes = numpy.concatenate(( incoh_echoes,valid2[iv]), axis=None)
3423 3374
3424 3375 if len(valid)>0:
3425 3376 coh_spectra[pair[0],:,valid] = spectra[pair[0],:,valid]
3426 3377 coh_spectra[pair[1],:,valid] = spectra[pair[1],:,valid]
3427 3378 coh_cspectra[ic,:,valid] = cspectra[ic,:,valid]
3428 3379 coh_aver[pair[0],valid]=1
3429 3380 coh_aver[pair[1],valid]=1
3430 3381 if len(incoh_echoes)>0:
3431 3382 incoh_spectra[pair[0],:,incoh_echoes] = spectra[pair[0],:,incoh_echoes]
3432 3383 incoh_spectra[pair[1],:,incoh_echoes] = spectra[pair[1],:,incoh_echoes]
3433 3384 incoh_cspectra[ic,:,incoh_echoes] = cspectra[ic,:,incoh_echoes]
3434 3385 incoh_aver[pair[0],incoh_echoes]=1
3435 3386 incoh_aver[pair[1],incoh_echoes]=1
3387
3436 3388 return my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver
3437
3438
3389
3439 3390 def __CleanCoherent(self,snrth, spectra, cspectra, coh_aver,dataOut, noise,clean_coh_echoes,index):
3440 3391
3392 #import matplotlib.pyplot as plt
3441 3393 nProf = dataOut.nProfiles
3442 3394 heights = dataOut.heightList
3443 3395 nHei = len(heights)
3444 3396 channels = dataOut.channelList
3445 3397 nChan = len(channels)
3446 3398 crosspairs = dataOut.groupList
3447 3399 nPairs = len(crosspairs)
3448 3400
3449 3401 absc = dataOut.abscissaList[:-1]
3450 3402 data_param = numpy.zeros((nChan, 4, spectra.shape[2]))
3403
3451 3404 clean_coh_spectra = spectra.copy()
3452 3405 clean_coh_cspectra = cspectra.copy()
3453 3406 clean_coh_aver = coh_aver.copy()
3454 3407
3455 3408 spwd_th=[10,6] #spwd_th[0] --> For satellites ; spwd_th[1] --> For special events like SUN.
3456 3409 coh_th = 0.75
3457 3410
3458 3411 rtime0 = [6,18] # period without ESF
3459 3412 rtime1 = [10.5,13.5] # period with high coherence and high (expected) spectral width: SUN.
3460 3413
3461 3414 time = index*5./60 # based on 5-minute processing blocks
3462 3415 if clean_coh_echoes == 1 :
3463 3416 for ind in range(nChan):
3464 3417 data_param[ind,:,:] = self.__calculateMoments( spectra[ind,:,:] , absc , noise[ind] )
3418
3465 3419 spwd = data_param[:,3]
3420
3466 3421 # SPECB_JULIA,header=anal_header,jspectra=spectra,vel=velocities,hei=heights, num_aver=1, mode_fit=0,smoothing=smoothing,jvelr=velr,jspwd=spwd,jsnr=snr,jnoise=noise,jstdvnoise=stdvnoise
3468 3423 # to obtain spwd
3468 3423 for ic in range(nPairs):
3469 3424 pair = crosspairs[ic]
3470 3425 coh = numpy.squeeze(numpy.sum(cspectra[ic,:,:], axis=1)/numpy.sqrt(numpy.sum(spectra[pair[0],:,:], axis=1)*numpy.sum(spectra[pair[1],:,:], axis=1)))
3471 3426 for ih in range(nHei) :
3472 3427 # Considering heights higher than 200km in order to avoid removing phenomena like EEJ.
3473 3428 if heights[ih] >= 200 and coh_aver[pair[0],ih] == 1 and coh_aver[pair[1],ih] == 1 :
3474 3429 # Checking coherence
3475 3430 if (numpy.abs(coh[ih]) <= coh_th) or (time >= rtime0[0] and time <= rtime0[1]) :
3476 3431 # Checking spectral widths
3477 3432 if (spwd[pair[0],ih] > spwd_th[0]) or (spwd[pair[1],ih] > spwd_th[0]) :
3478 3433 # satellite
3479 3434 clean_coh_spectra[pair,ih,:] = 0.0
3480 3435 clean_coh_cspectra[ic,ih,:] = 0.0
3481 3436 clean_coh_aver[pair,ih] = 0
3482 3437 else :
3483 3438 if ((spwd[pair[0],ih] < spwd_th[1]) or (spwd[pair[1],ih] < spwd_th[1])) :
3484 3439 # Special event such as the sun.
3485 3440 clean_coh_spectra[pair,ih,:] = 0.0
3486 3441 clean_coh_cspectra[ic,ih,:] = 0.0
3487 3442 clean_coh_aver[pair,ih] = 0
3488 3443
3489 3444 return clean_coh_spectra, clean_coh_cspectra, clean_coh_aver
3490
3445
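The satellite/sun screening above hinges on a frequency-integrated coherence per cross-pair and height, |sum(cross-spectrum)| / sqrt(sum(auto0) * sum(auto1)). A minimal sketch of that quantity, assuming arrays ordered (nProf, nHei); the function name and shapes are illustrative, not part of the module:

import numpy

def integrated_coherence(cspc, spc0, spc1):
    # cspc: complex cross-spectrum (nProf, nHei); spc0, spc1: auto-spectra (nProf, nHei)
    num = numpy.abs(numpy.sum(cspc, axis=0))
    den = numpy.sqrt(numpy.sum(spc0, axis=0) * numpy.sum(spc1, axis=0))
    return num / den   # near 1 for coherent echoes (satellites, sun), lower for incoherent scatter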
3491 3446 def CleanRayleigh(self,dataOut,spectra,cspectra,save_drifts):
3492
3447
3493 3448 rfunc = cspectra.copy()
3494 3449 n_funct = len(rfunc[0,:,0,0])
3495 3450 val_spc = spectra*0.0
3496 3451 val_cspc = cspectra*0.0
3497 3452 in_sat_spectra = spectra.copy()
3498 3453 in_sat_cspectra = cspectra.copy()
3499 3454
3500 3455 min_hei = 200
3501 3456 nProf = dataOut.nProfiles
3502 3457 heights = dataOut.heightList
3503 3458 nHei = len(heights)
3504 3459 channels = dataOut.channelList
3505 3460 nChan = len(channels)
3506 3461 crosspairs = dataOut.groupList
3507 3462 nPairs = len(crosspairs)
3508 3463 hval=(heights >= min_hei).nonzero()
3509 3464 ih=hval[0]
3465
3466
3510 3467 for ih in range(hval[0][0],nHei):
3511 3468 for ifreq in range(nProf):
3512 3469 for ii in range(n_funct):
3513 3470
3514 3471 func2clean = 10*numpy.log10(numpy.absolute(rfunc[:,ii,ifreq,ih]))
3472
3515 3473 val = (numpy.isfinite(func2clean)==True).nonzero()
3516 3474 if len(val)>0:
3517 3475 min_val = numpy.around(numpy.amin(func2clean)-2) #> (-40)
3518 3476 if min_val <= -40 : min_val = -40
3519 3477 max_val = numpy.around(numpy.amax(func2clean)+2) #< 200
3520 3478 if max_val >= 200 : max_val = 200
3479
3521 3480 step = 1
3522 3481 #Getting bins and the histogram
3523 3482 x_dist = min_val + numpy.arange(1 + ((max_val-(min_val))/step))*step
3524 3483 y_dist,binstep = numpy.histogram(func2clean,bins=range(int(min_val),int(max_val+2),step))
3525 3484 mean = numpy.sum(x_dist * y_dist) / numpy.sum(y_dist)
3526 3485 sigma = numpy.sqrt(numpy.sum(y_dist * (x_dist - mean)**2) / numpy.sum(y_dist))
3527 3486 parg = [numpy.amax(y_dist),mean,sigma]
3528 3487 try :
3529 3488 gauss_fit, covariance = curve_fit(fit_func, x_dist, y_dist,p0=parg)
3530 3489 mode = gauss_fit[1]
3531 3490 stdv = gauss_fit[2]
3532 3491 except:
3533 3492 mode = mean
3534 3493 stdv = sigma
3535
3494
3536 3495 #Removing echoes greater than mode + 3*stdv
3537 3496 factor_stdv = 2.5
3538 3497 noval = (abs(func2clean - mode)>=(factor_stdv*stdv)).nonzero()
3539 3498
3540 3499 if len(noval[0]) > 0:
3541 3500 novall = ((func2clean - mode) >= (factor_stdv*stdv)).nonzero()
3542 3501 cross_pairs = crosspairs[ii]
3543 3502 #Getting coherent echoes which are removed.
3544 3503 if len(novall[0]) > 0:
3545 3504 val_spc[novall[0],cross_pairs[0],ifreq,ih] = 1
3546 3505 val_spc[novall[0],cross_pairs[1],ifreq,ih] = 1
3547 val_cspc[novall[0],ii,ifreq,ih] = 1
3506 val_cspc[novall[0],ii,ifreq,ih] = 1
3548 3507 #Removing coherent from ISR data
3549 3508 spectra[noval,cross_pairs[0],ifreq,ih] = numpy.nan
3550 3509 spectra[noval,cross_pairs[1],ifreq,ih] = numpy.nan
3551 3510 cspectra[noval,ii,ifreq,ih] = numpy.nan
3552
3511 #
3512 #no sale es para savedrifts >2
3513 ''' channels = dataOut.channelList
3514 cross_pairs = dataOut.groupList
3515
3516 vcross0 = (cross_pairs[0] == channels[ii]).nonzero()
3517 vcross1 = (cross_pairs[1] == channels[ii]).nonzero()
3518 vcross = numpy.concatenate((vcross0,vcross1),axis=None)
3519
3520 #Getting coherent echoes which are removed.
3521 if len(novall) > 0:
3522 #val_spc[novall,ii,ifreq,ih] = 1
3523 val_spc[ii,ifreq,ih,novall] = 1
3524 if len(vcross) > 0:
3525 val_cspc[vcross,ifreq,ih,novall] = 1
3526
3527 #Removing coherent from ISR data.
3528 spectra[ii,ifreq,ih,noval] = numpy.nan
3529 if len(vcross) > 0:
3530 cspectra[vcross,ifreq,ih,noval] = numpy.nan
3531 '''
3553 3532 #Getting average of the spectra and cross-spectra from incoherent echoes.
3533
3554 3534 out_spectra = numpy.zeros([nChan,nProf,nHei], dtype=float) #+numpy.nan
3555 3535 out_cspectra = numpy.zeros([nPairs,nProf,nHei], dtype=complex) #+numpy.nan
3556 3536 for ih in range(nHei):
3557 3537 for ifreq in range(nProf):
3558 3538 for ich in range(nChan):
3559 3539 tmp = spectra[:,ich,ifreq,ih]
3560 3540 valid = (numpy.isfinite(tmp[:])==True).nonzero()
3561 3541 if len(valid[0]) >0 :
3562 3542 out_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3543
3563 3544 for icr in range(nPairs):
3564 3545 tmp = numpy.squeeze(cspectra[:,icr,ifreq,ih])
3565 3546 valid = (numpy.isfinite(tmp)==True).nonzero()
3566 3547 if len(valid[0]) > 0:
3567 3548 out_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3568 3549 #Removing fake coherent echoes (at least 4 points around the point)
3569 3550 val_spectra = numpy.sum(val_spc,0)
3570 3551 val_cspectra = numpy.sum(val_cspc,0)
3571 3552
3572 3553 val_spectra = self.REM_ISOLATED_POINTS(val_spectra,4)
3573 3554 val_cspectra = self.REM_ISOLATED_POINTS(val_cspectra,4)
3574 3555
3575 3556 for i in range(nChan):
3576 3557 for j in range(nProf):
3577 3558 for k in range(nHei):
3578 3559 if numpy.isfinite(val_spectra[i,j,k]) and val_spectra[i,j,k] < 1 :
3579 3560 val_spc[:,i,j,k] = 0.0
3580 3561 for i in range(nPairs):
3581 3562 for j in range(nProf):
3582 3563 for k in range(nHei):
3583 3564 if numpy.isfinite(val_cspectra[i,j,k]) and val_cspectra[i,j,k] < 1 :
3584 3565 val_cspc[:,i,j,k] = 0.0
3566 # val_spc = numpy.reshape(val_spc, (len(spectra[:,0,0,0]),nProf*nHei*nChan))
3567 # if numpy.isfinite(val_spectra)==str(True):
3568 # noval = (val_spectra<1).nonzero()
3569 # if len(noval) > 0:
3570 # val_spc[:,noval] = 0.0
3571 # val_spc = numpy.reshape(val_spc, (149,nChan,nProf,nHei))
3572
3573 #val_cspc = numpy.reshape(val_spc, (149,nChan*nHei*nProf))
3574 #if numpy.isfinite(val_cspectra)==str(True):
3575 # noval = (val_cspectra<1).nonzero()
3576 # if len(noval) > 0:
3577 # val_cspc[:,noval] = 0.0
3578 # val_cspc = numpy.reshape(val_cspc, (149,nChan,nProf,nHei))
3585 3579
3586 3580 tmp_sat_spectra = spectra.copy()
3587 3581 tmp_sat_spectra = tmp_sat_spectra*numpy.nan
3588 3582 tmp_sat_cspectra = cspectra.copy()
3589 tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan
3583 tmp_sat_cspectra = tmp_sat_cspectra*numpy.nan
3584
3590 3585 val = (val_spc > 0).nonzero()
3591 3586 if len(val[0]) > 0:
3592 3587 tmp_sat_spectra[val] = in_sat_spectra[val]
3593 3588
3594 3589 val = (val_cspc > 0).nonzero()
3595 3590 if len(val[0]) > 0:
3596 3591 tmp_sat_cspectra[val] = in_sat_cspectra[val]
3597 3592
3598 3593 #Getting average of the spectra and cross-spectra from incoherent echoes.
3599 3594 sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
3600 3595 sat_cspectra = numpy.zeros((nPairs,nProf,nHei), dtype=complex)
3601 3596 for ih in range(nHei):
3602 3597 for ifreq in range(nProf):
3603 3598 for ich in range(nChan):
3604 3599 tmp = numpy.squeeze(tmp_sat_spectra[:,ich,ifreq,ih])
3605 3600 valid = (numpy.isfinite(tmp)).nonzero()
3606 3601 if len(valid[0]) > 0:
3607 3602 sat_spectra[ich,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3608 3603
3609 3604 for icr in range(nPairs):
3610 3605 tmp = numpy.squeeze(tmp_sat_cspectra[:,icr,ifreq,ih])
3611 3606 valid = (numpy.isfinite(tmp)).nonzero()
3612 3607 if len(valid[0]) > 0:
3613 3608 sat_cspectra[icr,ifreq,ih] = numpy.nansum(tmp)/len(valid[0])
3609
3614 3610 return out_spectra, out_cspectra,sat_spectra,sat_cspectra
3615
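CleanRayleigh works cell by cell: it histograms the dB power of the buffered realizations, fits a Gaussian to locate the mode and spread, and flags samples farther than factor_stdv standard deviations from the mode. A hedged sketch of that idea, with a local Gaussian standing in for the module's fit_func (names and bin count are illustrative):

import numpy
from scipy.optimize import curve_fit

def gauss(x, a, mu, sigma):                      # local stand-in for the module's fit_func
    return a * numpy.exp(-(x - mu) ** 2 / (2 * sigma ** 2))

def flag_outliers(power_db, factor_stdv=2.5):
    counts, edges = numpy.histogram(power_db, bins=40)
    centers = 0.5 * (edges[:-1] + edges[1:])
    p0 = [counts.max(), power_db.mean(), power_db.std()]
    try:
        popt, _ = curve_fit(gauss, centers, counts, p0=p0)
        mode, stdv = popt[1], abs(popt[2])
    except Exception:                            # fall back to sample statistics, as above
        mode, stdv = power_db.mean(), power_db.std()
    return numpy.abs(power_db - mode) >= factor_stdv * stdv   # True = flagged as coherent outlier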
3616 3611 def REM_ISOLATED_POINTS(self,array,rth):
3617 3612 if rth == None : rth = 4
3618 3613 num_prof = len(array[0,:,0])
3619 3614 num_hei = len(array[0,0,:])
3620 3615 n2d = len(array[:,0,0])
3621 3616
3622 3617 for ii in range(n2d) :
3623 tmp = array[ii,:,:]
3618 tmp = array[ii,:,:]
3624 3619 tmp = numpy.reshape(tmp,num_prof*num_hei)
3625 3620 indxs1 = (numpy.isfinite(tmp)==True).nonzero()
3626 indxs2 = (tmp > 0).nonzero()
3621 indxs2 = (tmp > 0).nonzero()
3627 3622 indxs1 = (indxs1[0])
3628 indxs2 = indxs2[0]
3623 indxs2 = indxs2[0]
3629 3624 indxs = None
3625
3630 3626 for iv in range(len(indxs2)):
3631 indv = numpy.array((indxs1 == indxs2[iv]).nonzero())
3627 indv = numpy.array((indxs1 == indxs2[iv]).nonzero())
3632 3628 if len(indv[0]) > 0 :
3633 3629 indxs = numpy.concatenate((indxs,indxs2[iv]), axis=None)
3630
3634 3631 indxs = indxs[1:]
3635 3632 if len(indxs) < 4 :
3636 3633 array[ii,:,:] = 0.
3637 3634 return
3638 3635
3639 xpos = numpy.mod(indxs ,num_hei)
3640 ypos = (indxs / num_hei)
3636 xpos = numpy.mod(indxs ,num_prof)
3637 ypos = (indxs / num_prof)
3641 3638 sx = numpy.argsort(xpos) # Ordering respect to "x" (time)
3642 3639 xpos = xpos[sx]
3643 3640 ypos = ypos[sx]
3644 # *********************************** Cleaning isolated points **********************************
3641
3642 # *********************************** Cleaning isolated points **********************************
3645 3643 ic = 0
3646 3644 while True :
3647 3645 r = numpy.sqrt(list(numpy.power((xpos[ic]-xpos),2)+ numpy.power((ypos[ic]-ypos),2)))
3646
3648 3647 no_coh1 = (numpy.isfinite(r)==True).nonzero()
3649 3648 no_coh2 = (r <= rth).nonzero()
3650 3649 no_coh1 = numpy.array(no_coh1[0])
3651 3650 no_coh2 = numpy.array(no_coh2[0])
3652 3651 no_coh = None
3653 3652 for iv in range(len(no_coh2)):
3654 3653 indv = numpy.array((no_coh1 == no_coh2[iv]).nonzero())
3655 3654 if len(indv[0]) > 0 :
3656 3655 no_coh = numpy.concatenate((no_coh,no_coh2[iv]), axis=None)
3657 3656 no_coh = no_coh[1:]
3658 3657 if len(no_coh) < 4 :
3659 3658 xpos[ic] = numpy.nan
3660 3659 ypos[ic] = numpy.nan
3661 3660
3662 3661 ic = ic + 1
3663 3662 if (ic == len(indxs)) :
3664 3663 break
3665 3664 indxs = (numpy.isfinite(list(xpos))==True).nonzero()
3666 3665 if len(indxs[0]) < 4 :
3667 3666 array[ii,:,:] = 0.
3668 3667 return
3669 3668
3670 3669 xpos = xpos[indxs[0]]
3671 3670 ypos = ypos[indxs[0]]
3672 3671 for i in range(0,len(ypos)):
3673 3672 ypos[i]=int(ypos[i])
3674 3673 junk = tmp
3675 3674 tmp = junk*0.0
3676 3675
3677 3676 tmp[list(xpos + (ypos*num_hei))] = junk[list(xpos + (ypos*num_hei))]
3678 3677 array[ii,:,:] = numpy.reshape(tmp,(num_prof,num_hei))
3678
3679 3679 return array
3680
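REM_ISOLATED_POINTS keeps a flagged (profile, height) cell only if enough other flagged cells fall within a small radius in the 2-D grid. A compact sketch of that neighbor-count rule, assuming radius and minimum count both default to 4 as above (the helper name is illustrative):

import numpy

def keep_clustered(xpos, ypos, rth=4, min_count=4):
    # xpos, ypos: indices of flagged cells in the (profile, height) grid
    pts = numpy.column_stack((xpos, ypos)).astype(float)
    keep = numpy.zeros(len(pts), dtype=bool)
    for k, p in enumerate(pts):
        r = numpy.sqrt(numpy.sum((pts - p) ** 2, axis=1))
        keep[k] = numpy.count_nonzero(r <= rth) >= min_count   # the point itself is included
    return keep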
3681 3680 def moments(self,doppler,yarray,npoints):
3682 3681 ytemp = yarray
3683 3682 val = (ytemp > 0).nonzero()
3684 3683 val = val[0]
3685 3684 if len(val) == 0 : val = range(npoints-1)
3686 3685
3687 3686 ynew = 0.5*(ytemp[val[0]]+ytemp[val[len(val)-1]])
3688 3687 ytemp[len(ytemp):] = [ynew]
3689 3688
3690 3689 index = 0
3691 3690 index = numpy.argmax(ytemp)
3692 3691 ytemp = numpy.roll(ytemp,int(npoints/2)-1-index)
3693 3692 ytemp = ytemp[0:npoints-1]
3694 3693
3695 3694 fmom = numpy.sum(doppler*ytemp)/numpy.sum(ytemp)+(index-(npoints/2-1))*numpy.abs(doppler[1]-doppler[0])
3696 3695 smom = numpy.sum(doppler*doppler*ytemp)/numpy.sum(ytemp)
3697 3696 return [fmom,numpy.sqrt(smom)]
3698
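moments returns the first and second spectral moments (mean Doppler shift and spectral width). A simplified centroid-only sketch for comparison; it omits the peak-centering roll performed above and uses made-up numbers:

import numpy

def spectral_moments(doppler, power):
    power = numpy.clip(power, 0, None)               # negative values carry no power
    total = numpy.sum(power)
    if total <= 0:
        return numpy.nan, numpy.nan
    fmom = numpy.sum(doppler * power) / total        # first moment: mean Doppler shift
    smom = numpy.sum((doppler - fmom) ** 2 * power) / total
    return fmom, numpy.sqrt(smom)                    # second moment: spectral width

vel = numpy.linspace(-100, 100, 64)                  # illustrative Doppler axis (m/s)
spec = numpy.exp(-0.5 * ((vel - 20.0) / 15.0) ** 2)
print(spectral_moments(vel, spec))                   # ~ (20, 15)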
3697 # **********************************************************************************************
3698 index = 0
3699 fint = 0
3700 buffer = 0
3701 buffer2 = 0
3702 buffer3 = 0
3699 3703 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None, filec=None,coh_th=None, hei_th=None,taver=None,proc=None,nhei=None,nprofs=None,ipp=None,channelList=None):
3700 if not self.isConfig:
3701 self.setup(dataOut = dataOut,groupList=groupList,path=path,file=file,filec=filec)
3702 self.isConfig = True
3703
3704 if not numpy.any(proc):
3705 if numpy.any(taver):
3706 taver = int(taver)
3707 else :
3708 taver = 5
3709 tini = time.localtime(dataOut.utctime)
3710 if (tini.tm_min % taver) == 0 and (tini.tm_sec < 5 and self.fint==0):
3711 self.index = 0
3712 jspc = self.buffer
3713 jcspc = self.buffer2
3714 jnoise = self.buffer3
3715 self.buffer = dataOut.data_spc
3704 if not numpy.any(proc):
3705
3706 nChannels = dataOut.nChannels
3707 nHeights= dataOut.heightList.size
3708 nProf = dataOut.nProfiles
3709 if numpy.any(taver): taver=int(taver)
3710 else : taver = 5
3711 tini=time.localtime(dataOut.utctime)
3712 if (tini.tm_min % taver) == 0 and (tini.tm_sec < 5 and self.fint==0):
3713
3714 self.index = 0
3715 jspc = self.buffer
3716 jcspc = self.buffer2
3717 jnoise = self.buffer3
3718 self.buffer = dataOut.data_spc
3716 3719 self.buffer2 = dataOut.data_cspc
3717 3720 self.buffer3 = dataOut.noise
3718 self.fint = 1
3721 self.fint = 1
3719 3722 if numpy.any(jspc) :
3720 jspc = numpy.reshape(jspc ,(int(len(jspc) / self.nChannels) , self.nChannels ,self.nProf,self.nHeights ))
3721 jcspc = numpy.reshape(jcspc ,(int(len(jcspc) /int(self.nChannels/2)),int(self.nChannels/2),self.nProf,self.nHeights ))
3722 jnoise = numpy.reshape(jnoise,(int(len(jnoise)/ self.nChannels) , self.nChannels))
3723 jspc= numpy.reshape(jspc,(int(len(jspc)/nChannels),nChannels,nProf,nHeights))
3724 jcspc= numpy.reshape(jcspc,(int(len(jcspc)/int(nChannels/2)),int(nChannels/2),nProf,nHeights))
3725 jnoise= numpy.reshape(jnoise,(int(len(jnoise)/nChannels),nChannels))
3723 3726 else:
3724 3727 dataOut.flagNoData = True
3725 3728 return dataOut
3726 3729 else :
3727 if (tini.tm_min % taver) == 0 :
3728 self.fint = 1
3729 else :
3730 self.fint = 0
3731
3730 if (tini.tm_min % taver) == 0 : self.fint = 1
3731 else : self.fint = 0
3732 3732 self.index += 1
3733 if numpy.any(self.buffer):
3733 if numpy.any(self.buffer):
3734 3734 self.buffer = numpy.concatenate((self.buffer,dataOut.data_spc), axis=0)
3735 3735 self.buffer2 = numpy.concatenate((self.buffer2,dataOut.data_cspc), axis=0)
3736 3736 self.buffer3 = numpy.concatenate((self.buffer3,dataOut.noise), axis=0)
3737 3737 else:
3738 3738 self.buffer = dataOut.data_spc
3739 3739 self.buffer2 = dataOut.data_cspc
3740 3740 self.buffer3 = dataOut.noise
3741 3741 dataOut.flagNoData = True
3742 3742 return dataOut
3743 if path != None:
3744 sys.path.append(path)
3745 self.library = importlib.import_module(file)
3746 if filec != None:
3747 self.weightf = importlib.import_module(filec)
3748 #self.weightf = importlib.import_module('weightfit')
3749
3750 #To be inserted as a parameter
3751 groupArray = numpy.array(groupList)
3752 #groupArray = numpy.array([[0,1],[2,3]])
3753 dataOut.groupList = groupArray
3754
3755 nGroups = groupArray.shape[0]
3756 nChannels = dataOut.nChannels
3757 nHeights = dataOut.heightList.size
3743 3758
3744 jnoise = jnoise/self.N # I think the division by N is still missing
3745 noise = numpy.nansum(jnoise,axis=0)#/len(jnoise)
3746 index = tini.tm_hour*12+tini.tm_min/taver
3747 dataOut.index = index
3748 jspc = jspc/self.N/self.N
3749 jcspc = jcspc/self.N/self.N
3750
3759 #Parameters Array
3760 dataOut.data_param = None
3761 dataOut.data_paramC = None
3762 dataOut.clean_num_aver = None
3763 dataOut.coh_num_aver = None
3764 dataOut.tmp_spectra_i = None
3765 dataOut.tmp_cspectra_i = None
3766 dataOut.tmp_spectra_c = None
3767 dataOut.tmp_cspectra_c = None
3768 dataOut.sat_spectra = None
3769 dataOut.sat_cspectra = None
3770 dataOut.index = None
3771
3772 #Set constants
3773 constants = self.library.setConstants(dataOut)
3774 dataOut.constants = constants
3775 M = dataOut.normFactor
3776 N = dataOut.nFFTPoints
3777
3778 ippSeconds = dataOut.ippSeconds
3779 K = dataOut.nIncohInt
3780 pairsArray = numpy.array(dataOut.pairsList)
3781
3782 snrth= 15
3783 spectra = dataOut.data_spc
3784 cspectra = dataOut.data_cspc
3785 nProf = dataOut.nProfiles
3786 heights = dataOut.heightList
3787 nHei = len(heights)
3788 channels = dataOut.channelList
3789 nChan = len(channels)
3790 nIncohInt = dataOut.nIncohInt
3791 crosspairs = dataOut.groupList
3792 noise = dataOut.noise
3793 jnoise = jnoise/N
3794 noise = numpy.nansum(jnoise,axis=0)#/len(jnoise)
3795 #print("NOISE-> ", 10*numpy.log10(noise))
3796 power = numpy.sum(spectra, axis=1)
3797 nPairs = len(crosspairs)
3798 absc = dataOut.abscissaList[:-1]
3799 #print('para escribir h5 ',dataOut.paramInterval)
3800 if not self.isConfig:
3801 self.isConfig = True
3802
3803 index = tini.tm_hour*12+tini.tm_min/taver
3804 dataOut.index= index
3805 jspc = jspc/N/N
3806 jcspc = jcspc/N/N
3751 3807 tmp_spectra,tmp_cspectra,sat_spectra,sat_cspectra = self.CleanRayleigh(dataOut,jspc,jcspc,2)
3752 jspectra = tmp_spectra * len(jspc[:,0,0,0])
3753 jcspectra = tmp_cspectra * len(jspc[:,0,0,0])
3754
3755 my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, self.snrth,coh_th, hei_th)
3756 clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(self.snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index)
3757
3758 dataOut.data_spc = incoh_spectra
3759 dataOut.data_cspc = incoh_cspectra
3760 clean_num_aver = incoh_aver * len(jspc[:,0,0,0])
3761 coh_num_aver = clean_coh_aver* len(jspc[:,0,0,0])
3762 dataOut.clean_num_aver = clean_num_aver
3763 dataOut.coh_num_aver = coh_num_aver
3764
3808 jspectra = tmp_spectra*len(jspc[:,0,0,0])
3809 jcspectra = tmp_cspectra*len(jspc[:,0,0,0])
3810 my_incoh_spectra ,my_incoh_cspectra,my_incoh_aver,my_coh_aver, incoh_spectra, coh_spectra, incoh_cspectra, coh_cspectra, incoh_aver, coh_aver = self.__DiffCoherent(jspectra, jcspectra, dataOut, noise, snrth,coh_th, hei_th)
3811 clean_coh_spectra, clean_coh_cspectra, clean_coh_aver = self.__CleanCoherent(snrth, coh_spectra, coh_cspectra, coh_aver, dataOut, noise,1,index)
3812 dataOut.data_spc = incoh_spectra
3813 dataOut.data_cspc = incoh_cspectra
3814 dataOut.sat_spectra = sat_spectra
3815 dataOut.sat_cspectra = sat_cspectra
3816 # dataOut.data_spc = tmp_spectra
3817 # dataOut.data_cspc = tmp_cspectra
3818
3819 clean_num_aver = incoh_aver*len(jspc[:,0,0,0])
3820 coh_num_aver = clean_coh_aver*len(jspc[:,0,0,0])
3821 # clean_num_aver = (numpy.zeros([nChan, nHei])+1)*len(jspc[:,0,0,0])
3822 # coh_num_aver = numpy.zeros([nChan, nHei])*0*len(jspc[:,0,0,0])
3823 dataOut.clean_num_aver = clean_num_aver
3824 dataOut.coh_num_aver = coh_num_aver
3825 dataOut.tmp_spectra_i = incoh_spectra
3826 dataOut.tmp_cspectra_i = incoh_cspectra
3827 dataOut.tmp_spectra_c = clean_coh_spectra
3828 dataOut.tmp_cspectra_c = clean_coh_cspectra
3829 #List of possible combinations
3830 listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2)
3831 indCross = numpy.zeros(len(list(listComb)), dtype = 'int')
3832
3833 if getSNR:
3834 listChannels = groupArray.reshape((groupArray.size))
3835 listChannels.sort()
3836 norm = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter #* jspc.shape[0]
3837 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise[listChannels], norm=norm)
3765 3838 else:
3839 if numpy.any(taver): taver=int(taver)
3840 else : taver = 5
3841 tini=time.localtime(dataOut.utctime)
3842 index = tini.tm_hour*12+tini.tm_min/taver
3766 3843 clean_num_aver = dataOut.clean_num_aver
3767 3844 coh_num_aver = dataOut.coh_num_aver
3768 3845 dataOut.data_spc = dataOut.tmp_spectra_i
3769 3846 dataOut.data_cspc = dataOut.tmp_cspectra_i
3770 3847 clean_coh_spectra = dataOut.tmp_spectra_c
3771 3848 clean_coh_cspectra = dataOut.tmp_cspectra_c
3772 3849 jspectra = dataOut.data_spc+clean_coh_spectra
3773 3850 nHeights = len(dataOut.heightList) # nhei
3774 3851 nProf = int(dataOut.nProfiles)
3775 3852 dataOut.nProfiles = nProf
3776 3853 dataOut.data_param = None
3777 3854 dataOut.data_paramC = None
3778 3855 dataOut.code = numpy.array([[-1.,-1.,1.],[1.,1.,-1.]])
3856 #dataOut.paramInterval = 2.0
3779 3857 #M=600
3780 3858 #N=200
3781 3859 dataOut.flagDecodeData=True
3782 3860 M = int(dataOut.normFactor)
3783 3861 N = int(dataOut.nFFTPoints)
3784 3862 dataOut.nFFTPoints = N
3785 3863 dataOut.nIncohInt= int(dataOut.nIncohInt)
3786 3864 dataOut.nProfiles = int(dataOut.nProfiles)
3787 3865 dataOut.nCohInt = int(dataOut.nCohInt)
3866 #print('sale',dataOut.nProfiles,dataOut.nHeights)
3788 3867 #dataOut.nFFTPoints=nprofs
3789 3868 #dataOut.normFactor = nprofs
3790 3869 dataOut.channelList = channelList
3870 nChan = len(channelList)
3791 3871 #dataOut.ippFactor=1
3792 3872 #ipp = ipp/150*1.e-3
3793 3873 vmax = (300000000/49920000.0/2) / (dataOut.ippSeconds)
3794 3874 #dataOut.ippSeconds=ipp
3795 3875 absc = vmax*( numpy.arange(nProf,dtype='float')-nProf/2.)/nProf
3876 #print('sale 2',dataOut.ippSeconds,M,N)
3877 # print('Empieza procesamiento offline')
3796 3878 if path != None:
3797 3879 sys.path.append(path)
3798 3880 self.library = importlib.import_module(file)
3799 3881 constants = self.library.setConstants(dataOut)
3800 3882 constants['M'] = M
3801 3883 dataOut.constants = constants
3802
3803 #List of possible combinations
3804 listComb = itertools.combinations(numpy.arange(self.groupArray.shape[1]),2)
3884 if filec != None:
3885 self.weightf = importlib.import_module(filec)
3886
3887 groupArray = numpy.array(groupList)
3888 dataOut.groupList = groupArray
3889 nGroups = groupArray.shape[0]
3890 #List of possible combinations
3891 listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2)
3805 3892 indCross = numpy.zeros(len(list(listComb)), dtype = 'int')
3806 3893 if dataOut.data_paramC is None:
3807 dataOut.data_paramC = numpy.zeros((self.nGroups*4, self.nHeights ,2))*numpy.nan
3808 for i in range(self.nGroups):
3809 coord = self.groupArray[i,:]
3894 dataOut.data_paramC = numpy.zeros((nGroups*4, nHeights,2))*numpy.nan
3895 dataOut.data_snr1_i = numpy.zeros((nGroups*2, nHeights))*numpy.nan
3896 # dataOut.smooth_i = numpy.zeros((nGroups*2, nHeights))*numpy.nan
3897 for i in range(nGroups):
3898 coord = groupArray[i,:]
3810 3899 #Input data array
3811 data = dataOut.data_spc[coord,:,:]/(self.M*self.N)
3812 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
3900 data = dataOut.data_spc[coord,:,:]/(M*N)
3901 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
3813 3902
3814 3903 #Cross Spectra data array for Covariance Matrixes
3815 3904 ind = 0
3816 3905 for pairs in listComb:
3817 pairsSel = numpy.array([coord[x],coord[y]])
3818 indCross[ind] = int(numpy.where(numpy.all(self.pairsArray == pairsSel, axis = 1))[0][0])
3819 ind += 1
3820 dataCross = dataOut.data_cspc[indCross,:,:]/(self.M*self.N)
3906 pairsSel = numpy.array([coord[x],coord[y]])
3907 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
3908 ind += 1
3909 dataCross = dataOut.data_cspc[indCross,:,:]/(M*N)
3821 3910 dataCross = dataCross**2
3822 nhei = self.nHeights
3823 poweri = numpy.sum(dataOut.data_spc[:,1:self.nProf-0,:],axis=1)/clean_num_aver[:,:]
3824
3911 nhei = nHeights
3912 poweri = numpy.sum(dataOut.data_spc[:,1:nProf-0,:],axis=1)/clean_num_aver[:,:]
3825 3913 if i == 0 : my_noises = numpy.zeros(4,dtype=float)
3826 n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(self.nProf-1)
3827 n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(self.nProf-1)
3914 n0i = numpy.nanmin(poweri[0+i*2,0:nhei-0])/(nProf-1)
3915 n1i = numpy.nanmin(poweri[1+i*2,0:nhei-0])/(nProf-1)
3828 3916 n0 = n0i
3829 3917 n1= n1i
3830 3918 my_noises[2*i+0] = n0
3831 3919 my_noises[2*i+1] = n1
3832 snrth = -15.0 # -4 -16 -25
3920 snrth = -13.0 # -4 -16 -25
3833 3921 snrth = 10**(snrth/10.0)
3834 jvelr = numpy.zeros(self.nHeights, dtype = 'float')
3922 jvelr = numpy.zeros(nHeights, dtype = 'float')
3923 #snr0 = numpy.zeros(nHeights, dtype = 'float')
3924 #snr1 = numpy.zeros(nHeights, dtype = 'float')
3835 3925 hvalid = [0]
3836 coh2 = abs(dataOut.data_cspc[i,1:self.nProf,:])**2/(dataOut.data_spc[0+i*2,1:self.nProf-0,:]*dataOut.data_spc[1+i*2,1:self.nProf-0,:])
3837
3838 for h in range(self.nHeights):
3926
3927 coh2 = abs(dataOut.data_cspc[i,1:nProf,:])**2/(dataOut.data_spc[0+i*2,1:nProf-0,:]*dataOut.data_spc[1+i*2,1:nProf-0,:])
3928
3929 for h in range(nHeights):
3839 3930 smooth = clean_num_aver[i+1,h]
3840 signalpn0 = (dataOut.data_spc[i*2,1:(self.nProf-0),h])/smooth
3841 signalpn1 = (dataOut.data_spc[i*2+1,1:(self.nProf-0),h])/smooth
3931 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
3932 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
3842 3933 signal0 = signalpn0-n0
3843 3934 signal1 = signalpn1-n1
3844 snr0 = numpy.sum(signal0/n0)/(self.nProf-1)
3845 snr1 = numpy.sum(signal1/n1)/(self.nProf-1)
3935 snr0 = numpy.sum(signal0/n0)/(nProf-1)
3936 snr1 = numpy.sum(signal1/n1)/(nProf-1)
3937 #jmax0 = MAX(signal0,maxp0)
3938 #jmax1 = MAX(signal1,maxp1)
3846 3939 gamma = coh2[:,h]
3940
3847 3941 indxs = (numpy.isfinite(list(gamma))==True).nonzero()
3942
3848 3943 if len(indxs) >0:
3849 3944 if numpy.nanmean(gamma) > 0.07:
3850 3945 maxp0 = numpy.argmax(signal0*gamma)
3851 3946 maxp1 = numpy.argmax(signal1*gamma)
3852 3947 #print('usa gamma',numpy.nanmean(gamma))
3853 3948 else:
3854 3949 maxp0 = numpy.argmax(signal0)
3855 3950 maxp1 = numpy.argmax(signal1)
3856 jvelr[h] = (self.absc[maxp0]+self.absc[maxp1])/2.
3857 else: jvelr[h] = self.absc[0]
3951 jvelr[h] = (absc[maxp0]+absc[maxp1])/2.
3952 else: jvelr[h] = absc[0]
3858 3953 if snr0 > 0.1 and snr1 > 0.1: hvalid = numpy.concatenate((hvalid,h), axis=None)
3859 3954 #print(maxp0,absc[maxp0],snr0,jvelr[h])
3860 3955
3861 3956 if len(hvalid)> 1: fd0 = numpy.median(jvelr[hvalid[1:]])*-1
3862 3957 else: fd0 = numpy.nan
3863 for h in range(self.nHeights):
3958 #print(fd0)
3959 for h in range(nHeights):
3864 3960 d = data[:,h]
3865 3961 smooth = clean_num_aver[i+1,h] #dataOut.data_spc[:,1:nProf-0,:]
3866 signalpn0 = (dataOut.data_spc[i*2,1:(self.nProf-0),h])/smooth
3867 signalpn1 = (dataOut.data_spc[i*2+1,1:(self.nProf-0),h])/smooth
3962 signalpn0 = (dataOut.data_spc[i*2,1:(nProf-0),h])/smooth
3963 signalpn1 = (dataOut.data_spc[i*2+1,1:(nProf-0),h])/smooth
3868 3964 signal0 = signalpn0-n0
3869 3965 signal1 = signalpn1-n1
3870 snr0 = numpy.sum(signal0/n0)/(self.nProf-1)
3871 snr1 = numpy.sum(signal1/n1)/(self.nProf-1)
3966 snr0 = numpy.sum(signal0/n0)/(nProf-1)
3967 snr1 = numpy.sum(signal1/n1)/(nProf-1)
3968
3872 3969 if snr0 > snrth and snr1 > snrth and clean_num_aver[i+1,h] > 0 :
3873 #Covariance Matrix
3970 #Covariance Matrix
3874 3971 D = numpy.diag(d**2)
3875 3972 ind = 0
3876 3973 for pairs in listComb:
3877 3974 #Coordinates in Covariance Matrix
3878 3975 x = pairs[0]
3879 3976 y = pairs[1]
3880 3977 #Channel Index
3881 3978 S12 = dataCross[ind,:,h]
3882 3979 D12 = numpy.diag(S12)
3883 3980 #Completing Covariance Matrix with Cross Spectras
3884 3981 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
3885 3982 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
3886 3983 ind += 1
3887 3984 diagD = numpy.zeros(256)
3888
3985
3986 #Dinv=numpy.linalg.inv(D)
3987 #L=numpy.linalg.cholesky(Dinv)
3889 3988 try:
3890 3989 Dinv=numpy.linalg.inv(D)
3891 3990 L=numpy.linalg.cholesky(Dinv)
3892 3991 except:
3893 3992 Dinv = D*numpy.nan
3894 3993 L= D*numpy.nan
3895 3994 LT=L.T
3896 3995
3897 3996 dp = numpy.dot(LT,d)
3898 #Initial values
3997
3998 #Initial values
3899 3999 data_spc = dataOut.data_spc[coord,:,h]
3900 4000 w = data_spc/data_spc
3901 4001 if filec != None:
3902 4002 w = self.weightf.weightfit(w,tini.tm_year,tini.tm_yday,index,h,i)
3903 if (h>6)and(error1[3]<25):
4003
4004 if (h>6) and (error1[3]<25):
3904 4005 p0 = dataOut.data_param[i,:,h-1].copy()
4006 #print('usa anterior')
3905 4007 else:
3906 p0 = numpy.array(self.library.initialValuesFunction(data_spc*w, self.constants))# without the i: (data_spc, constants, i)
4008 p0 = numpy.array(self.library.initialValuesFunction(data_spc*w, constants))# without the i: (data_spc, constants, i)
3907 4009 p0[3] = fd0
3908
3909 4010 if filec != None:
3910 4011 p0 = self.weightf.Vrfit(p0,tini.tm_year,tini.tm_yday,index,h,i)
3911
4012
4013 #if index == 175 and i==1 and h>=27 and h<=35: p0[3]=30
4014 #if h >= 6 and i==1 and h<= 10: print(p0)
3912 4015 try:
3913 #Least Squares
3914 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,self.constants),full_output=True)
3915 #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
3916 #Chi square error
3917 error0 = numpy.sum(infodict['fvec']**2)/(2*self.N)
3918 #Error with Jacobian
3919 error1 = self.library.errorFunction(minp,self.constants,LT)
4016 #Least Squares
4017 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
4018 #minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
4019 #Chi square error
4020 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
4021 #Error with Jacobian
4022 error1 = self.library.errorFunction(minp,constants,LT)
4023 #if h >= 0 and h<= 10 and i ==0: print(p0,minp,error1)
4024 #if i>=0 and h>=0: print(index,h,minp[3])
4025 # print self.__residFunction(p0,dp,LT, constants)
4026 # print infodict['fvec']
4027 # print self.__residFunction(minp,dp,LT,constants)
3920 4028
3921 4029 except:
3922 4030 minp = p0*numpy.nan
3923 4031 error0 = numpy.nan
3924 4032 error1 = p0*numpy.nan
4033 # s_sq = (self.__residFunction(minp,dp,LT,constants)).sum()/(len(dp)-len(p0))
4034 # covp = covp*s_sq
4035 # error = []
4036 # for ip in range(len(minp)):
4037 # try:
4038 # error.append(numpy.absolute(covp[ip][ip])**0.5)
4039 # except:
4040 # error.append( 0.00 )
4041 #if i==1 and h==11 and index == 139: print(p0, minp,data_spc)
3925 4042 else :
3926 4043 data_spc = dataOut.data_spc[coord,:,h]
3927 p0 = numpy.array(self.library.initialValuesFunction(data_spc, self.constants))
4044 p0 = numpy.array(self.library.initialValuesFunction(data_spc, constants))
3928 4045 minp = p0*numpy.nan
3929 4046 error0 = numpy.nan
3930 error1 = p0*numpy.nan
4047 error1 = p0*numpy.nan
4048
3931 4049 if dataOut.data_param is None:
3932 dataOut.data_param = numpy.zeros((self.nGroups, p0.size, self.nHeights ))*numpy.nan
3933 dataOut.data_error = numpy.zeros((self.nGroups, p0.size + 1, self.nHeights ))*numpy.nan
4050 dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
4051 dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
4052
3934 4053 dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
3935 4054 dataOut.data_param[i,:,h] = minp
3936
3937 for ht in range(self.nHeights-1) :
4055 dataOut.data_snr1_i[i*2,h] = numpy.sum(signalpn0/(nProf-1))/n0
4056 dataOut.data_snr1_i[i*2+1,h] = numpy.sum(signalpn1/(nProf-1))/n1
4057 #dataOut.smooth_i[i*2,h] = clean_num_aver[i+1,h]
4058 #print(fd0,dataOut.data_param[i,3,h])
4059 #print(fd0,dataOut.data_param[i,3,:])
4060 for ht in range(nHeights-1) :
3938 4061 smooth = coh_num_aver[i+1,ht] #datc[0,ht,0,beam]
3939 4062 dataOut.data_paramC[4*i,ht,1] = smooth
3940 signalpn0 = (clean_coh_spectra[i*2 ,1:(self.nProf-0),ht])/smooth #coh_spectra
3941 signalpn1 = (clean_coh_spectra[i*2+1,1:(self.nProf-0),ht])/smooth
4063 signalpn0 = (clean_coh_spectra[i*2 ,1:(nProf-0),ht])/smooth #coh_spectra
4064 signalpn1 = (clean_coh_spectra[i*2+1,1:(nProf-0),ht])/smooth
4065
3942 4066 val0 = (signalpn0 > 0).nonzero()
3943 4067 val0 = val0[0]
3944 if len(val0) == 0 : val0_npoints = self.nProf
4068
4069 if len(val0) == 0 : val0_npoints = nProf
3945 4070 else : val0_npoints = len(val0)
3946
4071
3947 4072 val1 = (signalpn1 > 0).nonzero()
3948 4073 val1 = val1[0]
3949 if len(val1) == 0 : val1_npoints = self.nProf
4074 if len(val1) == 0 : val1_npoints = nProf
3950 4075 else : val1_npoints = len(val1)
3951 4076
3952 4077 dataOut.data_paramC[0+4*i,ht,0] = numpy.sum((signalpn0/val0_npoints))/n0
3953 4078 dataOut.data_paramC[1+4*i,ht,0] = numpy.sum((signalpn1/val1_npoints))/n1
3954 4079
3955 4080 signal0 = (signalpn0-n0)
3956 4081 vali = (signal0 < 0).nonzero()
3957 4082 vali = vali[0]
3958 4083 if len(vali) > 0 : signal0[vali] = 0
3959 4084 signal1 = (signalpn1-n1)
3960 4085 vali = (signal1 < 0).nonzero()
3961 4086 vali = vali[0]
3962 4087 if len(vali) > 0 : signal1[vali] = 0
3963 snr0 = numpy.sum(signal0/n0)/(self.nProf-1)
3964 snr1 = numpy.sum(signal1/n1)/(self.nProf-1)
3965 doppler = self.absc[1:]
4088 snr0 = numpy.sum(signal0/n0)/(nProf-1)
4089 snr1 = numpy.sum(signal1/n1)/(nProf-1)
4090 doppler = absc[1:]
3966 4091 if snr0 >= snrth and snr1 >= snrth and smooth :
3967 4092 signalpn0_n0 = signalpn0
3968 4093 signalpn0_n0[val0] = signalpn0[val0] - n0
3969 mom0 = self.moments(doppler,signalpn0-n0,self.nProf)
4094 mom0 = self.moments(doppler,signalpn0-n0,nProf)
4095
3970 4096 signalpn1_n1 = signalpn1
3971 4097 signalpn1_n1[val1] = signalpn1[val1] - n1
3972 mom1 = self.moments(doppler,signalpn1_n1,self.nProf)
4098 mom1 = self.moments(doppler,signalpn1_n1,nProf)
3973 4099 dataOut.data_paramC[2+4*i,ht,0] = (mom0[0]+mom1[0])/2.
3974 dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2.
4100 dataOut.data_paramC[3+4*i,ht,0] = (mom0[1]+mom1[1])/2.
4101 #dataOut.data_snr1_c[i*2,ht] = numpy.sum(signalpn0/(nProf-1))/n0
4102 #dataOut.data_snr1_c[i*2+1,ht] = numpy.sum(signalpn1/(nProf-1))/n1
3975 4103 dataOut.data_spc = jspectra
3976 dataOut.spc_noise = my_noises*self.nProf*self.M
3977 if numpy.any(proc): dataOut.spc_noise = my_noises*self.nProf*self.M
3978 if getSNR:
3979 listChannels = self.groupArray.reshape((self.groupArray.size))
4104 dataOut.spc_noise = my_noises*nProf*M
4105
4106 if numpy.any(proc): dataOut.spc_noise = my_noises*nProf*M
4107 if 0:
4108 listChannels = groupArray.reshape((groupArray.size))
3980 4109 listChannels.sort()
3981 # TEST
3982 noise_C = numpy.zeros(self.nChannels)
3983 noise_C = dataOut.getNoise()
3984 #print("noise_C",noise_C)
3985 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:],noise_C/(600.0*1.15))# PRUEBA *nProf*M
3986 #dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], noise_C[listChannels])# PRUEBA *nProf*M
3987 dataOut.flagNoData = False
4110 norm = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * dataOut.windowOfFilter
4111 dataOut.data_snr = self.__getSNR(dataOut.data_spc[listChannels,:,:], my_noises[listChannels], norm=norm)
4112 #print(dataOut.data_snr1_i)
4113 # Adding coherent echoes from possible satellites.
4114 #sat_spectra = numpy.zeros((nChan,nProf,nHei), dtype=float)
4115 #sat_spectra = sat_spectra[*,*,anal_header.channels]
4116 isat_spectra = numpy.zeros([2,int(nChan/2),nProf,nhei], dtype=float)
4117
4118 sat_fits = numpy.zeros([4,nhei], dtype=float)
4119 noises = my_noises/nProf
4120 #nchan2 = int(nChan/2)
4121 for beam in range(int(nChan/2)-0) :
4122 n0 = noises[2*beam]
4123 n1 = noises[2*beam+1]
4124 isat_spectra[0:2,beam,:,:] = dataOut.sat_spectra[2*beam +0:2*beam+2 ,:,:]
4125
4126 for ht in range(nhei-1) :
4127 signalpn0 = isat_spectra[0,beam,:,ht]
4128 signalpn0 = numpy.reshape(signalpn0,nProf)
4129 signalpn1 = isat_spectra[1,beam,:,ht]
4130 signalpn1 = numpy.reshape(signalpn1,nProf)
4131
4132 cval0 = len((signalpn0 > 0).nonzero()[0])
4133 if cval0 == 0 : val0_npoints = nProf
4134 else: val0_npoints = cval0
4135
4136 cval1 = len((signalpn1 > 0).nonzero()[0])
4137 if cval1 == 0 : val1_npoints = nProf
4138 else: val1_npoints = cval1
4139
4140 sat_fits[0+2*beam,ht] = numpy.sum(signalpn0/(val0_npoints*nProf))/n0
4141 sat_fits[1+2*beam,ht] = numpy.sum(signalpn1/(val1_npoints*nProf))/n1
4142
4143 dataOut.sat_fits = sat_fits
3988 4144 return dataOut
3989 4145
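The fitting loop in run() whitens the data with the Cholesky factor of the inverse covariance built from the spectra and cross-spectra, then calls optimize.leastsq on the whitened residual. A minimal sketch of that step with a purely diagonal covariance; model and p0 are illustrative stand-ins for the external library's model function and initial values:

import numpy
from scipy import optimize

def fit_whitened(d, variances, model, p0):
    D = numpy.diag(variances)                        # diagonal data covariance
    L = numpy.linalg.cholesky(numpy.linalg.inv(D))
    LT = L.T
    dp = numpy.dot(LT, d)                            # whitened data

    def resid(p):
        return dp - numpy.dot(LT, model(p))          # whitened residual

    minp, covp, infodict, mesg, ier = optimize.leastsq(resid, p0, full_output=True)
    chi2 = numpy.sum(infodict['fvec'] ** 2) / (2 * len(d))
    return minp, chi2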
3990 4146 def __residFunction(self, p, dp, LT, constants):
3991 4147
3992 4148 fm = self.library.modelFunction(p, constants)
3993 4149 fmp=numpy.dot(LT,fm)
3994 4150 return dp-fmp
3995 4151
3996 def __getSNR(self, z, noise):
4152 def __getSNR(self, z, noise, norm=1):
3997 4153
4154 # normFactor = dataOut.nProfiles * dataOut.nIncohInt * dataOut.nCohInt * pwcode * dataOut.windowOfFilter
3998 4155 avg = numpy.average(z, axis=1)
4156 #noise /= norm
3999 4157 SNR = (avg.T-noise)/noise
4158 # SNR = avg.T/noise
4159 # print("Noise: ", 10*numpy.log10(noise))
4160 # print("SNR: ", SNR)
4161 # print("SNR: ", 10*numpy.log10(SNR))
4000 4162 SNR = SNR.T
4001 4163 return SNR
4002 4164
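__getSNR averages each channel's spectrum over the Doppler profiles and forms (signal - noise)/noise per height. A hedged, self-contained sketch assuming spc has shape (channels, profiles, heights) and one noise value per channel:

import numpy

def snr_from_spectra(spc, noise):
    avg = numpy.average(spc, axis=1)                 # average over Doppler profiles
    return ((avg.T - noise) / noise).T               # (channels, heights), linear SNR

spc = numpy.random.rand(2, 64, 100) + 1.0            # made-up spectra
noise = numpy.array([1.0, 1.2])                      # one noise level per channel
snr_db = 10 * numpy.log10(numpy.clip(snr_from_spectra(spc, noise), 1e-12, None))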
4003 4165 def __chisq(self, p, chindex, hindex):
4004 4166 #similar to Resid but calculates CHI**2
4005 4167 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
4006 4168 dp=numpy.dot(LT,d)
4007 4169 fmp=numpy.dot(LT,fm)
4008 4170 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
4009 return chisq
4010
4171 return chisq
4011 4172 class WindProfiler(Operation):
4012 4173
4013 4174 __isConfig = False
4014 4175
4015 4176 __initime = None
4016 4177 __lastdatatime = None
4017 4178 __integrationtime = None
4018 4179
4019 4180 __buffer = None
4020 4181
4021 4182 __dataReady = False
4022 4183
4023 4184 __firstdata = None
4024 4185
4025 4186 n = None
4026 4187
4027 4188 def __init__(self):
4028 4189 Operation.__init__(self)
4029 4190
4030 4191 def __calculateCosDir(self, elev, azim):
4031 4192 zen = (90 - elev)*numpy.pi/180
4032 4193 azim = azim*numpy.pi/180
4033 4194 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
4034 4195 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
4035 4196
4036 4197 signX = numpy.sign(numpy.cos(azim))
4037 4198 signY = numpy.sign(numpy.sin(azim))
4038 4199
4039 4200 cosDirX = numpy.copysign(cosDirX, signX)
4040 4201 cosDirY = numpy.copysign(cosDirY, signY)
4041 4202 return cosDirX, cosDirY
4042 4203
4043 4204 def __calculateAngles(self, theta_x, theta_y, azimuth):
4044 4205
4045 4206 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
4046 4207 zenith_arr = numpy.arccos(dir_cosw)
4047 4208 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
4048 4209
4049 4210 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
4050 4211 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
4051 4212
4052 4213 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
4053 4214
4054 4215 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
4055 4216
4056 4217 if horOnly:
4057 4218 A = numpy.c_[dir_cosu,dir_cosv]
4058 4219 else:
4059 4220 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
4060 4221 A = numpy.asmatrix(A)
4061 4222 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
4062 4223
4063 4224 return A1
4064 4225
4065 4226 def __correctValues(self, heiRang, phi, velRadial, SNR):
4066 4227 listPhi = phi.tolist()
4067 4228 maxid = listPhi.index(max(listPhi))
4068 4229 minid = listPhi.index(min(listPhi))
4069 4230
4070 4231 rango = list(range(len(phi)))
4071 4232
4072 4233 heiRang1 = heiRang*math.cos(phi[maxid])
4073 4234 heiRangAux = heiRang*math.cos(phi[minid])
4074 4235 indOut = (heiRang1 < heiRangAux[0]).nonzero()
4075 4236 heiRang1 = numpy.delete(heiRang1,indOut)
4076 4237
4077 4238 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
4078 4239 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
4079 4240
4080 4241 for i in rango:
4081 4242 x = heiRang*math.cos(phi[i])
4082 4243 y1 = velRadial[i,:]
4083 4244 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
4084 4245
4085 4246 x1 = heiRang1
4086 4247 y11 = f1(x1)
4087 4248
4088 4249 y2 = SNR[i,:]
4089 4250 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
4090 4251 y21 = f2(x1)
4091 4252
4092 4253 velRadial1[i,:] = y11
4093 4254 SNR1[i,:] = y21
4094 4255
4095 4256 return heiRang1, velRadial1, SNR1
4096 4257
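__correctValues projects each oblique beam's ranges to vertical heights (range times the cosine of the zenith angle) and cubic-interpolates the radial velocity (and SNR) onto a common height grid. A sketch of that projection and interpolation with made-up beam geometry:

import numpy
from scipy import interpolate

heiRang = numpy.arange(200.0, 500.0, 15.0)           # slant ranges (km), illustrative
zenith = numpy.radians([5.0, 15.0])                  # two invented beams
vel = numpy.random.randn(2, heiRang.size)            # radial velocities per beam

heiRang1 = heiRang * numpy.cos(zenith.max())         # heights of the most oblique beam
heiRangAux = heiRang * numpy.cos(zenith.min())
heiRang1 = heiRang1[heiRang1 >= heiRangAux[0]]       # keep heights covered by every beam

vel1 = numpy.zeros((2, heiRang1.size))
for i, z in enumerate(zenith):
    x = heiRang * numpy.cos(z)                       # true heights for this beam
    vel1[i] = interpolate.interp1d(x, vel[i], kind='cubic')(heiRang1)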
4097 4258 def __calculateVelUVW(self, A, velRadial):
4098 4259
4099 4260 #Matrix operation
4100 4261 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
4101 4262 velUVW[:,:] = numpy.dot(A,velRadial)
4102 4263
4103 4264
4104 4265 return velUVW
4105 4266
4106 4267 def techniqueDBS(self, kwargs):
4107 4268 """
4108 4269 Function that implements Doppler Beam Swinging (DBS) technique.
4109 4270
4110 4271 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
4111 4272 Direction correction (if necessary), Ranges and SNR
4112 4273
4113 4274 Output: Winds estimation (Zonal, Meridional and Vertical)
4114 4275
4115 4276 Parameters affected: Winds, height range, SNR
4116 4277 """
4117 4278 velRadial0 = kwargs['velRadial']
4118 4279 heiRang = kwargs['heightList']
4119 4280 SNR0 = kwargs['SNR']
4120 4281
4121 4282 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
4122 4283 theta_x = numpy.array(kwargs['dirCosx'])
4123 4284 theta_y = numpy.array(kwargs['dirCosy'])
4124 4285 else:
4125 4286 elev = numpy.array(kwargs['elevation'])
4126 4287 azim = numpy.array(kwargs['azimuth'])
4127 4288 theta_x, theta_y = self.__calculateCosDir(elev, azim)
4128 4289 azimuth = kwargs['correctAzimuth']
4129 4290 if 'horizontalOnly' in kwargs:
4130 4291 horizontalOnly = kwargs['horizontalOnly']
4131 4292 else: horizontalOnly = False
4132 4293 if 'correctFactor' in kwargs:
4133 4294 correctFactor = kwargs['correctFactor']
4134 4295 else: correctFactor = 1
4135 4296 if 'channelList' in kwargs:
4136 4297 channelList = kwargs['channelList']
4137 4298 if len(channelList) == 2:
4138 4299 horizontalOnly = True
4139 4300 arrayChannel = numpy.array(channelList)
4140 4301 param = param[arrayChannel,:,:]
4141 4302 theta_x = theta_x[arrayChannel]
4142 4303 theta_y = theta_y[arrayChannel]
4143 4304
4144 4305 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4145 4306 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
4146 4307 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
4147 4308
4148 4309 #Computation of the velocity components with DBS
4149 4310 winds = self.__calculateVelUVW(A,velRadial1)
4150 4311
4151 4312 return winds, heiRang1, SNR1
4152 4313
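techniqueDBS inverts the radial velocities through the direction-cosine matrix A using A1 = (A^T A)^-1 A^T. A self-contained sketch with an invented three-beam geometry, showing that the wind vector is recovered from noiseless radial velocities:

import numpy

dir_cosu = numpy.array([0.17, -0.17, 0.0])           # three invented beams
dir_cosv = numpy.array([0.0, 0.0, 0.17])
dir_cosw = numpy.sqrt(1 - dir_cosu**2 - dir_cosv**2)

A = numpy.c_[dir_cosu, dir_cosv, dir_cosw]           # (nBeams, 3) direction cosines
A1 = numpy.linalg.inv(A.T @ A) @ A.T                 # pseudo-inverse, as in __calculateMatA

true_wind = numpy.array([10.0, -5.0, 0.2])           # u, v, w in m/s
vrad = A @ true_wind                                 # simulated radial velocities
print(A1 @ vrad)                                     # recovers ~ [10, -5, 0.2]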
4153 4314 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
4154 4315
4155 4316 nPairs = len(pairs_ccf)
4156 4317 posx = numpy.asarray(posx)
4157 4318 posy = numpy.asarray(posy)
4158 4319
4159 4320 #Inverse rotation to align with the azimuth
4160 4321 if azimuth!= None:
4161 4322 azimuth = azimuth*math.pi/180
4162 4323 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
4163 4324 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
4164 4325 else:
4165 4326 posx1 = posx
4166 4327 posy1 = posy
4167 4328
4168 4329 #Distance computation
4169 4330 distx = numpy.zeros(nPairs)
4170 4331 disty = numpy.zeros(nPairs)
4171 4332 dist = numpy.zeros(nPairs)
4172 4333 ang = numpy.zeros(nPairs)
4173 4334
4174 4335 for i in range(nPairs):
4175 4336 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
4176 4337 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
4177 4338 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
4178 4339 ang[i] = numpy.arctan2(disty[i],distx[i])
4179 4340
4180 4341 return distx, disty, dist, ang
4181 4342 #Matrix computation
4182 4343
4183 4344
4184 4345 def __calculateVelVer(self, phase, lagTRange, _lambda):
4185 4346
4186 4347 Ts = lagTRange[1] - lagTRange[0]
4187 4348 velW = -_lambda*phase/(4*math.pi*Ts)
4188 4349
4189 4350 return velW
4190 4351
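__calculateVelVer converts the cross-correlation phase at one lag step Ts into a vertical velocity, w = -lambda * phase / (4 * pi * Ts). A quick numeric check with illustrative values:

import math

_lambda = 3e8 / 50e6          # ~6 m wavelength at 50 MHz
Ts = 0.01                     # lag step of the cross-correlation (s)
phase = 0.05                  # measured phase at that lag (rad)
velW = -_lambda * phase / (4 * math.pi * Ts)
print(velW)                   # ~ -2.4 m/s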
4191 4352 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
4192 4353 nPairs = tau1.shape[0]
4193 4354 nHeights = tau1.shape[1]
4194 4355 vel = numpy.zeros((nPairs,3,nHeights))
4195 4356 dist1 = numpy.reshape(dist, (dist.size,1))
4196 4357
4197 4358 angCos = numpy.cos(ang)
4198 4359 angSin = numpy.sin(ang)
4199 4360
4200 4361 vel0 = dist1*tau1/(2*tau2**2)
4201 4362 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
4202 4363 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
4203 4364
4204 4365 ind = numpy.where(numpy.isinf(vel))
4205 4366 vel[ind] = numpy.nan
4206 4367
4207 4368 return vel
4208 4369
4209 4370 def techniqueSA(self, kwargs):
4210 4371
4211 4372 """
4212 4373 Function that implements Spaced Antenna (SA) technique.
4213 4374
4214 4375 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
4215 4376 Direction correction (if necessary), Ranges and SNR
4216 4377
4217 4378 Output: Winds estimation (Zonal, Meridional and Vertical)
4218 4379
4219 4380 Parameters affected: Winds
4220 4381 """
4221 4382 position_x = kwargs['positionX']
4222 4383 position_y = kwargs['positionY']
4223 4384 azimuth = kwargs['azimuth']
4224 4385
4225 4386 if 'correctFactor' in kwargs:
4226 4387 correctFactor = kwargs['correctFactor']
4227 4388 else:
4228 4389 correctFactor = 1
4229 4390
4230 4391 groupList = kwargs['groupList']
4231 4392 pairs_ccf = groupList[1]
4232 4393 tau = kwargs['tau']
4233 4394 _lambda = kwargs['_lambda']
4234 4395
4235 4396 #Cross Correlation pairs obtained
4236 4397
4237 4398 indtau = tau.shape[0]/2
4238 4399 tau1 = tau[:indtau,:]
4239 4400 tau2 = tau[indtau:-1,:]
4240 4401 phase1 = tau[-1,:]
4241 4402
4242 4403 #---------------------------------------------------------------------
4243 4404 #Direct method
4244 4405 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
4245 4406 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
4246 4407 winds = stats.nanmean(winds, axis=0)
4247 4408 #---------------------------------------------------------------------
4248 4409 #General method
4249 4410
4250 4411 #---------------------------------------------------------------------
4251 4412 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
4252 4413 winds = correctFactor*winds
4253 4414 return winds
4254 4415
4255 4416 def __checkTime(self, currentTime, paramInterval, outputInterval):
4256 4417
4257 4418 dataTime = currentTime + paramInterval
4258 4419 deltaTime = dataTime - self.__initime
4259 4420
4260 4421 if deltaTime >= outputInterval or deltaTime < 0:
4261 4422 self.__dataReady = True
4262 4423 return
4263 4424
4264 4425 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
4265 4426 '''
4266 4427 Function that implements winds estimation technique with detected meteors.
4267 4428
4268 4429 Input: Detected meteors, Minimum meteor quantity to wind estimation
4269 4430
4270 4431 Output: Winds estimation (Zonal and Meridional)
4271 4432
4272 4433 Parameters affected: Winds
4273 4434 '''
4274 4435 #Settings
4275 4436 nInt = (heightMax - heightMin)/2
4276 4437 nInt = int(nInt)
4277 4438 winds = numpy.zeros((2,nInt))*numpy.nan
4278 4439
4279 4440 #Filter errors
4280 4441 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
4281 4442 finalMeteor = arrayMeteor[error,:]
4282 4443
4283 4444 #Meteor Histogram
4284 4445 finalHeights = finalMeteor[:,2]
4285 4446 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
4286 4447 nMeteorsPerI = hist[0]
4287 4448 heightPerI = hist[1]
4288 4449
4289 4450 #Sort of meteors
4290 4451 indSort = finalHeights.argsort()
4291 4452 finalMeteor2 = finalMeteor[indSort,:]
4292 4453
4293 4454 # Calculating winds
4294 4455 ind1 = 0
4295 4456 ind2 = 0
4296 4457
4297 4458 for i in range(nInt):
4298 4459 nMet = nMeteorsPerI[i]
4299 4460 ind1 = ind2
4300 4461 ind2 = ind1 + nMet
4301 4462
4302 4463 meteorAux = finalMeteor2[ind1:ind2,:]
4303 4464
4304 4465 if meteorAux.shape[0] >= meteorThresh:
4305 4466 vel = meteorAux[:, 6]
4306 4467 zen = meteorAux[:, 4]*numpy.pi/180
4307 4468 azim = meteorAux[:, 3]*numpy.pi/180
4308 4469
4309 4470 n = numpy.cos(zen)
4310 4471 l = numpy.sin(zen)*numpy.sin(azim)
4311 4472 m = numpy.sin(zen)*numpy.cos(azim)
4312 4473
4313 4474 A = numpy.vstack((l, m)).transpose()
4314 4475 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
4315 4476 windsAux = numpy.dot(A1, vel)
4316 4477
4317 4478 winds[0,i] = windsAux[0]
4318 4479 winds[1,i] = windsAux[1]
4319 4480
4320 4481 return winds, heightPerI[:-1]
4321 4482
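techniqueMeteors models each meteor's radial velocity as the projection of the horizontal wind onto its direction cosines (v_r = l*u + m*v) and solves the overdetermined system by least squares per height bin. A sketch with invented meteor geometry:

import numpy

zen = numpy.radians([30.0, 40.0, 35.0, 45.0])        # invented meteor zenith angles
azim = numpy.radians([10.0, 100.0, 200.0, 300.0])    # invented meteor azimuths
l = numpy.sin(zen) * numpy.sin(azim)                 # direction cosines
m = numpy.sin(zen) * numpy.cos(azim)

A = numpy.vstack((l, m)).T                           # (nMeteors, 2)
true_uv = numpy.array([15.0, -8.0])                  # zonal, meridional wind (m/s)
vel = A @ true_uv                                    # noiseless radial velocities

A1 = numpy.linalg.inv(A.T @ A) @ A.T
print(A1 @ vel)                                      # ~ [15, -8]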
4322 4483 def techniqueNSM_SA(self, **kwargs):
4323 4484 metArray = kwargs['metArray']
4324 4485 heightList = kwargs['heightList']
4325 4486 timeList = kwargs['timeList']
4326 4487
4327 4488 rx_location = kwargs['rx_location']
4328 4489 groupList = kwargs['groupList']
4329 4490 azimuth = kwargs['azimuth']
4330 4491 dfactor = kwargs['dfactor']
4331 4492 k = kwargs['k']
4332 4493
4333 4494 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
4334 4495 d = dist*dfactor
4335 4496 #Phase calculation
4336 4497 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
4337 4498
4338 4499 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
4339 4500
4340 4501 velEst = numpy.zeros((heightList.size,2))*numpy.nan
4341 4502 azimuth1 = azimuth1*numpy.pi/180
4342 4503
4343 4504 for i in range(heightList.size):
4344 4505 h = heightList[i]
4345 4506 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
4346 4507 metHeight = metArray1[indH,:]
4347 4508 if metHeight.shape[0] >= 2:
4348 4509 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
4349 4510 iazim = metHeight[:,1].astype(int)
4350 4511 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
4351 4512 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
4352 4513 A = numpy.asmatrix(A)
4353 4514 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
4354 4515 velHor = numpy.dot(A1,velAux)
4355 4516
4356 4517 velEst[i,:] = numpy.squeeze(velHor)
4357 4518 return velEst
4358 4519
4359 4520 def __getPhaseSlope(self, metArray, heightList, timeList):
4360 4521 meteorList = []
4361 4522 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
4362 4523 #Putting back together the meteor matrix
4363 4524 utctime = metArray[:,0]
4364 4525 uniqueTime = numpy.unique(utctime)
4365 4526
4366 4527 phaseDerThresh = 0.5
4367 4528 ippSeconds = timeList[1] - timeList[0]
4368 4529 sec = numpy.where(timeList>1)[0][0]
4369 4530 nPairs = metArray.shape[1] - 6
4370 4531 nHeights = len(heightList)
4371 4532
4372 4533 for t in uniqueTime:
4373 4534 metArray1 = metArray[utctime==t,:]
4374 4535 tmet = metArray1[:,1].astype(int)
4375 4536 hmet = metArray1[:,2].astype(int)
4376 4537
4377 4538 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
4378 4539 metPhase[:,:] = numpy.nan
4379 4540 metPhase[:,hmet,tmet] = metArray1[:,6:].T
4380 4541
4381 4542 #Delete short trails
4382 4543 metBool = ~numpy.isnan(metPhase[0,:,:])
4383 4544 heightVect = numpy.sum(metBool, axis = 1)
4384 4545 metBool[heightVect<sec,:] = False
4385 4546 metPhase[:,heightVect<sec,:] = numpy.nan
4386 4547
4387 4548 #Derivative
4388 4549 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
4389 4550 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
4390 4551 metPhase[phDerAux] = numpy.nan
4391 4552
4392 4553 #--------------------------METEOR DETECTION -----------------------------------------
4393 4554 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
4394 4555
4395 4556 for p in numpy.arange(nPairs):
4396 4557 phase = metPhase[p,:,:]
4397 4558 phDer = metDer[p,:,:]
4398 4559
4399 4560 for h in indMet:
4400 4561 height = heightList[h]
4401 4562 phase1 = phase[h,:] #82
4402 4563 phDer1 = phDer[h,:]
4403 4564
4404 4565 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
4405 4566
4406 4567 indValid = numpy.where(~numpy.isnan(phase1))[0]
4407 4568 initMet = indValid[0]
4408 4569 endMet = 0
4409 4570
4410 4571 for i in range(len(indValid)-1):
4411 4572
4412 4573 #Time difference
4413 4574 inow = indValid[i]
4414 4575 inext = indValid[i+1]
4415 4576 idiff = inext - inow
4416 4577 #Phase difference
4417 4578 phDiff = numpy.abs(phase1[inext] - phase1[inow])
4418 4579
4419 4580 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
4420 4581 sizeTrail = inow - initMet + 1
4421 4582 if sizeTrail>3*sec: #Too short meteors
4422 4583 x = numpy.arange(initMet,inow+1)*ippSeconds
4423 4584 y = phase1[initMet:inow+1]
4424 4585 ynnan = ~numpy.isnan(y)
4425 4586 x = x[ynnan]
4426 4587 y = y[ynnan]
4427 4588 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
4428 4589 ylin = x*slope + intercept
4429 4590 rsq = r_value**2
4430 4591 if rsq > 0.5:
4431 4592 vel = slope#*height*1000/(k*d)
4432 4593 estAux = numpy.array([utctime,p,height, vel, rsq])
4433 4594 meteorList.append(estAux)
4434 4595 initMet = inext
4435 4596 metArray2 = numpy.array(meteorList)
4436 4597
4437 4598 return metArray2
4438 4599
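__getPhaseSlope fits a line to the unwrapped phase of each meteor trail with stats.linregress and keeps the slope only when r^2 > 0.5; the slope is later scaled into a radial velocity. A hedged sketch of that per-trail fit with synthetic data:

import numpy
from scipy import stats

t = numpy.arange(30) * 0.01                          # trail time axis (s)
phase = numpy.unwrap(0.8 * t + 0.02 * numpy.random.randn(30))
slope, intercept, r_value, p_value, std_err = stats.linregress(t, phase)
if r_value**2 > 0.5:
    print("phase slope (rad/s):", slope)             # later scaled to a radial velocity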
4439 4600 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
4440 4601
4441 4602 azimuth1 = numpy.zeros(len(pairslist))
4442 4603 dist = numpy.zeros(len(pairslist))
4443 4604
4444 4605 for i in range(len(rx_location)):
4445 4606 ch0 = pairslist[i][0]
4446 4607 ch1 = pairslist[i][1]
4447 4608
4448 4609 diffX = rx_location[ch0][0] - rx_location[ch1][0]
4449 4610 diffY = rx_location[ch0][1] - rx_location[ch1][1]
4450 4611 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
4451 4612 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
4452 4613
4453 4614 azimuth1 -= azimuth0
4454 4615 return azimuth1, dist
4455 4616
4456 4617 def techniqueNSM_DBS(self, **kwargs):
4457 4618 metArray = kwargs['metArray']
4458 4619 heightList = kwargs['heightList']
4459 4620 timeList = kwargs['timeList']
4460 4621 azimuth = kwargs['azimuth']
4461 4622 theta_x = numpy.array(kwargs['theta_x'])
4462 4623 theta_y = numpy.array(kwargs['theta_y'])
4463 4624
4464 4625 utctime = metArray[:,0]
4465 4626 cmet = metArray[:,1].astype(int)
4466 4627 hmet = metArray[:,3].astype(int)
4467 4628 SNRmet = metArray[:,4]
4468 4629 vmet = metArray[:,5]
4469 4630 spcmet = metArray[:,6]
4470 4631
4471 4632 nChan = numpy.max(cmet) + 1
4472 4633 nHeights = len(heightList)
4473 4634
4474 4635 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
4475 4636 hmet = heightList[hmet]
4476 4637 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
4477 4638
4478 4639 velEst = numpy.zeros((heightList.size,2))*numpy.nan
4479 4640
4480 4641 for i in range(nHeights - 1):
4481 4642 hmin = heightList[i]
4482 4643 hmax = heightList[i + 1]
4483 4644
4484 4645 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
4485 4646 indthisH = numpy.where(thisH)
4486 4647
4487 4648 if numpy.size(indthisH) > 3:
4488 4649
4489 4650 vel_aux = vmet[thisH]
4490 4651 chan_aux = cmet[thisH]
4491 4652 cosu_aux = dir_cosu[chan_aux]
4492 4653 cosv_aux = dir_cosv[chan_aux]
4493 4654 cosw_aux = dir_cosw[chan_aux]
4494 4655
4495 4656 nch = numpy.size(numpy.unique(chan_aux))
4496 4657 if nch > 1:
4497 4658 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
4498 4659 velEst[i,:] = numpy.dot(A,vel_aux)
4499 4660
4500 4661 return velEst
4501 4662
4502 4663 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
4503 4664
4504 4665 param = dataOut.moments
4505 4666 if numpy.any(dataOut.abscissaList):
4506 4667 absc = dataOut.abscissaList[:-1]
4507 4668 # noise = dataOut.noise
4508 4669 heightList = dataOut.heightList
4509 4670 SNR = dataOut.data_snr
4510 4671
4511 4672 if technique == 'DBS':
4512 4673
4513 4674 kwargs['velRadial'] = param[:,1,:] #Radial velocity
4514 4675 kwargs['heightList'] = heightList
4515 4676 kwargs['SNR'] = SNR
4516 4677
4517 4678 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
4518 4679 dataOut.utctimeInit = dataOut.utctime
4519 4680 dataOut.outputInterval = dataOut.paramInterval
4520 4681
4521 4682 elif technique == 'SA':
4522 4683
4523 4684 #Parameters
4524 4685 kwargs['groupList'] = dataOut.groupList
4525 4686 kwargs['tau'] = dataOut.data_param
4526 4687 kwargs['_lambda'] = dataOut.C/dataOut.frequency
4527 4688 dataOut.data_output = self.techniqueSA(kwargs)
4528 4689 dataOut.utctimeInit = dataOut.utctime
4529 4690 dataOut.outputInterval = dataOut.timeInterval
4530 4691
4531 4692 elif technique == 'Meteors':
4532 4693 dataOut.flagNoData = True
4533 4694 self.__dataReady = False
4534 4695
4535 4696 if 'nHours' in kwargs:
4536 4697 nHours = kwargs['nHours']
4537 4698 else:
4538 4699 nHours = 1
4539 4700
4540 4701 if 'meteorsPerBin' in kwargs:
4541 4702 meteorThresh = kwargs['meteorsPerBin']
4542 4703 else:
4543 4704 meteorThresh = 6
4544 4705
4545 4706 if 'hmin' in kwargs:
4546 4707 hmin = kwargs['hmin']
4547 4708 else: hmin = 70
4548 4709 if 'hmax' in kwargs:
4549 4710 hmax = kwargs['hmax']
4550 4711 else: hmax = 110
4551 4712
4552 4713 dataOut.outputInterval = nHours*3600
4553 4714
4554 4715 if self.__isConfig == False:
4555 4716 #Get Initial LTC time
4556 4717 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
4557 4718 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
4558 4719
4559 4720 self.__isConfig = True
4560 4721
4561 4722 if self.__buffer is None:
4562 4723 self.__buffer = dataOut.data_param
4563 4724 self.__firstdata = copy.copy(dataOut)
4564 4725
4565 4726 else:
4566 4727 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
4567 4728
4568 4729 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
4569 4730
4570 4731 if self.__dataReady:
4571 4732 dataOut.utctimeInit = self.__initime
4572 4733
4573 4734 self.__initime += dataOut.outputInterval #to erase time offset
4574 4735
4575 4736 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
4576 4737 dataOut.flagNoData = False
4577 4738 self.__buffer = None
4578 4739
4579 4740 elif technique == 'Meteors1':
4580 4741 dataOut.flagNoData = True
4581 4742 self.__dataReady = False
4582 4743
4583 4744 if 'nMins' in kwargs:
4584 4745 nMins = kwargs['nMins']
4585 4746 else: nMins = 20
4586 4747 if 'rx_location' in kwargs:
4587 4748 rx_location = kwargs['rx_location']
4588 4749 else: rx_location = [(0,1),(1,1),(1,0)]
4589 4750 if 'azimuth' in kwargs:
4590 4751 azimuth = kwargs['azimuth']
4591 4752 else: azimuth = 51.06
4592 4753 if 'dfactor' in kwargs:
4593 4754 dfactor = kwargs['dfactor']
4594 4755 if 'mode' in kwargs:
4595 4756 mode = kwargs['mode']
4596 4757 else: mode = 'SA'
4597 4758 if 'theta_x' in kwargs:
4598 4759 theta_x = kwargs['theta_x']
4599 4760 if 'theta_y' in kwargs:
4600 4761 theta_y = kwargs['theta_y']
4601 4762
4602 4763 #TODO: delete this later
4603 4764 if dataOut.groupList is None:
4604 4765 dataOut.groupList = [(0,1),(0,2),(1,2)]
4605 4766 groupList = dataOut.groupList
4606 4767 C = 3e8
4607 4768 freq = 50e6
4608 4769 lamb = C/freq
4609 4770 k = 2*numpy.pi/lamb
4610 4771
4611 4772 timeList = dataOut.abscissaList
4612 4773 heightList = dataOut.heightList
4613 4774
4614 4775 if self.__isConfig == False:
4615 4776 dataOut.outputInterval = nMins*60
4616 4777 #Get Initial LTC time
4617 4778 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
4618 4779 minuteAux = initime.minute
4619 4780 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
4620 4781 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
4621 4782
4622 4783 self.__isConfig = True
4623 4784
4624 4785 if self.__buffer is None:
4625 4786 self.__buffer = dataOut.data_param
4626 4787 self.__firstdata = copy.copy(dataOut)
4627 4788
4628 4789 else:
4629 4790 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
4630 4791
4631 4792 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
4632 4793
4633 4794 if self.__dataReady:
4634 4795 dataOut.utctimeInit = self.__initime
4635 4796 self.__initime += dataOut.outputInterval #to erase time offset
4636 4797
4637 4798 metArray = self.__buffer
4638 4799 if mode == 'SA':
4639 4800 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
4640 4801 elif mode == 'DBS':
4641 4802 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
4642 4803 dataOut.data_output = dataOut.data_output.T
4643 4804 dataOut.flagNoData = False
4644 4805 self.__buffer = None
4645 4806
4646 4807 return dataOut
4647 4808
4648 4809 class EWDriftsEstimation(Operation):
4649 4810
4650 4811 def __init__(self):
4651 4812 Operation.__init__(self)
4652 4813
4653 4814 def __correctValues(self, heiRang, phi, velRadial, SNR):
4654 4815 listPhi = phi.tolist()
4655 4816 maxid = listPhi.index(max(listPhi))
4656 4817 minid = listPhi.index(min(listPhi))
4657 4818
4658 rango = list(range(len(phi)))
4819 rango = list(range(len(phi)))
4659 4820 heiRang1 = heiRang*math.cos(phi[maxid])
4660 4821 heiRangAux = heiRang*math.cos(phi[minid])
4661 4822 indOut = (heiRang1 < heiRangAux[0]).nonzero()
4662 4823 heiRang1 = numpy.delete(heiRang1,indOut)
4663 4824
4664 4825 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
4665 4826 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
4666 4827
4667 4828 for i in rango:
4668 4829 x = heiRang*math.cos(phi[i])
4669 4830 y1 = velRadial[i,:]
4670 4831 vali= (numpy.isfinite(y1)==True).nonzero()
4671 4832 y1=y1[vali]
4672 4833 x = x[vali]
4673 4834 f1 = interpolate.interp1d(x,y1,kind = 'cubic',bounds_error=False)
4835
4674 4836 x1 = heiRang1
4675 4837 y11 = f1(x1)
4676 y2 = SNR[i,:]
4838 y2 = SNR[i,:]
4677 4839 x = heiRang*math.cos(phi[i])
4678 4840 vali= (y2 != -1).nonzero()
4679 4841 y2 = y2[vali]
4680 4842 x = x[vali]
4843
4681 4844 f2 = interpolate.interp1d(x,y2,kind = 'cubic',bounds_error=False)
4682 4845 y21 = f2(x1)
4683 4846
4684 4847 velRadial1[i,:] = y11
4685 4848 SNR1[i,:] = y21
4686 4849
4687 4850 return heiRang1, velRadial1, SNR1
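# Illustrative sketch: __correctValues projects each beam's slant heights onto a common
# vertical grid (heiRang*cos(phi) of the largest-zenith beam) and re-samples velocity and
# SNR by cubic interpolation. A minimal single-profile version under those same
# assumptions (NaN returned outside the sampled span):
#
#   import numpy, math
#   from scipy import interpolate
#   def to_common_grid(heiRang, phi_beam, profile, common_grid):
#       x = heiRang*math.cos(phi_beam)                # corrected (vertical) heights
#       ok = numpy.isfinite(profile)
#       f = interpolate.interp1d(x[ok], profile[ok], kind='cubic', bounds_error=False)
#       return f(common_grid)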
4688 4851
4852
4853
4689 4854 def run(self, dataOut, zenith, zenithCorrection,fileDrifts):
4690
4691 dataOut.lat=-11.95
4692 dataOut.lon=-76.87
4855 dataOut.lat = -11.95
4856 dataOut.lon = -76.87
4857 dataOut.spcst = 0.00666
4858 dataOut.pl = 0.0003
4859 dataOut.cbadn = 3
4860 dataOut.inttms = 300
4861 dataOut.azw = -115.687
4862 dataOut.elw = 86.1095
4863 dataOut.aze = 130.052
4864 dataOut.ele = 87.6558
4865 dataOut.jro14 = numpy.log10(dataOut.spc_noise[0]/dataOut.normFactor)
4866 dataOut.jro15 = numpy.log10(dataOut.spc_noise[1]/dataOut.normFactor)
4867 dataOut.jro16 = numpy.log10(dataOut.spc_noise[2]/dataOut.normFactor)
4868 dataOut.nwlos = numpy.log10(dataOut.spc_noise[3]/dataOut.normFactor)
4869
4693 4870 heiRang = dataOut.heightList
4694 4871 velRadial = dataOut.data_param[:,3,:]
4695 4872 velRadialm = dataOut.data_param[:,2:4,:]*-1
4873
4696 4874 rbufc=dataOut.data_paramC[:,:,0]
4697 4875 ebufc=dataOut.data_paramC[:,:,1]
4698 SNR = dataOut.data_snr
4876 SNR = dataOut.data_snr1_i
4877 rbufi = dataOut.data_snr1_i
4699 4878 velRerr = dataOut.data_error[:,4,:]
4879 range1 = dataOut.heightList
4880 nhei = len(range1)
4881
4882 sat_fits = dataOut.sat_fits
4883
4700 4884 channels = dataOut.channelList
4701 4885 nChan = len(channels)
4702 4886 my_nbeams = nChan/2
4703 4887 if my_nbeams == 2:
4704 4888 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]],[velRadialm[1,:]],[velRadialm[1,:]]))
4705 4889 else :
4706 4890 moments=numpy.vstack(([velRadialm[0,:]],[velRadialm[0,:]]))
4707 4891 dataOut.moments=moments
4892 #Incoherent
4893 smooth_w = dataOut.clean_num_aver[0,:]
4894 chisq_w = dataOut.data_error[0,0,:]
4895 p_w0 = rbufi[0,:]
4896 p_w1 = rbufi[1,:]
4708 4897 # Coherent
4709 4898 smooth_wC = ebufc[0,:]
4710 4899 p_w0C = rbufc[0,:]
4711 4900 p_w1C = rbufc[1,:]
4712 4901 w_wC = rbufc[2,:]*-1 #*radial_sign(radial EQ 1)
4713 4902 t_wC = rbufc[3,:]
4903 val = (numpy.isfinite(p_w0)==False).nonzero()
4904 p_w0[val]=0
4905 val = (numpy.isfinite(p_w1)==False).nonzero()
4906 p_w1[val]=0
4907 val = (numpy.isfinite(p_w0C)==False).nonzero()
4908 p_w0C[val]=0
4909 val = (numpy.isfinite(p_w1C)==False).nonzero()
4910 p_w1C[val]=0
4911 val = (numpy.isfinite(smooth_w)==False).nonzero()
4912 smooth_w[val]=0
4913 val = (numpy.isfinite(smooth_wC)==False).nonzero()
4914 smooth_wC[val]=0
4915
4916 #p_w0 = (p_w0*smooth_w+p_w0C*smooth_wC)/(smooth_w+smooth_wC)
4917 #p_w1 = (p_w1*smooth_w+p_w1C*smooth_wC)/(smooth_w+smooth_wC)
4918
4919 if len(sat_fits) >0 :
4920 p_w0C = p_w0C + sat_fits[0,:]
4921 p_w1C = p_w1C + sat_fits[1,:]
4922
4714 4923 if my_nbeams == 1:
4715 4924 w = velRadial[0,:]
4716 4925 winds = velRadial.copy()
4717 4926 w_err = velRerr[0,:]
4718 snr1 = 10*numpy.log10(SNR[0])
4927 u = w*numpy.nan
4928 u_err = w_err*numpy.nan
4929 p_e0 = p_w0*numpy.nan
4930 p_e1 = p_w1*numpy.nan
4931 #snr1 = 10*numpy.log10(SNR[0])
4719 4932 if my_nbeams == 2:
4933
4720 4934 zenith = numpy.array(zenith)
4721 4935 zenith -= zenithCorrection
4722 4936 zenith *= numpy.pi/180
4723 4937 if zenithCorrection != 0 :
4724 4938 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
4725 4939 else :
4726 4940 heiRang1 = heiRang
4727 4941 velRadial1 = velRadial
4728 4942 SNR1 = SNR
4729 4943
4730 4944 alp = zenith[0]
4731 4945 bet = zenith[1]
4732 4946
4733 4947 w_w = velRadial1[0,:]
4734 4948 w_e = velRadial1[1,:]
4735 4949 w_w_err = velRerr[0,:]
4736 4950 w_e_err = velRerr[1,:]
4737
4738 val = (numpy.isfinite(w_w)==False).nonzero()
4739 val = val[0]
4740 bad = val
4741 if len(bad) > 0 :
4742 w_w[bad] = w_wC[bad]
4743 w_w_err[bad]= numpy.nan
4951 smooth_e = dataOut.clean_num_aver[2,:]
4952 chisq_e = dataOut.data_error[1,0,:]
4953 p_e0 = rbufi[2,:]
4954 p_e1 = rbufi[3,:]
4955
4956 tini=time.localtime(dataOut.utctime)
4957 #print(tini[3],tini[4])
4958 #val = (numpy.isfinite(w_w)==False).nonzero()
4959 #val = val[0]
4960 #bad = val
4961 #if len(bad) > 0 :
4962 # w_w[bad] = w_wC[bad]
4963 # w_w_err[bad]= numpy.nan
4964 if tini[3] >= 6 and tini[3] < 18 :
4965 w_wtmp = numpy.where(numpy.isfinite(w_wC)==True,w_wC,w_w)
4966 w_w_errtmp = numpy.where(numpy.isfinite(w_wC)==True,numpy.nan,w_w_err)
4967 else:
4968 w_wtmp = numpy.where(numpy.isfinite(w_wC)==True,w_wC,w_w)
4969 w_wtmp = numpy.where(range1 > 200,w_w,w_wtmp)
4970 w_w_errtmp = numpy.where(numpy.isfinite(w_wC)==True,numpy.nan,w_w_err)
4971 w_w_errtmp = numpy.where(range1 > 200,w_w_err,w_w_errtmp)
4972 w_w = w_wtmp
4973 w_w_err = w_w_errtmp
4974
4975 #if my_nbeams == 2:
4744 4976 smooth_eC=ebufc[4,:]
4745 4977 p_e0C = rbufc[4,:]
4746 4978 p_e1C = rbufc[5,:]
4747 4979 w_eC = rbufc[6,:]*-1
4748 4980 t_eC = rbufc[7,:]
4749 val = (numpy.isfinite(w_e)==False).nonzero()
4750 val = val[0]
4751 bad = val
4752 if len(bad) > 0 :
4753 w_e[bad] = w_eC[bad]
4754 w_e_err[bad]= numpy.nan
4755
4981
4982 val = (numpy.isfinite(p_e0)==False).nonzero()
4983 p_e0[val]=0
4984 val = (numpy.isfinite(p_e1)==False).nonzero()
4985 p_e1[val]=0
4986 val = (numpy.isfinite(p_e0C)==False).nonzero()
4987 p_e0C[val]=0
4988 val = (numpy.isfinite(p_e1C)==False).nonzero()
4989 p_e1C[val]=0
4990 val = (numpy.isfinite(smooth_e)==False).nonzero()
4991 smooth_e[val]=0
4992 val = (numpy.isfinite(smooth_eC)==False).nonzero()
4993 smooth_eC[val]=0
4994 #p_e0 = (p_e0*smooth_e+p_e0C*smooth_eC)/(smooth_e+smooth_eC)
4995 #p_e1 = (p_e1*smooth_e+p_e1C*smooth_eC)/(smooth_e+smooth_eC)
4996
4997 if len(sat_fits) >0 :
4998 p_e0C = p_e0C + sat_fits[2,:]
4999 p_e1C = p_e1C + sat_fits[3,:]
5000
5001 #val = (numpy.isfinite(w_e)==False).nonzero()
5002 #val = val[0]
5003 #bad = val
5004 #if len(bad) > 0 :
5005 # w_e[bad] = w_eC[bad]
5006 # w_e_err[bad]= numpy.nan
5007 if tini[3] >= 6 and tini[3] < 18 :
5008 w_etmp = numpy.where(numpy.isfinite(w_eC)==True,w_eC,w_e)
5009 w_e_errtmp = numpy.where(numpy.isfinite(w_eC)==True,numpy.nan,w_e_err)
5010 else:
5011 w_etmp = numpy.where(numpy.isfinite(w_eC)==True,w_eC,w_e)
5012 w_etmp = numpy.where(range1 > 200,w_e,w_etmp)
5013 w_e_errtmp = numpy.where(numpy.isfinite(w_eC)==True,numpy.nan,w_e_err)
5014 w_e_errtmp = numpy.where(range1 > 200,w_e_err,w_e_errtmp)
5015 w_e = w_etmp
5016 w_e_err = w_e_errtmp
5017
4756 5018 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
4757 5019 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
4758 5020
4759 5021 w_err = numpy.sqrt((w_w_err*numpy.sin(bet))**2.+(w_e_err*numpy.sin(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
4760 5022 u_err = numpy.sqrt((w_w_err*numpy.cos(bet))**2.+(w_e_err*numpy.cos(alp))**2.)/ numpy.absolute(numpy.cos(alp)*numpy.sin(bet)-numpy.cos(bet)*numpy.sin(alp))
4761 5023
4762 5024 winds = numpy.vstack((w,u))
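# Illustrative note: the closed-form expressions for w and u above invert the 2x2 system
#   [w_w]   [cos(alp)  sin(alp)] [w]
#   [w_e] = [cos(bet)  sin(bet)] [u]
# i.e. the vertical/zonal drifts consistent with both line-of-sight velocities; the error
# lines propagate the radial errors through the same inverse. A hedged per-height check:
#
#   import numpy
#   def invert_two_beam(vr_w, vr_e, alp, bet):
#       M = numpy.array([[numpy.cos(alp), numpy.sin(alp)],
#                        [numpy.cos(bet), numpy.sin(bet)]])
#       return numpy.linalg.solve(M, numpy.array([vr_w, vr_e]))   # -> [w, u]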
4763
5025 #winds = numpy.vstack((w,u,w_err,u_err))
4764 5026 dataOut.heightList = heiRang1
4765 snr1 = 10*numpy.log10(SNR1[0])
5027 #snr1 = 10*numpy.log10(SNR1[0])
4766 5028 dataOut.data_output = winds
4767 #snr1 = 10*numpy.log10(SNR1[0]) # was commented out
4768 dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0]))
4769 #print("data_snr1",dataOut.data_snr1)
5029 range1 = dataOut.heightList
5030 nhei = len(range1)
5031 #print('alt ',range1*numpy.sin(86.1*numpy.pi/180))
5032 #print(numpy.min([dataOut.eldir7,dataOut.eldir8]))
5033 galt = range1*numpy.sin(numpy.min([dataOut.elw,dataOut.ele])*numpy.pi/180.)
5034 dataOut.params = numpy.vstack((range1,galt,w,w_err,u,u_err,w_w,w_w_err,w_e,w_e_err,numpy.log10(p_w0),numpy.log10(p_w0C),numpy.log10(p_w1),numpy.log10(p_w1C),numpy.log10(p_e0),numpy.log10(p_e0C),numpy.log10(p_e1),numpy.log10(p_e1C),chisq_w,chisq_e))
5035 #snr1 = 10*numpy.log10(SNR1[0])
5036 #print(min(snr1), max(snr1))
5037 snr1 = numpy.vstack((p_w0,p_w1,p_e0,p_e1))
5038 snr1db = 10*numpy.log10(snr1[0])
5039 #dataOut.data_snr1 = numpy.reshape(snr1,(1,snr1.shape[0]))
5040 dataOut.data_snr1 = numpy.reshape(snr1db,(1,snr1db.shape[0]))
4770 5041 dataOut.utctimeInit = dataOut.utctime
4771 5042 dataOut.outputInterval = dataOut.timeInterval
4772 5043
4773 5044 hei_aver0 = 218
4774 5045 jrange = 450 #900 for HA drifts
4775 5046 deltah = 15.0 #dataOut.spacing(0) 25 HAD
4776 5047 h0 = 0.0 #dataOut.first_height(0)
4777 heights = dataOut.heightList
4778 nhei = len(heights)
4779 5048
4780 5049 range1 = numpy.arange(nhei) * deltah + h0
4781 5050 jhei = (range1 >= hei_aver0).nonzero()
4782 5051 if len(jhei[0]) > 0 :
4783 5052 h0_index = jhei[0][0] # Initial height for getting averages 218km
4784 5053
4785 5054 mynhei = 7
4786 5055 nhei_avg = int(jrange/deltah)
4787 5056 h_avgs = int(nhei_avg/mynhei)
4788 5057 nhei_avg = h_avgs*(mynhei-1)+mynhei
4789 5058
4790 5059 navgs = numpy.zeros(mynhei,dtype='float')
4791 5060 delta_h = numpy.zeros(mynhei,dtype='float')
4792 5061 range_aver = numpy.zeros(mynhei,dtype='float')
4793 5062 for ih in range( mynhei-1 ):
4794 5063 range_aver[ih] = numpy.sum(range1[h0_index+h_avgs*ih:h0_index+h_avgs*(ih+1)-0])/h_avgs
4795 5064 navgs[ih] = h_avgs
4796 5065 delta_h[ih] = deltah*h_avgs
4797 5066
4798 5067 range_aver[mynhei-1] = numpy.sum(range1[h0_index:h0_index+6*h_avgs-0])/(6*h_avgs)
4799 5068 navgs[mynhei-1] = 6*h_avgs
4800 5069 delta_h[mynhei-1] = deltah*6*h_avgs
4801 5070
4802 5071 wA = w[h0_index:h0_index+nhei_avg-0]
4803 5072 wA_err = w_err[h0_index:h0_index+nhei_avg-0]
4804 5073 for i in range(5) :
4805 5074 vals = wA[i*h_avgs:(i+1)*h_avgs-0]
4806 5075 errs = wA_err[i*h_avgs:(i+1)*h_avgs-0]
4807 5076 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
4808 5077 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4809 5078 wA[6*h_avgs+i] = avg
4810 5079 wA_err[6*h_avgs+i] = sigma
4811 5080
4812 5081
4813 5082 vals = wA[0:6*h_avgs-0]
4814 5083 errs=wA_err[0:6*h_avgs-0]
4815 5084 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2)
4816 5085 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4817 5086 wA[nhei_avg-1] = avg
4818 5087 wA_err[nhei_avg-1] = sigma
4819 5088
4820 5089 wA = wA[6*h_avgs:nhei_avg-0]
4821 5090 wA_err=wA_err[6*h_avgs:nhei_avg-0]
4822 5091 if my_nbeams == 2 :
4823 5092 uA = u[h0_index:h0_index+nhei_avg]
4824 5093 uA_err=u_err[h0_index:h0_index+nhei_avg]
4825 5094
4826 5095 for i in range(5) :
4827 5096 vals = uA[i*h_avgs:(i+1)*h_avgs-0]
4828 5097 errs=uA_err[i*h_avgs:(i+1)*h_avgs-0]
4829 5098 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
4830 5099 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4831 5100 uA[6*h_avgs+i] = avg
4832 5101 uA_err[6*h_avgs+i]=sigma
4833 5102
4834 5103 vals = uA[0:6*h_avgs-0]
4835 5104 errs = uA_err[0:6*h_avgs-0]
4836 5105 avg = numpy.nansum(vals/errs**2.)/numpy.nansum(1./errs**2.)
4837 5106 sigma = numpy.sqrt(1./numpy.nansum(1./errs**2.))
4838 5107 uA[nhei_avg-1] = avg
4839 5108 uA_err[nhei_avg-1] = sigma
4840 5109 uA = uA[6*h_avgs:nhei_avg-0]
4841 5110 uA_err = uA_err[6*h_avgs:nhei_avg-0]
4842 5111 dataOut.drifts_avg = numpy.vstack((wA,uA))
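# Illustrative sketch: the height-bin averages above are inverse-variance weighted means,
#   x_avg = sum(x_i/sigma_i^2)/sum(1/sigma_i^2),  sigma_avg = sqrt(1/sum(1/sigma_i^2)).
# A minimal helper under the same convention (NaNs ignored by nansum):
#
#   import numpy
#   def weighted_average(vals, errs):
#       w = 1.0/errs**2
#       avg = numpy.nansum(vals*w)/numpy.nansum(w)
#       sigma = numpy.sqrt(1.0/numpy.nansum(w))
#       return avg, sigma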
5112
4843 5113 if my_nbeams == 1: dataOut.drifts_avg = wA
5114 #deltahavg= wA*0.0+deltah
5115 dataOut.range = range1
5116 galtavg = range_aver*numpy.sin(numpy.min([dataOut.elw,dataOut.ele])*numpy.pi/180.)
5117 dataOut.params_avg = numpy.vstack((wA,wA_err,uA,uA_err,range_aver,galtavg,delta_h))
5118
5119 #print('comparando dim de avg ',wA.shape,deltahavg.shape,range_aver.shape)
4844 5120 tini=time.localtime(dataOut.utctime)
4845 5121 datefile= str(tini[0]).zfill(4)+str(tini[1]).zfill(2)+str(tini[2]).zfill(2)
4846 nfile = fileDrifts+'/jro'+datefile+'drifts.txt'
5122 nfile = fileDrifts+'/jro'+datefile+'drifts_sch3.txt'
5123 #nfile = '/home/pcondor/Database/ewdriftsschain2019/jro'+datefile+'drifts_sch3.txt'
5124 #print(len(dataOut.drifts_avg),dataOut.drifts_avg.shape)
4847 5125 f1 = open(nfile,'a')
4848 5126 datedriftavg=str(tini[0])+' '+str(tini[1])+' '+str(tini[2])+' '+str(tini[3])+' '+str(tini[4])
4849 5127 driftavgstr=str(dataOut.drifts_avg)
4850 5128 numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i')
4851 numpy.savetxt(f1,dataOut.drifts_avg,fmt='%10.2f')
5129 numpy.savetxt(f1,numpy.reshape(range_aver,(1,len(range_aver))) ,fmt='%10.2f')
5130 #numpy.savetxt(f1,numpy.reshape(dataOut.drifts_avg,(7,len(dataOut.drifts_avg))) ,fmt='%10.2f')
5131 numpy.savetxt(f1,dataOut.drifts_avg[:,:],fmt='%10.2f')
5132 f1.close()
5133
5134 swfile = fileDrifts+'/jro'+datefile+'drifts_sw.txt'
5135 f1 = open(swfile,'a')
5136 numpy.savetxt(f1,numpy.column_stack([tini[0],tini[1],tini[2],tini[3],tini[4]]),fmt='%4i')
5137 numpy.savetxt(f1,numpy.reshape(heiRang,(1,len(heiRang))),fmt='%10.2f')
5138 numpy.savetxt(f1,dataOut.data_param[:,0,:],fmt='%10.2f')
4852 5139 f1.close()
5140 dataOut.heightListtmp = dataOut.heightList
5141 '''
5142 one = {'range':'range','gdlatr': 'lat', 'gdlonr': 'lon', 'inttms': 'paramInterval'} #reader gdlatr-->lat only 1D
5143
5144 two = {
5145 'gdalt': 'heightList', #<----- nmonics
5146 'VIPN': ('params', 0),
5147 'dvipn': ('params', 1),
5148 'vipe': ('params', 2),
5149 'dvipe': ('params', 3),
5150 'PACWL': ('params', 4),
5151 'pbcwl': ('params', 5),
5152 'pccel': ('params', 6),
5153 'pdcel': ('params', 7)
5154 } #writer
5155
5156 #f=open('/home/roberto/moder_test.txt','r')
5157 #file_contents=f.read()
5158
5159 ind = ['gdalt']
5160
5161 meta = {
5162 'kinst': 10, #instrument code
5163 'kindat': 1910, #type of data
5164 'catalog': {
5165 'principleInvestigator': 'Danny Scipión',
5166 'expPurpose': 'Drifts'#,
5167 # 'sciRemarks': file_contents
5168 },
5169 'header': {
5170 'analyst': 'D. Hysell'
5171 }
5172 }
5173 print('starting Madrigal h5')
5174 try:
5175 h5mad=MADWriter(dataOut, fileDrifts, one, ind, two, meta, format='hdf5')
5176 except:
5177 print("Error in MADWriter")
5178 print(h5mad)
5179 '''
5180 return dataOut
5181 class setHeightDrifts(Operation):
4853 5182
5183 def __init__(self):
5184 Operation.__init__(self)
5185 def run(self, dataOut):
5186 #print('h inicial ',dataOut.heightList,dataOut.heightListtmp)
5187 dataOut.heightList = dataOut.heightListtmp
5188 #print('regresa H ',dataOut.heightList)
4854 5189 return dataOut
5190 class setHeightDriftsavg(Operation):
5191
5192 def __init__(self):
5193 Operation.__init__(self)
5194 def run(self, dataOut):
5195 #print('h inicial ',dataOut.heightList)
5196 dataOut.heightList = dataOut.params_avg[4]
5197 #print('cambia H ',dataOut.params_avg[4],dataOut.heightList)
5198 return dataOut
5199
5200
4855 5201
4856 5202 #--------------- Non Specular Meteor ----------------
4857 5203
4858 5204 class NonSpecularMeteorDetection(Operation):
4859 5205
4860 5206 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
4861 5207 data_acf = dataOut.data_pre[0]
4862 5208 data_ccf = dataOut.data_pre[1]
4863 5209 pairsList = dataOut.groupList[1]
4864 5210
4865 5211 lamb = dataOut.C/dataOut.frequency
4866 5212 tSamp = dataOut.ippSeconds*dataOut.nCohInt
4867 5213 paramInterval = dataOut.paramInterval
4868 5214
4869 5215 nChannels = data_acf.shape[0]
4870 5216 nLags = data_acf.shape[1]
4871 5217 nProfiles = data_acf.shape[2]
4872 5218 nHeights = dataOut.nHeights
4873 5219 nCohInt = dataOut.nCohInt
4874 5220 sec = numpy.round(nProfiles/dataOut.paramInterval)
4875 5221 heightList = dataOut.heightList
4876 5222 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
4877 5223 utctime = dataOut.utctime
4878 5224
4879 5225 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
4880 5226
4881 5227 #------------------------ SNR --------------------------------------
4882 5228 power = data_acf[:,0,:,:].real
4883 5229 noise = numpy.zeros(nChannels)
4884 5230 SNR = numpy.zeros(power.shape)
4885 5231 for i in range(nChannels):
4886 5232 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
4887 5233 SNR[i] = (power[i]-noise[i])/noise[i]
4888 5234 SNRm = numpy.nanmean(SNR, axis = 0)
4889 5235 SNRdB = 10*numpy.log10(SNR)
4890 5236
4891 5237 if mode == 'SA':
4892 5238 dataOut.groupList = dataOut.groupList[1]
4893 5239 nPairs = data_ccf.shape[0]
4894 5240 #---------------------- Coherence and Phase --------------------------
4895 5241 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
4896 5242 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
4897 5243
4898 5244 for p in range(nPairs):
4899 5245 ch0 = pairsList[p][0]
4900 5246 ch1 = pairsList[p][1]
4901 5247 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
4902 5248 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
4903 5249 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
4904 5250 coh = numpy.nanmax(coh1, axis = 0)
4905 5251 #---------------------- Radial Velocity ----------------------------
4906 5252 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
4907 5253 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
4908 5254
4909 5255 if allData:
4910 5256 boolMetFin = ~numpy.isnan(SNRm)
4911 5257 else:
4912 5258 #------------------------ Meteor mask ---------------------------------
4913 5259
4914 5260 #Coherence mask
4915 5261 boolMet1 = coh > 0.75
4916 5262 struc = numpy.ones((30,1))
4917 5263 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
4918 5264
4919 5265 #Derivative mask
4920 5266 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
4921 5267 boolMet2 = derPhase < 0.2
4922 5268 boolMet2 = ndimage.median_filter(boolMet2,size=5)
4923 5269 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
4924 5270 boolMetFin = boolMet1&boolMet2
4925 5271 #Creating data_param
4926 5272 coordMet = numpy.where(boolMetFin)
4927 5273
4928 5274 tmet = coordMet[0]
4929 5275 hmet = coordMet[1]
4930 5276
4931 5277 data_param = numpy.zeros((tmet.size, 6 + nPairs))
4932 5278 data_param[:,0] = utctime
4933 5279 data_param[:,1] = tmet
4934 5280 data_param[:,2] = hmet
4935 5281 data_param[:,3] = SNRm[tmet,hmet]
4936 5282 data_param[:,4] = velRad[tmet,hmet]
4937 5283 data_param[:,5] = coh[tmet,hmet]
4938 5284 data_param[:,6:] = phase[:,tmet,hmet].T
4939 5285
4940 5286 elif mode == 'DBS':
4941 5287 dataOut.groupList = numpy.arange(nChannels)
4942 5288
4943 5289 #Radial Velocities
4944 5290 phase = numpy.angle(data_acf[:,1,:,:])
4945 5291 velRad = phase*lamb/(4*numpy.pi*tSamp)
4946 5292
4947 5293 #Spectral width
4948 5294 acf1 = data_acf[:,1,:,:]
4949 5295 acf2 = data_acf[:,2,:,:]
4950 5296
4951 5297 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
4952 5298 if allData:
4953 5299 boolMetFin = ~numpy.isnan(SNRdB)
4954 5300 else:
4955 5301 #SNR
4956 5302 boolMet1 = (SNRdB>SNRthresh) #SNR mask
4957 5303 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
4958 5304
4959 5305 #Radial velocity
4960 5306 boolMet2 = numpy.abs(velRad) < 20
4961 5307 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
4962 5308
4963 5309 #Spectral Width
4964 5310 boolMet3 = spcWidth < 30
4965 5311 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
4966 5312 boolMetFin = boolMet1&boolMet2&boolMet3
4967 5313
4968 5314 #Creating data_param
4969 5315 coordMet = numpy.where(boolMetFin)
4970 5316
4971 5317 cmet = coordMet[0]
4972 5318 tmet = coordMet[1]
4973 5319 hmet = coordMet[2]
4974 5320
4975 5321 data_param = numpy.zeros((tmet.size, 7))
4976 5322 data_param[:,0] = utctime
4977 5323 data_param[:,1] = cmet
4978 5324 data_param[:,2] = tmet
4979 5325 data_param[:,3] = hmet
4980 5326 data_param[:,4] = SNR[cmet,tmet,hmet].T
4981 5327 data_param[:,5] = velRad[cmet,tmet,hmet].T
4982 5328 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
4983 5329
4984 5330 if len(data_param) == 0:
4985 5331 dataOut.flagNoData = True
4986 5332 else:
4987 5333 dataOut.data_param = data_param
4988 5334
4989 5335 def __erase_small(self, binArray, threshX, threshY):
4990 5336 labarray, numfeat = ndimage.measurements.label(binArray)
4991 5337 binArray1 = numpy.copy(binArray)
4992 5338
4993 5339 for i in range(1,numfeat + 1):
4994 5340 auxBin = (labarray==i)
4995 5341 auxSize = auxBin.sum()
4996 5342
4997 5343 x,y = numpy.where(auxBin)
4998 5344 widthX = x.max() - x.min()
4999 5345 widthY = y.max() - y.min()
5000 5346
5001 5347 #width X: 3 seg -> 12.5*3
5002 5348 #width Y:
5003 5349
5004 5350 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
5005 5351 binArray1[auxBin] = False
5006 5352
5007 5353 return binArray1
5008 5354
5009 5355 #--------------- Specular Meteor ----------------
5010 5356
5011 5357 class SMDetection(Operation):
5012 5358 '''
5013 5359 Function DetectMeteors()
5014 5360 Implementation based on the paper:
5015 5361 HOLDSWORTH ET AL. 2004
5016 5362
5017 5363 Input:
5018 5364 self.dataOut.data_pre
5019 5365
5020 5366 centerReceiverIndex: index of the center receiver among the channels
5021 5367
5022 5368 hei_ref: Height reference for the Beacon signal extraction
5023 5369 tauindex:
5024 5370 predefinedPhaseShifts: Predefined phase offsets for the voltage signals
5025 5371
5026 5372 cohDetection: Whether to use coherent detection or not
5027 5373 cohDet_timeStep: Coherent Detection calculation time step
5028 5374 cohDet_thresh: Coherent Detection phase threshold to correct phases
5029 5375
5030 5376 noise_timeStep: Noise calculation time step
5031 5377 noise_multiple: Noise multiple to define signal threshold
5032 5378
5033 5379 multDet_timeLimit: Multiple Detection Removal time limit in seconds
5034 5380 multDet_rangeLimit: Multiple Detection Removal range limit in km
5035 5381
5036 5382 phaseThresh: Maximum phase difference between receivers to be considered a meteor
5037 5383 SNRThresh: Minimum SNR threshold of the meteor signal to be considered a meteor
5038 5384
5039 5385 hmin: Minimum Height of the meteor to use it in the further wind estimations
5040 5386 hmax: Maximum Height of the meteor to use it in the further wind estimations
5041 5387 azimuth: Azimuth angle correction
5042 5388
5043 5389 Affected:
5044 5390 self.dataOut.data_param
5045 5391
5046 5392 Rejection Criteria (Errors):
5047 5393 0: No error; analysis OK
5048 5394 1: SNR < SNR threshold
5049 5395 2: angle of arrival (AOA) ambiguously determined
5050 5396 3: AOA estimate not feasible
5051 5397 4: Large difference in AOAs obtained from different antenna baselines
5052 5398 5: echo at start or end of time series
5053 5399 6: echo less than 5 samples long; too short for analysis
5054 5400 7: echo rise exceeds 0.3s
5055 5401 8: echo decay time less than twice rise time
5056 5402 9: large power level before echo
5057 5403 10: large power level after echo
5058 5404 11: poor fit to amplitude for estimation of decay time
5059 5405 12: poor fit to CCF phase variation for estimation of radial drift velocity
5060 5406 13: height unresolvable echo: not valid height within 70 to 110 km
5061 5407 14: height ambiguous echo: more than one possible height within 70 to 110 km
5062 5408 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
5063 5409 16: oscillatory echo, indicating the event is most likely not an underdense echo
5064 5410
5065 5411 17: phase difference in meteor Reestimation
5066 5412
5067 5413 Data Storage:
5068 5414 Meteors for Wind Estimation (8):
5069 5415 Utc Time | Range Height
5070 5416 Azimuth Zenith errorCosDir
5071 5417 VelRad errorVelRad
5072 5418 Phase0 Phase1 Phase2 Phase3
5073 5419 TypeError
5074 5420
5075 5421 '''
5076 5422
5077 5423 def run(self, dataOut, hei_ref = None, tauindex = 0,
5078 5424 phaseOffsets = None,
5079 5425 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
5080 5426 noise_timeStep = 4, noise_multiple = 4,
5081 5427 multDet_timeLimit = 1, multDet_rangeLimit = 3,
5082 5428 phaseThresh = 20, SNRThresh = 5,
5083 5429 hmin = 50, hmax=150, azimuth = 0,
5084 5430 channelPositions = None) :
5085 5431
5086 5432
5087 5433 #Getting Pairslist
5088 5434 if channelPositions is None:
5089 5435 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
5090 5436 meteorOps = SMOperations()
5091 5437 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
5092 5438 heiRang = dataOut.heightList
5093 5439 #Get Beacon signal - No Beacon signal anymore
5094 5440 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
5095 5441 # see if the user put in pre defined phase shifts
5096 5442 voltsPShift = dataOut.data_pre.copy()
5097 5443
5098 5444 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
5099 5445
5100 5446 #Remove DC
5101 5447 voltsDC = numpy.mean(voltsPShift,1)
5102 5448 voltsDC = numpy.mean(voltsDC,1)
5103 5449 for i in range(voltsDC.shape[0]):
5104 5450 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
5105 5451
5106 5452 #Don't consider the last heights; they're used to calculate the hardware phase shift
5107 5453
5108 5454 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
5109 5455 #Coherent Detection
5110 5456 if cohDetection:
5111 5457 #use coherent detection to get the net power
5112 5458 cohDet_thresh = cohDet_thresh*numpy.pi/180
5113 5459 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
5114 5460
5115 5461 #Non-coherent detection!
5116 5462 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
5117 5463 #********** END OF COH/NON-COH POWER CALCULATION**********************
5118 5464
5119 5465 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
5120 5466 #Get noise
5121 5467 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
5122 5468 #Get signal threshold
5123 5469 signalThresh = noise_multiple*noise
5124 5470 #Meteor echoes detection
5125 5471 listMeteors = self.__findMeteors(powerNet, signalThresh)
5126 5472 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CACULATION **********
5127 5473
5128 5474 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
5129 5475 #Parameters
5130 5476 heiRange = dataOut.heightList
5131 5477 rangeInterval = heiRange[1] - heiRange[0]
5132 5478 rangeLimit = multDet_rangeLimit/rangeInterval
5133 5479 timeLimit = multDet_timeLimit/dataOut.timeInterval
5134 5480 #Multiple detection removals
5135 5481 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
5136 5482 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
5137 5483
5138 5484 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
5139 5485 #Parameters
5140 5486 phaseThresh = phaseThresh*numpy.pi/180
5141 5487 thresh = [phaseThresh, noise_multiple, SNRThresh]
5142 5488 #Meteor reestimation (Errors N 1, 6, 12, 17)
5143 5489 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
5144 5490 #Estimation of decay times (Errors N 7, 8, 11)
5145 5491 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
5146 5492 #******************* END OF METEOR REESTIMATION *******************
5147 5493
5148 5494 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
5149 5495 #Calculating Radial Velocity (Error N 15)
5150 5496 radialStdThresh = 10
5151 5497 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
5152 5498
5153 5499 if len(listMeteors4) > 0:
5154 5500 #Setting New Array
5155 5501 date = dataOut.utctime
5156 5502 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
5157 5503
5158 5504 #Correcting phase offset
5159 5505 if phaseOffsets != None:
5160 5506 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
5161 5507 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
5162 5508
5163 5509 #Second Pairslist
5164 5510 pairsList = []
5165 5511 pairx = (0,1)
5166 5512 pairy = (2,3)
5167 5513 pairsList.append(pairx)
5168 5514 pairsList.append(pairy)
5169 5515
5170 5516 jph = numpy.array([0,0,0,0])
5171 5517 h = (hmin,hmax)
5172 5518 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
5173 5519 dataOut.data_param = arrayParameters
5174 5520
5175 5521 if arrayParameters is None:
5176 5522 dataOut.flagNoData = True
5177 5523 else:
5178 5524 dataOut.flagNoData = False
5179 5525
5180 5526 return
5181 5527
5182 5528 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
5183 5529
5184 5530 minIndex = min(newheis[0])
5185 5531 maxIndex = max(newheis[0])
5186 5532
5187 5533 voltage = voltage0[:,:,minIndex:maxIndex+1]
5188 5534 nLength = voltage.shape[1]//n
5189 5535 nMin = 0
5190 5536 nMax = 0
5191 5537 phaseOffset = numpy.zeros((len(pairslist),n))
5192 5538
5193 5539 for i in range(n):
5194 5540 nMax += nLength
5195 5541 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
5196 5542 phaseCCF = numpy.mean(phaseCCF, axis = 2)
5197 5543 phaseOffset[:,i] = phaseCCF.transpose()
5198 5544 nMin = nMax
5199 5545
5200 5546 #Remove Outliers
5201 5547 factor = 2
5202 5548 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
5203 5549 dw = numpy.std(wt,axis = 1)
5204 5550 dw = dw.reshape((dw.size,1))
5205 5551 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
5206 5552 phaseOffset[ind] = numpy.nan
5207 5553 phaseOffset = stats.nanmean(phaseOffset, axis=1)
5208 5554
5209 5555 return phaseOffset
5210 5556
5211 5557 def __shiftPhase(self, data, phaseShift):
5212 5558 #this will shift the phase of a complex number
5213 5559 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
5214 5560 return dataShifted
5215 5561
5216 5562 def __estimatePhaseDifference(self, array, pairslist):
5217 5563 nChannel = array.shape[0]
5218 5564 nHeights = array.shape[2]
5219 5565 numPairs = len(pairslist)
5220 5566 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
5221 5567
5222 5568 #Correct phases
5223 5569 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
5224 5570 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
5225 5571
5226 5572 if indDer[0].shape[0] > 0:
5227 5573 for i in range(indDer[0].shape[0]):
5228 5574 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
5229 5575 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
5230 5576
5231 5577 #Linear
5232 5578 phaseInt = numpy.zeros((numPairs,1))
5233 5579 angAllCCF = phaseCCF[:,[0,1,3,4],0]
5234 5580 for j in range(numPairs):
5235 5581 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
5236 5582 phaseInt[j] = fit[1]
5237 5583 #Phase Differences
5238 5584 phaseDiff = phaseInt - phaseCCF[:,2,:]
5239 5585 phaseArrival = phaseInt.reshape(phaseInt.size)
5240 5586
5241 5587 #Dealias
5242 5588 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
5243 5589
5244 5590 return phaseDiff, phaseArrival
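# Illustrative note: the lag-0 phase of each pair is re-estimated as the intercept of a
# straight line fitted to the (unwrapped) CCF phases at lags [-2,-1,1,2]; the difference
# with the measured lag-0 phase gives the per-pair correction. A hedged sketch of the fit:
#
#   import numpy
#   from scipy import stats
#   def lag0_phase_intercept(ccf_phases):
#       # ccf_phases: phases at lags [-2, -1, 1, 2], in radians, already unwrapped
#       fit = stats.linregress([-2, -1, 1, 2], ccf_phases)
#       return fit[1]          # intercept = estimated phase at lag 0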
5245 5591
5246 5592 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
5247 5593 #this function will run the coherent detection used in Holdsworth et al. 2004 and return the net power
5248 5594 #find the phase shifts of each channel over 1 second intervals
5249 5595 #only look at ranges below the beacon signal
5250 5596 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
5251 5597 numBlocks = int(volts.shape[1]/numProfPerBlock)
5252 5598 numHeights = volts.shape[2]
5253 5599 nChannel = volts.shape[0]
5254 5600 voltsCohDet = volts.copy()
5255 5601
5256 5602 pairsarray = numpy.array(pairslist)
5257 5603 indSides = pairsarray[:,1]
5258 5604 listBlocks = numpy.array_split(volts, numBlocks, 1)
5259 5605
5260 5606 startInd = 0
5261 5607 endInd = 0
5262 5608
5263 5609 for i in range(numBlocks):
5264 5610 startInd = endInd
5265 5611 endInd = endInd + listBlocks[i].shape[1]
5266 5612
5267 5613 arrayBlock = listBlocks[i]
5268 5614
5269 5615 #Estimate the Phase Difference
5270 5616 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
5271 5617 #Phase Difference RMS
5272 5618 arrayPhaseRMS = numpy.abs(phaseDiff)
5273 5619 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
5274 5620 indPhase = numpy.where(phaseRMSaux==4)
5275 5621 #Shifting
5276 5622 if indPhase[0].shape[0] > 0:
5277 5623 for j in range(indSides.size):
5278 5624 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
5279 5625 voltsCohDet[:,startInd:endInd,:] = arrayBlock
5280 5626
5281 5627 return voltsCohDet
5282 5628
5283 5629 def __calculateCCF(self, volts, pairslist ,laglist):
5284 5630
5285 5631 nHeights = volts.shape[2]
5286 5632 nPoints = volts.shape[1]
5287 5633 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
5288 5634
5289 5635 for i in range(len(pairslist)):
5290 5636 volts1 = volts[pairslist[i][0]]
5291 5637 volts2 = volts[pairslist[i][1]]
5292 5638
5293 5639 for t in range(len(laglist)):
5294 5640 idxT = laglist[t]
5295 5641 if idxT >= 0:
5296 5642 vStacked = numpy.vstack((volts2[idxT:,:],
5297 5643 numpy.zeros((idxT, nHeights),dtype='complex')))
5298 5644 else:
5299 5645 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
5300 5646 volts2[:(nPoints + idxT),:]))
5301 5647 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
5302 5648
5303 5649 vStacked = None
5304 5650 return voltsCCF
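# Illustrative sketch: __calculateCCF accumulates, for each pair and lag, the sum over
# time of conj(v1[t])*v2[t+lag], zero-padding the shifted series. For one pair of 1-D
# complex series the same quantity can be written as:
#
#   import numpy
#   def ccf_at_lag(v1, v2, lag):
#       n = v1.size
#       if lag >= 0:
#           shifted = numpy.concatenate((v2[lag:], numpy.zeros(lag, dtype=complex)))
#       else:
#           shifted = numpy.concatenate((numpy.zeros(-lag, dtype=complex), v2[:n+lag]))
#       return numpy.sum(numpy.conjugate(v1)*shifted)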
5305 5651
5306 5652 def __getNoise(self, power, timeSegment, timeInterval):
5307 5653 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
5308 5654 numBlocks = int(power.shape[0]/numProfPerBlock)
5309 5655 numHeights = power.shape[1]
5310 5656
5311 5657 listPower = numpy.array_split(power, numBlocks, 0)
5312 5658 noise = numpy.zeros((power.shape[0], power.shape[1]))
5313 5659 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
5314 5660
5315 5661 startInd = 0
5316 5662 endInd = 0
5317 5663
5318 5664 for i in range(numBlocks): #split per channel
5319 5665 startInd = endInd
5320 5666 endInd = endInd + listPower[i].shape[0]
5321 5667
5322 5668 arrayBlock = listPower[i]
5323 5669 noiseAux = numpy.mean(arrayBlock, 0)
5324 5670 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
5325 5671
5326 5672 noiseAux1 = numpy.mean(arrayBlock)
5327 5673 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
5328 5674
5329 5675 return noise, noise1
5330 5676
5331 5677 def __findMeteors(self, power, thresh):
5332 5678 nProf = power.shape[0]
5333 5679 nHeights = power.shape[1]
5334 5680 listMeteors = []
5335 5681
5336 5682 for i in range(nHeights):
5337 5683 powerAux = power[:,i]
5338 5684 threshAux = thresh[:,i]
5339 5685
5340 5686 indUPthresh = numpy.where(powerAux > threshAux)[0]
5341 5687 indDNthresh = numpy.where(powerAux <= threshAux)[0]
5342 5688
5343 5689 j = 0
5344 5690
5345 5691 while (j < indUPthresh.size - 2):
5346 5692 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
5347 5693 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
5348 5694 indDNthresh = indDNthresh[indDNAux]
5349 5695
5350 5696 if (indDNthresh.size > 0):
5351 5697 indEnd = indDNthresh[0] - 1
5352 5698 indInit = indUPthresh[j]
5353 5699
5354 5700 meteor = powerAux[indInit:indEnd + 1]
5355 5701 indPeak = meteor.argmax() + indInit
5356 5702 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
5357 5703
5358 5704 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #TODO: CHECK THIS
5359 5705 j = numpy.where(indUPthresh == indEnd)[0] + 1
5360 5706 else: j+=1
5361 5707 else: j+=1
5362 5708
5363 5709 return listMeteors
5364 5710
5365 5711 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
5366 5712
5367 5713 arrayMeteors = numpy.asarray(listMeteors)
5368 5714 listMeteors1 = []
5369 5715
5370 5716 while arrayMeteors.shape[0] > 0:
5371 5717 FLAs = arrayMeteors[:,4]
5372 5718 maxFLA = FLAs.argmax()
5373 5719 listMeteors1.append(arrayMeteors[maxFLA,:])
5374 5720
5375 5721 MeteorInitTime = arrayMeteors[maxFLA,1]
5376 5722 MeteorEndTime = arrayMeteors[maxFLA,3]
5377 5723 MeteorHeight = arrayMeteors[maxFLA,0]
5378 5724
5379 5725 #Check neighborhood
5380 5726 maxHeightIndex = MeteorHeight + rangeLimit
5381 5727 minHeightIndex = MeteorHeight - rangeLimit
5382 5728 minTimeIndex = MeteorInitTime - timeLimit
5383 5729 maxTimeIndex = MeteorEndTime + timeLimit
5384 5730
5385 5731 #Check Heights
5386 5732 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
5387 5733 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
5388 5734 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
5389 5735
5390 5736 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
5391 5737
5392 5738 return listMeteors1
5393 5739
5394 5740 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
5395 5741 numHeights = volts.shape[2]
5396 5742 nChannel = volts.shape[0]
5397 5743
5398 5744 thresholdPhase = thresh[0]
5399 5745 thresholdNoise = thresh[1]
5400 5746 thresholdDB = float(thresh[2])
5401 5747
5402 5748 thresholdDB1 = 10**(thresholdDB/10)
5403 5749 pairsarray = numpy.array(pairslist)
5404 5750 indSides = pairsarray[:,1]
5405 5751
5406 5752 pairslist1 = list(pairslist)
5407 5753 pairslist1.append((0,1))
5408 5754 pairslist1.append((3,4))
5409 5755
5410 5756 listMeteors1 = []
5411 5757 listPowerSeries = []
5412 5758 listVoltageSeries = []
5413 5759 #volts has the raw data
5414 5760
5415 5761 if frequency == 30e6:
5416 5762 timeLag = 45*10**-3
5417 5763 else:
5418 5764 timeLag = 15*10**-3
5419 5765 lag = numpy.ceil(timeLag/timeInterval)
5420 5766
5421 5767 for i in range(len(listMeteors)):
5422 5768
5423 5769 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
5424 5770 meteorAux = numpy.zeros(16)
5425 5771
5426 5772 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
5427 5773 mHeight = listMeteors[i][0]
5428 5774 mStart = listMeteors[i][1]
5429 5775 mPeak = listMeteors[i][2]
5430 5776 mEnd = listMeteors[i][3]
5431 5777
5432 5778 #get the volt data between the start and end times of the meteor
5433 5779 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
5434 5780 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
5435 5781
5436 5782 #3.6. Phase Difference estimation
5437 5783 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
5438 5784
5439 5785 #3.7. Phase difference removal & meteor start, peak and end times reestimated
5440 5786 #meteorVolts0.- all Channels, all Profiles
5441 5787 meteorVolts0 = volts[:,:,mHeight]
5442 5788 meteorThresh = noise[:,mHeight]*thresholdNoise
5443 5789 meteorNoise = noise[:,mHeight]
5444 5790 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
5445 5791 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
5446 5792
5447 5793 #Times reestimation
5448 5794 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
5449 5795 if mStart1.size > 0:
5450 5796 mStart1 = mStart1[-1] + 1
5451 5797
5452 5798 else:
5453 5799 mStart1 = mPeak
5454 5800
5455 5801 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
5456 5802 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
5457 5803 if mEndDecayTime1.size == 0:
5458 5804 mEndDecayTime1 = powerNet0.size
5459 5805 else:
5460 5806 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
5461 5807
5462 5808 #meteorVolts1.- all Channels, from start to end
5463 5809 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
5464 5810 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
5465 5811 if meteorVolts2.shape[1] == 0:
5466 5812 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
5467 5813 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
5468 5814 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
5469 5815 ##################### END PARAMETERS REESTIMATION #########################
5470 5816
5471 5817 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
5472 5818 if meteorVolts2.shape[1] > 0:
5473 5819 #Phase Difference re-estimation
5474 5820 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
5475 5821 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
5476 5822 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
5477 5823 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
5478 5824
5479 5825 #Phase Difference RMS
5480 5826 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
5481 5827 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
5482 5828 #Data from Meteor
5483 5829 mPeak1 = powerNet1.argmax() + mStart1
5484 5830 mPeakPower1 = powerNet1.max()
5485 5831 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
5486 5832 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
5487 5833 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
5488 5834 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
5489 5835 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
5490 5836 #Vectorize
5491 5837 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
5492 5838 meteorAux[7:11] = phaseDiffint[0:4]
5493 5839
5494 5840 #Rejection Criterions
5495 5841 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
5496 5842 meteorAux[-1] = 17
5497 5843 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
5498 5844 meteorAux[-1] = 1
5499 5845
5500 5846
5501 5847 else:
5502 5848 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
5503 5849 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
5504 5850 PowerSeries = 0
5505 5851
5506 5852 listMeteors1.append(meteorAux)
5507 5853 listPowerSeries.append(PowerSeries)
5508 5854 listVoltageSeries.append(meteorVolts1)
5509 5855
5510 5856 return listMeteors1, listPowerSeries, listVoltageSeries
5511 5857
5512 5858 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
5513 5859
5514 5860 threshError = 10
5515 5861 #Depending if it is 30 or 50 MHz
5516 5862 if frequency == 30e6:
5517 5863 timeLag = 45*10**-3
5518 5864 else:
5519 5865 timeLag = 15*10**-3
5520 5866 lag = numpy.ceil(timeLag/timeInterval)
5521 5867
5522 5868 listMeteors1 = []
5523 5869
5524 5870 for i in range(len(listMeteors)):
5525 5871 meteorPower = listPower[i]
5526 5872 meteorAux = listMeteors[i]
5527 5873
5528 5874 if meteorAux[-1] == 0:
5529 5875
5530 5876 try:
5531 5877 indmax = meteorPower.argmax()
5532 5878 indlag = indmax + lag
5533 5879
5534 5880 y = meteorPower[indlag:]
5535 5881 x = numpy.arange(0, y.size)*timeLag
5536 5882
5537 5883 #first guess
5538 5884 a = y[0]
5539 5885 tau = timeLag
5540 5886 #exponential fit
5541 5887 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
5542 5888 y1 = self.__exponential_function(x, *popt)
5543 5889 #error estimation
5544 5890 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
5545 5891
5546 5892 decayTime = popt[1]
5547 5893 riseTime = indmax*timeInterval
5548 5894 meteorAux[11:13] = [decayTime, error]
5549 5895
5550 5896 #Table items 7, 8 and 11
5551 5897 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
5552 5898 meteorAux[-1] = 7
5553 5899 elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than twice rise time
5554 5900 meteorAux[-1] = 8
5555 5901 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
5556 5902 meteorAux[-1] = 11
5557 5903
5558 5904
5559 5905 except:
5560 5906 meteorAux[-1] = 11
5561 5907
5562 5908
5563 5909 listMeteors1.append(meteorAux)
5564 5910
5565 5911 return listMeteors1
5566 5912
5567 5913 #Exponential Function
5568 5914
5569 5915 def __exponential_function(self, x, a, tau):
5570 5916 y = a*numpy.exp(-x/tau)
5571 5917 return y
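# Illustrative sketch: __estimateDecayTime fits a*exp(-x/tau) to the post-peak power with
# scipy.optimize.curve_fit and uses a variance-normalized residual as a goodness-of-fit
# metric. A standalone version under the same assumptions (x in seconds, y linear power):
#
#   import numpy
#   from scipy import optimize
#   def fit_decay(x, y):
#       expf = lambda t, a, tau: a*numpy.exp(-t/tau)
#       popt, pcov = optimize.curve_fit(expf, x, y, p0=[y[0], x[1] - x[0]])
#       resid = numpy.sum((y - expf(x, *popt))**2)/(numpy.var(y)*(y.size - popt.size))
#       return popt[1], resid      # decay time and fit-quality metric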
5572 5918
5573 5919 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
5574 5920
5575 5921 pairslist1 = list(pairslist)
5576 5922 pairslist1.append((0,1))
5577 5923 pairslist1.append((3,4))
5578 5924 numPairs = len(pairslist1)
5579 5925 #Time Lag
5580 5926 timeLag = 45*10**-3
5581 5927 c = 3e8
5582 5928 lag = numpy.ceil(timeLag/timeInterval)
5583 5929 freq = 30e6
5584 5930
5585 5931 listMeteors1 = []
5586 5932
5587 5933 for i in range(len(listMeteors)):
5588 5934 meteorAux = listMeteors[i]
5589 5935 if meteorAux[-1] == 0:
5590 5936 mStart = listMeteors[i][1]
5591 5937 mPeak = listMeteors[i][2]
5592 5938 mLag = mPeak - mStart + lag
5593 5939
5594 5940 #get the volt data between the start and end times of the meteor
5595 5941 meteorVolts = listVolts[i]
5596 5942 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
5597 5943
5598 5944 #Get CCF
5599 5945 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
5600 5946
5601 5947 #Method 2
5602 5948 slopes = numpy.zeros(numPairs)
5603 5949 time = numpy.array([-2,-1,1,2])*timeInterval
5604 5950 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
5605 5951
5606 5952 #Correct phases
5607 5953 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
5608 5954 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
5609 5955
5610 5956 if indDer[0].shape[0] > 0:
5611 5957 for k in range(indDer[0].shape[0]):
5612 5958 signo = -numpy.sign(derPhaseCCF[indDer[0][k],indDer[1][k]])
5613 5959 angAllCCF[indDer[0][k],indDer[1][k]+1:] += signo*2*numpy.pi
5614 5960
5615 5961 for j in range(numPairs):
5616 5962 fit = stats.linregress(time, angAllCCF[j,:])
5617 5963 slopes[j] = fit[0]
5618 5964
5619 5965 #Remove Outlier
5620 5966 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
5621 5967 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
5622 5968 meteorAux[-2] = radialError
5623 5969 meteorAux[-3] = radialVelocity
5624 5970
5625 5971 #Setting Error
5626 5972 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
5627 5973 if numpy.abs(radialVelocity) > 200:
5628 5974 meteorAux[-1] = 15
5629 5975 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
5630 5976 elif radialError > radialStdThresh:
5631 5977 meteorAux[-1] = 12
5632 5978
5633 5979 listMeteors1.append(meteorAux)
5634 5980 return listMeteors1
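# Illustrative note: the radial velocity above comes from the slope of the CCF phase
# versus lag,  v_r = -(lambda/(4*pi))*dphi/dtau  with lambda = c/freq, which is what
# -mean(slopes)*(0.25/pi)*(c/freq) evaluates. A hedged single-pair sketch:
#
#   import numpy
#   from scipy import stats
#   def radial_velocity_from_phase(lag_seconds, ccf_phase, lamb):
#       slope = stats.linregress(lag_seconds, ccf_phase)[0]   # rad/s
#       return -slope*lamb/(4*numpy.pi)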
5635 5981
5636 5982 def __setNewArrays(self, listMeteors, date, heiRang):
5637 5983
5638 5984 #New arrays
5639 5985 arrayMeteors = numpy.array(listMeteors)
5640 5986 arrayParameters = numpy.zeros((len(listMeteors), 13))
5641 5987
5642 5988 #Date inclusion
5643 5989 arrayDate = numpy.tile(date, (len(listMeteors)))
5644 5990
5645 5991 #Meteor array
5646 5992 #Parameters Array
5647 5993 arrayParameters[:,0] = arrayDate #Date
5648 5994 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
5649 5995 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
5650 5996 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
5651 5997 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
5652 5998
5653 5999
5654 6000 return arrayParameters
5655 6001
5656 6002 class CorrectSMPhases(Operation):
5657 6003
5658 6004 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
5659 6005
5660 6006 arrayParameters = dataOut.data_param
5661 6007 pairsList = []
5662 6008 pairx = (0,1)
5663 6009 pairy = (2,3)
5664 6010 pairsList.append(pairx)
5665 6011 pairsList.append(pairy)
5666 6012 jph = numpy.zeros(4)
5667 6013
5668 6014 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
5669 6015 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
5670 6016 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
5671 6017
5672 6018 meteorOps = SMOperations()
5673 6019 if channelPositions is None:
5674 6020 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
5675 6021 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
5676 6022
5677 6023 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
5678 6024 h = (hmin,hmax)
5679 6025
5680 6026 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
5681 6027
5682 6028 dataOut.data_param = arrayParameters
5683 6029 return
5684 6030
5685 6031 class SMPhaseCalibration(Operation):
5686 6032
5687 6033 __buffer = None
5688 6034
5689 6035 __initime = None
5690 6036
5691 6037 __dataReady = False
5692 6038
5693 6039 __isConfig = False
5694 6040
5695 6041 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
5696 6042
5697 6043 dataTime = currentTime + paramInterval
5698 6044 deltaTime = dataTime - initTime
5699 6045
5700 6046 if deltaTime >= outputInterval or deltaTime < 0:
5701 6047 return True
5702 6048
5703 6049 return False
5704 6050
5705 6051 def __getGammas(self, pairs, d, phases):
5706 6052 gammas = numpy.zeros(2)
5707 6053
5708 6054 for i in range(len(pairs)):
5709 6055
5710 6056 pairi = pairs[i]
5711 6057
5712 6058 phip3 = phases[:,pairi[0]]
5713 6059 d3 = d[pairi[0]]
5714 6060 phip2 = phases[:,pairi[1]]
5715 6061 d2 = d[pairi[1]]
5716 6062 #Calculating gamma
5717 6063 jgamma = -phip2*d3/d2 - phip3
5718 6064 jgamma = numpy.angle(numpy.exp(1j*jgamma))
5719 6065
5720 6066 #Revised distribution
5721 6067 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
5722 6068
5723 6069 #Histogram
5724 6070 nBins = 64
5725 6071 rmin = -0.5*numpy.pi
5726 6072 rmax = 0.5*numpy.pi
5727 6073 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
5728 6074
5729 6075 meteorsY = phaseHisto[0]
5730 6076 phasesX = phaseHisto[1][:-1]
5731 6077 width = phasesX[1] - phasesX[0]
5732 6078 phasesX += width/2
5733 6079
5734 6080 #Gaussian approximation
5735 6081 bpeak = meteorsY.argmax()
5736 6082 peak = meteorsY.max()
5737 6083 jmin = bpeak - 5
5738 6084 jmax = bpeak + 5 + 1
5739 6085
5740 6086 if jmin<0:
5741 6087 jmin = 0
5742 6088 jmax = 6
5743 6089 elif jmax > meteorsY.size:
5744 6090 jmin = meteorsY.size - 6
5745 6091 jmax = meteorsY.size
5746 6092
5747 6093 x0 = numpy.array([peak,bpeak,50])
5748 6094 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
5749 6095
5750 6096 #Gammas
5751 6097 gammas[i] = coeff[0][1]
5752 6098
5753 6099 return gammas
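# Illustrative sketch: __getGammas histograms the candidate phase values and least-squares
# fits a Gaussian around the dominant bin; the fitted center is the calibration angle.
# A minimal version of that fit step (seeded with the bin center and a few-bin width,
# an assumption here; the operational code seeds the fit in bin units):
#
#   import numpy
#   from scipy import optimize
#   def fit_histogram_peak(counts, centers):
#       gauss = lambda t, c: c[0]*numpy.exp(-0.5*((t - c[1])/c[2])**2)
#       resid = lambda c, y, t: y - gauss(t, c)
#       x0 = numpy.array([counts.max(), centers[counts.argmax()], 5.0*(centers[1]-centers[0])])
#       coeff = optimize.leastsq(resid, x0, args=(counts, centers))
#       return coeff[0][1]         # fitted peak position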
5754 6100
5755 6101 def __residualFunction(self, coeffs, y, t):
5756 6102
5757 6103 return y - self.__gauss_function(t, coeffs)
5758 6104
5759 6105 def __gauss_function(self, t, coeffs):
5760 6106
5761 6107 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
5762 6108
5763 6109 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
5764 6110 meteorOps = SMOperations()
5765 6111 nchan = 4
5766 6112 pairx = pairsList[0] #x es 0
5767 6113 pairy = pairsList[1] #y es 1
5768 6114 center_xangle = 0
5769 6115 center_yangle = 0
5770 6116 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
5771 6117 ntimes = len(range_angle)
5772 6118
5773 6119 nstepsx = 20
5774 6120 nstepsy = 20
5775 6121
5776 6122 for iz in range(ntimes):
5777 6123 min_xangle = -range_angle[iz]/2 + center_xangle
5778 6124 max_xangle = range_angle[iz]/2 + center_xangle
5779 6125 min_yangle = -range_angle[iz]/2 + center_yangle
5780 6126 max_yangle = range_angle[iz]/2 + center_yangle
5781 6127
5782 6128 inc_x = (max_xangle-min_xangle)/nstepsx
5783 6129 inc_y = (max_yangle-min_yangle)/nstepsy
5784 6130
5785 6131 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
5786 6132 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
5787 6133 penalty = numpy.zeros((nstepsx,nstepsy))
5788 6134 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
5789 6135 jph = numpy.zeros(nchan)
5790 6136
5791 6137 # Iterations looking for the offset
5792 6138 for iy in range(int(nstepsy)):
5793 6139 for ix in range(int(nstepsx)):
5794 6140 d3 = d[pairsList[1][0]]
5795 6141 d2 = d[pairsList[1][1]]
5796 6142 d5 = d[pairsList[0][0]]
5797 6143 d4 = d[pairsList[0][1]]
5798 6144
5799 6145 alp2 = alpha_y[iy] #gamma 1
5800 6146 alp4 = alpha_x[ix] #gamma 0
5801 6147
5802 6148 alp3 = -alp2*d3/d2 - gammas[1]
5803 6149 alp5 = -alp4*d5/d4 - gammas[0]
5804 6150 jph[pairsList[0][1]] = alp4
5805 6151 jph[pairsList[0][0]] = alp5
5806 6152 jph[pairsList[1][0]] = alp3
5807 6153 jph[pairsList[1][1]] = alp2
5808 6154 jph_array[:,ix,iy] = jph
5809 6155 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
5810 6156 error = meteorsArray1[:,-1]
5811 6157 ind1 = numpy.where(error==0)[0]
5812 6158 penalty[ix,iy] = ind1.size
5813 6159
5814 6160 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
5815 6161 phOffset = jph_array[:,i,j]
5816 6162
5817 6163 center_xangle = phOffset[pairx[1]]
5818 6164 center_yangle = phOffset[pairy[1]]
5819 6165
5820 6166 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
5821 6167 phOffset = phOffset*180/numpy.pi
5822 6168 return phOffset
5823 6169
5824 6170
5825 6171 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
5826 6172
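# Accumulates meteor parameters for nHours; once the buffer spans outputInterval, the receiver phase offsets are estimated and stored (sign-reversed) in data_output.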
5827 6173 dataOut.flagNoData = True
5828 6174 self.__dataReady = False
5829 6175 dataOut.outputInterval = nHours*3600
5830 6176
5831 6177 if self.__isConfig == False:
5832 6178 #Get Initial LTC time
5833 6179 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
5834 6180 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
5835 6181
5836 6182 self.__isConfig = True
5837 6183
5838 6184 if self.__buffer is None:
5839 6185 self.__buffer = dataOut.data_param.copy()
5840 6186
5841 6187 else:
5842 6188 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
5843 6189
5844 6190 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
5845 6191
5846 6192 if self.__dataReady:
5847 6193 dataOut.utctimeInit = self.__initime
5848 6194 self.__initime += dataOut.outputInterval #to erase time offset
5849 6195
5850 6196 freq = dataOut.frequency
5851 6197 c = dataOut.C #m/s
5852 6198 lamb = c/freq
5853 6199 k = 2*numpy.pi/lamb
5854 6200 azimuth = 0
5855 6201 h = (hmin, hmax)
5856 6202
5857 6203 if channelPositions is None:
5858 6204 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Star configuration
5859 6205 meteorOps = SMOperations()
5860 6206 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
5861 6207
5862 6208 #Checking correct order of pairs
5863 6209 pairs = []
5864 6210 if distances[1] > distances[0]:
5865 6211 pairs.append((1,0))
5866 6212 else:
5867 6213 pairs.append((0,1))
5868 6214
5869 6215 if distances[3] > distances[2]:
5870 6216 pairs.append((3,2))
5871 6217 else:
5872 6218 pairs.append((2,3))
5873 6219
5874 6220 meteorsArray = self.__buffer
5875 6221 error = meteorsArray[:,-1]
5876 6222 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
5877 6223 ind1 = numpy.where(boolError)[0]
5878 6224 meteorsArray = meteorsArray[ind1,:]
5879 6225 meteorsArray[:,-1] = 0
5880 6226 phases = meteorsArray[:,8:12]
5881 6227
5882 6228 #Calculate Gammas
5883 6229 gammas = self.__getGammas(pairs, distances, phases)
5884 6230 #Calculate Phases
5885 6231 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
5886 6232 phasesOff = phasesOff.reshape((1,phasesOff.size))
5887 6233 dataOut.data_output = -phasesOff
5888 6234 dataOut.flagNoData = False
5889 6235 self.__buffer = None
5890 6236
5891 6237
5892 6238 return
5893 6239
5894 6240 class SMOperations():
5895 6241
5896 6242 def __init__(self):
5897 6243
5898 6244 return
5899 6245
5900 6246 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
5901 6247
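# Re-evaluates AOA (error codes 3 and 4) and heights (error codes 13 and 14) after applying the phase offsets jph to the measured phases.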
5902 6248 arrayParameters = arrayParameters0.copy()
5903 6249 hmin = h[0]
5904 6250 hmax = h[1]
5905 6251
5906 6252 #Calculate AOA (Error N 3, 4)
5907 6253 #JONES ET AL. 1998
5908 6254 AOAthresh = numpy.pi/8
5909 6255 error = arrayParameters[:,-1]
5910 6256 phases = -arrayParameters[:,8:12] + jph
5911 6257 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
5912 6258
5913 6259 #Calculate Heights (Error N 13 and 14)
5914 6260 error = arrayParameters[:,-1]
5915 6261 Ranges = arrayParameters[:,1]
5916 6262 zenith = arrayParameters[:,4]
5917 6263 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
5918 6264
5919 6265 #----------------------- Get Final data ------------------------------------
5920 6266
5921 6267 return arrayParameters
5922 6268
5923 6269 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
5924 6270
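# Azimuth and zenith angles from the direction cosines; the third column holds the discrepancy between the two cosine estimates, later used to flag error 4.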
5925 6271 arrayAOA = numpy.zeros((phases.shape[0],3))
5926 6272 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
5927 6273
5928 6274 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
5929 6275 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
5930 6276 arrayAOA[:,2] = cosDirError
5931 6277
5932 6278 azimuthAngle = arrayAOA[:,0]
5933 6279 zenithAngle = arrayAOA[:,1]
5934 6280
5935 6281 #Setting Error
5936 6282 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
5937 6283 error[indError] = 0
5938 6284 #Number 3: AOA not feasible
5939 6285 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
5940 6286 error[indInvalid] = 3
5941 6287 #Number 4: Large difference in AOAs obtained from different antenna baselines
5942 6288 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
5943 6289 error[indInvalid] = 4
5944 6290 return arrayAOA, error
5945 6291
5946 6292 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
5947 6293
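# Two direction-cosine estimates per axis: a coarse one from the wrapped phase sum and a refined one from the phase difference, where the 2*pi ambiguity (ang_aux) closest to the coarse estimate is selected.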
5948 6294 #Initializing some variables
5949 6295 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
5950 6296 ang_aux = ang_aux.reshape(1,ang_aux.size)
5951 6297
5952 6298 cosdir = numpy.zeros((arrayPhase.shape[0],2))
5953 6299 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
5954 6300
5955 6301
5956 6302 for i in range(2):
5957 6303 ph0 = arrayPhase[:,pairsList[i][0]]
5958 6304 ph1 = arrayPhase[:,pairsList[i][1]]
5959 6305 d0 = distances[pairsList[i][0]]
5960 6306 d1 = distances[pairsList[i][1]]
5961 6307
5962 6308 ph0_aux = ph0 + ph1
5963 6309 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
5964 6310 #First Estimation
5965 6311 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
5966 6312
5967 6313 #Most-Accurate Second Estimation
5968 6314 phi1_aux = ph0 - ph1
5969 6315 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
5970 6316 #Direction Cosine 1
5971 6317 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
5972 6318
5973 6319 #Searching the correct Direction Cosine
5974 6320 cosdir0_aux = cosdir0[:,i]
5975 6321 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
5976 6322 #Minimum Distance
5977 6323 cosDiff = (cosdir1 - cosdir0_aux)**2
5978 6324 indcos = cosDiff.argmin(axis = 1)
5979 6325 #Saving Value obtained
5980 6326 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
5981 6327
5982 6328 return cosdir0, cosdir
5983 6329
5984 6330 def __calculateAOA(self, cosdir, azimuth):
5985 6331 cosdirX = cosdir[:,0]
5986 6332 cosdirY = cosdir[:,1]
5987 6333
5988 6334 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
5989 6335 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
5990 6336 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
5991 6337
5992 6338 return angles
5993 6339
5994 6340 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
5995 6341
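# Resolves range aliasing by testing candidate ranges R + n*Ramb (n = 0, 1, 2), converting each to height with h = sqrt(Re**2 + Ri**2 + 2*Re*Ri*cos(zenith)) - Re, and keeping only echoes with exactly one height inside [minHeight, maxHeight].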
5996 6342 Ramb = 375 #Ramb = c/(2*PRF), range ambiguity in km
5997 6343 Re = 6371 #Earth radius in km
5998 6344 heights = numpy.zeros(Ranges.shape)
5999 6345
6000 6346 R_aux = numpy.array([0,1,2])*Ramb
6001 6347 R_aux = R_aux.reshape(1,R_aux.size)
6002 6348
6003 6349 Ranges = Ranges.reshape(Ranges.size,1)
6004 6350
6005 6351 Ri = Ranges + R_aux
6006 6352 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
6007 6353
6008 6354 #Keep only echoes with exactly one candidate height between minHeight and maxHeight (nominally 70 and 110 km)
6009 6355 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
6010 6356 ind_h = numpy.where(h_bool == 1)[0]
6011 6357
6012 6358 hCorr = hi[ind_h, :]
6013 6359 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
6014 6360
6015 6361 hCorr = hi[ind_hCorr][:len(ind_h)]
6016 6362 heights[ind_h] = hCorr
6017 6363
6018 6364 #Setting Error
6019 6365 #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
6020 6366 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
6021 6367 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
6022 6368 error[indError] = 0
6023 6369 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
6024 6370 error[indInvalid2] = 14
6025 6371 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
6026 6372 error[indInvalid1] = 13
6027 6373
6028 6374 return heights, error
6029 6375
6030 6376 def getPhasePairs(self, channelPositions):
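# Groups the channel pairs into x and y baselines (pairs sharing one coordinate), keeps the two shortest baselines per axis and orders each pair as (common channel, outer channel); returns the four pairs and their distances.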
6031 6377 chanPos = numpy.array(channelPositions)
6032 6378 listOper = list(itertools.combinations(list(range(5)),2))
6033 6379
6034 6380 distances = numpy.zeros(4)
6035 6381 axisX = []
6036 6382 axisY = []
6037 6383 distX = numpy.zeros(3)
6038 6384 distY = numpy.zeros(3)
6039 6385 ix = 0
6040 6386 iy = 0
6041 6387
6042 6388 pairX = numpy.zeros((2,2))
6043 6389 pairY = numpy.zeros((2,2))
6044 6390
6045 6391 for i in range(len(listOper)):
6046 6392 pairi = listOper[i]
6047 6393
6048 6394 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
6049 6395
6050 6396 if posDif[0] == 0:
6051 6397 axisY.append(pairi)
6052 6398 distY[iy] = posDif[1]
6053 6399 iy += 1
6054 6400 elif posDif[1] == 0:
6055 6401 axisX.append(pairi)
6056 6402 distX[ix] = posDif[0]
6057 6403 ix += 1
6058 6404
6059 6405 for i in range(2):
6060 6406 if i==0:
6061 6407 dist0 = distX
6062 6408 axis0 = axisX
6063 6409 else:
6064 6410 dist0 = distY
6065 6411 axis0 = axisY
6066 6412
6067 6413 side = numpy.argsort(dist0)[:-1]
6068 6414 axis0 = numpy.array(axis0)[side,:]
6069 6415 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
6070 6416 axis1 = numpy.unique(numpy.reshape(axis0,4))
6071 6417 side = axis1[axis1 != chanC]
6072 6418 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
6073 6419 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
6074 6420 if diff1<0:
6075 6421 chan2 = side[0]
6076 6422 d2 = numpy.abs(diff1)
6077 6423 chan1 = side[1]
6078 6424 d1 = numpy.abs(diff2)
6079 6425 else:
6080 6426 chan2 = side[1]
6081 6427 d2 = numpy.abs(diff2)
6082 6428 chan1 = side[0]
6083 6429 d1 = numpy.abs(diff1)
6084 6430
6085 6431 if i==0:
6086 6432 chanCX = chanC
6087 6433 chan1X = chan1
6088 6434 chan2X = chan2
6089 6435 distances[0:2] = numpy.array([d1,d2])
6090 6436 else:
6091 6437 chanCY = chanC
6092 6438 chan1Y = chan1
6093 6439 chan2Y = chan2
6094 6440 distances[2:4] = numpy.array([d1,d2])
6095 6441
6096 6442 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
6097 6443
6098 6444 return pairslist, distances
6099 6445
6100 6446 class IGRFModel(Operation):
6101 6447 """Operation to calculate Geomagnetic parameters.
6102 6448
6103 6449 Parameters
6104 6450 ----------
6105 6451 None
6106 6452
6107 6453 Example
6108 6454 --------
6109 6455
6110 6456 op = proc_unit.addOperation(name='IGRFModel', optype='other')
6111 6457
6112 6458 """
6113 6459
6114 6460 def __init__(self, **kwargs):
6115 6461
6116 6462 Operation.__init__(self, **kwargs)
6117 6463
6118 6464 self.aux=1
6119 6465
6120 6466 def run(self,dataOut):
6121 6467
6122 6468 try:
6123 6469 from schainpy.model.proc import mkfact_short_2020
6124 6470 except ImportError:
6125 6471 log.warning('The "mkfact_short_2020" module must be installed to process the IGRF model')
6126 6472
6127 6473 if self.aux==1:
6128 6474
6129 6475 #dataOut.TimeBlockSeconds_First_Time=time.mktime(time.strptime(dataOut.TimeBlockDate))
6130 6476 #### we do not use dataOut.datatime.ctime() because it's the time of the second (next) block
6131 6477 dataOut.TimeBlockSeconds_First_Time=dataOut.TimeBlockSeconds
6132 6478 dataOut.bd_time=time.gmtime(dataOut.TimeBlockSeconds_First_Time)
6133 6479 dataOut.year=dataOut.bd_time.tm_year+(dataOut.bd_time.tm_yday-1)/364.0
6134 6480 dataOut.ut=dataOut.bd_time.tm_hour+dataOut.bd_time.tm_min/60.0+dataOut.bd_time.tm_sec/3600.0
6135 6481
6136 6482 self.aux=0
6137 6483
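# Height grid in steps of 15 (km) per range gate; output arrays are created in Fortran order so mkfact can fill them in place.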
6138 6484 dataOut.h=numpy.arange(0.0,15.0*dataOut.MAXNRANGENDT,15.0,dtype='float32')
6139 6485 dataOut.bfm=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
6140 6486 dataOut.bfm=numpy.array(dataOut.bfm,order='F')
6141 6487 dataOut.thb=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
6142 6488 dataOut.thb=numpy.array(dataOut.thb,order='F')
6143 6489 dataOut.bki=numpy.zeros(dataOut.MAXNRANGENDT,dtype='float32')
6144 6490 dataOut.bki=numpy.array(dataOut.bki,order='F')
6145 6491
6146 6492 mkfact_short_2020.mkfact(dataOut.year,dataOut.h,dataOut.bfm,dataOut.thb,dataOut.bki,dataOut.MAXNRANGENDT)
6147 6493
6148 6494 return dataOut
6149 6495
6150 6496 class MergeProc(ProcessingUnit):
6151 6497
6152 6498 def __init__(self):
6153 6499 ProcessingUnit.__init__(self)
6154 6500
6155 6501 def run(self, attr_data, attr_data_2 = None, attr_data_3 = None, attr_data_4 = None, attr_data_5 = None, mode=0):
6156 6502
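# mode 0: concatenate attr_data over inputs; mode 1: keep DP and LP lag spectra separately (hybrid); mode 2: sum attr_data over inputs (HAE 2022); mode 4: hybrid LP-SSheightProfiles merge (spectra from the first input, ACF from the second); mode 5: concatenate attr_data and attr_data_2.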
6157 6503 self.dataOut = getattr(self, self.inputs[0])
6158 6504 data_inputs = [getattr(self, attr) for attr in self.inputs]
6159 6505 #print(self.inputs)
6160 6506 #print(numpy.shape([getattr(data, attr_data) for data in data_inputs][1]))
6161 6507 #exit(1)
6162 6508 if mode==0:
6163 6509 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
6164 6510 setattr(self.dataOut, attr_data, data)
6165 6511
6166 6512 if mode==1: #Hybrid
6167 6513 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
6168 6514 #setattr(self.dataOut, attr_data, data)
6169 6515 setattr(self.dataOut, 'dataLag_spc', [getattr(data, attr_data) for data in data_inputs][0])
6170 6516 setattr(self.dataOut, 'dataLag_spc_LP', [getattr(data, attr_data) for data in data_inputs][1])
6171 6517 setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
6172 6518 setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
6173 6519 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
6174 6520 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
6175 6521 '''
6176 6522 print(self.dataOut.dataLag_spc_LP.shape)
6177 6523 print(self.dataOut.dataLag_cspc_LP.shape)
6178 6524 exit(1)
6179 6525 '''
6180 6526
6181 6527 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
6182 6528 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
6183 6529 '''
6184 6530 print("Merge")
6185 6531 print(numpy.shape(self.dataOut.dataLag_spc))
6186 6532 print(numpy.shape(self.dataOut.dataLag_spc_LP))
6187 6533 print(numpy.shape(self.dataOut.dataLag_cspc))
6188 6534 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
6189 6535 exit(1)
6190 6536 '''
6191 6537 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
6192 6538 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
6193 6539 #exit(1)
6194 6540 #print(self.dataOut.NDP)
6195 6541 #print(self.dataOut.nNoiseProfiles)
6196 6542
6197 6543 #self.dataOut.nIncohInt_LP = 128
6198 6544 self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
6199 6545 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt
6200 6546 self.dataOut.NLAG = 16
6201 6547 self.dataOut.NRANGE = 200
6202 6548 self.dataOut.NSCAN = 128
6203 6549 #print(numpy.shape(self.dataOut.data_spc))
6204 6550
6205 6551 #exit(1)
6206 6552
6207 6553 if mode==2: #HAE 2022
6208 6554 data = numpy.sum([getattr(data, attr_data) for data in data_inputs],axis=0)
6209 6555 setattr(self.dataOut, attr_data, data)
6210 6556
6211 6557 self.dataOut.nIncohInt *= 2
6212 6558 #meta = self.dataOut.getFreqRange(1)/1000.
6213 6559 self.dataOut.freqRange = self.dataOut.getFreqRange(1)/1000.
6214 6560
6215 6561 #exit(1)
6216 6562
6217 6563 if mode==4: #Hybrid LP-SSheightProfiles
6218 6564 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
6219 6565 #setattr(self.dataOut, attr_data, data)
6220 6566 setattr(self.dataOut, 'dataLag_spc', getattr(data_inputs[0], attr_data)) #DP
6221 6567 setattr(self.dataOut, 'dataLag_cspc', getattr(data_inputs[0], attr_data_2)) #DP
6222 6568 setattr(self.dataOut, 'dataLag_spc_LP', getattr(data_inputs[1], attr_data_3)) #LP
6223 6569 #setattr(self.dataOut, 'dataLag_cspc_LP', getattr(data_inputs[1], attr_data_4)) #LP
6224 6570 #setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
6225 6571 setattr(self.dataOut, 'data_acf', getattr(data_inputs[1], attr_data_5)) #LP
6226 6572 #print("Merge data_acf: ",self.dataOut.data_acf.shape)
6227 6573 #exit(1)
6228 6574 #print(self.dataOut.data_spc_LP.shape)
6229 6575 #print("Exit")
6230 6576 #exit(1)
6231 6577 #setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
6232 6578 #setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
6233 6579 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
6234 6580 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
6235 6581 '''
6236 6582 print(self.dataOut.dataLag_spc_LP.shape)
6237 6583 print(self.dataOut.dataLag_cspc_LP.shape)
6238 6584 exit(1)
6239 6585 '''
6240 6586 '''
6241 6587 print(self.dataOut.dataLag_spc_LP[0,:,100])
6242 6588 print(self.dataOut.dataLag_spc_LP[1,:,100])
6243 6589 exit(1)
6244 6590 '''
6245 6591 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
6246 6592 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
6247 6593 '''
6248 6594 print("Merge")
6249 6595 print(numpy.shape(self.dataOut.dataLag_spc))
6250 6596 print(numpy.shape(self.dataOut.dataLag_spc_LP))
6251 6597 print(numpy.shape(self.dataOut.dataLag_cspc))
6252 6598 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
6253 6599 exit(1)
6254 6600 '''
6255 6601 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
6256 6602 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
6257 6603 #exit(1)
6258 6604 #print(self.dataOut.NDP)
6259 6605 #print(self.dataOut.nNoiseProfiles)
6260 6606
6261 6607 #self.dataOut.nIncohInt_LP = 128
6262 6608 #self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
6263 6609 self.dataOut.nProfiles_LP = 16#28#self.dataOut.nIncohInt_LP
6264 6610 self.dataOut.nProfiles_LP = self.dataOut.data_acf.shape[1]#28#self.dataOut.nIncohInt_LP
6265 6611 self.dataOut.NSCAN = 128
6266 6612 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt*self.dataOut.NSCAN
6267 6613 #print("sahpi",self.dataOut.nIncohInt_LP)
6268 6614 #exit(1)
6269 6615 self.dataOut.NLAG = 16
6270 6616 self.dataOut.NRANGE = self.dataOut.data_acf.shape[-1]
6271 6617
6272 6618 #print(numpy.shape(self.dataOut.data_spc))
6273 6619
6274 6620 #exit(1)
6275 6621 if mode==5:
6276 6622 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
6277 6623 setattr(self.dataOut, attr_data, data)
6278 6624 data = numpy.concatenate([getattr(data, attr_data_2) for data in data_inputs])
6279 6625 setattr(self.dataOut, attr_data_2, data)
6280 6626 #data = numpy.concatenate([getattr(data, attr_data_3) for data in data_inputs])
6281 6627 #setattr(self.dataOut, attr_data_3, data)
6282 6628 #print(self.dataOut.moments.shape,self.dataOut.data_snr.shape,self.dataOut.heightList.shape)
6283 6629
6284 6630
6285 6631 class addTxPower(Operation):
6286 6632 '''
6287 6633 Transmitted power level integrated into dataOut -> AMISR
6288 6634 resolution: 1 min
6289 6635 The power files follow the pattern power_YYYYMMDD.csv
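
Example (a sketch, assuming the standard schainpy controller API; 'path' and 'DS' match run()):

op = proc_unit.addOperation(name='addTxPower', optype='other')
op.addParameter(name='path', value='/path/to/power_20221025.csv')
op.addParameter(name='DS', value='0.05')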
6290 6636 '''
6291 6637 __slots__ =('isConfig','dataDatetimes','txPowers')
6292 6638 def __init__(self):
6293 6639
6294 6640 Operation.__init__(self)
6295 6641 self.isConfig = False
6296 6642 self.dataDatetimes = []
6297 6643 self.txPowers = []
6298 6644
6299 6645 def setup(self, powerFile, dutyCycle):
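# Reads the CSV (skipping the header row), storing one timestamp and one power value per row; power is divided by the duty cycle.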
6300 6646 if not os.path.isfile(powerFile):
6301 6647 raise schainpy.admin.SchainError('There is no file named: {}'.format(powerFile))
6302 6648 return
6303 6649
6304 6650 with open(powerFile, newline='') as pfile:
6305 6651 reader = csv.reader(pfile, delimiter=',', quotechar='|')
6306 6652 next(reader)
6307 6653 for row in reader:
6308 6654 #'2022-10-25 00:00:00'
6309 6655 self.dataDatetimes.append(datetime.datetime.strptime(row[0], "%Y-%m-%d %H:%M:%S"))
6310 6656 self.txPowers.append(float(row[1])/dutyCycle)
6311 6657 self.isConfig = True
6312 6658
6313 6659 def run(self, dataOut, path, DS=0.05):
6314 6660
6315 6661 #dataOut.flagNoData = True
6316 6662
6317 6663 if not(self.isConfig):
6318 6664 self.setup(path, DS)
6319 6665
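# Truncate the data timestamp to the minute so it can be matched against the 1-minute power records.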
6320 6666 dataDate = datetime.datetime.utcfromtimestamp(dataOut.utctime).replace(second=0, microsecond=0)#no seconds
6321 6667 try:
6322 6668 indx = self.dataDatetimes.index(dataDate)
6323 6669 dataOut.txPower = self.txPowers[indx]
6324 6670 except ValueError:
6325 6671 log.warning("No power available for the datetime {}, setting power to 0 W".format(dataDate), self.name)
6326 6672 dataOut.txPower = 0
6327 6673
6328 6674 return dataOut