##// END OF EJS Templates
HDFWriter writes standard weather parameters. Now we can run a type-OTHER Operation even if flagNoData is True.
rflores -
r1459:52714d4d854d
parent child
Show More
@@ -1,834 +1,838
1 1 '''
2 2 Created on Jul 3, 2014
3 3
4 4 @author: roj-idl71
5 5 '''
6 6 # SUBCHANNELS EN VEZ DE CHANNELS
7 7 # BENCHMARKS -> PROBLEMAS CON ARCHIVOS GRANDES -> INCONSTANTE EN EL TIEMPO
8 8 # ACTUALIZACION DE VERSION
9 9 # HEADERS
10 10 # MODULO DE ESCRITURA
11 11 # METADATA
12 12
13 13 import os
14 14 import time
15 15 import datetime
16 16 import numpy
17 17 import timeit
18 18 from fractions import Fraction
19 19 from time import time
20 20 from time import sleep
21 21
22 22 import schainpy.admin
23 23 from schainpy.model.data.jroheaderIO import RadarControllerHeader, SystemHeader
24 24 from schainpy.model.data.jrodata import Voltage
25 25 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
26 26
27 27 import pickle
28 28 try:
29 29 import digital_rf
30 30 except:
31 31 pass
32 32
33 33
class DigitalRFReader(ProcessingUnit):
    '''
    Reads Digital RF (HDF5-based) voltage data and delivers it through a
    schainpy `Voltage` object, either one profile at a time or in blocks
    of profiles when `getByBlock` is enabled.
    '''

    def __init__(self):
        '''
        Constructor. Only initializes state; real configuration happens in
        `setup()` on the first `run()` call.
        '''
        ProcessingUnit.__init__(self)

        self.dataOut = Voltage()
        self.__printInfo = True
        self.__flagDiscontinuousBlock = False
        # Sentinel "buffer exhausted" index so the first getData() forces a read.
        self.__bufferIndex = 9999999
        self.__codeType = 0
        self.__ippKm = None
        self.__nCode = None
        self.__nBaud = None
        self.__code = None
        self.dtype = None
        self.oldAverage = None
        self.path = None

    def close(self):
        # BUGFIX: oldAverage stays None until at least one read happened;
        # the original unconditionally computed None * 1000 -> TypeError.
        if self.oldAverage is not None:
            print('Average of writing to digital rf format is ', self.oldAverage * 1000)
        return

    def __getCurrentSecond(self):
        # Current read-cursor position expressed in Unix seconds.
        return self.__thisUnixSample / self.__sample_rate

    thisSecond = property(__getCurrentSecond, "I'm the 'thisSecond' property.")

    def __setFileHeader(self):
        '''
        Initialize every header/metadata attribute of the dataOut object (no data).
        '''
        ippSeconds = 1.0 * self.__nSamples / self.__sample_rate
        if not self.getByBlock:
            nProfiles = 1.0 / ippSeconds  # Number of profiles in one second
        else:
            nProfiles = self.nProfileBlocks  # Number of profiles in one block

        try:
            self.dataOut.radarControllerHeaderObj = RadarControllerHeader(
                self.__radarControllerHeader)
        except Exception:
            # Metadata did not carry a radar controller header; build a minimal one.
            self.dataOut.radarControllerHeaderObj = RadarControllerHeader(
                txA=0,
                txB=0,
                nWindows=1,
                nHeights=self.__nSamples,
                firstHeight=self.__firstHeigth,
                deltaHeight=self.__deltaHeigth,
                codeType=self.__codeType,
                nCode=self.__nCode, nBaud=self.__nBaud,
                code=self.__code)

        try:
            self.dataOut.systemHeaderObj = SystemHeader(self.__systemHeader)
        except Exception:
            self.dataOut.systemHeaderObj = SystemHeader(nSamples=self.__nSamples,
                                                        nProfiles=nProfiles,
                                                        nChannels=len(self.__channelList),
                                                        adcResolution=14)
        self.dataOut.type = "Voltage"

        self.dataOut.data = None

        self.dataOut.dtype = self.dtype

        self.dataOut.nProfiles = int(nProfiles)

        # numpy.float was removed in numpy >= 1.24; the builtin float is the
        # exact replacement (numpy.float was an alias of it).
        self.dataOut.heightList = self.__firstHeigth + \
            numpy.arange(self.__nSamples, dtype=float) * \
            self.__deltaHeigth

        self.dataOut.channelList = list(range(len(self.__channelList)))
        if not self.getByBlock:
            self.dataOut.blocksize = self.dataOut.nChannels * self.dataOut.nHeights
        else:
            self.dataOut.blocksize = self.dataOut.nChannels * self.dataOut.nHeights * self.nProfileBlocks

        self.dataOut.flagNoData = True
        self.dataOut.flagDataAsBlock = bool(self.getByBlock)
        # Set to TRUE if the data is discontinuous
        self.dataOut.flagDiscontinuousBlock = False

        self.dataOut.utctime = None

        # timezone like jroheader, difference in minutes between UTC and localtime
        self.dataOut.timeZone = self.__timezone / 60

        self.dataOut.dstFlag = 0

        self.dataOut.errorCount = 0

        try:
            self.dataOut.nCohInt = self.fixed_metadata_dict.get(
                'nCohInt', self.nCohInt)

            # assume the data is already decoded
            self.dataOut.flagDecodeData = self.fixed_metadata_dict.get(
                'flagDecodeData', self.flagDecodeData)

            # assume the data has not been flipped
            self.dataOut.flagDeflipData = self.fixed_metadata_dict['flagDeflipData']

            self.dataOut.flagShiftFFT = self.fixed_metadata_dict['flagShiftFFT']

            self.dataOut.useLocalTime = self.fixed_metadata_dict['useLocalTime']
        except Exception:
            # Best-effort: keep dataOut defaults when metadata keys are missing.
            pass

        self.dataOut.ippSeconds = ippSeconds

        self.dataOut.frequency = self.__frequency

        self.dataOut.realtime = self.__online

    def findDatafiles(self, path, startDate=None, endDate=None):
        '''
        Return the list of dates (datetime.date) with data available in `path`
        between startDate and endDate (inclusive). Returns [] when the path
        does not exist or holds no Digital RF channels.
        '''
        if not os.path.isdir(path):
            return []

        try:
            digitalReadObj = digital_rf.DigitalRFReader(
                path, load_all_metadata=True)
        except Exception:
            digitalReadObj = digital_rf.DigitalRFReader(path)

        channelNameList = digitalReadObj.get_channels()

        if not channelNameList:
            return []

        metadata_dict = digitalReadObj.get_rf_file_metadata(channelNameList[0])

        sample_rate = metadata_dict['sample_rate'][0]

        this_metadata_file = digitalReadObj.get_metadata(channelNameList[0])

        try:
            timezone = this_metadata_file['timezone'].value
        except Exception:
            timezone = 0

        startUTCSecond, endUTCSecond = digitalReadObj.get_bounds(
            channelNameList[0]) / sample_rate - timezone

        startDatetime = datetime.datetime.utcfromtimestamp(startUTCSecond)
        endDatatime = datetime.datetime.utcfromtimestamp(endUTCSecond)

        if not startDate:
            startDate = startDatetime.date()

        if not endDate:
            endDate = endDatatime.date()

        dateList = []

        thisDatetime = startDatetime

        while thisDatetime <= endDatatime:

            thisDate = thisDatetime.date()

            if thisDate < startDate:
                # BUGFIX: the original `continue` never advanced thisDatetime,
                # looping forever whenever startDate was after the first file.
                thisDatetime += datetime.timedelta(1)
                continue

            if thisDate > endDate:
                break

            dateList.append(thisDate)
            thisDatetime += datetime.timedelta(1)

        return dateList

    def setup(self, path=None,
              startDate=None,
              endDate=None,
              startTime=datetime.time(0, 0, 0),
              endTime=datetime.time(23, 59, 59),
              channelList=None,
              nSamples=None,
              online=False,
              delay=60,
              buffer_size=1024,
              ippKm=None,
              nCohInt=1,
              nCode=1,
              nBaud=1,
              flagDecodeData=False,
              code=numpy.ones((1, 1), dtype=int),
              getByBlock=0,
              nProfileBlocks=1,
              **kwargs):
        '''
        Set all initial parameters and open the Digital RF reader.

        Inputs:
            path        : directory holding the Digital RF channels
            startDate / endDate / startTime / endTime : time range to read
            channelList : indices of the channels to read (default: all)
            nSamples    : samples (heights) per profile; taken from metadata
                          or derived from ippKm when not given
            online      : keep reloading metadata and waiting for new data
            delay       : seconds to wait between online retries
            getByBlock / nProfileBlocks : deliver blocks of profiles instead
                          of single profiles

        Note: `code` default uses builtin int; numpy.int (removed in
        numpy >= 1.24) was an alias of it.
        '''
        self.path = path
        self.nCohInt = nCohInt
        self.flagDecodeData = flagDecodeData
        self.i = 0

        self.getByBlock = getByBlock
        self.nProfileBlocks = nProfileBlocks
        if not os.path.isdir(path):
            raise ValueError("[Reading] Directory %s does not exist" % path)

        try:
            self.digitalReadObj = digital_rf.DigitalRFReader(
                path, load_all_metadata=True)
        except Exception:
            self.digitalReadObj = digital_rf.DigitalRFReader(path)

        channelNameList = self.digitalReadObj.get_channels()

        if not channelNameList:
            raise ValueError("[Reading] Directory %s does not have any files" % path)

        if not channelList:
            channelList = list(range(len(channelNameList)))

        ########## Reading metadata ######################

        top_properties = self.digitalReadObj.get_properties(
            channelNameList[channelList[0]])

        self.__num_subchannels = top_properties['num_subchannels']
        self.__sample_rate = 1.0 * \
            top_properties['sample_rate_numerator'] / \
            top_properties['sample_rate_denominator']
        # 0.15 km/us: half the speed of light, converts sample period to height step.
        self.__deltaHeigth = 1e6 * 0.15 / self.__sample_rate

        this_metadata_file = self.digitalReadObj.get_digital_metadata(
            channelNameList[channelList[0]])
        metadata_bounds = this_metadata_file.get_bounds()
        self.fixed_metadata_dict = this_metadata_file.read(
            metadata_bounds[0])[metadata_bounds[0]]  # GET FIRST HEADER

        try:
            self.__processingHeader = self.fixed_metadata_dict['processingHeader']
            self.__radarControllerHeader = self.fixed_metadata_dict['radarControllerHeader']
            self.__systemHeader = self.fixed_metadata_dict['systemHeader']
            # NOTE(review): pickle.loads on file metadata — only read trusted archives.
            self.dtype = pickle.loads(self.fixed_metadata_dict['dtype'])
        except Exception:
            pass

        self.__frequency = self.fixed_metadata_dict.get('frequency', 1)

        self.__timezone = self.fixed_metadata_dict.get('timezone', 18000)

        try:
            nSamples = self.fixed_metadata_dict['nSamples']
        except Exception:
            nSamples = None

        self.__firstHeigth = 0

        try:
            codeType = self.__radarControllerHeader['codeType']
        except Exception:
            codeType = 0

        try:
            if codeType:
                nCode = self.__radarControllerHeader['nCode']
                nBaud = self.__radarControllerHeader['nBaud']
                code = self.__radarControllerHeader['code']
        except Exception:
            pass

        if not ippKm:
            try:
                # seconds to km
                ippKm = self.__radarControllerHeader['ipp']
            except Exception:
                ippKm = None
        ####################################################
        self.__ippKm = ippKm
        startUTCSecond = None
        endUTCSecond = None

        if startDate:
            startDatetime = datetime.datetime.combine(startDate, startTime)
            startUTCSecond = (
                startDatetime - datetime.datetime(1970, 1, 1)).total_seconds() + self.__timezone

        if endDate:
            endDatetime = datetime.datetime.combine(endDate, endTime)
            endUTCSecond = (endDatetime - datetime.datetime(1970,
                                                            1, 1)).total_seconds() + self.__timezone

        start_index, end_index = self.digitalReadObj.get_bounds(
            channelNameList[channelList[0]])

        # Clamp the requested range to the data actually on disk.
        if not startUTCSecond:
            startUTCSecond = start_index / self.__sample_rate

        if start_index > startUTCSecond * self.__sample_rate:
            startUTCSecond = start_index / self.__sample_rate

        if not endUTCSecond:
            endUTCSecond = end_index / self.__sample_rate
        if end_index < endUTCSecond * self.__sample_rate:
            endUTCSecond = end_index / self.__sample_rate  # Check UTC and LT time
        if not nSamples:
            if not ippKm:
                raise ValueError("[Reading] nSamples or ippKm should be defined")
            nSamples = int(ippKm / (1e6 * 0.15 / self.__sample_rate))

        channelBoundList = []
        channelNameListFiltered = []

        for thisIndexChannel in channelList:
            thisChannelName = channelNameList[thisIndexChannel]
            start_index, end_index = self.digitalReadObj.get_bounds(
                thisChannelName)
            channelBoundList.append((start_index, end_index))
            channelNameListFiltered.append(thisChannelName)

        self.profileIndex = 0
        self.i = 0
        self.__delay = delay

        self.__codeType = codeType
        self.__nCode = nCode
        self.__nBaud = nBaud
        self.__code = code

        self.__datapath = path
        self.__online = online
        self.__channelList = channelList
        self.__channelNameList = channelNameListFiltered
        self.__channelBoundList = channelBoundList
        self.__nSamples = nSamples
        if self.getByBlock:
            # In block mode one read covers a whole block of profiles.
            nSamples = nSamples * nProfileBlocks

        self.__samples_to_read = int(nSamples)
        self.__nChannels = len(self.__channelList)
        self.__startUTCSecond = startUTCSecond
        self.__endUTCSecond = endUTCSecond

        self.__timeInterval = 1.0 * self.__samples_to_read / \
            self.__sample_rate  # Time interval covered by one read

        if online:
            # Online mode starts reading from the end of the archive.
            startUTCSecond = numpy.floor(endUTCSecond)

        # __readNextBlock() first adds samples_to_read, so pre-subtract here.
        self.__thisUnixSample = int(startUTCSecond * self.__sample_rate) - self.__samples_to_read

        # numpy.complex was removed in numpy >= 1.24; builtin complex is the
        # exact replacement (it was an alias).
        self.__data_buffer = numpy.zeros((int(len(channelList)), self.__samples_to_read), dtype=complex)

        self.__setFileHeader()
        self.isConfig = True

        print("[Reading] Digital RF Data was found from %s to %s " % (
            datetime.datetime.utcfromtimestamp(
                self.__startUTCSecond - self.__timezone),
            datetime.datetime.utcfromtimestamp(
                self.__endUTCSecond - self.__timezone)
        ))

        print("[Reading] Starting process from %s to %s" % (datetime.datetime.utcfromtimestamp(startUTCSecond - self.__timezone),
                                                            datetime.datetime.utcfromtimestamp(
                                                                endUTCSecond - self.__timezone)
                                                            ))
        self.oldAverage = None
        self.count = 0
        self.executionTime = 0

    def __reload(self):
        '''
        Re-scan the archive (online mode) and extend the known time range.
        Returns True when a new end bound was found, False otherwise.
        '''
        print("[Reading] reloading metadata ...")

        try:
            self.digitalReadObj.reload(complete_update=True)
        except Exception:
            self.digitalReadObj = digital_rf.DigitalRFReader(self.path)

        start_index, end_index = self.digitalReadObj.get_bounds(
            self.__channelNameList[self.__channelList[0]])

        if start_index > self.__startUTCSecond * self.__sample_rate:
            self.__startUTCSecond = 1.0 * start_index / self.__sample_rate

        if end_index > self.__endUTCSecond * self.__sample_rate:
            self.__endUTCSecond = 1.0 * end_index / self.__sample_rate
            print()
            print("[Reading] New timerange found [%s, %s] " % (
                datetime.datetime.utcfromtimestamp(
                    self.__startUTCSecond - self.__timezone),
                datetime.datetime.utcfromtimestamp(
                    self.__endUTCSecond - self.__timezone)
            ))

            return True

        return False

    def timeit(self, toExecute):
        '''
        Run `toExecute` and fold its wall time into the running average
        `self.oldAverage` / `self.count`.
        '''
        # BUGFIX: the file-level `from time import time` shadows the `time`
        # module, so the original `time.time()` raised AttributeError.
        t0 = time()
        toExecute()
        self.executionTime = time() - t0
        if self.oldAverage is None:
            self.oldAverage = self.executionTime
        self.oldAverage = (self.executionTime + self.count *
                           self.oldAverage) / (self.count + 1.0)
        self.count = self.count + 1.0
        return

    def __readNextBlock(self, seconds=30, volt_scale=1):
        '''
        Read the next `__samples_to_read` samples of every channel into
        `__data_buffer`. Returns True on success, False when the range is
        exhausted or the read failed.
        '''
        # Set the next data
        self.__flagDiscontinuousBlock = False
        self.__thisUnixSample += self.__samples_to_read

        if self.__thisUnixSample + 2 * self.__samples_to_read > self.__endUTCSecond * self.__sample_rate:
            print("[Reading] There are no more data into selected time-range")
            if self.__online:
                sleep(3)
                self.__reload()
            else:
                return False

            if self.__thisUnixSample + 2 * self.__samples_to_read > self.__endUTCSecond * self.__sample_rate:
                return False
            # Rewind: the reload did not move the bound far enough yet.
            self.__thisUnixSample -= self.__samples_to_read

        indexChannel = 0

        dataOk = False

        for thisChannelName in self.__channelNameList:  # TODO VARIOS CHANNELS?
            for indexSubchannel in range(self.__num_subchannels):
                try:
                    t0 = time()
                    result = self.digitalReadObj.read_vector_c81d(self.__thisUnixSample,
                                                                  self.__samples_to_read,
                                                                  thisChannelName, sub_channel=indexSubchannel)
                    self.executionTime = time() - t0
                    if self.oldAverage is None:
                        self.oldAverage = self.executionTime
                    self.oldAverage = (
                        self.executionTime + self.count * self.oldAverage) / (self.count + 1.0)
                    self.count = self.count + 1.0

                except IOError as e:
                    # Gap in the archive: flag it and try the next channel.
                    self.__flagDiscontinuousBlock = True
                    print("[Reading] %s" % datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone), e)
                    break

                if result.shape[0] != self.__samples_to_read:
                    self.__flagDiscontinuousBlock = True
                    print("[Reading] %s: Too few samples were found, just %d/%d samples" % (datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
                                                                                            result.shape[0],
                                                                                            self.__samples_to_read))
                    break

                self.__data_buffer[indexChannel, :] = result * volt_scale
                indexChannel += 1

                dataOk = True

        self.__utctime = self.__thisUnixSample / self.__sample_rate

        if not dataOk:
            return False

        print("[Reading] %s: %d samples <> %f sec" % (datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
                                                      self.__samples_to_read,
                                                      self.__timeInterval))

        self.__bufferIndex = 0

        return True

    def __isBufferEmpty(self):
        # True once every profile of the current buffer has been consumed.
        return self.__bufferIndex > self.__samples_to_read - self.__nSamples

    def getData(self, seconds=30, nTries=5):
        '''
        This method gets the data from files and puts it into the dataOut object.

        In addition, it increases the buffer counter by one.

        Return:
            data : a voltage profile (heights x channels) copied from the
                   buffer. Returns None when there are no more files to read.

        Affected:
            self.dataOut
            self.profileIndex
            self.flagDiscontinuousBlock
            self.flagIsNewBlock
        '''
        err_counter = 0
        self.dataOut.flagNoData = True

        if self.__isBufferEmpty():
            self.__flagDiscontinuousBlock = False

            while True:
                if self.__readNextBlock():
                    break
                if self.__thisUnixSample > self.__endUTCSecond * self.__sample_rate:
                    raise schainpy.admin.SchainError('Error')

                if self.__flagDiscontinuousBlock:
                    raise schainpy.admin.SchainError('discontinuous block found')

                if not self.__online:
                    raise schainpy.admin.SchainError('Online?')

                err_counter += 1
                if err_counter > nTries:
                    raise schainpy.admin.SchainError('Max retrys reach')

                print('[Reading] waiting %d seconds to read a new block' % seconds)
                sleep(seconds)

        if not self.getByBlock:
            # Deliver exactly one profile from the buffer.
            self.dataOut.data = self.__data_buffer[:, self.__bufferIndex:self.__bufferIndex + self.__nSamples]
            self.dataOut.utctime = (self.__thisUnixSample + self.__bufferIndex) / self.__sample_rate
            self.dataOut.flagNoData = False
            self.dataOut.flagDiscontinuousBlock = self.__flagDiscontinuousBlock
            self.dataOut.profileIndex = self.profileIndex

            self.__bufferIndex += self.__nSamples
            self.profileIndex += 1

            if self.profileIndex == self.dataOut.nProfiles:
                self.profileIndex = 0
        else:
            # Deliver the whole buffer reshaped as (channels, profiles, heights).
            self.dataOut.flagNoData = False
            buffer = self.__data_buffer[:, self.__bufferIndex:self.__bufferIndex + self.__samples_to_read]
            buffer = buffer.reshape((self.__nChannels, self.nProfileBlocks, int(self.__samples_to_read / self.nProfileBlocks)))
            self.dataOut.nProfileBlocks = self.nProfileBlocks
            self.dataOut.data = buffer
            self.dataOut.utctime = (self.__thisUnixSample + self.__bufferIndex) / self.__sample_rate
            self.profileIndex += self.__samples_to_read
            self.__bufferIndex += self.__samples_to_read
            self.dataOut.flagDiscontinuousBlock = self.__flagDiscontinuousBlock
        return True

    def printInfo(self):
        '''
        Print header information once (subsequent calls are no-ops).
        '''
        if self.__printInfo == False:
            return

        self.__printInfo = False

    def printNumberOfBlock(self):
        '''
        Placeholder kept for interface compatibility.
        '''
        return

    def run(self, **kwargs):
        '''
        Called repeatedly by the framework: configure on the first call,
        then read the next profile/block into self.dataOut.
        '''
        if not self.isConfig:
            self.setup(**kwargs)

        self.getData(seconds=self.__delay)

        return
@MPDecorator
class DigitalRFWriter(Operation):
    '''
    Writes schainpy Voltage data to Digital RF (HDF5) format, together with
    a Digital Metadata channel carrying the JRO headers.
    '''

    def __init__(self, **kwargs):
        '''
        Constructor. Real configuration happens in `setup()` on first run().
        '''
        Operation.__init__(self, **kwargs)
        self.metadata_dict = {}
        self.dataOut = None
        self.dtype = None
        self.oldAverage = 0

    def setHeader(self):
        # Collect the dataOut attributes that must travel with the RF data.
        self.metadata_dict['frequency'] = self.dataOut.frequency
        self.metadata_dict['timezone'] = self.dataOut.timeZone
        self.metadata_dict['dtype'] = pickle.dumps(self.dataOut.dtype)
        self.metadata_dict['nProfiles'] = self.dataOut.nProfiles
        self.metadata_dict['heightList'] = self.dataOut.heightList
        self.metadata_dict['channelList'] = self.dataOut.channelList
        self.metadata_dict['flagDecodeData'] = self.dataOut.flagDecodeData
        self.metadata_dict['flagDeflipData'] = self.dataOut.flagDeflipData
        self.metadata_dict['flagShiftFFT'] = self.dataOut.flagShiftFFT
        self.metadata_dict['useLocalTime'] = self.dataOut.useLocalTime
        self.metadata_dict['nCohInt'] = self.dataOut.nCohInt
        self.metadata_dict['type'] = self.dataOut.type
        self.metadata_dict['flagDataAsBlock'] = getattr(
            self.dataOut, 'flagDataAsBlock', None)  # chequear

    def setup(self, dataOut, path, frequency, fileCadence, dirCadence, metadataCadence, set=0, metadataFile='metadata', ext='.h5'):
        '''
        Set all initial parameters and open the Digital RF writers.
        Input:
            dataOut: Input data, will also be the output data
        '''
        self.setHeader()
        self.__ippSeconds = dataOut.ippSeconds
        self.__deltaH = dataOut.getDeltaH()
        # 0.15 km/us converts the height step back to a sample rate.
        self.__sample_rate = 1e6 * 0.15 / self.__deltaH
        self.__dtype = dataOut.dtype
        if len(dataOut.dtype) == 2:
            self.__dtype = dataOut.dtype[0]
        self.__nSamples = dataOut.systemHeaderObj.nSamples
        self.__nProfiles = dataOut.nProfiles

        if self.dataOut.type != 'Voltage':
            # BUGFIX: the original raised a plain string, which is itself a
            # TypeError in Python 3. (The nFFTPoints allocation that followed
            # it was unreachable and has been removed.)
            raise ValueError('Digital RF cannot be used with this data type')
        self.arr_data = numpy.ones((self.__nSamples, len(
            self.dataOut.channelList)), dtype=[('r', self.__dtype), ('i', self.__dtype)])

        sample_rate_fraction = Fraction(self.__sample_rate).limit_denominator()
        sample_rate_numerator = int(sample_rate_fraction.numerator)
        sample_rate_denominator = int(sample_rate_fraction.denominator)
        # NOTE(review): digital_rf expects an integer global index — confirm
        # utctime * sample_rate is integral for this data.
        start_global_index = dataOut.utctime * self.__sample_rate

        uuid = 'prueba'
        compression_level = 0
        checksum = False
        is_complex = True
        num_subchannels = len(dataOut.channelList)
        is_continuous = True
        marching_periods = False

        self.digitalWriteObj = digital_rf.DigitalRFWriter(path, self.__dtype, dirCadence,
                                                          fileCadence, start_global_index,
                                                          sample_rate_numerator, sample_rate_denominator, uuid, compression_level, checksum,
                                                          is_complex, num_subchannels, is_continuous, marching_periods)
        metadata_dir = os.path.join(path, 'metadata')
        # os.makedirs instead of `os.system('mkdir ...')`: no shell involved,
        # and no error spam when the directory already exists.
        os.makedirs(metadata_dir, exist_ok=True)
        self.digitalMetadataWriteObj = digital_rf.DigitalMetadataWriter(metadata_dir, dirCadence, 1,  # 236, file_cadence_millisecs / 1000
                                                                        sample_rate_numerator, sample_rate_denominator,
                                                                        metadataFile)
        self.isConfig = True
        self.currentSample = 0
        self.oldAverage = 0
        self.count = 0
        return

    def writeMetadata(self):
        '''
        Write the JRO headers into the Digital Metadata channel at the
        current data timestamp.
        '''
        start_idx = self.__sample_rate * self.dataOut.utctime

        self.metadata_dict['processingHeader'] = self.dataOut.processingHeaderObj.getAsDict()
        self.metadata_dict['radarControllerHeader'] = self.dataOut.radarControllerHeaderObj.getAsDict()
        self.metadata_dict['systemHeader'] = self.dataOut.systemHeaderObj.getAsDict()
        self.digitalMetadataWriteObj.write(start_idx, self.metadata_dict)
        return

    def timeit(self, toExecute):
        '''
        Run `toExecute` and fold its wall time into the running average
        `self.oldAverage` / `self.count`.
        '''
        t0 = time()
        toExecute()
        self.executionTime = time() - t0
        if self.oldAverage is None:
            self.oldAverage = self.executionTime
        self.oldAverage = (self.executionTime + self.count *
                           self.oldAverage) / (self.count + 1.0)
        self.count = self.count + 1.0
        return

    def writeData(self):
        '''
        Pack dataOut.data (complex voltages) into the structured r/i buffer
        and append it to the Digital RF channel, timing the write.
        '''
        if self.dataOut.type != 'Voltage':
            # BUGFIX: was `raise '<string>'` — a TypeError in Python 3.
            raise ValueError('Digital RF cannot be used with this data type')
        for i in range(self.dataOut.systemHeaderObj.nSamples):
            for channel in self.dataOut.channelList:
                self.arr_data[i][channel]['r'] = self.dataOut.data[channel][i].real
                self.arr_data[i][channel]['i'] = self.dataOut.data[channel][i].imag

        def f():
            return self.digitalWriteObj.rf_write(self.arr_data)
        self.timeit(f)

        return

    def run(self, dataOut, frequency=49.92e6, path=None, fileCadence=1000, dirCadence=36000, metadataCadence=1, **kwargs):
        '''
        Called repeatedly by the framework.
        Inputs:
            dataOut: object with the data
        '''
        self.dataOut = dataOut
        if not self.isConfig:
            self.setup(dataOut, path, frequency, fileCadence,
                       dirCadence, metadataCadence, **kwargs)
            self.writeMetadata()

        self.writeData()

        return dataOut  # this return does not exist in the 2.7 version

    def close(self):
        print('[Writing] - Closing files ')
        print('Average of writing to digital rf format is ', self.oldAverage * 1000)
        try:
            self.digitalWriteObj.close()
        except Exception:
            pass
@@ -1,714 +1,731
1 1 import os
2 2 import time
3 3 import datetime
4 4
5 5 import numpy
6 6 import h5py
7 7
8 8 import schainpy.admin
9 9 from schainpy.model.data.jrodata import *
10 10 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
11 11 from schainpy.model.io.jroIO_base import *
12 12 from schainpy.utils import log
13 13
14 14
15 15 class HDFReader(Reader, ProcessingUnit):
16 16 """Processing unit to read HDF5 format files
17 17
18 18 This unit reads HDF5 files created with `HDFWriter` operation contains
19 19 by default two groups Data and Metadata all variables would be saved as `dataOut`
20 20 attributes.
21 21 It is possible to read any HDF5 file by given the structure in the `description`
22 22 parameter, also you can add extra values to metadata with the parameter `extras`.
23 23
24 24 Parameters:
25 25 -----------
26 26 path : str
27 27 Path where files are located.
28 28 startDate : date
29 29 Start date of the files
30 30 endDate : list
31 31 End date of the files
32 32 startTime : time
33 33 Start time of the files
34 34 endTime : time
35 35 End time of the files
36 36 description : dict, optional
37 37 Dictionary with the description of the HDF5 file
38 38 extras : dict, optional
39 39 Dictionary with extra metadata to be be added to `dataOut`
40 40
41 41 Examples
42 42 --------
43 43
44 44 desc = {
45 45 'Data': {
46 46 'data_output': ['u', 'v', 'w'],
47 47 'utctime': 'timestamps',
48 48 } ,
49 49 'Metadata': {
50 50 'heightList': 'heights'
51 51 }
52 52 }
53 53
54 54 desc = {
55 55 'Data': {
56 56 'data_output': 'winds',
57 57 'utctime': 'timestamps'
58 58 },
59 59 'Metadata': {
60 60 'heightList': 'heights'
61 61 }
62 62 }
63 63
64 64 extras = {
65 65 'timeZone': 300
66 66 }
67 67
68 68 reader = project.addReadUnit(
69 69 name='HDFReader',
70 70 path='/path/to/files',
71 71 startDate='2019/01/01',
72 72 endDate='2019/01/31',
73 73 startTime='00:00:00',
74 74 endTime='23:59:59',
75 75 # description=json.dumps(desc),
76 76 # extras=json.dumps(extras),
77 77 )
78 78
79 79 """
80 80
81 81 __attrs__ = ['path', 'startDate', 'endDate', 'startTime', 'endTime', 'description', 'extras']
82 82
83 83 def __init__(self):
84 84 ProcessingUnit.__init__(self)
85 85 self.dataOut = Parameters()
86 86 self.ext = ".hdf5"
87 87 self.optchar = "D"
88 88 self.meta = {}
89 89 self.data = {}
90 90 self.open_file = h5py.File
91 91 self.open_mode = 'r'
92 92 self.description = {}
93 93 self.extras = {}
94 94 self.filefmt = "*%Y%j***"
95 95 self.folderfmt = "*%Y%j"
96 96 self.utcoffset = 0
97 97
98 98 def setup(self, **kwargs):
99 99
100 100 self.set_kwargs(**kwargs)
101 101 if not self.ext.startswith('.'):
102 102 self.ext = '.{}'.format(self.ext)
103 103
104 104 if self.online:
105 105 log.log("Searching files in online mode...", self.name)
106 106
107 107 for nTries in range(self.nTries):
108 108 fullpath = self.searchFilesOnLine(self.path, self.startDate,
109 109 self.endDate, self.expLabel, self.ext, self.walk,
110 110 self.filefmt, self.folderfmt)
111 111 try:
112 112 fullpath = next(fullpath)
113 113 except:
114 114 fullpath = None
115 115
116 116 if fullpath:
117 117 break
118 118
119 119 log.warning(
120 120 'Waiting {} sec for a valid file in {}: try {} ...'.format(
121 121 self.delay, self.path, nTries + 1),
122 122 self.name)
123 123 time.sleep(self.delay)
124 124
125 125 if not(fullpath):
126 126 raise schainpy.admin.SchainError(
127 127 'There isn\'t any valid file in {}'.format(self.path))
128 128
129 129 pathname, filename = os.path.split(fullpath)
130 130 self.year = int(filename[1:5])
131 131 self.doy = int(filename[5:8])
132 132 self.set = int(filename[8:11]) - 1
133 133 else:
134 134 log.log("Searching files in {}".format(self.path), self.name)
135 135 self.filenameList = self.searchFilesOffLine(self.path, self.startDate,
136 136 self.endDate, self.expLabel, self.ext, self.walk, self.filefmt, self.folderfmt)
137 137
138 138 self.setNextFile()
139 139
140 140 return
141 141
142 142 def readFirstHeader(self):
143 143 '''Read metadata and data'''
144 144
145 145 self.__readMetadata()
146 146 self.__readData()
147 147 self.__setBlockList()
148 148
149 149 if 'type' in self.meta:
150 150 self.dataOut = eval(self.meta['type'])()
151 151
152 152 for attr in self.meta:
153 153 setattr(self.dataOut, attr, self.meta[attr])
154 154
155 155 self.blockIndex = 0
156 156
157 157 return
158 158
159 159 def __setBlockList(self):
160 160 '''
161 161 Selects the data within the times defined
162 162
163 163 self.fp
164 164 self.startTime
165 165 self.endTime
166 166 self.blockList
167 167 self.blocksPerFile
168 168
169 169 '''
170 170
171 171 startTime = self.startTime
172 172 endTime = self.endTime
173 173 thisUtcTime = self.data['utctime'] + self.utcoffset
174 174 self.interval = numpy.min(thisUtcTime[1:] - thisUtcTime[:-1])
175 175 thisDatetime = datetime.datetime.utcfromtimestamp(thisUtcTime[0])
176 176
177 177 thisDate = thisDatetime.date()
178 178 thisTime = thisDatetime.time()
179 179
180 180 startUtcTime = (datetime.datetime.combine(thisDate, startTime) - datetime.datetime(1970, 1, 1)).total_seconds()
181 181 endUtcTime = (datetime.datetime.combine(thisDate, endTime) - datetime.datetime(1970, 1, 1)).total_seconds()
182 182
183 183 ind = numpy.where(numpy.logical_and(thisUtcTime >= startUtcTime, thisUtcTime < endUtcTime))[0]
184 184
185 185 self.blockList = ind
186 186 self.blocksPerFile = len(ind)
187 187 return
188 188
189 189 def __readMetadata(self):
190 190 '''
191 191 Reads Metadata
192 192 '''
193 193
194 194 meta = {}
195 195
196 196 if self.description:
197 197 for key, value in self.description['Metadata'].items():
198 198 meta[key] = self.fp[value][()]
199 199 else:
200 200 grp = self.fp['Metadata']
201 201 for name in grp:
202 202 meta[name] = grp[name][()]
203 203
204 204 if self.extras:
205 205 for key, value in self.extras.items():
206 206 meta[key] = value
207 207 self.meta = meta
208 208
209 209 return
210 210
211 211 def __readData(self):
212 212
213 213 data = {}
214 214
215 215 if self.description:
216 216 for key, value in self.description['Data'].items():
217 217 if isinstance(value, str):
218 218 if isinstance(self.fp[value], h5py.Dataset):
219 219 data[key] = self.fp[value][()]
220 220 elif isinstance(self.fp[value], h5py.Group):
221 221 array = []
222 222 for ch in self.fp[value]:
223 223 array.append(self.fp[value][ch][()])
224 224 data[key] = numpy.array(array)
225 225 elif isinstance(value, list):
226 226 array = []
227 227 for ch in value:
228 228 array.append(self.fp[ch][()])
229 229 data[key] = numpy.array(array)
230 230 else:
231 231 grp = self.fp['Data']
232 232 for name in grp:
233 233 if isinstance(grp[name], h5py.Dataset):
234 234 array = grp[name][()]
235 235 elif isinstance(grp[name], h5py.Group):
236 236 array = []
237 237 for ch in grp[name]:
238 238 array.append(grp[name][ch][()])
239 239 array = numpy.array(array)
240 240 else:
241 241 log.warning('Unknown type: {}'.format(name))
242 242
243 243 if name in self.description:
244 244 key = self.description[name]
245 245 else:
246 246 key = name
247 247 data[key] = array
248 248
249 249 self.data = data
250 250 return
251 251
252 252 def getData(self):
253 253
254 254 for attr in self.data:
255 255 if self.data[attr].ndim == 1:
256 256 setattr(self.dataOut, attr, self.data[attr][self.blockIndex])
257 257 else:
258 258 setattr(self.dataOut, attr, self.data[attr][:, self.blockIndex])
259 259
260 260 self.dataOut.flagNoData = False
261 261 self.blockIndex += 1
262 262
263 263 log.log("Block No. {}/{} -> {}".format(
264 264 self.blockIndex,
265 265 self.blocksPerFile,
266 266 self.dataOut.datatime.ctime()), self.name)
267 267
268 268 return
269 269
270 270 def run(self, **kwargs):
271 271
272 272 if not(self.isConfig):
273 273 self.setup(**kwargs)
274 274 self.isConfig = True
275 275
276 276 if self.blockIndex == self.blocksPerFile:
277 277 self.setNextFile()
278 278
279 279 self.getData()
280 280
281 281 return
282 282
283 283 @MPDecorator
284 284 class HDFWriter(Operation):
285 285 """Operation to write HDF5 files.
286 286
287 287 The HDF5 file contains by default two groups Data and Metadata where
288 288 you can save any `dataOut` attribute specified by `dataList` and `metadataList`
289 289 parameters, data attributes are normaly time dependent where the metadata
290 290 are not.
291 291 It is possible to customize the structure of the HDF5 file with the
292 292 optional description parameter see the examples.
293 293
294 294 Parameters:
295 295 -----------
296 296 path : str
297 297 Path where files will be saved.
298 298 blocksPerFile : int
299 299 Number of blocks per file
300 300 metadataList : list
301 301 List of the dataOut attributes that will be saved as metadata
302 302 dataList : int
303 303 List of the dataOut attributes that will be saved as data
304 304 setType : bool
305 305 If True the name of the files corresponds to the timestamp of the data
306 306 description : dict, optional
307 307 Dictionary with the desired description of the HDF5 file
308 308
309 309 Examples
310 310 --------
311 311
312 312 desc = {
313 313 'data_output': {'winds': ['z', 'w', 'v']},
314 314 'utctime': 'timestamps',
315 315 'heightList': 'heights'
316 316 }
317 317 desc = {
318 318 'data_output': ['z', 'w', 'v'],
319 319 'utctime': 'timestamps',
320 320 'heightList': 'heights'
321 321 }
322 322 desc = {
323 323 'Data': {
324 324 'data_output': 'winds',
325 325 'utctime': 'timestamps'
326 326 },
327 327 'Metadata': {
328 328 'heightList': 'heights'
329 329 }
330 330 }
331 331
332 332 writer = proc_unit.addOperation(name='HDFWriter')
333 333 writer.addParameter(name='path', value='/path/to/file')
334 334 writer.addParameter(name='blocksPerFile', value='32')
335 335 writer.addParameter(name='metadataList', value='heightList,timeZone')
336 336 writer.addParameter(name='dataList',value='data_output,utctime')
337 337 # writer.addParameter(name='description',value=json.dumps(desc))
338 338
339 339 """
340 340
341 341 ext = ".hdf5"
342 342 optchar = "D"
343 343 filename = None
344 344 path = None
345 345 setFile = None
346 346 fp = None
347 347 firsttime = True
348 348 #Configurations
349 349 blocksPerFile = None
350 350 blockIndex = None
351 351 dataOut = None
352 352 #Data Arrays
353 353 dataList = None
354 354 metadataList = None
355 355 currentDay = None
356 356 lastTime = None
357 357 last_Azipos = None
358 358 last_Elepos = None
359 359 mode = None
360 360 #-----------------------
361 361 Typename = None
362 362
363 363
364 364
365 365 def __init__(self):
366 366
367 367 Operation.__init__(self)
368 368 return
369 369
370 370
371 371 def set_kwargs(self, **kwargs):
372 372
373 373 for key, value in kwargs.items():
374 374 setattr(self, key, value)
375 375
376 376 def set_kwargs_obj(self,obj, **kwargs):
377 377
378 378 for key, value in kwargs.items():
379 379 setattr(obj, key, value)
380 380
381 381 def generalFlag(self):
382 382 ####rint("GENERALFLAG")
383 383 if self.mode== "weather":
384 384 if self.last_Azipos == None:
385 385 tmp = self.dataOut.azimuth
386 386 ####print("ang azimuth writer",tmp)
387 387 self.last_Azipos = tmp
388 388 flag = False
389 389 return flag
390 390 ####print("ang_azimuth writer",self.dataOut.azimuth)
391 391 result = self.dataOut.azimuth - self.last_Azipos
392 392 self.last_Azipos = self.dataOut.azimuth
393 393 if result<0:
394 394 flag = True
395 395 return flag
396 396
397 397 def generalFlag_vRF(self):
398 398 ####rint("GENERALFLAG")
399 399
400 400 try:
401 401 self.dataOut.flagBlock360Done
402 402 return self.dataOut.flagBlock360Done
403 403 except:
404 404 return 0
405 405
406 406
407 407 def setup(self, path=None, blocksPerFile=10, metadataList=None, dataList=None, setType=None, description=None,type_data=None,**kwargs):
408 408 self.path = path
409 409 self.blocksPerFile = blocksPerFile
410 410 self.metadataList = metadataList
411 411 self.dataList = [s.strip() for s in dataList]
412 412 self.setType = setType
413 413 if self.mode == "weather":
414 414 self.setType = "weather"
415 415 self.set_kwargs(**kwargs)
416 416 self.set_kwargs_obj(self.dataOut,**kwargs)
417 417
418 418
419 419 self.description = description
420 420 self.type_data=type_data
421 421
422 422 if self.metadataList is None:
423 423 self.metadataList = self.dataOut.metadata_list
424 424
425 425 tableList = []
426 426 dsList = []
427 427
428 428 for i in range(len(self.dataList)):
429 429 dsDict = {}
430 430 if hasattr(self.dataOut, self.dataList[i]):
431 431 dataAux = getattr(self.dataOut, self.dataList[i])
432 432 dsDict['variable'] = self.dataList[i]
433 433 else:
434 434 log.warning('Attribute {} not found in dataOut', self.name)
435 435 continue
436 436
437 437 if dataAux is None:
438 438 continue
439 439 elif isinstance(dataAux, (int, float, numpy.integer, numpy.float)):
440 440 dsDict['nDim'] = 0
441 441 else:
442 442 dsDict['nDim'] = len(dataAux.shape)
443 443 dsDict['shape'] = dataAux.shape
444 444 dsDict['dsNumber'] = dataAux.shape[0]
445 445 dsDict['dtype'] = dataAux.dtype
446 446 dsList.append(dsDict)
447 447
448 448 self.dsList = dsList
449 449 self.currentDay = self.dataOut.datatime.date()
450 450
451 451 def timeFlag(self):
452 452 currentTime = self.dataOut.utctime
453 453 timeTuple = time.localtime(currentTime)
454 454 dataDay = timeTuple.tm_yday
455 455
456 456 if self.lastTime is None:
457 457 self.lastTime = currentTime
458 458 self.currentDay = dataDay
459 459 return False
460 460
461 461 timeDiff = currentTime - self.lastTime
462 462
463 463 #Si el dia es diferente o si la diferencia entre un dato y otro supera la hora
464 464 if dataDay != self.currentDay:
465 465 self.currentDay = dataDay
466 466 return True
467 467 elif timeDiff > 3*60*60:
468 468 self.lastTime = currentTime
469 469 return True
470 470 else:
471 471 self.lastTime = currentTime
472 472 return False
473 473
474 474 def run(self, dataOut, path, blocksPerFile=10, metadataList=None,
475 475 dataList=[], setType=None, description={},mode= None,type_data=None,Reset = False,**kwargs):
476 476
477 477 if Reset:
478 478 self.isConfig = False
479 479 self.closeFile()
480 480 self.lastTime = None
481 481 self.blockIndex = 0
482 482
483 483 self.dataOut = dataOut
484 484 self.mode = mode
485 self.var = dataList[0]
486
485 487 if not(self.isConfig):
486 488 self.setup(path=path, blocksPerFile=blocksPerFile,
487 489 metadataList=metadataList, dataList=dataList,
488 490 setType=setType, description=description,type_data=type_data,**kwargs)
489 491
490 492 self.isConfig = True
491 493 self.setNextFile()
492 494
493 495 self.putData()
494 496 return
495 497
496 498 def setNextFile(self):
497 499 ###print("HELLO WORLD--------------------------------")
498 500 ext = self.ext
499 501 path = self.path
500 502 setFile = self.setFile
501 503 type_data = self.type_data
502 504
503 505 timeTuple = time.localtime(self.dataOut.utctime)
504 506 subfolder = 'd%4.4d%3.3d' % (timeTuple.tm_year,timeTuple.tm_yday)
505 507 fullpath = os.path.join(path, subfolder)
506 508
507 509 if os.path.exists(fullpath):
508 510 filesList = os.listdir(fullpath)
509 511 filesList = [k for k in filesList if k.startswith(self.optchar)]
510 512 if len( filesList ) > 0:
511 513 filesList = sorted(filesList, key=str.lower)
512 514 filen = filesList[-1]
513 515 # el filename debera tener el siguiente formato
514 516 # 0 1234 567 89A BCDE (hex)
515 517 # x YYYY DDD SSS .ext
516 518 if isNumber(filen[8:11]):
517 519 setFile = int(filen[8:11]) #inicializo mi contador de seteo al seteo del ultimo file
518 520 else:
519 521 setFile = -1
520 522 else:
521 523 setFile = -1 #inicializo mi contador de seteo
522 524 else:
523 525 os.makedirs(fullpath)
524 526 setFile = -1 #inicializo mi contador de seteo
525 527
526 528 ###print("**************************",self.setType)
527 529 if self.setType is None:
528 530 setFile += 1
529 531 file = '%s%4.4d%3.3d%03d%s' % (self.optchar,
530 532 timeTuple.tm_year,
531 533 timeTuple.tm_yday,
532 534 setFile,
533 535 ext )
534 536 elif self.setType == "weather":
535 print("HOLA AMIGOS")
536 wr_exp = self.dataOut.wr_exp
537 if wr_exp== "PPI":
538 wr_type = 'E'
539 ang_ = numpy.mean(self.dataOut.elevation)
540 else:
541 wr_type = 'A'
542 ang_ = numpy.mean(self.dataOut.azimuth)
543
544 wr_writer = '%s%s%2.1f%s'%('-',
545 wr_type,
546 ang_,
547 '-')
548 ###print("wr_writer********************",wr_writer)
549 file = '%s%4.4d%2.2d%2.2d%s%2.2d%2.2d%2.2d%s%s%s' % (self.optchar,
550 timeTuple.tm_year,
551 timeTuple.tm_mon,
552 timeTuple.tm_mday,
553 '-',
554 timeTuple.tm_hour,
555 timeTuple.tm_min,
556 timeTuple.tm_sec,
557 wr_writer,
558 type_data,
559 ext )
560 ###print("FILENAME", file)
561 537
538 if self.var.lower() == 'Zdb'.lower():
539 wr_type = 'Z'
540 elif self.var.lower() == 'Zdb_D'.lower():
541 wr_type = 'D'
542 elif self.var.lower() == 'PhiD_P'.lower():
543 wr_type = 'P'
544 elif self.var.lower() == 'RhoHV_R'.lower():
545 wr_type = 'R'
546 elif self.var.lower() == 'velRadial_V'.lower():
547 wr_type = 'V'
548 elif self.var.lower() == 'Sigmav_W'.lower():
549 wr_type = 'S'
550 elif self.var.lower() == 'dataPP_POWER'.lower():
551 wr_type = 'Pow'
552 elif self.var.lower() == 'dataPP_DOP'.lower():
553 wr_type = 'Dop'
554
555
556 #Z_SOPHy_El10.0_20200505_14:02:15.h5
557 #Z_SOPHy_Az40.0_20200505_14:02:15.h5
558 if self.dataOut.flagMode == 1: #'AZI' #PPI
559 ang_type = 'El'
560 ang_ = round(numpy.mean(self.dataOut.data_ele),1)
561 elif self.dataOut.flagMode == 0: #'ELE' #RHI
562 ang_type = 'Az'
563 ang_ = round(numpy.mean(self.dataOut.data_azi),1)
564
565 file = '%s%s%s%2.1f%s%2.2d%2.2d%2.2d%s%2.2d%2.2d%2.2d%s' % (wr_type,
566 '_SOPHy_',
567 ang_type,
568 ang_,
569 '_',
570 timeTuple.tm_year,
571 timeTuple.tm_mon,
572 timeTuple.tm_mday,
573 '_',
574 timeTuple.tm_hour,
575 timeTuple.tm_min,
576 timeTuple.tm_sec,
577 ext )
562 578
563 579 else:
564 580 setFile = timeTuple.tm_hour*60+timeTuple.tm_min
565 581 file = '%s%4.4d%3.3d%04d%s' % (self.optchar,
566 582 timeTuple.tm_year,
567 583 timeTuple.tm_yday,
568 584 setFile,
569 585 ext )
570 586
571 587 self.filename = os.path.join( path, subfolder, file )
572 588
573 589 #Setting HDF5 File
574
590 print("filename",self.filename)
575 591 self.fp = h5py.File(self.filename, 'w')
576 592 #write metadata
577 593 self.writeMetadata(self.fp)
578 594 #Write data
579 595 self.writeData(self.fp)
580 596
581 597 def getLabel(self, name, x=None):
582 598
583 599 if x is None:
584 600 if 'Data' in self.description:
585 601 data = self.description['Data']
586 602 if 'Metadata' in self.description:
587 603 data.update(self.description['Metadata'])
588 604 else:
589 605 data = self.description
590 606 if name in data:
591 607 if isinstance(data[name], str):
592 608 return data[name]
593 609 elif isinstance(data[name], list):
594 610 return None
595 611 elif isinstance(data[name], dict):
596 612 for key, value in data[name].items():
597 613 return key
598 614 return name
599 615 else:
600 616 if 'Metadata' in self.description:
601 617 meta = self.description['Metadata']
602 618 else:
603 619 meta = self.description
604 620 if name in meta:
605 621 if isinstance(meta[name], list):
606 622 return meta[name][x]
607 623 elif isinstance(meta[name], dict):
608 624 for key, value in meta[name].items():
609 625 return value[x]
610 626 if 'cspc' in name:
611 627 return 'pair{:02d}'.format(x)
612 628 else:
613 629 return 'channel{:02d}'.format(x)
614 630
615 631 def writeMetadata(self, fp):
616 632
617 633 if self.description:
618 634 if 'Metadata' in self.description:
619 635 grp = fp.create_group('Metadata')
620 636 else:
621 637 grp = fp
622 638 else:
623 639 grp = fp.create_group('Metadata')
624 640
625 641 for i in range(len(self.metadataList)):
626 642 if not hasattr(self.dataOut, self.metadataList[i]):
627 643 log.warning('Metadata: `{}` not found'.format(self.metadataList[i]), self.name)
628 644 continue
629 645 value = getattr(self.dataOut, self.metadataList[i])
630 646 if isinstance(value, bool):
631 647 if value is True:
632 648 value = 1
633 649 else:
634 650 value = 0
635 651 grp.create_dataset(self.getLabel(self.metadataList[i]), data=value)
636 652 return
637 653
638 654 def writeData(self, fp):
639 655
640 656 if self.description:
641 657 if 'Data' in self.description:
642 658 grp = fp.create_group('Data')
643 659 else:
644 660 grp = fp
645 661 else:
646 662 grp = fp.create_group('Data')
647 663
648 664 dtsets = []
649 665 data = []
650 666
651 667 for dsInfo in self.dsList:
652 668 if dsInfo['nDim'] == 0:
653 669 ds = grp.create_dataset(
654 670 self.getLabel(dsInfo['variable']),
655 671 (self.blocksPerFile, ),
656 672 chunks=True,
657 673 dtype=numpy.float64)
658 674 dtsets.append(ds)
659 675 data.append((dsInfo['variable'], -1))
660 676 else:
661 677 label = self.getLabel(dsInfo['variable'])
662 678 if label is not None:
663 679 sgrp = grp.create_group(label)
664 680 else:
665 681 sgrp = grp
666 682 for i in range(dsInfo['dsNumber']):
667 683 ds = sgrp.create_dataset(
668 684 self.getLabel(dsInfo['variable'], i),
669 685 (self.blocksPerFile, ) + dsInfo['shape'][1:],
670 686 chunks=True,
671 687 dtype=dsInfo['dtype'])
672 688 dtsets.append(ds)
673 689 data.append((dsInfo['variable'], i))
674 690 fp.flush()
675 691
676 692 log.log('Creating file: {}'.format(fp.filename), self.name)
677 693
678 694 self.ds = dtsets
679 695 self.data = data
680 696 self.firsttime = True
681 697 self.blockIndex = 0
682 698 return
683 699
684 700 def putData(self):
701
685 702 if (self.blockIndex == self.blocksPerFile) or self.timeFlag():# or self.generalFlag_vRF():
686 703 self.closeFile()
687 704 self.setNextFile()
688 705
689 706 for i, ds in enumerate(self.ds):
690 707 attr, ch = self.data[i]
691 708 if ch == -1:
692 709 ds[self.blockIndex] = getattr(self.dataOut, attr)
693 710 else:
694 711 ds[self.blockIndex] = getattr(self.dataOut, attr)[ch]
695 712
696 713 self.fp.flush()
697 714 self.blockIndex += 1
698 715 log.log('Block No. {}/{}'.format(self.blockIndex, self.blocksPerFile), self.name)
699 716
700 717 return
701 718
702 719 def closeFile(self):
703 720
704 721 if self.blockIndex != self.blocksPerFile:
705 722 for ds in self.ds:
706 723 ds.resize(self.blockIndex, axis=0)
707 724
708 725 if self.fp:
709 726 self.fp.flush()
710 727 self.fp.close()
711 728
712 729 def close(self):
713 730
714 731 self.closeFile()
@@ -1,226 +1,236
1 1 '''
2 2 Base clases to create Processing units and operations, the MPDecorator
3 3 must be used in plotting and writing operations to allow to run as an
4 4 external process.
5 5 '''
6 6
7 7 import os
8 8 import inspect
9 9 import zmq
10 10 import time
11 11 import pickle
12 12 import traceback
13 13 from threading import Thread
14 14 from multiprocessing import Process, Queue
15 15 from schainpy.utils import log
16 16
17 17 QUEUE_SIZE = int(os.environ.get('QUEUE_MAX_SIZE', '10'))
18 18
19 19 class ProcessingUnit(object):
20 20 '''
21 21 Base class to create Signal Chain Units
22 22 '''
23 23
24 24 proc_type = 'processing'
25 25
26 26 def __init__(self):
27 27
28 28 self.dataIn = None
29 29 self.dataOut = None
30 30 self.isConfig = False
31 31 self.operations = []
32 32 self.name = 'Test'
33 33 self.inputs = []
34 34
35 35 def setInput(self, unit):
36 36
37 37 attr = 'dataIn'
38 38 for i, u in enumerate(unit):
39 39 if i==0:
40 40 self.dataIn = u.dataOut
41 41 self.inputs.append('dataIn')
42 42 else:
43 43 setattr(self, 'dataIn{}'.format(i), u.dataOut)
44 44 self.inputs.append('dataIn{}'.format(i))
45 45
46 46 def getAllowedArgs(self):
47 47 if hasattr(self, '__attrs__'):
48 48 return self.__attrs__
49 49 else:
50 50 return inspect.getargspec(self.run).args
51 51
52 52 def addOperation(self, conf, operation):
53 53 '''
54 54 '''
55 55
56 56 self.operations.append((operation, conf.type, conf.getKwargs()))
57 57
58 58 def getOperationObj(self, objId):
59 59
60 60 if objId not in list(self.operations.keys()):
61 61 return None
62 62
63 63 return self.operations[objId]
64 64
65 65 def call(self, **kwargs):
66 66 '''
67 67 '''
68 68
69 69 try:
70 70 if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error:
71 71 return self.dataIn.isReady()
72 72 elif self.dataIn is None or not self.dataIn.error:
73 73 self.run(**kwargs)
74 74 elif self.dataIn.error:
75 75 self.dataOut.error = self.dataIn.error
76 76 self.dataOut.flagNoData = True
77 77 except:
78 78 err = traceback.format_exc()
79 79 if 'SchainWarning' in err:
80 80 log.warning(err.split('SchainWarning:')[-1].split('\n')[0].strip(), self.name)
81 81 elif 'SchainError' in err:
82 82 log.error(err.split('SchainError:')[-1].split('\n')[0].strip(), self.name)
83 83 else:
84 84 log.error(err, self.name)
85 85 self.dataOut.error = True
86 86 ##### correcion de la declaracion Out
87 87 for op, optype, opkwargs in self.operations:
88 88 aux = self.dataOut.copy()
89 if optype == 'other' and not self.dataOut.flagNoData:
89 '''
90 print("op",op)
91 try:
92 print("runNextOp",self.dataOut.runNextOp)
93 except:
94 pass
95 '''
96 if not hasattr(self.dataOut, 'runNextOp'):
97 self.dataOut.runNextOp = False
98 if optype == 'other' and (not self.dataOut.flagNoData or self.dataOut.runNextOp):
99 #if optype == 'other' and not self.dataOut.flagNoData:
90 100 self.dataOut = op.run(self.dataOut, **opkwargs)
91 101 elif optype == 'external' and not self.dataOut.flagNoData:
92 102 #op.queue.put(self.dataOut)
93 103 op.queue.put(aux)
94 104 elif optype == 'external' and self.dataOut.error:
95 105 #op.queue.put(self.dataOut)
96 106 op.queue.put(aux)
97 107
98 108 try:
99 109 if self.dataOut.runNextUnit:
100 110 runNextUnit = self.dataOut.runNextUnit
101 111
102 112 else:
103 113 runNextUnit = self.dataOut.isReady()
104 114 except:
105 115 runNextUnit = self.dataOut.isReady()
106 116
107 117 return 'Error' if self.dataOut.error else runNextUnit
108 118
109 119 def setup(self):
110 120
111 121 raise NotImplementedError
112 122
113 123 def run(self):
114 124
115 125 raise NotImplementedError
116 126
117 127 def close(self):
118 128
119 129 return
120 130
121 131
122 132 class Operation(object):
123 133
124 134 '''
125 135 '''
126 136
127 137 proc_type = 'operation'
128 138
129 139 def __init__(self):
130 140
131 141 self.id = None
132 142 self.isConfig = False
133 143
134 144 if not hasattr(self, 'name'):
135 145 self.name = self.__class__.__name__
136 146
137 147 def getAllowedArgs(self):
138 148 if hasattr(self, '__attrs__'):
139 149 return self.__attrs__
140 150 else:
141 151 return inspect.getargspec(self.run).args
142 152
143 153 def setup(self):
144 154
145 155 self.isConfig = True
146 156
147 157 raise NotImplementedError
148 158
149 159 def run(self, dataIn, **kwargs):
150 160 """
151 161 Realiza las operaciones necesarias sobre la dataIn.data y actualiza los
152 162 atributos del objeto dataIn.
153 163
154 164 Input:
155 165
156 166 dataIn : objeto del tipo JROData
157 167
158 168 Return:
159 169
160 170 None
161 171
162 172 Affected:
163 173 __buffer : buffer de recepcion de datos.
164 174
165 175 """
166 176 if not self.isConfig:
167 177 self.setup(**kwargs)
168 178
169 179 raise NotImplementedError
170 180
171 181 def close(self):
172 182
173 183 return
174 184
175 185
176 186 def MPDecorator(BaseClass):
177 187 """
178 188 Multiprocessing class decorator
179 189
180 190 This function add multiprocessing features to a BaseClass.
181 191 """
182 192
183 193 class MPClass(BaseClass, Process):
184 194
185 195 def __init__(self, *args, **kwargs):
186 196 super(MPClass, self).__init__()
187 197 Process.__init__(self)
188 198
189 199 self.args = args
190 200 self.kwargs = kwargs
191 201 self.t = time.time()
192 202 self.op_type = 'external'
193 203 self.name = BaseClass.__name__
194 204 self.__doc__ = BaseClass.__doc__
195 205
196 206 if 'plot' in self.name.lower() and not self.name.endswith('_'):
197 207 self.name = '{}{}'.format(self.CODE.upper(), 'Plot')
198 208
199 209 self.start_time = time.time()
200 210 self.err_queue = args[3]
201 211 self.queue = Queue(maxsize=QUEUE_SIZE)
202 212 self.myrun = BaseClass.run
203 213
204 214 def run(self):
205 215
206 216 while True:
207 217
208 218 dataOut = self.queue.get()
209 219
210 220 if not dataOut.error:
211 221 try:
212 222 BaseClass.run(self, dataOut, **self.kwargs)
213 223 except:
214 224 err = traceback.format_exc()
215 225 log.error(err, self.name)
216 226 else:
217 227 break
218 228
219 229 self.close()
220 230
221 231 def close(self):
222 232
223 233 BaseClass.close(self)
224 234 log.success('Done...(Time:{:4.2f} secs)'.format(time.time()-self.start_time), self.name)
225 235
226 236 return MPClass
@@ -1,5106 +1,5107
1 1
2 2 import os
3 3 import time
4 4 import math
5 5
6 6 import re
7 7 import datetime
8 8 import copy
9 9 import sys
10 10 import importlib
11 11 import itertools
12 12
13 13 from multiprocessing import Pool, TimeoutError
14 14 from multiprocessing.pool import ThreadPool
15 15 import numpy
16 16 import glob
17 17 import scipy
18 18 import h5py
19 19 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state papameters
20 20 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
21 21 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
22 22 from scipy import asarray as ar,exp
23 23 from scipy.optimize import curve_fit
24 24 from schainpy.utils import log
25 25 import schainpy.admin
26 26 import warnings
27 27 from scipy import optimize, interpolate, signal, stats, ndimage
28 28 from scipy.optimize.optimize import OptimizeWarning
29 29 warnings.filterwarnings('ignore')
30 30
31 31
32 32 SPEED_OF_LIGHT = 299792458
33 33
34 34 '''solving pickling issue'''
35 35
def _pickle_method(method):
    """Reduce a bound method to (_unpickle_method, state) so it can be pickled."""
    bound_to = method.__self__
    return _unpickle_method, (method.__func__.__name__, bound_to, bound_to.__class__)
41 41
42 42 def _unpickle_method(func_name, obj, cls):
43 43 for cls in cls.mro():
44 44 try:
45 45 func = cls.__dict__[func_name]
46 46 except KeyError:
47 47 pass
48 48 else:
49 49 break
50 50 return func.__get__(obj, cls)
51 51
def isNumber(str):
    """
    Return True when the argument can be parsed as a float, else False.

    NOTE(review): the parameter name shadows the builtin 'str'; it is
    kept unchanged for backward compatibility with existing callers.
    """
    try:
        float(str)
    except (TypeError, ValueError):
        # narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt/SystemExit
        return False
    return True
58 58
class ParametersProc(ProcessingUnit):
    """
    Processing unit that converts an incoming Voltage, Spectra,
    Correlation or Parameters data object (self.dataIn) into a
    Parameters data object (self.dataOut).
    """

    METHODS = {}
    nSeconds = None

    def __init__(self):
        ProcessingUnit.__init__(self)

        # self.objectDict = {}
        self.buffer = None
        self.firstdatatime = None
        self.profIndex = 0
        self.dataOut = Parameters()
        self.setupReq = False # TODO: add this to every processing unit

    def __updateObjFromInput(self):
        """Copy metadata/headers from self.dataIn into self.dataOut (no data)."""

        self.dataOut.inputUnit = self.dataIn.type

        self.dataOut.timeZone = self.dataIn.timeZone
        self.dataOut.dstFlag = self.dataIn.dstFlag
        self.dataOut.errorCount = self.dataIn.errorCount
        self.dataOut.useLocalTime = self.dataIn.useLocalTime

        self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
        self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
        self.dataOut.channelList = self.dataIn.channelList
        self.dataOut.heightList = self.dataIn.heightList
        self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
        # self.dataOut.nHeights = self.dataIn.nHeights
        # self.dataOut.nChannels = self.dataIn.nChannels
        # self.dataOut.nBaud = self.dataIn.nBaud
        # self.dataOut.nCode = self.dataIn.nCode
        # self.dataOut.code = self.dataIn.code
        # self.dataOut.nProfiles = self.dataOut.nFFTPoints
        self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
        # self.dataOut.utctime = self.firstdatatime
        self.dataOut.utctime = self.dataIn.utctime
        self.dataOut.flagDecodeData = self.dataIn.flagDecodeData # assume the data is already decoded
        self.dataOut.flagDeflipData = self.dataIn.flagDeflipData # assume the data has no flip applied
        self.dataOut.nCohInt = self.dataIn.nCohInt
        # self.dataOut.nIncohInt = 1
        # self.dataOut.ippSeconds = self.dataIn.ippSeconds
        # self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
        self.dataOut.timeInterval1 = self.dataIn.timeInterval
        self.dataOut.heightList = self.dataIn.heightList
        self.dataOut.frequency = self.dataIn.frequency
        # self.dataOut.noise = self.dataIn.noise
        self.dataOut.runNextUnit = self.dataIn.runNextUnit

    def run(self, runNextUnit = 0):
        """
        Populate self.dataOut from self.dataIn according to its type
        (Voltage, Spectra, Correlation or Parameters).

        runNextUnit : forwarded to dataIn.runNextUnit so chained units
                      know whether to keep processing.

        Returns True for Parameters input, None otherwise.
        """

        self.dataIn.runNextUnit = runNextUnit
        #print("HOLA MUNDO SOY YO")
        #---------------------- Voltage Data ---------------------------

        if self.dataIn.type == "Voltage":

            self.__updateObjFromInput()
            self.dataOut.data_pre = self.dataIn.data.copy()
            self.dataOut.flagNoData = False
            self.dataOut.utctimeInit = self.dataIn.utctime
            self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds

            if hasattr(self.dataIn, 'flagDataAsBlock'):
                self.dataOut.flagDataAsBlock = self.dataIn.flagDataAsBlock

            if hasattr(self.dataIn, 'profileIndex'):
                self.dataOut.profileIndex = self.dataIn.profileIndex

            # forward optional pulse-pair products when present
            if hasattr(self.dataIn, 'dataPP_POW'):
                self.dataOut.dataPP_POW = self.dataIn.dataPP_POW

            if hasattr(self.dataIn, 'dataPP_POWER'):
                self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER

            if hasattr(self.dataIn, 'dataPP_DOP'):
                self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP

            if hasattr(self.dataIn, 'dataPP_SNR'):
                self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR

            if hasattr(self.dataIn, 'dataPP_WIDTH'):
                self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH

            if hasattr(self.dataIn, 'dataPP_CCF'):
                self.dataOut.dataPP_CCF = self.dataIn.dataPP_CCF

            return

        #---------------------- Spectra Data ---------------------------

        if self.dataIn.type == "Spectra":
            #print("que paso en spectra")
            self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
            self.dataOut.data_spc = self.dataIn.data_spc
            self.dataOut.data_cspc = self.dataIn.data_cspc
            self.dataOut.nProfiles = self.dataIn.nProfiles
            self.dataOut.nIncohInt = self.dataIn.nIncohInt
            self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
            self.dataOut.ippFactor = self.dataIn.ippFactor
            self.dataOut.abscissaList = self.dataIn.getVelRange(1)
            self.dataOut.spc_noise = self.dataIn.getNoise()
            self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
            # self.dataOut.normFactor = self.dataIn.normFactor
            self.dataOut.pairsList = self.dataIn.pairsList
            self.dataOut.groupList = self.dataIn.pairsList
            self.dataOut.flagNoData = False

            if hasattr(self.dataIn, 'flagDataAsBlock'):
                self.dataOut.flagDataAsBlock = self.dataIn.flagDataAsBlock

            if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
                self.dataOut.ChanDist = self.dataIn.ChanDist
            else: self.dataOut.ChanDist = None

            #if hasattr(self.dataIn, 'VelRange'): #Velocities range
            #    self.dataOut.VelRange = self.dataIn.VelRange
            #else: self.dataOut.VelRange = None

            if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
                self.dataOut.RadarConst = self.dataIn.RadarConst

            if hasattr(self.dataIn, 'NPW'): #NPW
                self.dataOut.NPW = self.dataIn.NPW

            if hasattr(self.dataIn, 'COFA'): #COFA
                self.dataOut.COFA = self.dataIn.COFA



        #---------------------- Correlation Data ---------------------------

        if self.dataIn.type == "Correlation":
            acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()

            self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
            self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
            self.dataOut.groupList = (acf_pairs, ccf_pairs)

            self.dataOut.abscissaList = self.dataIn.lagRange
            self.dataOut.noise = self.dataIn.noise
            self.dataOut.data_snr = self.dataIn.SNR
            self.dataOut.flagNoData = False
            self.dataOut.nAvg = self.dataIn.nAvg

        #---------------------- Parameters Data ---------------------------

        if self.dataIn.type == "Parameters":
            self.dataOut.copy(self.dataIn)
            self.dataOut.flagNoData = False
            #print("yo si entre")

            return True

        # Spectra/Correlation fall through to here
        self.__updateObjFromInput()
        #print("yo si entre2")

        self.dataOut.utctimeInit = self.dataIn.utctime
        self.dataOut.paramInterval = self.dataIn.timeInterval
        #print("soy spectra ",self.dataOut.utctimeInit)
        return
221 221
222 222
def target(tups):
    """Pool.map helper: unpack (operation, args) and dispatch to FitGau."""
    operation, fit_args = tups
    return operation.FitGau(fit_args)
228 228
class RemoveWideGC(Operation):
    ''' This class removes the wide (ground) clutter and replaces it with simple interpolated points
    This mainly applies to CLAIRE radar

    ClutterWidth :    Width (velocity units) around zero to look for the clutter peak

    Input:

        self.dataOut.data_pre : SPC and CSPC
        self.dataOut.spc_range : To select wind and rainfall velocities

    Affected:

        self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind

    Written by D. Scipión 25.02.2021
    '''
    def __init__(self):
        Operation.__init__(self)
        self.i = 0
        self.ich = 0
        self.ir = 0

    def run(self, dataOut, ClutterWidth=2.5):
        # print ('Entering RemoveWideGC ... ')

        self.spc = dataOut.data_pre[0].copy()
        self.spc_out = dataOut.data_pre[0].copy()
        self.Num_Chn = self.spc.shape[0]
        self.Num_Hei = self.spc.shape[2]
        # drop the last range point so VelRange matches the spectral bins
        VelRange = dataOut.spc_range[2][:-1]
        dv = VelRange[1]-VelRange[0]

        # Find the velocities that corresponds to zero
        gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))

        # Removing novalid data from the spectra
        for ich in range(self.Num_Chn) :
            for ir in range(self.Num_Hei) :
                # Estimate the noise at each range
                HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)

                # Removing the noise floor at each range
                novalid = numpy.where(self.spc[ich,:,ir] < HSn)
                self.spc[ich,novalid,ir] = HSn

                # pad with the noise level so edge peaks/valleys are detected
                junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
                j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
                j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
                if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
                    continue
                junk3 = numpy.squeeze(numpy.diff(j1index))
                junk4 = numpy.squeeze(numpy.diff(j2index))

                valleyindex = j2index[numpy.where(junk4>1)]
                peakindex = j1index[numpy.where(junk3>1)]

                # keep only peaks close to zero velocity (within 2.5 bins)
                isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
                if numpy.size(isvalid) == 0 :
                    continue
                if numpy.size(isvalid) >1 :
                    vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
                    isvalid = isvalid[vindex]

                # clutter peak
                gcpeak = peakindex[isvalid]
                # nearest valley on the left of the peak
                vl = numpy.where(valleyindex < gcpeak)
                if numpy.size(vl) == 0:
                    continue
                gcvl = valleyindex[vl[0][-1]]
                # nearest valley on the right of the peak
                vr = numpy.where(valleyindex > gcpeak)
                if numpy.size(vr) == 0:
                    continue
                gcvr = valleyindex[vr[0][0]]

                # Removing the clutter: interpolate across the valley-to-valley span
                interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
                gcindex = gc_values[gcvl+1:gcvr-1]
                self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])

        dataOut.data_pre[0] = self.spc_out
        #print ('Leaving RemoveWideGC ... ')
        return dataOut
312 312
class SpectralFilters(Operation):
    ''' This class replaces the novalid (clutter) values with noise for each channel
    This applies to CLAIRE RADAR

    PositiveLimit  :  Right (positive velocity) limit of novalid data
    NegativeLimit  :  Left (negative velocity) limit of novalid data

    Input:

        self.dataOut.data_pre : SPC and CSPC
        self.dataOut.spc_range : To select wind and rainfall velocities

    Affected:

        self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind

    Written by D. Scipión 29.01.2021
    '''
    def __init__(self):
        Operation.__init__(self)
        self.i = 0

    def run(self, dataOut, PositiveLimit=1.5, NegativeLimit=-1.5):
        """
        Replace spectral bins whose velocity lies inside
        [NegativeLimit, PositiveLimit] with the per-channel noise level.

        Bug fix: the original body referenced 'novalid' without ever
        computing it, so this operation always crashed with NameError.
        The limits documented in the class docstring are now explicit
        keyword parameters (backward-compatible defaults).
        NOTE(review): the +/-1.5 default limits are an assumption —
        confirm against the CLAIRE processing configuration.
        """

        self.spc = dataOut.data_pre[0].copy()
        self.Num_Chn = self.spc.shape[0]
        VelRange = dataOut.spc_range[2]

        # novalid corresponds to data within the Negative and PositiveLimit
        # (VelRange has one more point than the spectral bins, hence [:-1])
        novalid = numpy.where(numpy.logical_and(VelRange[:-1] >= NegativeLimit,
                                                VelRange[:-1] <= PositiveLimit))

        # Removing novalid data from the spectra
        for i in range(self.Num_Chn):
            self.spc[i,novalid,:] = dataOut.noise[i]
        dataOut.data_pre[0] = self.spc
        return dataOut
349 349
class GaussianFit(Operation):

    '''
    Fit one or two generalized Gaussians (gg) to the PSD shape across a
    "power band" identified from a cumsum of the measured spectrum - noise.

    Input:
        self.dataOut.data_pre    :    SelfSpectra

    Output:
        self.dataOut.DGauFitParams : per channel [noise, amplitude, shift,
                                     width, power] for the two Gaussians
        self.dataOut.GaussFit0/1   : reconstructed Gaussian curves
    '''
    def __init__(self):
        Operation.__init__(self)
        self.i=0


    def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
        """This routine will find a couple of generalized Gaussians to a power spectrum
        methods: generalized, squared
        input: spc
        output:
            noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1

        Raises ValueError for an unknown method.
        """
        print ('Entering ',method,' double Gaussian fit')
        self.spc = dataOut.data_pre[0].copy()
        self.Num_Hei = self.spc.shape[2]
        self.Num_Bin = self.spc.shape[1]
        self.Num_Chn = self.spc.shape[0]

        # Fit every channel in parallel; close/join the pool afterwards so
        # worker processes are not leaked on repeated calls (the original
        # never closed the pool).
        pool = Pool(processes=self.Num_Chn)
        args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
        objs = [self for __ in range(self.Num_Chn)]
        attrs = list(zip(objs, args))
        try:
            DGauFitParam = pool.map(target, attrs)
        finally:
            pool.close()
            pool.join()
        # Parameters:
        # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
        dataOut.DGauFitParams = numpy.asarray(DGauFitParam)

        # Double Gaussian Curves
        gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
        gau0[:] = numpy.NaN
        gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
        gau1[:] = numpy.NaN
        x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
        for iCh in range(self.Num_Chn):
            N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
            N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
            A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
            A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
            v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
            v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
            s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
            s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
            # Bug fix: this literal was misspelled 'genealized', so the
            # DEFAULT method ('generalized') matched neither branch and the
            # code crashed below on unbound p0/p1.
            if method == 'generalized':
                p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
                p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
            elif method == 'squared':
                p0 = 2.
                p1 = 2.
            else:
                raise ValueError("GaussianFit: unknown method %r (use 'generalized' or 'squared')" % (method,))
            gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
            gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
        dataOut.GaussFit0 = gau0
        dataOut.GaussFit1 = gau1

        print('Leaving ',method ,' double Gaussian fit')
        return dataOut

    def FitGau(self, X):
        """Fit the double (generalized) Gaussian model for one channel.

        X : tuple (Vrange, ch, wnoise, num_intg, SNRlimit) as built in run().
        Returns a [5, Num_Hei, 2] array: noise, amplitude, shift (m/s),
        width (m/s) and power for each of the two Gaussians (NaN where the
        SNR is below SNRlimit or no usable power band is found).
        """
        # Assigning the variables
        Vrange, ch, wnoise, num_intg, SNRlimit = X
        # Noise Limits
        noisebl = wnoise * 0.9
        noisebh = wnoise * 1.1
        # Radar Velocity
        Va = max(Vrange)
        deltav = Vrange[1] - Vrange[0]
        x = numpy.arange(self.Num_Bin)

        # 5 parameters, 2 Gaussians
        DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
        DGauFitParam[:] = numpy.NaN

        for ht in range(self.Num_Hei):
            # Spectra at each range
            spc = numpy.asarray(self.spc)[ch,:,ht]
            snr = ( spc.mean() - wnoise ) / wnoise
            snrdB = 10.*numpy.log10(snr)

            if snrdB < SNRlimit :
                # SNR below threshold: leave NaNs for this range
                continue
            #############################################
            # normalizing spc and noise
            # This part differs from gg1
            # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
            #spc = spc / spc_norm_max
            #############################################

            fatspectra=1.0
            # noise per channel.... we might want to use the noise at each range

            spc = spc - wnoise # signal

            # roll so the minimum is first: cumsum then localizes the energy band
            minx = numpy.argmin(spc)
            spcs = numpy.roll(spc,-minx)
            cum = numpy.cumsum(spcs)

            cummax = max(cum)
            epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
            cumlo = cummax * epsi
            cumhi = cummax * (1-epsi)
            powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])

            if len(powerindex) < 1:# case for powerindex 0
                continue
            powerlo = powerindex[0]
            powerhi = powerindex[-1]
            powerwidth = powerhi-powerlo
            if powerwidth <= 1:
                continue

            firstpeak = powerlo + powerwidth/10.# first gaussian energy location
            secondpeak = powerhi - powerwidth/10. #second gaussian energy location
            midpeak = (firstpeak + secondpeak)/2.
            firstamp = spcs[int(firstpeak)]
            secondamp = spcs[int(secondpeak)]
            midamp = spcs[int(midpeak)]

            y_data = spc + wnoise

            ''' single Gaussian '''
            shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
            width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
            power0 = 2.
            amplitude0 = midamp
            state0 = [shift0,width0,amplitude0,power0,wnoise]
            bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
            lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)

            chiSq1=lsq1[1]

            # historical narrow-spectrum shortcut (fatspectra is fixed at 1.0,
            # so this branch is currently dead; kept for behavior parity)
            if fatspectra<1.0 and powerwidth<4:
                choice=0
                Amplitude0=lsq1[0][2]
                shift0=lsq1[0][0]
                width0=lsq1[0][1]
                p0=lsq1[0][3]
                Amplitude1=0.
                shift1=0.
                width1=0.
                p1=0.
                noise=lsq1[0][4]

            ''' two Gaussians '''
            shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
            shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
            width0 = powerwidth/6.
            width1 = width0
            power0 = 2.
            power1 = power0
            amplitude0 = firstamp
            amplitude1 = secondamp
            state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
            bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))

            lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )

            chiSq2 = lsq2[1]

            oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)

            if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
                if oneG:
                    choice = 0
                else:
                    w1 = lsq2[0][1]; w2 = lsq2[0][5]
                    a1 = lsq2[0][2]; a2 = lsq2[0][6]
                    p1 = lsq2[0][3]; p2 = lsq2[0][7]
                    s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
                    s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
                    gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each ggaussian with proper p scaling

                    if gp1>gp2:
                        if a1>0.7*a2:
                            choice = 1
                        else:
                            choice = 2
                    elif gp2>gp1:
                        if a2>0.7*a1:
                            choice = 2
                        else:
                            choice = 1
                    else:
                        choice = numpy.argmax([a1,a2])+1
            else: # with low SNR go to the most energetic peak
                choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])

            shift0 = lsq2[0][0]
            vel0 = Vrange[0] + shift0 * deltav
            shift1 = lsq2[0][4]

            #first peak will be 0, second peak will be 1
            if vel0 > -Va and vel0 < Va : #first peak is in the correct range
                shift0 = lsq2[0][0]
                width0 = lsq2[0][1]
                Amplitude0 = lsq2[0][2]
                p0 = lsq2[0][3]

                shift1 = lsq2[0][4]
                width1 = lsq2[0][5]
                Amplitude1 = lsq2[0][6]
                p1 = lsq2[0][7]
                noise = lsq2[0][8]
            else:
                shift1 = lsq2[0][0]
                width1 = lsq2[0][1]
                Amplitude1 = lsq2[0][2]
                p1 = lsq2[0][3]

                shift0 = lsq2[0][4]
                width0 = lsq2[0][5]
                Amplitude0 = lsq2[0][6]
                p0 = lsq2[0][7]
                noise = lsq2[0][8]

            if Amplitude0<0.05: # in case the peak is noise
                shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
            if Amplitude1<0.05:
                shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]

            DGauFitParam[0,ht,0] = noise
            DGauFitParam[0,ht,1] = noise
            DGauFitParam[1,ht,0] = Amplitude0
            DGauFitParam[1,ht,1] = Amplitude1
            DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
            DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
            DGauFitParam[3,ht,0] = width0 * deltav
            DGauFitParam[3,ht,1] = width1 * deltav
            DGauFitParam[4,ht,0] = p0
            DGauFitParam[4,ht,1] = p1

        return DGauFitParam

    def y_model1(self,x,state):
        """One generalized Gaussian plus its Nyquist-aliased copies plus noise."""
        shift0, width0, amplitude0, power0, noise = state
        model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
        model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
        model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
        return model0 + model0u + model0d + noise

    def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
        """Two generalized Gaussians plus their Nyquist-aliased copies plus noise."""
        shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
        model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
        model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
        model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)

        model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
        model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
        model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
        return model0 + model0u + model0d + model1 + model1u + model1d + noise

    def misfit1(self,state,y_data,x,num_intg):
        """Log-domain chi-square of y_data against the single-Gaussian model."""
        return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented

    def misfit2(self,state,y_data,x,num_intg):
        """Log-domain chi-square of y_data against the double-Gaussian model."""
        return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
705 705
706 706
707 707
708 708 class PrecipitationProc(Operation):
709 709
710 710 '''
711 711 Operator that estimates Reflectivity factor (Z), and estimates rainfall Rate (R)
712 712
713 713 Input:
714 714 self.dataOut.data_pre : SelfSpectra
715 715
716 716 Output:
717 717
718 718 self.dataOut.data_output : Reflectivity factor, rainfall Rate
719 719
720 720
721 721 Parameters affected:
722 722 '''
723 723
    def __init__(self):
        """Initialize as a plain Operation; self.i is an unused legacy counter."""
        Operation.__init__(self)
        self.i=0
727 727
    def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
            tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30):
        """
        Estimate equivalent reflectivity (dBZe), mean fall velocity and
        rainfall rate from self-spectra via the radar equation.

        radar == "MIRA35C" delegates to dBZeMODE2; any other value uses the
        generic drop-size-distribution path below.  Radar-equation constants
        (Pt, Gt, Gr, Lambda, aL, tauW, ThetaT, ThetaR) default to the local
        profiler configuration.  Writes dataOut.data_param rows:
        0 -> dBZe, 1 -> -W (fall velocity), 2 -> rain rate [mm/hr].
        """

        # print ('Entering PrecepitationProc ... ')

        if radar == "MIRA35C" :

            self.spc = dataOut.data_pre[0].copy()
            self.Num_Hei = self.spc.shape[2]
            self.Num_Bin = self.spc.shape[1]
            self.Num_Chn = self.spc.shape[0]
            Ze = self.dBZeMODE2(dataOut)

        else:

            self.spc = dataOut.data_pre[0].copy()

            # NOTE: the TX pulse range gates must be removed (first 7 heights)
            self.spc[:,:,0:7]= numpy.NaN

            self.Num_Hei = self.spc.shape[2]
            self.Num_Bin = self.spc.shape[1]
            self.Num_Chn = self.spc.shape[0]

            VelRange = dataOut.spc_range[2]

            ''' Se obtiene la constante del RADAR '''

            self.Pt = Pt
            self.Gt = Gt
            self.Gr = Gr
            self.Lambda = Lambda
            self.aL = aL
            self.tauW = tauW
            self.ThetaT = ThetaT
            self.ThetaR = ThetaR
            self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
            self.lt = 10**(1.67/10) # Tx cable loss 1.67 dB
            self.lr = 10**(5.73/10) # Rx cable loss 5.73 dB

            Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
            Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
            RadarConstant = 10e-26 * Numerator / Denominator #
            ExpConstant = 10**(40/10) # experimental constant

            # per-channel signal power above the noise floor
            SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
            for i in range(self.Num_Chn):
                SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
            SignalPower[numpy.where(SignalPower < 0)] = 1e-20

            SPCmean = numpy.mean(SignalPower, 0)
            Pr = SPCmean[:,:]/dataOut.normFactor

            # Declaring auxiliary variables
            Range = dataOut.heightList*1000. #Range in m
            # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
            rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
            zMtrx = rMtrx+Altitude
            # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
            VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))

            # height dependence to air density Foote and Du Toit (1969)
            delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
            VMtrx = VelMtrx / delv_z #Normalized velocity
            VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
            # Diameter is related to the fall speed of falling drops
            D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
            # Only valid for D>= 0.16 mm
            D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN

            #Calculate Radar Reflectivity ETAn
            ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
            ETAd = ETAn * 6.18 * exp( -0.6 * D_Vz ) * delv_z
            # Radar Cross Section
            sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
            # Drop Size Distribution
            DSD = ETAn / sigmaD
            # Equivalent reflectivity
            Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
            Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
            # RainFall Rate
            RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr

            # Censoring the data
            # Removing data below SNR threshold; per-channel SNR should be considered
            SNRth = 10**(SNRdBlimit/10) #-30dB
            novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition. Maybe OR condition better
            W = numpy.nanmean(dataOut.data_dop,0)
            W[novalid] = numpy.NaN
            Ze_org[novalid] = numpy.NaN
            RR[novalid] = numpy.NaN

            # NOTE(review): exports the rain rate at fixed height index 8 —
            # confirm this hard-coded gate is intended
            dataOut.data_output = RR[8]
            dataOut.data_param = numpy.ones([3,self.Num_Hei])
            dataOut.channelList = [0,1,2]

            dataOut.data_param[0]=10*numpy.log10(Ze_org)
            dataOut.data_param[1]=-W
            dataOut.data_param[2]=RR

        # print ('Leaving PrecepitationProc ... ')
        return dataOut
830 830
831 831 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
832 832
833 833 NPW = dataOut.NPW
834 834 COFA = dataOut.COFA
835 835
836 836 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
837 837 RadarConst = dataOut.RadarConst
838 838 #frequency = 34.85*10**9
839 839
840 840 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
841 841 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
842 842
843 843 ETA = numpy.sum(SNR,1)
844 844
845 845 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
846 846
847 847 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
848 848
849 849 for r in range(self.Num_Hei):
850 850
851 851 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
852 852 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
853 853
854 854 return Ze
855 855
856 856 # def GetRadarConstant(self):
857 857 #
858 858 # """
859 859 # Constants:
860 860 #
861 861 # Pt: Transmission Power dB 5kW 5000
862 862 # Gt: Transmission Gain dB 24.7 dB 295.1209
863 863 # Gr: Reception Gain dB 18.5 dB 70.7945
864 864 # Lambda: Wavelenght m 0.6741 m 0.6741
865 865 # aL: Attenuation loses dB 4dB 2.5118
866 866 # tauW: Width of transmission pulse s 4us 4e-6
867 867 # ThetaT: Transmission antenna bean angle rad 0.1656317 rad 0.1656317
868 868 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
869 869 #
870 870 # """
871 871 #
872 872 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
873 873 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * TauW * numpy.pi * ThetaT * TheraR)
874 874 # RadarConstant = Numerator / Denominator
875 875 #
876 876 # return RadarConstant
877 877
878 878
879 879
class FullSpectralAnalysis(Operation):

    """
    Function that implements Full Spectral Analysis technique.

    Input:
        self.dataOut.data_pre : SelfSpectra and CrossSpectra data
        self.dataOut.groupList : Pairlist of channels
        self.dataOut.ChanDist : Physical distance between receivers


    Output:

        self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind


    Parameters affected: Winds, height range, SNR

    """
    def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,
            minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
        # Entry point: estimates (zonal, meridional, vertical) winds per height
        # and stores them, together with the averaged SNR, in dataOut.data_param.
        # Xi*/Eta* are receiver-separation components, used only when
        # dataOut.ChanDist is not provided.

        spc = dataOut.data_pre[0].copy()
        cspc = dataOut.data_pre[1]
        nHeights = spc.shape[2]

        # first_height = 0.75 #km (ref: data header 20170822)
        # resolution_height = 0.075 #km
        '''
        finding height range. check this when radar parameters are changed!
        '''
        if maxheight is not None:
            # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
            range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
        else:
            range_max = nHeights
        if minheight is not None:
            # range_min = int((minheight - first_height) / resolution_height) # theoretical
            range_min = int(13.26 * minheight - 5) # empirical, works better
            if range_min < 0:
                range_min = 0
        else:
            range_min = 0

        pairsList = dataOut.groupList
        if dataOut.ChanDist is not None :
            ChanDist = dataOut.ChanDist
        else:
            ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])

        # 4 variables: zonal, meridional, vertical, and average SNR
        data_param = numpy.zeros([4,nHeights]) * numpy.NaN
        velocityX = numpy.zeros([nHeights]) * numpy.NaN
        velocityY = numpy.zeros([nHeights]) * numpy.NaN
        velocityZ = numpy.zeros([nHeights]) * numpy.NaN

        # channel-averaged SNR in dB, used both as fit gate and as output
        dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))

        '''***********************************************WIND ESTIMATION**************************************'''
        for Height in range(nHeights):

            if Height >= range_min and Height < range_max:
                # error_code will be useful in future analysis
                [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
                    ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)

                # discard non-physical horizontal velocities (|v| >= 100 m/s)
                if abs(Vzon) < 100. and abs(Vmer) < 100.:
                    velocityX[Height] = Vzon
                    velocityY[Height] = -Vmer
                    velocityZ[Height] = Vver

        # Censoring data with SNR threshold
        dbSNR [dbSNR < SNRdBlimit] = numpy.NaN

        data_param[0] = velocityX
        data_param[1] = velocityY
        data_param[2] = velocityZ
        data_param[3] = dbSNR
        dataOut.data_param = data_param
        return dataOut

    def moving_average(self,x, N=2):
        """ convolution for smoothenig data. note that last N-1 values are convolution with zeroes """
        return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]

    def gaus(self,xSamples,Amp,Mu,Sigma):
        """Gaussian model used by curve_fit: Amp * exp(-0.5*((x-Mu)/Sigma)**2)."""
        return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)

    def Moments(self, ySamples, xSamples):
        """Return [power, radial velocity, spectral width] of a spectrum.

        ySamples is the spectral amplitude, xSamples its abscissa
        (frequency or velocity). NaN-aware.
        """
        Power = numpy.nanmean(ySamples) # Power, 0th Moment
        yNorm = ySamples / numpy.nansum(ySamples)
        RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
        Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
        StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Desv. Estandar, Ancho espectral
        return numpy.array([Power,RadVel,StdDev])

    def StopWindEstimation(self, error_code):
        """Abort helper: return NaN winds together with the given error_code."""
        Vzon = numpy.NaN
        Vmer = numpy.NaN
        Vver = numpy.NaN
        return Vzon, Vmer, Vver, error_code

    def AntiAliasing(self, interval, maxstep):
        """
        function to prevent errors from aliased values when computing phaseslope
        """
        # Whenever a jump larger than maxstep appears, shift the remainder of
        # the interval by +/- 2*pi so the phase sequence stays continuous.
        antialiased = numpy.zeros(len(interval))
        copyinterval = interval.copy()

        antialiased[0] = copyinterval[0]

        for i in range(1,len(antialiased)):
            step = interval[i] - interval[i-1]
            if step > maxstep:
                copyinterval -= 2*numpy.pi
                antialiased[i] = copyinterval[i]
            elif step < maxstep*(-1):
                copyinterval += 2*numpy.pi
                antialiased[i] = copyinterval[i]
            else:
                antialiased[i] = copyinterval[i].copy()

        return antialiased

    def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
        """
        Function that Calculates Zonal, Meridional and Vertical wind velocities.
        Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.

        Input:
            spc, cspc : self spectra and cross spectra data. In Briggs notation something like S_i*(S_i)_conj, (S_j)_conj respectively.
            pairsList : Pairlist of channels
            ChanDist : array of xi_ij and eta_ij
            Height : height at which data is processed
            noise : noise in [channels] format for specific height
            Abbsisarange : range of the frequencies or velocities
            dbSNR, SNRlimit : signal to noise ratio in db, lower limit

        Output:
            Vzon, Vmer, Vver : wind velocities
            error_code : int that states where code is terminated

            0 : no error detected
            1 : Gaussian of mean spc exceeds widthlimit
            2 : no Gaussian of mean spc found
            3 : SNR to low or velocity to high -> prec. e.g.
            4 : at least one Gaussian of cspc exceeds widthlimit
            5 : zero out of three cspc Gaussian fits converged
            6 : phase slope fit could not be found
            7 : arrays used to fit phase have different length
            8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)

        """

        error_code = 0

        nChan = spc.shape[0]
        nProf = spc.shape[1]
        nPair = cspc.shape[0]

        SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
        CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
        phase = numpy.zeros([nPair, nProf]) # phase between channels
        PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
        PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
        xFrec = AbbsisaRange[0][:-1] # frequency range
        xVel = AbbsisaRange[2][:-1] # velocity range
        xSamples = xFrec # the frequency range is taken
        delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x

        # only consider velocities with in NegativeLimit and PositiveLimit
        if (NegativeLimit is None):
            NegativeLimit = numpy.min(xVel)
        if (PositiveLimit is None):
            PositiveLimit = numpy.max(xVel)
        xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
        xSamples_zoom = xSamples[xvalid]

        '''Getting Eij and Nij'''
        Xi01, Xi02, Xi12 = ChanDist[:,0]
        Eta01, Eta02, Eta12 = ChanDist[:,1]

        # spwd limit - updated by D. Scipion 30.03.2021
        widthlimit = 10
        '''************************* SPC is normalized ********************************'''
        spc_norm = spc.copy()
        # For each channel
        for i in range(nChan):
            spc_sub = spc_norm[i,:] - noise[i] # only the signal power
            SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)

        '''********************** FITTING MEAN SPC GAUSSIAN **********************'''

        """ the gaussian of the mean: first subtract noise, then normalize. this is legal because
            you only fit the curve and don't need the absolute value of height for calculation,
            only for estimation of width. for normalization of cross spectra, you need initial,
            unnormalized self-spectra With noise.

            Technically, you don't even need to normalize the self-spectra, as you only need the
            width of the peak. However, it was left this way. Note that the normalization has a flaw:
            due to subtraction of the noise, some values are below zero. Raw "spc" values should be
            >= 0, as it is the modulus squared of the signals (complex * it's conjugate)
        """
        # initial conditions
        popt = [1e-10,0,1e-10]
        # Spectra average
        SPCMean = numpy.average(SPC_Samples,0)
        # Moments in frequency
        SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)

        # Gauss Fit SPC in frequency domain
        if dbSNR > SNRlimit: # only if SNR > SNRth
            try:
                popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
                if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
                    return self.StopWindEstimation(error_code = 1)
                FitGauss = self.gaus(xSamples_zoom,*popt)
            except :#RuntimeError:
                return self.StopWindEstimation(error_code = 2)
        else:
            return self.StopWindEstimation(error_code = 3)

        '''***************************** CSPC Normalization *************************
        The Spc spectra are used to normalize the crossspectra. Peaks from precipitation
        influence the norm which is not desired. First, a range is identified where the
        wind peak is estimated -> sum_wind is sum of those frequencies. Next, the area
        around it gets cut off and values replaced by mean determined by the boundary
        data -> sum_noise (spc is not normalized here, thats why the noise is important)

        The sums are then added and multiplied by range/datapoints, because you need
        an integral and not a sum for normalization.

        A norm is found according to Briggs 92.
        '''
        # for each pair
        for i in range(nPair):
            cspc_norm = cspc[i,:].copy()
            chan_index0 = pairsList[i][0]
            chan_index1 = pairsList[i][1]
            CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
            phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)

        CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
                                    self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
                                    self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])

        popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
        # NOTE(review): these zero arrays are sized with len(xSamples) but are
        # overwritten below with zoom-length fits when the fits converge.
        FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))

        '''*******************************FIT GAUSS CSPC************************************'''
        try:
            popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
            if popt01[2] > widthlimit: # CONDITION
                return self.StopWindEstimation(error_code = 4)
            popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
            if popt02[2] > widthlimit: # CONDITION
                return self.StopWindEstimation(error_code = 4)
            popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
            if popt12[2] > widthlimit: # CONDITION
                return self.StopWindEstimation(error_code = 4)

            FitGauss01 = self.gaus(xSamples_zoom, *popt01)
            FitGauss02 = self.gaus(xSamples_zoom, *popt02)
            FitGauss12 = self.gaus(xSamples_zoom, *popt12)
        except:
            return self.StopWindEstimation(error_code = 5)


        '''************* Getting Fij ***************'''
        # x-axis point of the gaussian where the center is located from GaussFit of spectra
        GaussCenter = popt[1]
        ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
        PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]

        # Point where e^-1 is located in the gaussian
        PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
        FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to"Peminus1" in "FitGauss"
        PointFij = numpy.where(FitGauss==FijClosest)[0][0]
        Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])

        '''********** Taking frequency ranges from mean SPCs **********'''
        GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
        Range = numpy.empty(2)
        Range[0] = GaussCenter - GauWidth
        Range[1] = GaussCenter + GauWidth
        # Point in x-axis where the bandwidth is located (min:max)
        ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
        ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
        PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
        PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
        Range = numpy.array([ PointRangeMin, PointRangeMax ])
        FrecRange = xSamples_zoom[ Range[0] : Range[1] ]

        '''************************** Getting Phase Slope ***************************'''
        # Linear fit of the (anti-aliased) cross-spectral phase over the peak band.
        for i in range(nPair):
            if len(FrecRange) > 5:
                PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
                mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
                if len(FrecRange) == len(PhaseRange):
                    try:
                        slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
                        PhaseSlope[i] = slope
                        PhaseInter[i] = intercept
                    except:
                        return self.StopWindEstimation(error_code = 6)
                else:
                    return self.StopWindEstimation(error_code = 7)
            else:
                return self.StopWindEstimation(error_code = 8)

        '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''

        '''Getting constant C'''
        cC=(Fij*numpy.pi)**2

        '''****** Getting constants F and G ******'''
        MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
        # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
        # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
        MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
        MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
        # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
        MijResults = numpy.array([MijResult1, MijResult2])
        (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)

        '''****** Getting constants A, B and H ******'''
        W01 = numpy.nanmax( FitGauss01 )
        W02 = numpy.nanmax( FitGauss02 )
        W12 = numpy.nanmax( FitGauss12 )

        WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
        WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
        WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
        WijResults = numpy.array([WijResult01, WijResult02, WijResult12])

        WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
        (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)

        VxVy = numpy.array([[cA,cH],[cH,cB]])
        VxVyResults = numpy.array([-cF,-cG])
        (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
        # Vertical velocity from the Doppler shift of the mean spectrum
        Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
        error_code = 0

        return Vzon, Vmer, Vver, error_code
1225 1225
class SpectralMoments(Operation):

    '''
    Function SpectralMoments()

    Calculates moments (power, mean, standard deviation) and SNR of the signal

    Type of dataIn: Spectra

    Configuration Parameters:

        dirCosx : Cosine director in X axis
        dirCosy : Cosine director in Y axis

        elevation :
        azimuth  :

    Input:
        channelList : simple channel list to select e.g. [2,3,7]
        self.dataOut.data_pre : Spectral data
        self.dataOut.abscissaList : List of frequencies
        self.dataOut.noise : Noise level per channel

    Affected:
        self.dataOut.moments : Parameters per channel
        self.dataOut.data_snr : SNR per channel

    '''

    def run(self, dataOut):
        """Compute SNR, power, Doppler shift and spectral width per channel."""
        data = dataOut.data_pre[0]
        absc = dataOut.abscissaList[:-1]
        noise = dataOut.noise
        nChannel = data.shape[0]
        data_param = numpy.zeros((nChannel, 4, data.shape[2]))

        for ind in range(nChannel):
            data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind] )

        # Unpack rows: 0 = SNR, 1 = power, 2 = Doppler shift, 3 = spectral width
        dataOut.moments = data_param[:,1:,:]
        dataOut.data_snr = data_param[:,0]
        dataOut.data_pow = data_param[:,1]
        dataOut.data_dop = data_param[:,2]
        dataOut.data_width = data_param[:,3]
        return dataOut

    def __calculateMoments(self, oldspec, oldfreq, n0,
                           nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
        """Return a (4, nHeights) array [SNR, power, Doppler, width] for one channel.

        oldspec : (nFFT, nHeights) spectrum for one channel
        oldfreq : abscissa (frequency or velocity) per FFT bin
        n0      : noise level for this channel (floored to 1e-20)
        smooth  : optional 1-D uniform-filter size (< 3 disables smoothing)
        """
        if (nicoh is None): nicoh = 1
        if (graph is None): graph = 0
        if (smooth is None): smooth = 0
        # Fixed: the original tested `self.smooth`, an attribute that does not
        # exist, which raised AttributeError whenever a smooth value was passed.
        elif (smooth < 3): smooth = 0

        if (type1 is None): type1 = 0
        if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
        if (snrth is None): snrth = -3
        if (dc is None): dc = 0
        if (aliasing is None): aliasing = 0
        if (oldfd is None): oldfd = 0
        if (wwauto is None): wwauto = 0

        # Guard against division by zero in the SNR computation below
        if (n0 < 1.e-20): n0 = 1.e-20

        freq = oldfreq
        vec_power = numpy.zeros(oldspec.shape[1])
        vec_fd = numpy.zeros(oldspec.shape[1])
        vec_w = numpy.zeros(oldspec.shape[1])
        vec_snr = numpy.zeros(oldspec.shape[1])

        # oldspec = numpy.ma.masked_invalid(oldspec)
        for ind in range(oldspec.shape[1]):

            spec = oldspec[:,ind]
            aux = spec*fwindow
            max_spec = aux.max()
            m = aux.tolist().index(max_spec)  # index of the windowed peak

            # Smooth
            if (smooth == 0):
                spec2 = spec
            else:
                spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)

            # Moments Estimation: find where the spectrum drops below the noise
            # level on both sides of the peak
            bb = spec2[numpy.arange(m,spec2.size)]
            bb = (bb<n0).nonzero()
            bb = bb[0]

            ss = spec2[numpy.arange(0,m + 1)]
            ss = (ss<n0).nonzero()
            ss = ss[0]

            if (bb.size == 0):
                bb0 = spec.size - 1 - m
            else:
                bb0 = bb[0] - 1
                if (bb0 < 0):
                    bb0 = 0

            if (ss.size == 0):
                ss1 = 1
            else:
                ss1 = max(ss) + 1

            if (ss1 > m):
                ss1 = m

            #valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
            valid = numpy.arange(1,oldspec.shape[0]) # use the full profile (same as pulse-pair)
            signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipion added with correct definition
            total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipion added with correct definition
            power = ((spec2[valid] - n0) * fwindow[valid]).sum()
            fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
            w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
            snr = (spec2.mean()-n0)/n0
            if (snr < 1.e-20) :
                snr = 1.e-20

            # vec_power[ind] = power # D. Scipion replaced with the line below
            vec_power[ind] = total_power
            vec_fd[ind] = fd
            vec_w[ind] = w
            vec_snr[ind] = snr

        return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))

    #------------------ Get SA Parameters --------------------------

    def GetSAParameters(self):
        # NOTE(review): this method appears to be dead code — `num_chan` and
        # `cspc_par` are undefined and `__calculateMoments` is called without
        # `self`; it would raise NameError if executed. Kept for reference.
        #SA in frequency
        pairslist = self.dataOut.groupList
        num_pairs = len(pairslist)

        vel = self.dataOut.abscissaList
        spectra = self.dataOut.data_pre
        cspectra = self.dataIn.data_cspc
        delta_v = vel[1] - vel[0]

        #Calculating the power spectrum
        spc_pow = numpy.sum(spectra, 3)*delta_v
        #Normalizing Spectra
        norm_spectra = spectra/spc_pow
        #Calculating the norm_spectra at peak
        max_spectra = numpy.max(norm_spectra, 3)

        #Normalizing Cross Spectra
        norm_cspectra = numpy.zeros(cspectra.shape)

        for i in range(num_chan):
            norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])

        max_cspectra = numpy.max(norm_cspectra,2)
        max_cspectra_index = numpy.argmax(norm_cspectra, 2)

        for i in range(num_pairs):
            cspc_par[i,:,:] = __calculateMoments(norm_cspectra)
1384 1384 #------------------- Get Lags ----------------------------------
1385 1385
class SALags(Operation):
    '''
    Function SALags()

    Spaced-antenna lag analysis: normalizes the ACF/CCF data and derives
    lag parameters (tau_ccf, tau_acf and the lag-1 phase) per height.

    Input:
        self.dataOut.data_pre
        self.dataOut.abscissaList
        self.dataOut.noise
        self.dataOut.normFactor
        self.dataOut.data_snr
        self.dataOut.groupList
        self.dataOut.nChannels

    Affected:
        self.dataOut.data_param

    '''
    def run(self, dataOut):
        data_acf = dataOut.data_pre[0]
        data_ccf = dataOut.data_pre[1]
        normFactor_acf = dataOut.normFactor[0]
        normFactor_ccf = dataOut.normFactor[1]
        pairs_acf = dataOut.groupList[0]
        pairs_ccf = dataOut.groupList[1]

        nHeights = dataOut.nHeights
        absc = dataOut.abscissaList
        noise = dataOut.noise
        SNR = dataOut.data_snr
        nChannels = dataOut.nChannels
        # pairsList = dataOut.groupList
        # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)

        # Normalize auto- and cross-correlation functions in place
        for l in range(len(pairs_acf)):
            data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]

        for l in range(len(pairs_ccf)):
            data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]

        dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
        dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
        dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
        # Return dataOut for consistency with the other Operations in this module
        return dataOut

    def __calculateTaus(self, data_acf, data_ccf, lagRange):
        """Return vstacked (tau_ccf, tau_acf); lags at the range edge become NaN."""
        # Fixed: integer division — lag0 is used as an array index and
        # shape[1]/2 is a float under Python 3 (TypeError)
        lag0 = data_acf.shape[1]//2
        # Mean autocorrelation over channels (NaN-aware). Fixed: scipy's
        # stats.nanmean was removed from scipy; numpy.nanmean is equivalent.
        mean_acf = numpy.nanmean(data_acf, axis = 0)

        # Index of TauCross (CCF maximum per height)
        ind_ccf = data_ccf.argmax(axis = 1)
        # Index of TauAuto: where the mean ACF matches the lag-0 CCF value
        ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
        ccf_lag0 = data_ccf[:,lag0,:]

        for i in range(ccf_lag0.shape[0]):
            ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)

        # Convert indices to TauCross and TauAuto lag values
        tau_ccf = lagRange[ind_ccf]
        tau_acf = lagRange[ind_acf]

        # Censor heights where the CCF peak sits at the first lag (no real peak)
        Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])

        tau_ccf[Nan1,Nan2] = numpy.nan
        tau_acf[Nan1,Nan2] = numpy.nan
        tau = numpy.vstack((tau_ccf,tau_acf))

        return tau

    def __calculateLag1Phase(self, data, lagTRange):
        """Return the phase of the channel-averaged ACF at lag 1."""
        # NaN-aware channel average (numpy.nanmean replaces removed stats.nanmean)
        data1 = numpy.nanmean(data, axis = 0)
        lag1 = numpy.where(lagTRange == 0)[0][0] + 1

        phase = numpy.angle(data1[lag1,:])

        return phase
1483 1483
1484 1484 class SpectralFitting(Operation):
1485 1485 '''
1486 1486 Function GetMoments()
1487 1487
1488 1488 Input:
1489 1489 Output:
1490 1490 Variables modified:
1491 1491 '''
1492 1492
1493 1493 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None):
1494 1494
1495 1495
1496 1496 if path != None:
1497 1497 sys.path.append(path)
1498 1498 self.dataOut.library = importlib.import_module(file)
1499 1499
1500 1500 #To be inserted as a parameter
1501 1501 groupArray = numpy.array(groupList)
1502 1502 # groupArray = numpy.array([[0,1],[2,3]])
1503 1503 self.dataOut.groupList = groupArray
1504 1504
1505 1505 nGroups = groupArray.shape[0]
1506 1506 nChannels = self.dataIn.nChannels
1507 1507 nHeights=self.dataIn.heightList.size
1508 1508
1509 1509 #Parameters Array
1510 1510 self.dataOut.data_param = None
1511 1511
1512 1512 #Set constants
1513 1513 constants = self.dataOut.library.setConstants(self.dataIn)
1514 1514 self.dataOut.constants = constants
1515 1515 M = self.dataIn.normFactor
1516 1516 N = self.dataIn.nFFTPoints
1517 1517 ippSeconds = self.dataIn.ippSeconds
1518 1518 K = self.dataIn.nIncohInt
1519 1519 pairsArray = numpy.array(self.dataIn.pairsList)
1520 1520
1521 1521 #List of possible combinations
1522 1522 listComb = itertools.combinations(numpy.arange(groupArray.shape[1]),2)
1523 1523 indCross = numpy.zeros(len(list(listComb)), dtype = 'int')
1524 1524
1525 1525 if getSNR:
1526 1526 listChannels = groupArray.reshape((groupArray.size))
1527 1527 listChannels.sort()
1528 1528 noise = self.dataIn.getNoise()
1529 1529 self.dataOut.data_snr = self.__getSNR(self.dataIn.data_spc[listChannels,:,:], noise[listChannels])
1530 1530
1531 1531 for i in range(nGroups):
1532 1532 coord = groupArray[i,:]
1533 1533
1534 1534 #Input data array
1535 1535 data = self.dataIn.data_spc[coord,:,:]/(M*N)
1536 1536 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
1537 1537
1538 1538 #Cross Spectra data array for Covariance Matrixes
1539 1539 ind = 0
1540 1540 for pairs in listComb:
1541 1541 pairsSel = numpy.array([coord[x],coord[y]])
1542 1542 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
1543 1543 ind += 1
1544 1544 dataCross = self.dataIn.data_cspc[indCross,:,:]/(M*N)
1545 1545 dataCross = dataCross**2/K
1546 1546
1547 1547 for h in range(nHeights):
1548 1548
1549 1549 #Input
1550 1550 d = data[:,h]
1551 1551
1552 1552 #Covariance Matrix
1553 1553 D = numpy.diag(d**2/K)
1554 1554 ind = 0
1555 1555 for pairs in listComb:
1556 1556 #Coordinates in Covariance Matrix
1557 1557 x = pairs[0]
1558 1558 y = pairs[1]
1559 1559 #Channel Index
1560 1560 S12 = dataCross[ind,:,h]
1561 1561 D12 = numpy.diag(S12)
1562 1562 #Completing Covariance Matrix with Cross Spectras
1563 1563 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
1564 1564 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
1565 1565 ind += 1
1566 1566 Dinv=numpy.linalg.inv(D)
1567 1567 L=numpy.linalg.cholesky(Dinv)
1568 1568 LT=L.T
1569 1569
1570 1570 dp = numpy.dot(LT,d)
1571 1571
1572 1572 #Initial values
1573 1573 data_spc = self.dataIn.data_spc[coord,:,h]
1574 1574
1575 1575 if (h>0)and(error1[3]<5):
1576 1576 p0 = self.dataOut.data_param[i,:,h-1]
1577 1577 else:
1578 1578 p0 = numpy.array(self.dataOut.library.initialValuesFunction(data_spc, constants, i))
1579 1579
1580 1580 try:
1581 1581 #Least Squares
1582 1582 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
1583 1583 # minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
1584 1584 #Chi square error
1585 1585 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
1586 1586 #Error with Jacobian
1587 1587 error1 = self.dataOut.library.errorFunction(minp,constants,LT)
1588 1588 except:
1589 1589 minp = p0*numpy.nan
1590 1590 error0 = numpy.nan
1591 1591 error1 = p0*numpy.nan
1592 1592
1593 1593 #Save
1594 1594 if self.dataOut.data_param is None:
1595 1595 self.dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
1596 1596 self.dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
1597 1597
1598 1598 self.dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
1599 1599 self.dataOut.data_param[i,:,h] = minp
1600 1600 return
1601 1601
1602 1602 def __residFunction(self, p, dp, LT, constants):
1603 1603
1604 1604 fm = self.dataOut.library.modelFunction(p, constants)
1605 1605 fmp=numpy.dot(LT,fm)
1606 1606
1607 1607 return dp-fmp
1608 1608
1609 1609 def __getSNR(self, z, noise):
1610 1610
1611 1611 avg = numpy.average(z, axis=1)
1612 1612 SNR = (avg.T-noise)/noise
1613 1613 SNR = SNR.T
1614 1614 return SNR
1615 1615
    def __chisq(p,chindex,hindex):
        # NOTE(review): this method is missing 'self' in its signature and
        # calls 'setupLTdfm', which is not defined anywhere visible in this
        # file -- it cannot run as written and appears to be dead/legacy code.
        # Kept byte-identical pending confirmation that it is unused.
        #similar to Resid but calculates CHI**2
        [LT,d,fm]=setupLTdfm(p,chindex,hindex)
        dp=numpy.dot(LT,d)
        fmp=numpy.dot(LT,fm)
        chisq=numpy.dot((dp-fmp).T,(dp-fmp))
        return chisq
1623 1623
class WindProfiler(Operation):
    """
    Wind estimation operation.

    ``run`` dispatches one of four estimation techniques:

    * ``'DBS'``      - Doppler Beam Swinging; immediate estimate per block.
    * ``'SA'``       - Spaced Antenna; immediate estimate per block.
    * ``'Meteors'``  - winds from detected meteors, buffered over ``nHours``
                       hours before an estimate is produced.
    * ``'Meteors1'`` - winds from non-specular meteor trails (SA or DBS
                       sub-mode), buffered over ``nMins`` minutes.

    Results are written into ``dataOut.data_output`` (plus ``heightList``,
    ``data_snr``, ``utctimeInit`` and ``outputInterval`` where applicable).
    """

    __isConfig = False          # True once the buffering time origin is set

    __initime = None            # start of current output interval (unix seconds)
    __lastdatatime = None
    __integrationtime = None

    __buffer = None             # rows accumulated between outputs

    __dataReady = False         # set by __checkTime when a full interval is buffered

    __firstdata = None          # copy of the first dataOut of the interval

    n = None

    def __init__(self):
        Operation.__init__(self)

    def __calculateCosDir(self, elev, azim):
        """Convert beam (elevation, azimuth) in degrees into direction
        cosines (cosDirX, cosDirY)."""
        zen = (90 - elev)*numpy.pi/180
        azim = azim*numpy.pi/180
        cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
        cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)

        # sqrt loses the sign; restore it from the azimuth quadrant
        signX = numpy.sign(numpy.cos(azim))
        signY = numpy.sign(numpy.sin(azim))

        cosDirX = numpy.copysign(cosDirX, signX)
        cosDirY = numpy.copysign(cosDirY, signY)
        return cosDirX, cosDirY

    def __calculateAngles(self, theta_x, theta_y, azimuth):
        """Return (azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw)
        from per-beam direction cosines and antenna azimuth (degrees)."""
        dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
        zenith_arr = numpy.arccos(dir_cosw)
        azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180

        dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
        dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)

        return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw

    def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
        """Build the pseudo-inverse (A^T A)^-1 A^T of the direction-cosine
        matrix used to project radial velocities onto (u, v[, w])."""
        if horOnly:
            A = numpy.c_[dir_cosu,dir_cosv]
        else:
            A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
        # NOTE(review): numpy.matrix is deprecated; kept for identical behavior
        A = numpy.asmatrix(A)
        A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()

        return A1

    def __correctValues(self, heiRang, phi, velRadial, SNR):
        """Project each beam's heights to the vertical and cubic-interpolate
        radial velocity and SNR onto a common height grid."""
        listPhi = phi.tolist()
        maxid = listPhi.index(max(listPhi))
        minid = listPhi.index(min(listPhi))

        rango = list(range(len(phi)))

        heiRang1 = heiRang*math.cos(phi[maxid])
        heiRangAux = heiRang*math.cos(phi[minid])
        # drop grid points below the lowest height covered by every beam
        indOut = (heiRang1 < heiRangAux[0]).nonzero()
        heiRang1 = numpy.delete(heiRang1,indOut)

        velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
        SNR1 = numpy.zeros([len(phi),len(heiRang1)])

        for i in rango:
            x = heiRang*math.cos(phi[i])
            y1 = velRadial[i,:]
            f1 = interpolate.interp1d(x,y1,kind = 'cubic')

            x1 = heiRang1
            y11 = f1(x1)

            y2 = SNR[i,:]
            f2 = interpolate.interp1d(x,y2,kind = 'cubic')
            y21 = f2(x1)

            velRadial1[i,:] = y11
            SNR1[i,:] = y21

        return heiRang1, velRadial1, SNR1

    def __calculateVelUVW(self, A, velRadial):
        """Project radial velocities through the pseudo-inverse matrix A."""
        velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
        velUVW[:,:] = numpy.dot(A,velRadial)

        return velUVW

    def techniqueDBS(self, kwargs):
        """
        Function that implements Doppler Beam Swinging (DBS) technique.

        Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
        Direction correction (if necessary), Ranges and SNR

        Output: Winds estimation (Zonal, Meridional and Vertical)

        Parameters affected: Winds, height range, SNR
        """
        velRadial0 = kwargs['velRadial']
        heiRang = kwargs['heightList']
        SNR0 = kwargs['SNR']

        if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
            theta_x = numpy.array(kwargs['dirCosx'])
            theta_y = numpy.array(kwargs['dirCosy'])
        else:
            elev = numpy.array(kwargs['elevation'])
            azim = numpy.array(kwargs['azimuth'])
            theta_x, theta_y = self.__calculateCosDir(elev, azim)
        azimuth = kwargs['correctAzimuth']
        if 'horizontalOnly' in kwargs:
            horizontalOnly = kwargs['horizontalOnly']
        else: horizontalOnly = False
        if 'correctFactor' in kwargs:
            correctFactor = kwargs['correctFactor']
        else: correctFactor = 1
        if 'channelList' in kwargs:
            channelList = kwargs['channelList']
            if len(channelList) == 2:
                horizontalOnly = True
            arrayChannel = numpy.array(channelList)
            # BUG FIX: the original indexed an undefined name 'param' here
            # ('param = param[arrayChannel,:,:]'), raising NameError whenever
            # 'channelList' was supplied.  The channel subset must be applied
            # to the (nChannels, nHeights) radial-velocity array instead.
            velRadial0 = velRadial0[arrayChannel,:]
            SNR0 = SNR0[arrayChannel,:]
            theta_x = theta_x[arrayChannel]
            theta_y = theta_y[arrayChannel]

        azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
        heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
        A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)

        #Calculo de Componentes de la velocidad con DBS
        winds = self.__calculateVelUVW(A,velRadial1)

        return winds, heiRang1, SNR1

    def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
        """Return (distx, disty, dist, ang) between each antenna pair,
        optionally de-rotating positions by 'azimuth' (degrees) first."""
        nPairs = len(pairs_ccf)
        posx = numpy.asarray(posx)
        posy = numpy.asarray(posy)

        #Rotacion Inversa para alinear con el azimuth
        # 'is not None' instead of '!= None': avoids elementwise comparison
        if azimuth is not None:
            azimuth = azimuth*math.pi/180
            posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
            posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
        else:
            posx1 = posx
            posy1 = posy

        #Calculo de Distancias
        distx = numpy.zeros(nPairs)
        disty = numpy.zeros(nPairs)
        dist = numpy.zeros(nPairs)
        ang = numpy.zeros(nPairs)

        for i in range(nPairs):
            distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
            disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
            dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
            ang[i] = numpy.arctan2(disty[i],distx[i])

        return distx, disty, dist, ang

    def __calculateVelVer(self, phase, lagTRange, _lambda):
        """Vertical velocity from the phase slope over one lag step."""
        Ts = lagTRange[1] - lagTRange[0]
        velW = -_lambda*phase/(4*math.pi*Ts)

        return velW

    def __calculateVelHorDir(self, dist, tau1, tau2, ang):
        """Horizontal velocity components per pair from lag ratios (direct
        SA method); infinite values are masked as NaN."""
        nPairs = tau1.shape[0]
        nHeights = tau1.shape[1]
        vel = numpy.zeros((nPairs,3,nHeights))
        dist1 = numpy.reshape(dist, (dist.size,1))

        angCos = numpy.cos(ang)
        angSin = numpy.sin(ang)

        vel0 = dist1*tau1/(2*tau2**2)
        vel[:,0,:] = (vel0*angCos).sum(axis = 1)
        vel[:,1,:] = (vel0*angSin).sum(axis = 1)

        ind = numpy.where(numpy.isinf(vel))
        vel[ind] = numpy.nan

        return vel

    def techniqueSA(self, kwargs):

        """
        Function that implements Spaced Antenna (SA) technique.

        Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
        Direction correction (if necessary), Ranges and SNR

        Output: Winds estimation (Zonal, Meridional and Vertical)

        Parameters affected: Winds
        """
        position_x = kwargs['positionX']
        position_y = kwargs['positionY']
        azimuth = kwargs['azimuth']

        if 'correctFactor' in kwargs:
            correctFactor = kwargs['correctFactor']
        else:
            correctFactor = 1

        groupList = kwargs['groupList']
        pairs_ccf = groupList[1]
        tau = kwargs['tau']
        _lambda = kwargs['_lambda']

        # BUG FIX: tau.shape[0]/2 is a float in Python 3 and cannot be used
        # as a slice index; use integer (floor) division.
        indtau = tau.shape[0]//2
        tau1 = tau[:indtau,:]
        tau2 = tau[indtau:-1,:]
        phase1 = tau[-1,:]

        #---------------------------------------------------------------------
        #Metodo Directo
        distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
        winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
        winds = stats.nanmean(winds, axis=0)
        #---------------------------------------------------------------------
        # FIXME(review): 'lagTRange' is never defined in this scope (it is not
        # read from kwargs and run() does not pass it), so the line below
        # raises NameError.  It likely should come from dataOut.abscissaList;
        # confirm against the callers before changing.
        winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
        winds = correctFactor*winds
        return winds

    def __checkTime(self, currentTime, paramInterval, outputInterval):
        """Mark the buffer ready when the accumulated span reaches
        'outputInterval' (or time went backwards)."""
        dataTime = currentTime + paramInterval
        deltaTime = dataTime - self.__initime

        if deltaTime >= outputInterval or deltaTime < 0:
            self.__dataReady = True
        return

    def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
        '''
        Function that implements winds estimation technique with detected meteors.

        Input: Detected meteors, Minimum meteor quantity to wind estimation

        Output: Winds estimation (Zonal and Meridional)

        Parameters affected: Winds
        '''
        #Settings
        nInt = (heightMax - heightMin)/2
        nInt = int(nInt)
        winds = numpy.zeros((2,nInt))*numpy.nan

        #Filter errors: keep only meteors flagged with error code 0
        error = numpy.where(arrayMeteor[:,-1] == 0)[0]
        finalMeteor = arrayMeteor[error,:]

        #Meteor Histogram
        finalHeights = finalMeteor[:,2]
        hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
        nMeteorsPerI = hist[0]
        heightPerI = hist[1]

        #Sort of meteors
        indSort = finalHeights.argsort()
        finalMeteor2 = finalMeteor[indSort,:]

        # Calculating winds: least-squares fit per height bin when enough
        # meteors are available
        ind1 = 0
        ind2 = 0

        for i in range(nInt):
            nMet = nMeteorsPerI[i]
            ind1 = ind2
            ind2 = ind1 + nMet

            meteorAux = finalMeteor2[ind1:ind2,:]

            if meteorAux.shape[0] >= meteorThresh:
                vel = meteorAux[:, 6]
                zen = meteorAux[:, 4]*numpy.pi/180
                azim = meteorAux[:, 3]*numpy.pi/180

                n = numpy.cos(zen)
                l = numpy.sin(zen)*numpy.sin(azim)
                m = numpy.sin(zen)*numpy.cos(azim)

                A = numpy.vstack((l, m)).transpose()
                A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
                windsAux = numpy.dot(A1, vel)

                winds[0,i] = windsAux[0]
                winds[1,i] = windsAux[1]

        return winds, heightPerI[:-1]

    def techniqueNSM_SA(self, **kwargs):
        """Non-specular meteor winds, Spaced-Antenna mode: convert phase
        slopes to radial velocities and solve per height for (u, v)."""
        metArray = kwargs['metArray']
        heightList = kwargs['heightList']
        timeList = kwargs['timeList']

        rx_location = kwargs['rx_location']
        groupList = kwargs['groupList']
        azimuth = kwargs['azimuth']
        dfactor = kwargs['dfactor']
        k = kwargs['k']

        azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
        d = dist*dfactor
        #Phase calculation
        metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)

        metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities

        velEst = numpy.zeros((heightList.size,2))*numpy.nan
        azimuth1 = azimuth1*numpy.pi/180

        for i in range(heightList.size):
            h = heightList[i]
            # keep meteors at this height with plausible velocities (<100 m/s)
            indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
            metHeight = metArray1[indH,:]
            if metHeight.shape[0] >= 2:
                velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
                iazim = metHeight[:,1].astype(int)
                azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
                A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
                A = numpy.asmatrix(A)
                A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
                velHor = numpy.dot(A1,velAux)

                velEst[i,:] = numpy.squeeze(velHor)
        return velEst

    def __getPhaseSlope(self, metArray, heightList, timeList):
        """Reassemble per-time meteor phase matrices, detect trails, and
        linear-fit the unwrapped phase of each trail.

        Returns an array with rows [time, pair, height, slope, r^2].
        """
        meteorList = []
        #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
        #Putting back together the meteor matrix
        utctime = metArray[:,0]
        uniqueTime = numpy.unique(utctime)

        phaseDerThresh = 0.5
        ippSeconds = timeList[1] - timeList[0]
        sec = numpy.where(timeList>1)[0][0]
        nPairs = metArray.shape[1] - 6
        nHeights = len(heightList)

        for t in uniqueTime:
            metArray1 = metArray[utctime==t,:]
            tmet = metArray1[:,1].astype(int)
            hmet = metArray1[:,2].astype(int)

            metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
            metPhase[:,:] = numpy.nan
            metPhase[:,hmet,tmet] = metArray1[:,6:].T

            #Delete short trails
            metBool = ~numpy.isnan(metPhase[0,:,:])
            heightVect = numpy.sum(metBool, axis = 1)
            metBool[heightVect<sec,:] = False
            metPhase[:,heightVect<sec,:] = numpy.nan

            #Derivative: mask samples whose phase jump exceeds the threshold
            metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
            phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
            metPhase[phDerAux] = numpy.nan

            #--------------------------METEOR DETECTION -----------------------------------------
            indMet = numpy.where(numpy.any(metBool,axis=1))[0]

            for p in numpy.arange(nPairs):
                phase = metPhase[p,:,:]
                phDer = metDer[p,:,:]

                for h in indMet:
                    height = heightList[h]
                    phase1 = phase[h,:] #82
                    phDer1 = phDer[h,:]

                    phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap

                    indValid = numpy.where(~numpy.isnan(phase1))[0]
                    initMet = indValid[0]
                    endMet = 0

                    for i in range(len(indValid)-1):

                        #Time difference
                        inow = indValid[i]
                        inext = indValid[i+1]
                        idiff = inext - inow
                        #Phase difference
                        phDiff = numpy.abs(phase1[inext] - phase1[inow])

                        if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
                            sizeTrail = inow - initMet + 1
                            if sizeTrail>3*sec: #Too short meteors
                                x = numpy.arange(initMet,inow+1)*ippSeconds
                                y = phase1[initMet:inow+1]
                                ynnan = ~numpy.isnan(y)
                                x = x[ynnan]
                                y = y[ynnan]
                                slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
                                ylin = x*slope + intercept
                                rsq = r_value**2
                                if rsq > 0.5:
                                    vel = slope#*height*1000/(k*d)
                                    # BUG FIX: store the scalar timestamp 't'
                                    # of this block, not the whole 'utctime'
                                    # column (which built a ragged object
                                    # array and breaks on modern numpy).
                                    estAux = numpy.array([t,p,height, vel, rsq])
                                    meteorList.append(estAux)
                            initMet = inext
        metArray2 = numpy.array(meteorList)

        return metArray2

    def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
        """Baseline azimuth (degrees, relative to azimuth0) and length for
        each receiver pair."""
        azimuth1 = numpy.zeros(len(pairslist))
        dist = numpy.zeros(len(pairslist))

        # NOTE(review): iterates over len(rx_location) but indexes pairslist;
        # assumes both have the same length -- TODO confirm with callers.
        for i in range(len(rx_location)):
            ch0 = pairslist[i][0]
            ch1 = pairslist[i][1]

            diffX = rx_location[ch0][0] - rx_location[ch1][0]
            diffY = rx_location[ch0][1] - rx_location[ch1][1]
            azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
            dist[i] = numpy.sqrt(diffX**2 + diffY**2)

        azimuth1 -= azimuth0
        return azimuth1, dist

    def techniqueNSM_DBS(self, **kwargs):
        """Non-specular meteor winds, DBS mode: least-squares horizontal
        winds per height bin from per-channel meteor radial velocities."""
        metArray = kwargs['metArray']
        heightList = kwargs['heightList']
        timeList = kwargs['timeList']
        azimuth = kwargs['azimuth']
        theta_x = numpy.array(kwargs['theta_x'])
        theta_y = numpy.array(kwargs['theta_y'])

        utctime = metArray[:,0]
        cmet = metArray[:,1].astype(int)
        hmet = metArray[:,3].astype(int)
        SNRmet = metArray[:,4]
        vmet = metArray[:,5]
        spcmet = metArray[:,6]

        nChan = numpy.max(cmet) + 1
        nHeights = len(heightList)

        azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
        hmet = heightList[hmet]
        h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights

        velEst = numpy.zeros((heightList.size,2))*numpy.nan

        for i in range(nHeights - 1):
            hmin = heightList[i]
            hmax = heightList[i + 1]

            # quality gates: SNR > 8 dB, |v| < 50, spectral width < 10,
            # excluding channel 2
            thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
            indthisH = numpy.where(thisH)

            if numpy.size(indthisH) > 3:

                vel_aux = vmet[thisH]
                chan_aux = cmet[thisH]
                cosu_aux = dir_cosu[chan_aux]
                cosv_aux = dir_cosv[chan_aux]
                cosw_aux = dir_cosw[chan_aux]

                nch = numpy.size(numpy.unique(chan_aux))
                if nch > 1:
                    A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
                    velEst[i,:] = numpy.dot(A,vel_aux)

        return velEst

    def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
        """
        Dispatch wind estimation according to ``technique`` ('DBS', 'SA',
        'Meteors' or 'Meteors1').  The meteor techniques buffer data and set
        ``dataOut.flagNoData = True`` until a full output interval has been
        accumulated.
        """
        param = dataOut.data_param
        # 'is not None' instead of '!= None': the latter is an elementwise
        # comparison when abscissaList is a numpy array
        if dataOut.abscissaList is not None:
            absc = dataOut.abscissaList[:-1]
        heightList = dataOut.heightList
        SNR = dataOut.data_snr

        if technique == 'DBS':

            kwargs['velRadial'] = param[:,1,:] #Radial velocity
            kwargs['heightList'] = heightList
            kwargs['SNR'] = SNR

            dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
            dataOut.utctimeInit = dataOut.utctime
            dataOut.outputInterval = dataOut.paramInterval

        elif technique == 'SA':

            kwargs['groupList'] = dataOut.groupList
            kwargs['tau'] = dataOut.data_param
            kwargs['_lambda'] = dataOut.C/dataOut.frequency
            dataOut.data_output = self.techniqueSA(kwargs)
            dataOut.utctimeInit = dataOut.utctime
            dataOut.outputInterval = dataOut.timeInterval

        elif technique == 'Meteors':
            dataOut.flagNoData = True
            self.__dataReady = False

            if 'nHours' in kwargs:
                nHours = kwargs['nHours']
            else:
                nHours = 1

            if 'meteorsPerBin' in kwargs:
                meteorThresh = kwargs['meteorsPerBin']
            else:
                meteorThresh = 6

            if 'hmin' in kwargs:
                hmin = kwargs['hmin']
            else: hmin = 70
            if 'hmax' in kwargs:
                hmax = kwargs['hmax']
            else: hmax = 110

            dataOut.outputInterval = nHours*3600

            if self.__isConfig == False:
                #Get Initial LTC time: truncate to the start of the hour
                self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
                self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()

                self.__isConfig = True

            if self.__buffer is None:
                self.__buffer = dataOut.data_param
                self.__firstdata = copy.copy(dataOut)

            else:
                self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))

            self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready

            if self.__dataReady:
                dataOut.utctimeInit = self.__initime

                self.__initime += dataOut.outputInterval #to erase time offset

                dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
                dataOut.flagNoData = False
                self.__buffer = None

        elif technique == 'Meteors1':
            dataOut.flagNoData = True
            self.__dataReady = False

            if 'nMins' in kwargs:
                nMins = kwargs['nMins']
            else: nMins = 20
            if 'rx_location' in kwargs:
                rx_location = kwargs['rx_location']
            else: rx_location = [(0,1),(1,1),(1,0)]
            if 'azimuth' in kwargs:
                azimuth = kwargs['azimuth']
            else: azimuth = 51.06
            if 'dfactor' in kwargs:
                dfactor = kwargs['dfactor']
            # NOTE(review): 'dfactor' has no default; SA mode below raises
            # NameError if it is not supplied -- TODO confirm intended default.
            # BUG FIX: the original attached "else: mode = 'SA'" to the
            # 'theta_y' check, so 'mode' could be referenced before
            # assignment; default it explicitly instead.
            mode = kwargs.get('mode', 'SA')
            if 'theta_x' in kwargs:
                theta_x = kwargs['theta_x']
            if 'theta_y' in kwargs:
                theta_y = kwargs['theta_y']

            #Borrar luego esto
            if dataOut.groupList is None:
                dataOut.groupList = [(0,1),(0,2),(1,2)]
            groupList = dataOut.groupList
            C = 3e8
            freq = 50e6
            lamb = C/freq
            k = 2*numpy.pi/lamb

            timeList = dataOut.abscissaList
            heightList = dataOut.heightList

            if self.__isConfig == False:
                dataOut.outputInterval = nMins*60
                #Get Initial LTC time: truncate to the nearest nMins boundary
                initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
                minuteAux = initime.minute
                minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
                self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()

                self.__isConfig = True

            if self.__buffer is None:
                self.__buffer = dataOut.data_param
                self.__firstdata = copy.copy(dataOut)

            else:
                self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))

            self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready

            if self.__dataReady:
                dataOut.utctimeInit = self.__initime
                self.__initime += dataOut.outputInterval #to erase time offset

                metArray = self.__buffer
                if mode == 'SA':
                    dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
                elif mode == 'DBS':
                    dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
                dataOut.data_output = dataOut.data_output.T
                dataOut.flagNoData = False
                self.__buffer = None

        return
2341 2341
class EWDriftsEstimation(Operation):
    """
    Estimates zonal (u) and vertical (w) drifts from the radial velocities
    of an east/west beam pair.

    Writes the stacked (u, w) array into ``dataOut.data_output`` and updates
    ``heightList`` / ``data_snr`` with the common interpolation grid.
    """

    def __init__(self):
        Operation.__init__(self)

    def __correctValues(self, heiRang, phi, velRadial, SNR):
        """Project each beam's heights to the vertical and cubic-interpolate
        radial velocity and SNR onto a common height grid.

        NOTE: duplicated from WindProfiler.__correctValues; a shared helper
        would avoid the divergence risk.
        """
        phiList = phi.tolist()
        idxMax = phiList.index(max(phiList))
        idxMin = phiList.index(min(phiList))

        nBeams = len(phi)

        # common grid: heights of the most-oblique beam, clipped below the
        # lowest height reachable by the least-oblique beam
        heiRang1 = heiRang * math.cos(phi[idxMax])
        heiRangAux = heiRang * math.cos(phi[idxMin])
        tooLow = (heiRang1 < heiRangAux[0]).nonzero()
        heiRang1 = numpy.delete(heiRang1, tooLow)

        velRadial1 = numpy.zeros([nBeams, len(heiRang1)])
        SNR1 = numpy.zeros([nBeams, len(heiRang1)])

        for beam in range(nBeams):
            projHeights = heiRang * math.cos(phi[beam])

            velInterp = interpolate.interp1d(projHeights, velRadial[beam, :], kind='cubic')
            velRadial1[beam, :] = velInterp(heiRang1)

            snrInterp = interpolate.interp1d(projHeights, SNR[beam, :], kind='cubic')
            SNR1[beam, :] = snrInterp(heiRang1)

        return heiRang1, velRadial1, SNR1

    def run(self, dataOut, zenith, zenithCorrection):
        """Compute (u, w) drifts from the two-beam radial velocities in
        ``dataOut.data_param[:, 3, :]``.

        zenith           : per-beam zenith angles in degrees
        zenithCorrection : offset (degrees) subtracted before conversion
        """
        heights = dataOut.heightList
        radialVel = dataOut.data_param[:, 3, :]
        snr = dataOut.data_snr

        zenith = numpy.array(zenith)
        zenith -= zenithCorrection
        zenith *= numpy.pi/180

        heiRang1, velRadial1, SNR1 = self.__correctValues(heights, numpy.abs(zenith), radialVel, snr)

        alp = zenith[0]
        bet = zenith[1]

        w_w = velRadial1[0, :]
        w_e = velRadial1[1, :]

        # solve the 2x2 beam-geometry system for vertical (w) and zonal (u)
        w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
        u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))

        dataOut.heightList = heiRang1
        dataOut.data_output = numpy.vstack((u, w))
        dataOut.data_snr = SNR1

        dataOut.utctimeInit = dataOut.utctime
        dataOut.outputInterval = dataOut.timeInterval
        return
2409 2409
2410 2410 #--------------- Non Specular Meteor ----------------
2411 2411
class NonSpecularMeteorDetection(Operation):
    '''
    Detects non-specular meteor echoes from ACF/CCF data.

    Parameters:
        dataOut: data object providing data_pre = (ACF, CCF), groupList,
            nHeights, nCohInt, heightList, etc.
        mode: 'SA' (spaced antenna: coherence/phase masks over channel
            pairs) or 'DBS' (per-channel SNR/velocity/spectral-width masks).
        SNRthresh: SNR threshold in dB (used by the 'DBS' mask).
        phaseDerThresh, cohThresh: kept for interface compatibility; the
            masks below currently use hard-coded thresholds (0.2 and 0.75).
        allData: if True, bypass the meteor masks and keep every finite
            sample.

    Affected:
        dataOut.data_param (one row per detected sample), or
        dataOut.flagNoData = True when nothing was detected.
    '''

    def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
        data_acf = dataOut.data_pre[0]
        data_ccf = dataOut.data_pre[1]
        pairsList = dataOut.groupList[1]

        lamb = dataOut.C/dataOut.frequency
        tSamp = dataOut.ippSeconds*dataOut.nCohInt
        paramInterval = dataOut.paramInterval

        nChannels = data_acf.shape[0]
        nLags = data_acf.shape[1]
        nProfiles = data_acf.shape[2]
        nHeights = dataOut.nHeights
        nCohInt = dataOut.nCohInt
        sec = numpy.round(nProfiles/dataOut.paramInterval)
        heightList = dataOut.heightList
        ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
        utctime = dataOut.utctime

        dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)

        #------------------------ SNR --------------------------------------
        # Per-channel noise from Hildebrand-Sekhon on the zero-lag power
        power = data_acf[:,0,:,:].real
        noise = numpy.zeros(nChannels)
        SNR = numpy.zeros(power.shape)
        for i in range(nChannels):
            noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
            SNR[i] = (power[i]-noise[i])/noise[i]
        SNRm = numpy.nanmean(SNR, axis = 0)
        SNRdB = 10*numpy.log10(SNR)

        # Initialized empty so an unrecognized `mode` falls through to the
        # flagNoData branch below instead of raising NameError.
        data_param = numpy.zeros((0, 1))

        if mode == 'SA':
            dataOut.groupList = dataOut.groupList[1]
            nPairs = data_ccf.shape[0]
            #---------------------- Coherence and Phase --------------------------
            phase = numpy.zeros(data_ccf[:,0,:,:].shape)
            coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)

            for p in range(nPairs):
                ch0 = pairsList[p][0]
                ch1 = pairsList[p][1]
                # Normalized cross-correlation; median filters smooth the
                # phase and coherence along the time axis
                ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
                phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1))
                coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5)
            coh = numpy.nanmax(coh1, axis = 0)

            #---------------------- Radial Velocity ----------------------------
            phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
            velRad = phaseAux*lamb/(4*numpy.pi*tSamp)

            if allData:
                boolMetFin = ~numpy.isnan(SNRm)
            else:
                #Coherence mask (hard-coded threshold 0.75)
                boolMet1 = coh > 0.75
                struc = numpy.ones((30,1))
                boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)

                #Phase-derivative mask (hard-coded threshold 0.2 rad)
                derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
                boolMet2 = derPhase < 0.2
                boolMet2 = ndimage.median_filter(boolMet2,size=5)
                # pad back the time sample lost to the derivative
                boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
                #Final mask
                boolMetFin = boolMet1&boolMet2

            #Creating data_param
            coordMet = numpy.where(boolMetFin)

            tmet = coordMet[0]
            hmet = coordMet[1]

            data_param = numpy.zeros((tmet.size, 6 + nPairs))
            data_param[:,0] = utctime
            data_param[:,1] = tmet
            data_param[:,2] = hmet
            data_param[:,3] = SNRm[tmet,hmet]
            data_param[:,4] = velRad[tmet,hmet]
            data_param[:,5] = coh[tmet,hmet]
            data_param[:,6:] = phase[:,tmet,hmet].T

        elif mode == 'DBS':
            dataOut.groupList = numpy.arange(nChannels)

            #Radial velocities from the lag-1 ACF phase
            phase = numpy.angle(data_acf[:,1,:,:])
            velRad = phase*lamb/(4*numpy.pi*tSamp)

            #Spectral width from the lag-1/lag-2 ACF magnitude ratio
            acf1 = data_acf[:,1,:,:]
            acf2 = data_acf[:,2,:,:]

            spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))

            if allData:
                boolMetFin = ~numpy.isnan(SNRdB)
            else:
                #SNR mask
                boolMet1 = (SNRdB>SNRthresh)
                boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))

                #Radial velocity mask (hard-coded |v| < 20)
                boolMet2 = numpy.abs(velRad) < 20
                boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))

                #Spectral width mask (hard-coded width < 30)
                boolMet3 = spcWidth < 30
                boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
                boolMetFin = boolMet1&boolMet2&boolMet3

            #Creating data_param
            coordMet = numpy.where(boolMetFin)

            cmet = coordMet[0]
            tmet = coordMet[1]
            hmet = coordMet[2]

            data_param = numpy.zeros((tmet.size, 7))
            data_param[:,0] = utctime
            data_param[:,1] = cmet
            data_param[:,2] = tmet
            data_param[:,3] = hmet
            data_param[:,4] = SNR[cmet,tmet,hmet].T
            data_param[:,5] = velRad[cmet,tmet,hmet].T
            data_param[:,6] = spcWidth[cmet,tmet,hmet].T

        # No detections (or unknown mode) -> signal empty output
        if len(data_param) == 0:
            dataOut.flagNoData = True
        else:
            dataOut.data_param = data_param

    def __erase_small(self, binArray, threshX, threshY):
        """Remove labeled regions of a boolean mask that are smaller than
        50 samples or narrower than threshX / threshY along each axis."""
        labarray, numfeat = ndimage.measurements.label(binArray)
        binArray1 = numpy.copy(binArray)

        for i in range(1,numfeat + 1):
            auxBin = (labarray==i)
            auxSize = auxBin.sum()

            x,y = numpy.where(auxBin)
            widthX = x.max() - x.min()
            widthY = y.max() - y.min()

            if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
                binArray1[auxBin] = False

        return binArray1
2600 2600
2601 2601 #--------------- Specular Meteor ----------------
2602 2602
2603 2603 class SMDetection(Operation):
2604 2604 '''
2605 2605 Function DetectMeteors()
2606 2606 Project developed with paper:
2607 2607 HOLDSWORTH ET AL. 2004
2608 2608
2609 2609 Input:
2610 2610 self.dataOut.data_pre
2611 2611
2612 2612 centerReceiverIndex: From the channels, which is the center receiver
2613 2613
2614 2614 hei_ref: Height reference for the Beacon signal extraction
2615 2615 tauindex:
    predefinedPhaseShifts: Predefined phase offset for the voltage signals
2617 2617
    cohDetection: Whether to use Coherent detection or not
2619 2619 cohDet_timeStep: Coherent Detection calculation time step
2620 2620 cohDet_thresh: Coherent Detection phase threshold to correct phases
2621 2621
2622 2622 noise_timeStep: Noise calculation time step
2623 2623 noise_multiple: Noise multiple to define signal threshold
2624 2624
2625 2625 multDet_timeLimit: Multiple Detection Removal time limit in seconds
2626 2626 multDet_rangeLimit: Multiple Detection Removal range limit in km
2627 2627
    phaseThresh: Maximum phase difference between receivers to be considered a meteor
    SNRThresh: Minimum SNR threshold of the meteor signal to be considered a meteor
2630 2630
2631 2631 hmin: Minimum Height of the meteor to use it in the further wind estimations
2632 2632 hmax: Maximum Height of the meteor to use it in the further wind estimations
2633 2633 azimuth: Azimuth angle correction
2634 2634
2635 2635 Affected:
2636 2636 self.dataOut.data_param
2637 2637
2638 2638 Rejection Criteria (Errors):
2639 2639 0: No error; analysis OK
2640 2640 1: SNR < SNR threshold
2641 2641 2: angle of arrival (AOA) ambiguously determined
2642 2642 3: AOA estimate not feasible
2643 2643 4: Large difference in AOAs obtained from different antenna baselines
2644 2644 5: echo at start or end of time series
2645 2645 6: echo less than 5 examples long; too short for analysis
2646 2646 7: echo rise exceeds 0.3s
2647 2647 8: echo decay time less than twice rise time
2648 2648 9: large power level before echo
2649 2649 10: large power level after echo
2650 2650 11: poor fit to amplitude for estimation of decay time
2651 2651 12: poor fit to CCF phase variation for estimation of radial drift velocity
2652 2652 13: height unresolvable echo: not valid height within 70 to 110 km
    14: height ambiguous echo: more than one possible height within 70 to 110 km
2654 2654 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
    16: oscillatory echo, indicating event most likely not an underdense echo
2656 2656
2657 2657 17: phase difference in meteor Reestimation
2658 2658
2659 2659 Data Storage:
2660 2660 Meteors for Wind Estimation (8):
2661 2661 Utc Time | Range Height
2662 2662 Azimuth Zenith errorCosDir
2663 2663 VelRad errorVelRad
2664 2664 Phase0 Phase1 Phase2 Phase3
2665 2665 TypeError
2666 2666
2667 2667 '''
2668 2668
2669 2669 def run(self, dataOut, hei_ref = None, tauindex = 0,
2670 2670 phaseOffsets = None,
2671 2671 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
2672 2672 noise_timeStep = 4, noise_multiple = 4,
2673 2673 multDet_timeLimit = 1, multDet_rangeLimit = 3,
2674 2674 phaseThresh = 20, SNRThresh = 5,
2675 2675 hmin = 50, hmax=150, azimuth = 0,
2676 2676 channelPositions = None) :
2677 2677
2678 2678
2679 2679 #Getting Pairslist
2680 2680 if channelPositions is None:
2681 2681 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
2682 2682 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
2683 2683 meteorOps = SMOperations()
2684 2684 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
2685 2685 heiRang = dataOut.heightList
2686 2686 #Get Beacon signal - No Beacon signal anymore
2687 2687 # newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex])
2688 2688 #
2689 2689 # if hei_ref != None:
2690 2690 # newheis = numpy.where(self.dataOut.heightList>hei_ref)
2691 2691 #
2692 2692
2693 2693
2694 2694 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
2695 2695 # see if the user put in pre defined phase shifts
2696 2696 voltsPShift = dataOut.data_pre.copy()
2697 2697
2698 2698 # if predefinedPhaseShifts != None:
2699 2699 # hardwarePhaseShifts = numpy.array(predefinedPhaseShifts)*numpy.pi/180
2700 2700 #
2701 2701 # # elif beaconPhaseShifts:
2702 2702 # # #get hardware phase shifts using beacon signal
2703 2703 # # hardwarePhaseShifts = self.__getHardwarePhaseDiff(self.dataOut.data_pre, pairslist, newheis, 10)
2704 2704 # # hardwarePhaseShifts = numpy.insert(hardwarePhaseShifts,centerReceiverIndex,0)
2705 2705 #
2706 2706 # else:
2707 2707 # hardwarePhaseShifts = numpy.zeros(5)
2708 2708 #
2709 2709 # voltsPShift = numpy.zeros((self.dataOut.data_pre.shape[0],self.dataOut.data_pre.shape[1],self.dataOut.data_pre.shape[2]), dtype = 'complex')
2710 2710 # for i in range(self.dataOut.data_pre.shape[0]):
2711 2711 # voltsPShift[i,:,:] = self.__shiftPhase(self.dataOut.data_pre[i,:,:], hardwarePhaseShifts[i])
2712 2712
2713 2713 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
2714 2714
2715 2715 #Remove DC
2716 2716 voltsDC = numpy.mean(voltsPShift,1)
2717 2717 voltsDC = numpy.mean(voltsDC,1)
2718 2718 for i in range(voltsDC.shape[0]):
2719 2719 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
2720 2720
2721 2721 #Don't considerate last heights, theyre used to calculate Hardware Phase Shift
2722 2722 # voltsPShift = voltsPShift[:,:,:newheis[0][0]]
2723 2723
2724 2724 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
2725 2725 #Coherent Detection
2726 2726 if cohDetection:
2727 2727 #use coherent detection to get the net power
2728 2728 cohDet_thresh = cohDet_thresh*numpy.pi/180
2729 2729 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
2730 2730
2731 2731 #Non-coherent detection!
2732 2732 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
2733 2733 #********** END OF COH/NON-COH POWER CALCULATION**********************
2734 2734
2735 2735 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
2736 2736 #Get noise
2737 2737 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
2738 2738 # noise = self.getNoise1(powerNet, noise_timeStep, self.dataOut.timeInterval)
2739 2739 #Get signal threshold
2740 2740 signalThresh = noise_multiple*noise
2741 2741 #Meteor echoes detection
2742 2742 listMeteors = self.__findMeteors(powerNet, signalThresh)
2743 2743 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CACULATION **********
2744 2744
2745 2745 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
2746 2746 #Parameters
2747 2747 heiRange = dataOut.heightList
2748 2748 rangeInterval = heiRange[1] - heiRange[0]
2749 2749 rangeLimit = multDet_rangeLimit/rangeInterval
2750 2750 timeLimit = multDet_timeLimit/dataOut.timeInterval
2751 2751 #Multiple detection removals
2752 2752 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
2753 2753 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
2754 2754
2755 2755 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
2756 2756 #Parameters
2757 2757 phaseThresh = phaseThresh*numpy.pi/180
2758 2758 thresh = [phaseThresh, noise_multiple, SNRThresh]
2759 2759 #Meteor reestimation (Errors N 1, 6, 12, 17)
2760 2760 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
2761 2761 # listMeteors2, listMeteorsPower, listMeteorsVolts = self.meteorReestimation3(listMeteors2, listMeteorsPower, listMeteorsVolts, voltsPShift, pairslist, thresh, noise)
2762 2762 #Estimation of decay times (Errors N 7, 8, 11)
2763 2763 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
2764 2764 #******************* END OF METEOR REESTIMATION *******************
2765 2765
2766 2766 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
2767 2767 #Calculating Radial Velocity (Error N 15)
2768 2768 radialStdThresh = 10
2769 2769 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
2770 2770
2771 2771 if len(listMeteors4) > 0:
2772 2772 #Setting New Array
2773 2773 date = dataOut.utctime
2774 2774 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
2775 2775
2776 2776 #Correcting phase offset
2777 2777 if phaseOffsets != None:
2778 2778 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
2779 2779 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
2780 2780
2781 2781 #Second Pairslist
2782 2782 pairsList = []
2783 2783 pairx = (0,1)
2784 2784 pairy = (2,3)
2785 2785 pairsList.append(pairx)
2786 2786 pairsList.append(pairy)
2787 2787
2788 2788 jph = numpy.array([0,0,0,0])
2789 2789 h = (hmin,hmax)
2790 2790 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
2791 2791
2792 2792 # #Calculate AOA (Error N 3, 4)
2793 2793 # #JONES ET AL. 1998
2794 2794 # error = arrayParameters[:,-1]
2795 2795 # AOAthresh = numpy.pi/8
2796 2796 # phases = -arrayParameters[:,9:13]
2797 2797 # arrayParameters[:,4:7], arrayParameters[:,-1] = meteorOps.getAOA(phases, pairsList, error, AOAthresh, azimuth)
2798 2798 #
2799 2799 # #Calculate Heights (Error N 13 and 14)
2800 2800 # error = arrayParameters[:,-1]
2801 2801 # Ranges = arrayParameters[:,2]
2802 2802 # zenith = arrayParameters[:,5]
2803 2803 # arrayParameters[:,3], arrayParameters[:,-1] = meteorOps.getHeights(Ranges, zenith, error, hmin, hmax)
2804 2804 # error = arrayParameters[:,-1]
2805 2805 #********************* END OF PARAMETERS CALCULATION **************************
2806 2806
2807 2807 #***************************+ PASS DATA TO NEXT STEP **********************
2808 2808 # arrayFinal = arrayParameters.reshape((1,arrayParameters.shape[0],arrayParameters.shape[1]))
2809 2809 dataOut.data_param = arrayParameters
2810 2810
2811 2811 if arrayParameters is None:
2812 2812 dataOut.flagNoData = True
2813 2813 else:
2814 2814 dataOut.flagNoData = True
2815 2815
2816 2816 return
2817 2817
2818 2818 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
2819 2819
2820 2820 minIndex = min(newheis[0])
2821 2821 maxIndex = max(newheis[0])
2822 2822
2823 2823 voltage = voltage0[:,:,minIndex:maxIndex+1]
2824 2824 nLength = voltage.shape[1]/n
2825 2825 nMin = 0
2826 2826 nMax = 0
2827 2827 phaseOffset = numpy.zeros((len(pairslist),n))
2828 2828
2829 2829 for i in range(n):
2830 2830 nMax += nLength
2831 2831 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
2832 2832 phaseCCF = numpy.mean(phaseCCF, axis = 2)
2833 2833 phaseOffset[:,i] = phaseCCF.transpose()
2834 2834 nMin = nMax
2835 2835 # phaseDiff, phaseArrival = self.estimatePhaseDifference(voltage, pairslist)
2836 2836
2837 2837 #Remove Outliers
2838 2838 factor = 2
2839 2839 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
2840 2840 dw = numpy.std(wt,axis = 1)
2841 2841 dw = dw.reshape((dw.size,1))
2842 2842 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
2843 2843 phaseOffset[ind] = numpy.nan
2844 2844 phaseOffset = stats.nanmean(phaseOffset, axis=1)
2845 2845
2846 2846 return phaseOffset
2847 2847
2848 2848 def __shiftPhase(self, data, phaseShift):
2849 2849 #this will shift the phase of a complex number
2850 2850 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
2851 2851 return dataShifted
2852 2852
    def __estimatePhaseDifference(self, array, pairslist):
        """Estimate per-pair phase differences and arrival phases.

        Computes the CCF phase of each pair at lags [-2..2], removes
        jumps larger than pi along the lag axis, then fits a line over the
        non-zero lags; the fit intercept estimates the arrival phase.

        Returns (phaseDiff, phaseArrival): intercept minus the zero-lag
        phase, and the intercept itself wrapped into (-pi, pi].
        """
        nChannel = array.shape[0]
        nHeights = array.shape[2]
        numPairs = len(pairslist)
#        phaseCCF = numpy.zeros((nChannel, 5, nHeights))
        phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))

        #Correct phases: unwrap jumps larger than pi along the lag axis
        derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
        indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)

        if indDer[0].shape[0] > 0:
            for i in range(indDer[0].shape[0]):
                signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
                phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi

        #Linear fit over lags [-2,-1,1,2] (zero lag excluded); the fit
        #intercept estimates the phase at lag 0
        phaseInt = numpy.zeros((numPairs,1))
        angAllCCF = phaseCCF[:,[0,1,3,4],0]
        for j in range(numPairs):
            fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
            phaseInt[j] = fit[1]
        #Phase Differences
        phaseDiff = phaseInt - phaseCCF[:,2,:]
        phaseArrival = phaseInt.reshape(phaseInt.size)

        #Dealias into (-pi, pi]
        phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))

        return phaseDiff, phaseArrival
2891 2891
    def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
        """Coherent detection (Holdsworth et al. 2004).

        Splits the profile axis into blocks of ~timeSegment seconds; for
        each block, estimates the per-pair phase differences and -- where
        all four pair phases are below `thresh` -- phase-aligns the side
        channels so their powers add coherently.

        Returns a phase-corrected copy of `volts`.
        """
        numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
        numBlocks = int(volts.shape[1]/numProfPerBlock)
        numHeights = volts.shape[2]
        nChannel = volts.shape[0]
        voltsCohDet = volts.copy()

        # second element of each pair is the side channel to be shifted
        pairsarray = numpy.array(pairslist)
        indSides = pairsarray[:,1]

        listBlocks = numpy.array_split(volts, numBlocks, 1)

        startInd = 0
        endInd = 0

        for i in range(numBlocks):
            startInd = endInd
            endInd = endInd + listBlocks[i].shape[1]

            arrayBlock = listBlocks[i]

            #Estimate the Phase Difference
            phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
            #Phase Difference RMS
            arrayPhaseRMS = numpy.abs(phaseDiff)
            # heights where all 4 pair phase differences are below thresh
            phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
            indPhase = numpy.where(phaseRMSaux==4)
            #Shifting the qualifying heights of the side channels
            if indPhase[0].shape[0] > 0:
                for j in range(indSides.size):
                    arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
                voltsCohDet[:,startInd:endInd,:] = arrayBlock

        return voltsCohDet
2933 2933
2934 2934 def __calculateCCF(self, volts, pairslist ,laglist):
2935 2935
2936 2936 nHeights = volts.shape[2]
2937 2937 nPoints = volts.shape[1]
2938 2938 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
2939 2939
2940 2940 for i in range(len(pairslist)):
2941 2941 volts1 = volts[pairslist[i][0]]
2942 2942 volts2 = volts[pairslist[i][1]]
2943 2943
2944 2944 for t in range(len(laglist)):
2945 2945 idxT = laglist[t]
2946 2946 if idxT >= 0:
2947 2947 vStacked = numpy.vstack((volts2[idxT:,:],
2948 2948 numpy.zeros((idxT, nHeights),dtype='complex')))
2949 2949 else:
2950 2950 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
2951 2951 volts2[:(nPoints + idxT),:]))
2952 2952 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
2953 2953
2954 2954 vStacked = None
2955 2955 return voltsCCF
2956 2956
2957 2957 def __getNoise(self, power, timeSegment, timeInterval):
2958 2958 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
2959 2959 numBlocks = int(power.shape[0]/numProfPerBlock)
2960 2960 numHeights = power.shape[1]
2961 2961
2962 2962 listPower = numpy.array_split(power, numBlocks, 0)
2963 2963 noise = numpy.zeros((power.shape[0], power.shape[1]))
2964 2964 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
2965 2965
2966 2966 startInd = 0
2967 2967 endInd = 0
2968 2968
2969 2969 for i in range(numBlocks): #split por canal
2970 2970 startInd = endInd
2971 2971 endInd = endInd + listPower[i].shape[0]
2972 2972
2973 2973 arrayBlock = listPower[i]
2974 2974 noiseAux = numpy.mean(arrayBlock, 0)
2975 2975 # noiseAux = numpy.median(noiseAux)
2976 2976 # noiseAux = numpy.mean(arrayBlock)
2977 2977 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
2978 2978
2979 2979 noiseAux1 = numpy.mean(arrayBlock)
2980 2980 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
2981 2981
2982 2982 return noise, noise1
2983 2983
    def __findMeteors(self, power, thresh):
        """Scan each height for runs of above-threshold power.

        A candidate needs at least three consecutive above-threshold
        profiles; each detection is recorded as a numpy array
        [height, indInit, indPeak, indEnd, FLA].
        """
        nProf = power.shape[0]
        nHeights = power.shape[1]
        listMeteors = []

        for i in range(nHeights):
            powerAux = power[:,i]
            threshAux = thresh[:,i]

            indUPthresh = numpy.where(powerAux > threshAux)[0]
            indDNthresh = numpy.where(powerAux <= threshAux)[0]

            j = 0

            while (j < indUPthresh.size - 2):
                # three consecutive above-threshold samples start a meteor
                if (indUPthresh[j + 2] == indUPthresh[j] + 2):
                    indDNAux = numpy.where(indDNthresh > indUPthresh[j])
                    indDNthresh = indDNthresh[indDNAux]

                    if (indDNthresh.size > 0):
                        # run ends just before the next below-threshold sample
                        indEnd = indDNthresh[0] - 1
                        indInit = indUPthresh[j]

                        meteor = powerAux[indInit:indEnd + 1]
                        indPeak = meteor.argmax() + indInit
                        # lag-1 autocorrelation of the run, used later to rank
                        # overlapping detections
                        FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))

                        listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #CHECK THIS!!!!!
                        # NOTE(review): numpy.where returns an array, so j
                        # becomes a 1-element (or empty) ndarray here and the
                        # while test relies on array truth-value -- fragile
                        # under newer numpy; confirm.
                        j = numpy.where(indUPthresh == indEnd)[0] + 1
                    else: j+=1
                else: j+=1

        return listMeteors
3017 3017
3018 3018 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
3019 3019
3020 3020 arrayMeteors = numpy.asarray(listMeteors)
3021 3021 listMeteors1 = []
3022 3022
3023 3023 while arrayMeteors.shape[0] > 0:
3024 3024 FLAs = arrayMeteors[:,4]
3025 3025 maxFLA = FLAs.argmax()
3026 3026 listMeteors1.append(arrayMeteors[maxFLA,:])
3027 3027
3028 3028 MeteorInitTime = arrayMeteors[maxFLA,1]
3029 3029 MeteorEndTime = arrayMeteors[maxFLA,3]
3030 3030 MeteorHeight = arrayMeteors[maxFLA,0]
3031 3031
3032 3032 #Check neighborhood
3033 3033 maxHeightIndex = MeteorHeight + rangeLimit
3034 3034 minHeightIndex = MeteorHeight - rangeLimit
3035 3035 minTimeIndex = MeteorInitTime - timeLimit
3036 3036 maxTimeIndex = MeteorEndTime + timeLimit
3037 3037
3038 3038 #Check Heights
3039 3039 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
3040 3040 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
3041 3041 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
3042 3042
3043 3043 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
3044 3044
3045 3045 return listMeteors1
3046 3046
    def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
        """Re-estimate each meteor's phases, times, power and SNR
        (paper sections 3.6 - 3.8).

        thresh = [phaseThresh (rad), noise_multiple, SNRThresh (dB)].
        Sets error code 17 (phase variation), 1 (low SNR) or 6 (too short)
        in the last element of each meteor row.

        Returns (listMeteors1, listPowerSeries, listVoltageSeries).
        """
        numHeights = volts.shape[2]
        nChannel = volts.shape[0]

        thresholdPhase = thresh[0]
        thresholdNoise = thresh[1]
        thresholdDB = float(thresh[2])

        thresholdDB1 = 10**(thresholdDB/10)
        pairsarray = numpy.array(pairslist)
        indSides = pairsarray[:,1]

        pairslist1 = list(pairslist)
        pairslist1.append((0,1))
        pairslist1.append((3,4))

        listMeteors1 = []
        listPowerSeries = []
        listVoltageSeries = []
        # volts holds the raw voltage data

        # lag in profiles, depending on the operating frequency
        if frequency == 30e6:
            timeLag = 45*10**-3
        else:
            timeLag = 15*10**-3
        # NOTE(review): numpy.ceil returns a float, and `lag` (and the
        # meteor indices loaded from float arrays below) are later used in
        # slices, which require integers under Python 3 -- confirm this
        # path is exercised and cast to int if so.
        lag = numpy.ceil(timeLag/timeInterval)

        for i in range(len(listMeteors)):

            ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
            meteorAux = numpy.zeros(16)

            #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
            mHeight = listMeteors[i][0]
            mStart = listMeteors[i][1]
            mPeak = listMeteors[i][2]
            mEnd = listMeteors[i][3]

            #get the volt data between the start and end times of the meteor
            meteorVolts = volts[:,mStart:mEnd+1,mHeight]
            meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)

            #3.6. Phase Difference estimation
            phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)

            #3.7. Phase difference removal & meteor start, peak and end times reestimated
            #meteorVolts0.- all Channels, all Profiles
            meteorVolts0 = volts[:,:,mHeight]
            meteorThresh = noise[:,mHeight]*thresholdNoise
            meteorNoise = noise[:,mHeight]
            meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
            powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power

            #Times reestimation: last below-threshold sample before the peak
            mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
            if mStart1.size > 0:
                mStart1 = mStart1[-1] + 1

            else:
                mStart1 = mPeak

            # first below-threshold sample after the peak ends the meteor
            mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
            mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
            if mEndDecayTime1.size == 0:
                mEndDecayTime1 = powerNet0.size
            else:
                mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
#            mPeak1 = meteorVolts0[mStart1:mEnd1 + 1].argmax()

            #meteorVolts1.- all Channels, from start to end
            meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
            meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
            if meteorVolts2.shape[1] == 0:
                meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
            meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
            meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
            ##################### END PARAMETERS REESTIMATION #########################

            ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
#            if mEnd1 - mStart1 > 4:     #Error Number 6: echo less than 5 samples long; too short for analysis
            if meteorVolts2.shape[1] > 0:
                #Phase Difference re-estimation
                phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1)    #Phase Difference Estimation
#                phaseDiff1, phaseDiffint = self.estimatePhaseDifference(meteorVolts2, pairslist)
                meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
                phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
                meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4])    #Phase Shifting

                #Phase Difference RMS
                phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
                powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
                #Data from Meteor
                mPeak1 = powerNet1.argmax() + mStart1
                mPeakPower1 = powerNet1.max()
                noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
                mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
                Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
                Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
                PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
                #Vectorize
                meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
                meteorAux[7:11] = phaseDiffint[0:4]

                #Rejection Criterions
                if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
                    meteorAux[-1] = 17
                elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
                    meteorAux[-1] = 1


            else:
                meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
                meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
                PowerSeries = 0

            listMeteors1.append(meteorAux)
            listPowerSeries.append(PowerSeries)
            listVoltageSeries.append(meteorVolts1)

        return listMeteors1, listPowerSeries, listVoltageSeries
3167 3167
3168 3168 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
3169 3169
3170 3170 threshError = 10
3171 3171 #Depending if it is 30 or 50 MHz
3172 3172 if frequency == 30e6:
3173 3173 timeLag = 45*10**-3
3174 3174 else:
3175 3175 timeLag = 15*10**-3
3176 3176 lag = numpy.ceil(timeLag/timeInterval)
3177 3177
3178 3178 listMeteors1 = []
3179 3179
3180 3180 for i in range(len(listMeteors)):
3181 3181 meteorPower = listPower[i]
3182 3182 meteorAux = listMeteors[i]
3183 3183
3184 3184 if meteorAux[-1] == 0:
3185 3185
3186 3186 try:
3187 3187 indmax = meteorPower.argmax()
3188 3188 indlag = indmax + lag
3189 3189
3190 3190 y = meteorPower[indlag:]
3191 3191 x = numpy.arange(0, y.size)*timeLag
3192 3192
3193 3193 #first guess
3194 3194 a = y[0]
3195 3195 tau = timeLag
3196 3196 #exponential fit
3197 3197 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
3198 3198 y1 = self.__exponential_function(x, *popt)
3199 3199 #error estimation
3200 3200 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
3201 3201
3202 3202 decayTime = popt[1]
3203 3203 riseTime = indmax*timeInterval
3204 3204 meteorAux[11:13] = [decayTime, error]
3205 3205
3206 3206 #Table items 7, 8 and 11
3207 3207 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
3208 3208 meteorAux[-1] = 7
3209 3209 elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than than twice rise time
3210 3210 meteorAux[-1] = 8
3211 3211 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
3212 3212 meteorAux[-1] = 11
3213 3213
3214 3214
3215 3215 except:
3216 3216 meteorAux[-1] = 11
3217 3217
3218 3218
3219 3219 listMeteors1.append(meteorAux)
3220 3220
3221 3221 return listMeteors1
3222 3222
3223 3223 #Exponential Function
3224 3224
3225 3225 def __exponential_function(self, x, a, tau):
3226 3226 y = a*numpy.exp(-x/tau)
3227 3227 return y
3228 3228
3229 3229 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
3230 3230
3231 3231 pairslist1 = list(pairslist)
3232 3232 pairslist1.append((0,1))
3233 3233 pairslist1.append((3,4))
3234 3234 numPairs = len(pairslist1)
3235 3235 #Time Lag
3236 3236 timeLag = 45*10**-3
3237 3237 c = 3e8
3238 3238 lag = numpy.ceil(timeLag/timeInterval)
3239 3239 freq = 30e6
3240 3240
3241 3241 listMeteors1 = []
3242 3242
3243 3243 for i in range(len(listMeteors)):
3244 3244 meteorAux = listMeteors[i]
3245 3245 if meteorAux[-1] == 0:
3246 3246 mStart = listMeteors[i][1]
3247 3247 mPeak = listMeteors[i][2]
3248 3248 mLag = mPeak - mStart + lag
3249 3249
3250 3250 #get the volt data between the start and end times of the meteor
3251 3251 meteorVolts = listVolts[i]
3252 3252 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
3253 3253
3254 3254 #Get CCF
3255 3255 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
3256 3256
3257 3257 #Method 2
3258 3258 slopes = numpy.zeros(numPairs)
3259 3259 time = numpy.array([-2,-1,1,2])*timeInterval
3260 3260 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
3261 3261
3262 3262 #Correct phases
3263 3263 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
3264 3264 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
3265 3265
3266 3266 if indDer[0].shape[0] > 0:
3267 3267 for i in range(indDer[0].shape[0]):
3268 3268 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i]])
3269 3269 angAllCCF[indDer[0][i],indDer[1][i]+1:] += signo*2*numpy.pi
3270 3270
3271 3271 # fit = scipy.stats.linregress(numpy.array([-2,-1,1,2])*timeInterval, numpy.array([phaseLagN2s[i],phaseLagN1s[i],phaseLag1s[i],phaseLag2s[i]]))
3272 3272 for j in range(numPairs):
3273 3273 fit = stats.linregress(time, angAllCCF[j,:])
3274 3274 slopes[j] = fit[0]
3275 3275
3276 3276 #Remove Outlier
3277 3277 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
3278 3278 # slopes = numpy.delete(slopes,indOut)
3279 3279 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
3280 3280 # slopes = numpy.delete(slopes,indOut)
3281 3281
3282 3282 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
3283 3283 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
3284 3284 meteorAux[-2] = radialError
3285 3285 meteorAux[-3] = radialVelocity
3286 3286
3287 3287 #Setting Error
3288 3288 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
3289 3289 if numpy.abs(radialVelocity) > 200:
3290 3290 meteorAux[-1] = 15
3291 3291 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
3292 3292 elif radialError > radialStdThresh:
3293 3293 meteorAux[-1] = 12
3294 3294
3295 3295 listMeteors1.append(meteorAux)
3296 3296 return listMeteors1
3297 3297
3298 3298 def __setNewArrays(self, listMeteors, date, heiRang):
3299 3299
3300 3300 #New arrays
3301 3301 arrayMeteors = numpy.array(listMeteors)
3302 3302 arrayParameters = numpy.zeros((len(listMeteors), 13))
3303 3303
3304 3304 #Date inclusion
3305 3305 # date = re.findall(r'\((.*?)\)', date)
3306 3306 # date = date[0].split(',')
3307 3307 # date = map(int, date)
3308 3308 #
3309 3309 # if len(date)<6:
3310 3310 # date.append(0)
3311 3311 #
3312 3312 # date = [date[0]*10000 + date[1]*100 + date[2], date[3]*10000 + date[4]*100 + date[5]]
3313 3313 # arrayDate = numpy.tile(date, (len(listMeteors), 1))
3314 3314 arrayDate = numpy.tile(date, (len(listMeteors)))
3315 3315
3316 3316 #Meteor array
3317 3317 # arrayMeteors[:,0] = heiRang[arrayMeteors[:,0].astype(int)]
3318 3318 # arrayMeteors = numpy.hstack((arrayDate, arrayMeteors))
3319 3319
3320 3320 #Parameters Array
3321 3321 arrayParameters[:,0] = arrayDate #Date
3322 3322 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
3323 3323 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
3324 3324 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
3325 3325 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
3326 3326
3327 3327
3328 3328 return arrayParameters
3329 3329
class CorrectSMPhases(Operation):
    '''
    Apply per-channel phase offsets to specular-meteor parameters and
    recompute the derived meteor parameters (AOA, heights, error codes).
    '''

    def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):

        params = dataOut.data_param

        #Pairs used for the AOA computation: (0,1) along x, (2,3) along y
        pairsList = [(0,1), (2,3)]
        jph = numpy.zeros(4)

        #Offsets come in degrees; add them and re-wrap the phases to (-pi, pi]
        phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
        params[:,8:12] = numpy.angle(numpy.exp(1j*(params[:,8:12] + phaseOffsets)))

        if channelPositions is None:
            channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella

        meteorOps = SMOperations()
        pairslist0, distances = meteorOps.getPhasePairs(channelPositions)

        dataOut.data_param = meteorOps.getMeteorParams(params, azimuth, (hmin,hmax), pairsList, distances, jph)
        return
3358 3358
class SMPhaseCalibration(Operation):
    '''
    Estimate the inter-channel phase offsets of a specular-meteor radar.

    Meteor phase measurements are accumulated for nHours hours; once the
    buffer spans the output interval, the four channel phase offsets
    (degrees) are searched for and written to dataOut.data_output.
    '''

    # accumulated meteor parameter rows; reset after each output
    __buffer = None

    # start time (epoch seconds) of the current accumulation window
    __initime = None

    # True when a full output interval has been accumulated
    __dataReady = False

    # one-time initialization guard for run()
    __isConfig = False

    def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
        '''Return True when the buffered data spans the output interval.'''
        dataTime = currentTime + paramInterval
        deltaTime = dataTime - initTime

        if deltaTime >= outputInterval or deltaTime < 0:
            return True

        return False

    def __getGammas(self, pairs, d, phases):
        '''
        Estimate the gamma phase term for the x and y antenna pairs from the
        distribution of measured meteor phases (histogram peak + Gaussian fit).
        '''
        gammas = numpy.zeros(2)

        for i in range(len(pairs)):

            pairi = pairs[i]

            phip3 = phases[:,pairi[0]]
            d3 = d[pairi[0]]
            phip2 = phases[:,pairi[1]]
            d2 = d[pairi[1]]
            #Calculating gamma
#            jdcos = alp1/(k*d1)
#            jgamma = numpy.angle(numpy.exp(1j*(d0*alp1/d1 - alp0)))
            jgamma = -phip2*d3/d2 - phip3
            jgamma = numpy.angle(numpy.exp(1j*jgamma))  # wrap to (-pi, pi]
#            jgamma[jgamma>numpy.pi] -= 2*numpy.pi
#            jgamma[jgamma<-numpy.pi] += 2*numpy.pi

            #Revised distribution: replicate shifted by +/- pi/2 so the peak
            #is not lost at the histogram edges
            jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))

            #Histogram
            nBins = 64
            rmin = -0.5*numpy.pi
            rmax = 0.5*numpy.pi
            phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))

            meteorsY = phaseHisto[0]
            phasesX = phaseHisto[1][:-1]
            width = phasesX[1] - phasesX[0]
            phasesX += width/2  # bin centers

            #Gaussian aproximation over 11 bins around the histogram peak
            bpeak = meteorsY.argmax()
            peak = meteorsY.max()
            jmin = bpeak - 5
            jmax = bpeak + 5 + 1

            if jmin<0:
                jmin = 0
                jmax = 6
            elif jmax > meteorsY.size:
                jmin = meteorsY.size - 6
                jmax = meteorsY.size

            # initial guess [amplitude, center, sigma]
            x0 = numpy.array([peak,bpeak,50])
            coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))

            #Gammas: fitted center of the phase distribution
            gammas[i] = coeff[0][1]

        return gammas

    def __residualFunction(self, coeffs, y, t):
        '''Residual for the Gaussian least-squares fit.'''
        return y - self.__gauss_function(t, coeffs)

    def __gauss_function(self, t, coeffs):
        '''Gaussian model; coeffs = [amplitude, center, sigma].'''
        return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)

    def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
        '''
        Grid-search the four channel phase offsets that maximize the number
        of meteors with valid (error == 0) parameters, refining the search
        range over four successively finer passes.
        '''
        meteorOps = SMOperations()
        nchan = 4
        pairx = pairsList[0] # x is pair 0
        pairy = pairsList[1] # y is pair 1
        center_xangle = 0
        center_yangle = 0
        # successively narrower search ranges (radians)
        range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
        ntimes = len(range_angle)

        nstepsx = 20
        nstepsy = 20

        for iz in range(ntimes):
            min_xangle = -range_angle[iz]/2 + center_xangle
            max_xangle = range_angle[iz]/2 + center_xangle
            min_yangle = -range_angle[iz]/2 + center_yangle
            max_yangle = range_angle[iz]/2 + center_yangle

            inc_x = (max_xangle-min_xangle)/nstepsx
            inc_y = (max_yangle-min_yangle)/nstepsy

            alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
            alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
            penalty = numpy.zeros((nstepsx,nstepsy))
            jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
            jph = numpy.zeros(nchan)

            # Iterations looking for the offset
            for iy in range(int(nstepsy)):
                for ix in range(int(nstepsx)):
                    d3 = d[pairsList[1][0]]
                    d2 = d[pairsList[1][1]]
                    d5 = d[pairsList[0][0]]
                    d4 = d[pairsList[0][1]]

                    alp2 = alpha_y[iy] #gamma 1
                    alp4 = alpha_x[ix] #gamma 0

                    # the remaining two offsets follow from the gamma relations
                    alp3 = -alp2*d3/d2 - gammas[1]
                    alp5 = -alp4*d5/d4 - gammas[0]
#                    jph[pairy[1]] = alpha_y[iy]
#                    jph[pairy[0]] = -gammas[1] - alpha_y[iy]*d[pairy[1]]/d[pairy[0]]

#                    jph[pairx[1]] = alpha_x[ix]
#                    jph[pairx[0]] = -gammas[0] - alpha_x[ix]*d[pairx[1]]/d[pairx[0]]
                    jph[pairsList[0][1]] = alp4
                    jph[pairsList[0][0]] = alp5
                    jph[pairsList[1][0]] = alp3
                    jph[pairsList[1][1]] = alp2
                    jph_array[:,ix,iy] = jph
#                    d = [2.0,2.5,2.5,2.0]
                    # TODO(review): confirm the meteors are read correctly here
                    meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
                    error = meteorsArray1[:,-1]
                    ind1 = numpy.where(error==0)[0]
                    # "penalty" is actually a score: count of valid meteors
                    penalty[ix,iy] = ind1.size

            i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
            phOffset = jph_array[:,i,j]

            # recenter the next (finer) pass on the current best offset
            center_xangle = phOffset[pairx[1]]
            center_yangle = phOffset[pairy[1]]

        # wrap the best offsets to (-pi, pi] and convert to degrees
        phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
        phOffset = phOffset*180/numpy.pi
        return phOffset


    def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
        '''
        Accumulate meteor parameters and, once nHours of data are buffered,
        compute the channel phase offsets (degrees) into dataOut.data_output.

        Inputs:
            hmin, hmax       : valid meteor height range
            channelPositions : antenna (x, y) positions; defaults to the
                               "Estrella" configuration
            nHours           : accumulation interval in hours
        '''
        dataOut.flagNoData = True
        self.__dataReady = False
        dataOut.outputInterval = nHours*3600

        if self.__isConfig == False:
#            self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
            #Get Initial LTC time: start of the current hour, as epoch seconds
            self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
            self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()

            self.__isConfig = True

        if self.__buffer is None:
            self.__buffer = dataOut.data_param.copy()

        else:
            self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))

        self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready

        if self.__dataReady:
            dataOut.utctimeInit = self.__initime
            self.__initime += dataOut.outputInterval #to erase time offset

            freq = dataOut.frequency
            c = dataOut.C #m/s
            lamb = c/freq
            k = 2*numpy.pi/lamb
            azimuth = 0
            h = (hmin, hmax)
#            pairs = ((0,1),(2,3)) #Estrella
#            pairs = ((1,0),(2,3)) #T

            if channelPositions is None:
#                channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
                channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
            meteorOps = SMOperations()
            pairslist0, distances = meteorOps.getPhasePairs(channelPositions)

            #Checking correct order of pairs: longer baseline listed first
            pairs = []
            if distances[1] > distances[0]:
                pairs.append((1,0))
            else:
                pairs.append((0,1))

            if distances[3] > distances[2]:
                pairs.append((3,2))
            else:
                pairs.append((2,3))
#            distances1 = [-distances[0]*lamb, distances[1]*lamb, -distances[2]*lamb, distances[3]*lamb]

            #Keep only meteors whose error code can still yield a valid AOA
            meteorsArray = self.__buffer
            error = meteorsArray[:,-1]
            boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
            ind1 = numpy.where(boolError)[0]
            meteorsArray = meteorsArray[ind1,:]
            meteorsArray[:,-1] = 0
            phases = meteorsArray[:,8:12]

            #Calculate Gammas
            gammas = self.__getGammas(pairs, distances, phases)
#            gammas = numpy.array([-21.70409463,45.76935864])*numpy.pi/180
            #Calculate Phases
            phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
            phasesOff = phasesOff.reshape((1,phasesOff.size))
            dataOut.data_output = -phasesOff
            dataOut.flagNoData = False
            self.__buffer = None


        return
3584 3584
class SMOperations():
    '''
    Geometry utilities for specular-meteor parameters: angle of arrival,
    range/height disambiguation and antenna-pair selection.
    '''

    def __init__(self):

        return

    def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
        '''
        Recompute AOA (columns 3:6) and height (column 2) for every meteor,
        updating the error code in the last column. The input array is
        copied; jph holds the per-channel phase offsets to apply.
        '''
        arrayParameters = arrayParameters0.copy()
        hmin = h[0]
        hmax = h[1]

        #Calculate AOA (Error N 3, 4)
        #JONES ET AL. 1998
        AOAthresh = numpy.pi/8
        error = arrayParameters[:,-1]
        phases = -arrayParameters[:,8:12] + jph
#        phases = numpy.unwrap(phases)
        arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)

        #Calculate Heights (Error N 13 and 14)
        error = arrayParameters[:,-1]
        Ranges = arrayParameters[:,1]
        zenith = arrayParameters[:,4]
        arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)

        #----------------------- Get Final data ------------------------------------
#        error = arrayParameters[:,-1]
#        ind1 = numpy.where(error==0)[0]
#        arrayParameters = arrayParameters[ind1,:]

        return arrayParameters

    def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
        '''
        Angle of arrival per meteor: (azimuth, zenith, cosine-difference).
        Sets error 3 (AOA not feasible) or 4 (baselines disagree).
        '''
        arrayAOA = numpy.zeros((phases.shape[0],3))
        cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)

        arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
        cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
        arrayAOA[:,2] = cosDirError

        azimuthAngle = arrayAOA[:,0]
        zenithAngle = arrayAOA[:,1]

        #Setting Error: previous 3/4 codes are cleared and re-evaluated
        indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
        error[indError] = 0
        #Number 3: AOA not fesible
        indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
        error[indInvalid] = 3
        #Number 4: Large difference in AOAs obtained from different antenna baselines
        indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
        error[indInvalid] = 4
        return arrayAOA, error

    def __getDirectionCosines(self, arrayPhase, pairsList, distances):
        '''
        Direction cosines per axis: a coarse estimate from the phase sum and
        a refined, alias-resolved estimate from the phase difference.
        '''
        #Initializing some variables: candidate 2*pi*n alias offsets
        ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
        ang_aux = ang_aux.reshape(1,ang_aux.size)

        cosdir = numpy.zeros((arrayPhase.shape[0],2))
        cosdir0 = numpy.zeros((arrayPhase.shape[0],2))


        for i in range(2):
            ph0 = arrayPhase[:,pairsList[i][0]]
            ph1 = arrayPhase[:,pairsList[i][1]]
            d0 = distances[pairsList[i][0]]
            d1 = distances[pairsList[i][1]]

            ph0_aux = ph0 + ph1
            ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))  # wrap to (-pi, pi]
#            ph0_aux[ph0_aux > numpy.pi] -= 2*numpy.pi
#            ph0_aux[ph0_aux < -numpy.pi] += 2*numpy.pi
            #First Estimation: coarse but unambiguous (effective baseline d0 - d1)
            cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))

            #Most-Accurate Second Estimation (long baseline d0 + d1)
            phi1_aux = ph0 - ph1
            phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
            #Direction Cosine 1: one candidate per alias offset
            cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))

            #Searching the correct Direction Cosine
            cosdir0_aux = cosdir0[:,i]
            cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
            #Minimum Distance to the coarse estimate selects the alias
            cosDiff = (cosdir1 - cosdir0_aux)**2
            indcos = cosDiff.argmin(axis = 1)
            #Saving Value obtained
            cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]

        return cosdir0, cosdir

    def __calculateAOA(self, cosdir, azimuth):
        '''Convert direction cosines to (azimuth, zenith) angles in degrees.'''
        cosdirX = cosdir[:,0]
        cosdirY = cosdir[:,1]

        zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
        azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth#0 deg north, 90 deg east
        angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()

        return angles

    def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
        '''
        Resolve range-aliased meteor heights. A meteor is valid only when
        exactly one candidate range (R, R+Ramb, R+2*Ramb) maps to a height
        inside [minHeight, maxHeight]; otherwise error 13/14 is set.
        '''
        Ramb = 375 #Ramb = c/(2*PRF)
        Re = 6371 #Earth Radius
        heights = numpy.zeros(Ranges.shape)

        R_aux = numpy.array([0,1,2])*Ramb
        R_aux = R_aux.reshape(1,R_aux.size)

        Ranges = Ranges.reshape(Ranges.size,1)

        Ri = Ranges + R_aux
        # spherical-Earth height of every candidate range
        hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re

        #Check if there is a height between 70 and 110 km
        h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
        ind_h = numpy.where(h_bool == 1)[0]

        hCorr = hi[ind_h, :]
        ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))

        # NOTE(review): the [:len(ind_h)] truncation assumes the flattened
        # valid heights line up with ind_h — confirm for rows with h_bool > 1
        hCorr = hi[ind_hCorr][:len(ind_h)]
        heights[ind_h] = hCorr

        #Setting Error
        #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
        #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
        indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
        error[indError] = 0
        indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
        error[indInvalid2] = 14
        indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
        error[indInvalid1] = 13

        return heights, error

    def getPhasePairs(self, channelPositions):
        '''
        From the 5 antenna (x, y) positions, pick the two shortest baselines
        on each axis that share a common (center) channel.

        Output:
            pairslist : [(centerX, ch1X), (centerX, ch2X),
                         (centerY, ch1Y), (centerY, ch2Y)]
            distances : [d1X, d2X, d1Y, d2Y] baseline lengths
        '''
        chanPos = numpy.array(channelPositions)
        listOper = list(itertools.combinations(list(range(5)),2))

        distances = numpy.zeros(4)
        axisX = []
        axisY = []
        distX = numpy.zeros(3)
        distY = numpy.zeros(3)
        ix = 0
        iy = 0

        pairX = numpy.zeros((2,2))
        pairY = numpy.zeros((2,2))

        for i in range(len(listOper)):
            pairi = listOper[i]

            posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])

            # pairs aligned with the y axis have zero x separation, and
            # vice versa; diagonal pairs are discarded
            if posDif[0] == 0:
                axisY.append(pairi)
                distY[iy] = posDif[1]
                iy += 1
            elif posDif[1] == 0:
                axisX.append(pairi)
                distX[ix] = posDif[0]
                ix += 1

        for i in range(2):
            if i==0:
                dist0 = distX
                axis0 = axisX
            else:
                dist0 = distY
                axis0 = axisY

            # keep the two shortest baselines; their shared channel is the center
            side = numpy.argsort(dist0)[:-1]
            axis0 = numpy.array(axis0)[side,:]
            chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
            axis1 = numpy.unique(numpy.reshape(axis0,4))
            side = axis1[axis1 != chanC]
            diff1 = chanPos[chanC,i] - chanPos[side[0],i]
            diff2 = chanPos[chanC,i] - chanPos[side[1],i]
            if diff1<0:
                chan2 = side[0]
                d2 = numpy.abs(diff1)
                chan1 = side[1]
                d1 = numpy.abs(diff2)
            else:
                chan2 = side[1]
                d2 = numpy.abs(diff2)
                chan1 = side[0]
                d1 = numpy.abs(diff1)

            if i==0:
                chanCX = chanC
                chan1X = chan1
                chan2X = chan2
                distances[0:2] = numpy.array([d1,d2])
            else:
                chanCY = chanC
                chan1Y = chan1
                chan2Y = chan2
                distances[2:4] = numpy.array([d1,d2])
#        axisXsides = numpy.reshape(axisX[ix,:],4)
#
#        channelCentX = int(numpy.intersect1d(pairX[0,:], pairX[1,:])[0])
#        channelCentY = int(numpy.intersect1d(pairY[0,:], pairY[1,:])[0])
#
#        ind25X = numpy.where(pairX[0,:] != channelCentX)[0][0]
#        ind20X = numpy.where(pairX[1,:] != channelCentX)[0][0]
#        channel25X = int(pairX[0,ind25X])
#        channel20X = int(pairX[1,ind20X])
#        ind25Y = numpy.where(pairY[0,:] != channelCentY)[0][0]
#        ind20Y = numpy.where(pairY[1,:] != channelCentY)[0][0]
#        channel25Y = int(pairY[0,ind25Y])
#        channel20Y = int(pairY[1,ind20Y])

#        pairslist = [(channelCentX, channel25X),(channelCentX, channel20X),(channelCentY,channel25Y),(channelCentY, channel20Y)]
        pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]

        return pairslist, distances
3810 3810 # def __getAOA(self, phases, pairsList, error, AOAthresh, azimuth):
3811 3811 #
3812 3812 # arrayAOA = numpy.zeros((phases.shape[0],3))
3813 3813 # cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList)
3814 3814 #
3815 3815 # arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
3816 3816 # cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
3817 3817 # arrayAOA[:,2] = cosDirError
3818 3818 #
3819 3819 # azimuthAngle = arrayAOA[:,0]
3820 3820 # zenithAngle = arrayAOA[:,1]
3821 3821 #
3822 3822 # #Setting Error
3823 3823 # #Number 3: AOA not fesible
3824 3824 # indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
3825 3825 # error[indInvalid] = 3
3826 3826 # #Number 4: Large difference in AOAs obtained from different antenna baselines
3827 3827 # indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
3828 3828 # error[indInvalid] = 4
3829 3829 # return arrayAOA, error
3830 3830 #
3831 3831 # def __getDirectionCosines(self, arrayPhase, pairsList):
3832 3832 #
3833 3833 # #Initializing some variables
3834 3834 # ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
3835 3835 # ang_aux = ang_aux.reshape(1,ang_aux.size)
3836 3836 #
3837 3837 # cosdir = numpy.zeros((arrayPhase.shape[0],2))
3838 3838 # cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
3839 3839 #
3840 3840 #
3841 3841 # for i in range(2):
3842 3842 # #First Estimation
3843 3843 # phi0_aux = arrayPhase[:,pairsList[i][0]] + arrayPhase[:,pairsList[i][1]]
3844 3844 # #Dealias
3845 3845 # indcsi = numpy.where(phi0_aux > numpy.pi)
3846 3846 # phi0_aux[indcsi] -= 2*numpy.pi
3847 3847 # indcsi = numpy.where(phi0_aux < -numpy.pi)
3848 3848 # phi0_aux[indcsi] += 2*numpy.pi
3849 3849 # #Direction Cosine 0
3850 3850 # cosdir0[:,i] = -(phi0_aux)/(2*numpy.pi*0.5)
3851 3851 #
3852 3852 # #Most-Accurate Second Estimation
3853 3853 # phi1_aux = arrayPhase[:,pairsList[i][0]] - arrayPhase[:,pairsList[i][1]]
3854 3854 # phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
3855 3855 # #Direction Cosine 1
3856 3856 # cosdir1 = -(phi1_aux + ang_aux)/(2*numpy.pi*4.5)
3857 3857 #
3858 3858 # #Searching the correct Direction Cosine
3859 3859 # cosdir0_aux = cosdir0[:,i]
3860 3860 # cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
3861 3861 # #Minimum Distance
3862 3862 # cosDiff = (cosdir1 - cosdir0_aux)**2
3863 3863 # indcos = cosDiff.argmin(axis = 1)
3864 3864 # #Saving Value obtained
3865 3865 # cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
3866 3866 #
3867 3867 # return cosdir0, cosdir
3868 3868 #
3869 3869 # def __calculateAOA(self, cosdir, azimuth):
3870 3870 # cosdirX = cosdir[:,0]
3871 3871 # cosdirY = cosdir[:,1]
3872 3872 #
3873 3873 # zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
3874 3874 # azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
3875 3875 # angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
3876 3876 #
3877 3877 # return angles
3878 3878 #
3879 3879 # def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
3880 3880 #
3881 3881 # Ramb = 375 #Ramb = c/(2*PRF)
3882 3882 # Re = 6371 #Earth Radius
3883 3883 # heights = numpy.zeros(Ranges.shape)
3884 3884 #
3885 3885 # R_aux = numpy.array([0,1,2])*Ramb
3886 3886 # R_aux = R_aux.reshape(1,R_aux.size)
3887 3887 #
3888 3888 # Ranges = Ranges.reshape(Ranges.size,1)
3889 3889 #
3890 3890 # Ri = Ranges + R_aux
3891 3891 # hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
3892 3892 #
3893 3893 # #Check if there is a height between 70 and 110 km
3894 3894 # h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
3895 3895 # ind_h = numpy.where(h_bool == 1)[0]
3896 3896 #
3897 3897 # hCorr = hi[ind_h, :]
3898 3898 # ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
3899 3899 #
3900 3900 # hCorr = hi[ind_hCorr]
3901 3901 # heights[ind_h] = hCorr
3902 3902 #
3903 3903 # #Setting Error
3904 3904 # #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
3905 3905 # #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
3906 3906 #
3907 3907 # indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
3908 3908 # error[indInvalid2] = 14
3909 3909 # indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
3910 3910 # error[indInvalid1] = 13
3911 3911 #
3912 3912 # return heights, error
3913 3913
3914 3914
3915 3915 class WeatherRadar(Operation):
3916 3916 '''
3917 3917 Function tat implements Weather Radar operations-
3918 3918 Input:
3919 3919 Output:
3920 3920 Parameters affected:
3921 3921 '''
3922 3922 isConfig = False
3923 3923 variableList = None
3924 3924
    def __init__(self):
        # Delegate to the base Operation constructor; all radar-equation
        # state is deferred to setup()
        Operation.__init__(self)
3927 3927
3928 3928 def setup(self,dataOut,variableList= None,Pt=0,Gt=0,Gr=0,Glna=0,lambda_=0, aL=0,
3929 3929 tauW= 0,thetaT=0,thetaR=0,Km =0):
3930 3930 print("INICIO")
3931 3931 self.nCh = dataOut.nChannels
3932 3932 self.nHeis = dataOut.nHeights
3933 3933 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
3934 3934 self.Range = numpy.arange(dataOut.nHeights)*deltaHeight + dataOut.heightList[0]
3935 3935 self.Range = self.Range.reshape(1,self.nHeis)
3936 3936 self.Range = numpy.tile(self.Range,[self.nCh,1])
3937 3937 '''-----------1 Constante del Radar----------'''
3938 3938 self.Pt = Pt # Pmax =200 W x DC=(0.2 useg/400useg)
3939 3939 self.Gt = Gt # 38 db
3940 3940 self.Gr = Gr # 38 dB
3941 3941 self.Glna = Glna # 60 dB
3942 3942 self.lambda_ = lambda_ # 3.2 cm 0.032 m.
3943 3943 self.aL = aL # Perdidas
3944 3944 self.tauW = tauW #ancho de pulso 0.2useg pulso corto.
3945 3945 self.thetaT = thetaT # 1.8ΒΊ -- 0.0314 rad
3946 3946 self.thetaR = thetaR # 1.8Βͺ --0.0314 rad
3947 3947 self.Km = Km
3948 3948 Numerator = ((4*numpy.pi)**3 * aL**2 * 16 *numpy.log(2)*(10**18))
3949 3949 Denominator = (Pt *(10**(Gt/10.0))*(10**(Gr/10.0))*(10**(Glna/10.0))* lambda_**2 * SPEED_OF_LIGHT * tauW * numpy.pi*thetaT*thetaR)
3950 3950 self.RadarConstant = Numerator/Denominator
3951 3951 if self.variableList== None:
3952 3952 self.variableList= ['Reflectividad','ReflectividadDiferencial','CoeficienteCorrelacion','FaseDiferencial','VelocidadRadial','AnchoEspectral']
3953 3953 print('FIN')
3954 3954 def setMoments(self,dataOut,i):
3955 3955
3956 3956 type = dataOut.inputUnit
3957 3957 nCh = dataOut.nChannels
3958 3958 nHeis = dataOut.nHeights
3959 3959 data_param = numpy.zeros((nCh,4,nHeis))
3960 3960 if type == "Voltage":
3961 3961 factor = 1
3962 3962 data_param[:,0,:] = dataOut.dataPP_POW/(factor)
3963 3963 data_param[:,1,:] = dataOut.dataPP_DOP
3964 3964 data_param[:,2,:] = dataOut.dataPP_WIDTH
3965 3965 data_param[:,3,:] = dataOut.dataPP_SNR
3966 3966 if type == "Spectra":
3967 3967 factor = dataOut.normFactor
3968 3968 data_param[:,0,:] = dataOut.data_POW/(factor)
3969 3969 data_param[:,1,:] = dataOut.data_DOP
3970 3970 data_param[:,2,:] = dataOut.data_WIDTH
3971 3971 data_param[:,3,:] = dataOut.data_SNR
3972 3972
3973 3973 return data_param[:,i,:]
3974 3974
3975 3975 def getCoeficienteCorrelacionROhv_R(self,dataOut):
3976 3976 type = dataOut.inputUnit
3977 3977 nHeis = dataOut.nHeights
3978 3978 data_RhoHV_R = numpy.zeros((nHeis))
3979 3979 if type == "Voltage":
3980 3980 powa = dataOut.dataPP_POWER[0]
3981 3981 powb = dataOut.dataPP_POWER[1]
3982 3982 ccf = dataOut.dataPP_CCF
3983 3983 avgcoherenceComplex = ccf / numpy.sqrt(powa * powb)
3984 3984 data_RhoHV_R = numpy.abs(avgcoherenceComplex)
3985 3985 if type == "Spectra":
3986 3986 data_RhoHV_R = dataOut.getCoherence()
3987 3987
3988 3988 return data_RhoHV_R
3989 3989
3990 3990 def getFasediferencialPhiD_P(self,dataOut,phase= True):
3991 3991 type = dataOut.inputUnit
3992 3992 nHeis = dataOut.nHeights
3993 3993 data_PhiD_P = numpy.zeros((nHeis))
3994 3994 if type == "Voltage":
3995 3995 powa = dataOut.dataPP_POWER[0]
3996 3996 powb = dataOut.dataPP_POWER[1]
3997 3997 ccf = dataOut.dataPP_CCF
3998 3998 avgcoherenceComplex = ccf / numpy.sqrt(powa * powb)
3999 3999 if phase:
4000 4000 data_PhiD_P = numpy.arctan2(avgcoherenceComplex.imag,
4001 4001 avgcoherenceComplex.real) * 180 / numpy.pi
4002 4002 if type == "Spectra":
4003 4003 data_PhiD_P = dataOut.getCoherence(phase = phase)
4004 4004
4005 4005 return data_PhiD_P
4006 4006
4007 4007 def getReflectividad_D(self,dataOut,type):
4008 4008 '''-----------------------------Potencia de Radar -Signal S-----------------------------'''
4009 4009
4010 4010 Pr = self.setMoments(dataOut,0)
4011 4011
4012 4012 '''-----------2 Reflectividad del Radar y Factor de Reflectividad------'''
4013 4013 self.n_radar = numpy.zeros((self.nCh,self.nHeis))
4014 4014 self.Z_radar = numpy.zeros((self.nCh,self.nHeis))
4015 4015 for R in range(self.nHeis):
4016 4016 self.n_radar[:,R] = self.RadarConstant*Pr[:,R]* (self.Range[:,R])**2
4017 4017
4018 4018 self.Z_radar[:,R] = self.n_radar[:,R]* self.lambda_**4/( numpy.pi**5 * self.Km**2)
4019 4019
4020 4020 '''----------- Factor de Reflectividad Equivalente lamda_ < 10 cm , lamda_= 3.2cm-------'''
4021 4021 Zeh = self.Z_radar
4022 4022 dBZeh = 10*numpy.log10(Zeh)
4023 4023 if type=='N':
4024 4024 return dBZeh
4025 4025 elif type=='D':
4026 4026 Zdb_D = dBZeh[0] - dBZeh[1]
4027 4027 return Zdb_D
4028 4028
4029 4029 def getRadialVelocity_V(self,dataOut):
4030 4030 velRadial_V = self.setMoments(dataOut,1)
4031 4031 return velRadial_V
4032 4032
4033 4033 def getAnchoEspectral_W(self,dataOut):
4034 4034 Sigmav_W = self.setMoments(dataOut,2)
4035 4035 return Sigmav_W
4036 4036
4037 4037
4038 4038 def run(self,dataOut,variableList=variableList,Pt=0.158,Gt=38.5,Gr=38.5,Glna=70.0,lambda_=0.032, aL=1,
4039 4039 tauW= 0.2*1e-6,thetaT=0.0314,thetaR=0.0314,Km =0.93):
4040 4040
4041 4041 if not self.isConfig:
4042 4042 self.setup(dataOut= dataOut,variableList=variableList,Pt=Pt,Gt=Gt,Gr=Gr,Glna=Glna,lambda_=lambda_, aL=aL,
4043 4043 tauW= tauW,thetaT=thetaT,thetaR=thetaR,Km =Km)
4044 4044 self.isConfig = True
4045 4045 for i in range(len(self.variableList)):
4046 4046 if self.variableList[i]=='Reflectividad':
4047 4047 dataOut.Zdb =self.getReflectividad_D(dataOut=dataOut,type='N')
4048 4048 print(dataOut.Zdb)
4049 4049 if self.variableList[i]=='ReflectividadDiferencial':
4050 4050 dataOut.Zdb_D =self.getReflectividad_D(dataOut=dataOut,type='D')
4051 4051 if self.variableList[i]=='FaseDiferencial':
4052 4052 dataOut.PhiD_P =self.getFasediferencialPhiD_P(dataOut=dataOut, phase=True)
4053 4053 if self.variableList[i] == "CoeficienteCorrelacion":
4054 4054 dataOut.RhoHV_R = self.getCoeficienteCorrelacionROhv_R(dataOut)
4055 4055 if self.variableList[i] =="VelocidadRadial":
4056 4056 dataOut.velRadial_V = self.getRadialVelocity_V(dataOut)
4057 4057 if self.variableList[i] =="AnchoEspectral":
4058 4058 dataOut.Sigmav_W = self.getAnchoEspectral_W(dataOut)
4059 4059 return dataOut
4060 4060
class PedestalInformation(Operation):
    '''
    Read antenna pointing (azimuth/elevation) from pedestal position HDF5
    files (pos@<epoch>.000.h5, grouped in hourly folders) and attach the
    angles matching dataOut.utctime to dataOut.
    '''

    def __init__(self):
        Operation.__init__(self)
        self.filename = False
        self.delay = 30      # seconds to wait between retries for a file
        self.nTries = 3      # attempts before giving up

    def find_file(self, timestamp):
        '''
        Return the sorted list of position files (*.h5) inside the hourly
        folder containing `timestamp`, or an empty list if the folder does
        not exist.
        '''
        dt = datetime.datetime.utcfromtimestamp(timestamp)
        path = os.path.join(self.path, dt.strftime('%Y-%m-%dT%H-00-00'))

        if not os.path.exists(path):
            # BUGFIX: previously returned the tuple (False, False), which is
            # truthy, so the caller's `if not filelist` check never fired
            return []
        fileList = glob.glob(os.path.join(path, '*.h5'))
        fileList.sort()
        return fileList

    def find_next_file(self):
        '''
        Advance self.filename/self.fp to the position file that covers
        self.utctime, waiting and retrying when the file is not on disk yet.

        Raises
        ------
        IOError
            When no new position file appears after nTries attempts.
        '''
        while True:
            if self.utctime < self.utcfile:
                # Requested time is before the current file: nothing to read
                self.flagNoData = True
                break
            self.flagNoData = False
            file_size = len(self.fp['Data']['utc'])
            if self.utctime < self.utcfile + file_size * self.interval:
                break  # current file still covers the requested time
            # Move on to the next file (files are aligned to whole minutes)
            dt = datetime.datetime.utcfromtimestamp(self.utcfile)
            if dt.second > 0:
                self.utcfile -= dt.second
            self.utcfile += self.samples * self.interval
            dt = datetime.datetime.utcfromtimestamp(self.utctime)
            path = os.path.join(self.path, dt.strftime('%Y-%m-%dT%H-00-00'))
            self.filename = os.path.join(path, 'pos@{}.000.h5'.format(int(self.utcfile)))

            ok = False
            for n in range(self.nTries):
                try:
                    if not os.path.exists(self.filename):
                        log.warning('Waiting {}s for position files...'.format(self.delay), self.name)
                        # BUGFIX: `from time import time` shadows the module,
                        # so time.sleep raised AttributeError; use sleep()
                        sleep(self.delay)
                        continue
                    self.fp.close()
                    self.fp = h5py.File(self.filename, 'r')
                    log.log('Opening file: {}'.format(self.filename), self.name)
                    ok = True
                    break
                except:
                    log.warning('Waiting {}s for position file to be ready...'.format(self.delay), self.name)
                    sleep(self.delay)
                    continue

            if not ok:
                log.error('No new position files found in {}'.format(path))
                raise IOError('No new position files found in {}'.format(path))

    def get_values(self):
        '''Return (azimuth, elevation) for self.utctime, or NaNs when flagged.'''
        if self.flagNoData:
            return numpy.nan, numpy.nan
        index = int((self.utctime - self.utcfile) / self.interval)
        return self.fp['Data']['azi_pos'][index], self.fp['Data']['ele_pos'][index]

    def setup(self, dataOut, path, conf, samples, interval, az_offset):
        '''Locate and open the first position file for dataOut.utctime.'''
        self.path = path
        self.conf = conf
        self.samples = samples
        self.interval = interval
        filelist = self.find_file(dataOut.utctime)

        if not filelist:
            log.error('No position files found in {}'.format(path), self.name)
            raise IOError('No position files found in {}'.format(path))
        self.filename = filelist[0]
        # File names look like pos@<epoch>.000.h5; extract the epoch
        self.utcfile = int(self.filename.split('/')[-1][4:14])
        log.log('Opening file: {}'.format(self.filename), self.name)
        self.fp = h5py.File(self.filename, 'r')

    def run(self, dataOut, path, conf=None, samples=1500, interval=0.04, az_offset=0, time_offset=0):
        '''
        Attach azimuth/elevation (offset-corrected, wrapped to [0, 360))
        matching dataOut.utctime + time_offset; flag no-data when the
        position is unavailable.
        '''
        if not self.isConfig:
            self.setup(dataOut, path, conf, samples, interval, az_offset)
            self.isConfig = True

        self.utctime = dataOut.utctime + time_offset

        self.find_next_file()

        az, el = self.get_values()
        dataOut.flagNoData = False

        if numpy.isnan(az) or numpy.isnan(el):
            dataOut.flagNoData = True
            return dataOut

        dataOut.azimuth = az - az_offset
        if dataOut.azimuth < 0:
            dataOut.azimuth += 360
        dataOut.elevation = el

        return dataOut
4168 4168
class Block360(Operation):
    '''
    Accumulate `n` consecutive profiles, together with their pedestal
    azimuth and elevation angles, into one block for 360-degree (PPI)
    processing, publishing the block on dataOut once it is complete.
    '''
    # Accumulation state (class-level defaults, reset in setup())
    isConfig = False
    __profIndex = 0
    __initime = None
    __lastdatatime = None
    __buffer = None
    __dataReady = False
    n = None
    __nch = 0
    __nHeis = 0
    index = 0
    mode = 0

    def __init__(self, **kwargs):
        Operation.__init__(self, **kwargs)

    def setup(self, dataOut, n=None, mode=None):
        '''
        Initialise the buffers.

        n : number of input profiles (PRFs) per block; must be >= 1.
        mode : 0 -> buffer dataOut.dataPP_POWER, 1 -> buffer dataOut.data_pow.
        '''
        self.__initime = None
        self.__lastdatatime = 0
        self.__dataReady = False
        self.__buffer = 0
        self.__buffer_1D = 0
        self.__profIndex = 0
        self.index = 0
        self.__nch = dataOut.nChannels
        self.__nHeis = dataOut.nHeights

        # `is None` instead of `== None`; the redundant `if n != None`
        # nesting after the raise was removed
        if n is None:
            raise ValueError("n should be specified.")
        if mode is None:
            raise ValueError("mode should be specified.")
        if n < 1:
            print("n should be greater than 2")
            raise ValueError("n should be greater than 2")

        self.n = n
        self.mode = mode
        # Data cube (channels x profiles x heights) plus angle buffers
        self.__buffer = numpy.zeros((dataOut.nChannels, n, dataOut.nHeights))
        self.__buffer2 = numpy.zeros(n)
        self.__buffer3 = numpy.zeros(n)

    def putData(self, data, mode):
        '''
        Add one profile (data + pedestal angles) to the buffers and advance
        the profile index.
        '''
        if self.mode == 0:
            self.__buffer[:, self.__profIndex, :] = data.dataPP_POWER  # first moment
        if self.mode == 1:
            self.__buffer[:, self.__profIndex, :] = data.data_pow
        self.__buffer2[self.__profIndex] = data.azimuth
        self.__buffer3[self.__profIndex] = data.elevation
        self.__profIndex += 1
        return

    def pushData(self, data):
        '''
        Return the accumulated block (data cube, profile count, azimuths,
        elevations) and reset the buffers.
        Affected : self.__profIndex
        '''
        data_360 = self.__buffer
        data_p = self.__buffer2
        data_e = self.__buffer3
        n = self.__profIndex

        self.__buffer = numpy.zeros((self.__nch, self.n, self.__nHeis))
        self.__buffer2 = numpy.zeros(self.n)
        self.__buffer3 = numpy.zeros(self.n)
        self.__profIndex = 0
        return data_360, n, data_p, data_e

    def byProfiles(self, dataOut):
        '''Accumulate one profile; return the block once n profiles are in.'''
        self.__dataReady = False
        data_360 = None
        data_p = None
        data_e = None
        self.putData(data=dataOut, mode=self.mode)
        if self.__profIndex == self.n:
            data_360, n, data_p, data_e = self.pushData(data=dataOut)
            self.__dataReady = True

        return data_360, data_p, data_e

    def blockOp(self, dataOut, datatime=None):
        '''Track block timing; returns Nones until a block is complete.'''
        if self.__initime is None:
            self.__initime = datatime
        data_360, data_p, data_e = self.byProfiles(dataOut)
        self.__lastdatatime = datatime

        if data_360 is None:
            return None, None, None, None

        # Timestamp the block with its start time (or current time if n == 1)
        avgdatatime = self.__initime
        if self.n == 1:
            avgdatatime = datatime
        self.__initime = datatime
        return data_360, avgdatatime, data_p, data_e

    def run(self, dataOut, n=None, mode=None, **kwargs):
        '''
        Accumulate profiles; once a block of n profiles is ready, publish
        the data cube and pedestal angles on dataOut; otherwise flag
        dataOut as no-data.
        '''
        print("Block 360")
        if not self.isConfig:
            self.setup(dataOut=dataOut, n=n, mode=mode, **kwargs)
            self.isConfig = True
        data_360, avgdatatime, data_p, data_e = self.blockOp(dataOut, dataOut.utctime)
        dataOut.flagNoData = True

        if self.__dataReady:
            dataOut.data_360 = data_360
            print("---------------------------DATAREADY---------------------------------------------")
            dataOut.data_azi = data_p
            dataOut.data_ele = data_e
            dataOut.utctime = avgdatatime
            dataOut.flagNoData = False
        return dataOut
4332 4332
class Block360_vRF(Operation):
    '''
    Variant of Block360: accumulates `n` profiles with their pedestal
    angles, but only delivers the block when the elevation sweep is
    descending ("bajada"); ascending sweeps leave dataOut flagged no-data.
    '''
    # Accumulation state (class-level defaults, reset in setup())
    isConfig = False
    __profIndex = 0
    __initime = None
    __lastdatatime = None
    __buffer = None
    __dataReady = False
    n = None
    __nch = 0
    __nHeis = 0
    index = 0
    mode = 0

    def __init__(self,**kwargs):
        Operation.__init__(self,**kwargs)

    def setup(self, dataOut, n = None, mode = None):
        '''
        Initialise the buffers.

        n : number of input profiles (PRFs) per block; must be >= 1.
        mode : 0 -> buffer dataOut.dataPP_POWER, 1 -> buffer dataOut.data_pow.
        '''
        self.__initime = None
        self.__lastdatatime = 0
        self.__dataReady = False
        self.__buffer = 0
        self.__buffer_1D = 0
        self.__profIndex = 0
        self.index = 0
        self.__nch = dataOut.nChannels
        self.__nHeis = dataOut.nHeights
        if n == None:
            raise ValueError("n should be specified.")

        if mode == None:
            raise ValueError("mode should be specified.")

        if n != None:
            if n<1:
                print("n should be greater than 2")
                raise ValueError("n should be greater than 2")

        self.n = n
        self.mode = mode
        # Data cube (channels x profiles x heights) plus angle buffers
        self.__buffer = numpy.zeros(( dataOut.nChannels,n, dataOut.nHeights))
        self.__buffer2 = numpy.zeros(n)
        self.__buffer3 = numpy.zeros(n)

    def putData(self,data,mode):
        '''
        Add one profile (data + pedestal angles) to the buffers and advance
        the profile index.
        '''
        if self.mode==0:
            self.__buffer[:,self.__profIndex,:]= data.dataPP_POWER# first moment
        if self.mode==1:
            self.__buffer[:,self.__profIndex,:]= data.data_pow

        self.__buffer2[self.__profIndex] = data.azimuth
        self.__buffer3[self.__profIndex] = data.elevation
        self.__profIndex += 1
        return

    def pushData(self,data):
        '''
        Return the accumulated block (data cube, profile count, azimuths,
        elevations) and reset the buffers.
        Affected : self.__profIndex
        '''
        data_360 = self.__buffer
        data_p = self.__buffer2
        data_e = self.__buffer3
        n = self.__profIndex

        self.__buffer = numpy.zeros((self.__nch, self.n,self.__nHeis))
        self.__buffer2 = numpy.zeros(self.n)
        self.__buffer3 = numpy.zeros(self.n)
        self.__profIndex = 0
        return data_360,n,data_p,data_e


    def byProfiles(self,dataOut):
        '''Accumulate one profile; return the block once n profiles are in.'''
        self.__dataReady = False
        data_360 = None
        data_p = None
        data_e = None
        self.putData(data=dataOut,mode = self.mode)
        if self.__profIndex == self.n:
            data_360,n,data_p,data_e = self.pushData(data=dataOut)
            self.__dataReady = True

        return data_360,data_p,data_e


    def blockOp(self, dataOut, datatime= None):
        # Track block timing; returns (None, None, None, None) until a
        # complete block of n profiles is available.
        if self.__initime == None:
            self.__initime = datatime
        data_360,data_p,data_e = self.byProfiles(dataOut)
        self.__lastdatatime = datatime

        if data_360 is None:
            return None, None,None,None


        # Timestamp the block with its start time (or current time if n == 1)
        avgdatatime = self.__initime
        if self.n==1:
            avgdatatime = datatime
        deltatime = datatime - self.__lastdatatime
        self.__initime = datatime
        return data_360,avgdatatime,data_p,data_e

    def checkcase(self,data_ele):
        '''
        Classify the elevation sweep of a completed block:
          1 -> descending with no negative angles ("bajada", deliverable)
          0 -> descending with negative angles, or ascending ("subida")
        NOTE(review): a flat sweep (diff == 0) is unhandled and terminates
        the process via exit(1).
        '''
        start = data_ele[0]
        end = data_ele[-1]
        diff_angle = (end-start)
        len_ang=len(data_ele)
        print("start",start)
        print("end",end)
        print("number",diff_angle)

        print("len_ang",len_ang)

        aux = (data_ele<0).any(axis=0)

        if diff_angle<0 and aux!=1: #Descending
            return 1
        elif diff_angle<0 and aux==1: #Descending with negative angles
            return 0
        elif diff_angle == 0: # This case happens when the angle reaches the max_angle if n = 2
            self.flagEraseFirstData = 1
            print("ToDO this case")
            exit(1)
        elif diff_angle>0: #Ascending
            return 0

    def run(self, dataOut,n = None,mode=None,**kwargs):
        '''
        Accumulate profiles (n is forced to a minimum of 2); when a block
        is ready, publish it on dataOut only if the sweep was descending,
        otherwise keep dataOut flagged as no-data.
        '''
        print("Block 360")

        if not self.isConfig:
            if n == 1:
                print("*******************Min Value is 2. Setting n = 2*******************")
                n = 2
            print(n)
            self.setup(dataOut = dataOut, n = n ,mode= mode ,**kwargs)
            self.isConfig = True
        data_360, avgdatatime,data_p,data_e = self.blockOp(dataOut, dataOut.utctime)
        dataOut.flagNoData = True

        if self.__dataReady:
            dataOut.data_360 = data_360 # S
            print("---------------------------DATAREADY---------------------------------------------")
            dataOut.data_azi = data_p
            dataOut.data_ele = data_e
            dataOut.utctime = avgdatatime

            dataOut.case_flag = self.checkcase(dataOut.data_ele)
            if dataOut.case_flag: #Deliver only on descending sweeps
                print("INSIDE CASE FLAG BAJADA")
                dataOut.flagNoData = False
            else:
                print("CASE SUBIDA")
                dataOut.flagNoData = True

        return dataOut
4536 4536
class Block360_vRF2(Operation):
    '''
    Variable-length block accumulator for PPI scans: buffers power and
    Doppler (pulse-pair) profiles plus pedestal angles in Python lists and
    emits the block collected during a descent each time the elevation
    turns from descending to ascending.
    '''
    # Accumulation state (class-level defaults, reset in setup())
    isConfig = False
    __profIndex = 0
    __initime = None
    __lastdatatime = None
    __buffer = None
    __dataReady = False
    n = None
    __nch = 0
    __nHeis = 0
    index = 0
    mode = None

    def __init__(self,**kwargs):
        Operation.__init__(self,**kwargs)

    def setup(self, dataOut, n = None, mode = None):
        '''
        Initialise the list buffers.
        mode : 0 -> buffer dataOut.dataPP_POWER, 1 -> buffer dataOut.data_pow.
        '''
        self.__initime = None
        self.__lastdatatime = 0
        self.__dataReady = False
        self.__buffer = 0
        self.__buffer_1D = 0
        self.index = 0
        self.__nch = dataOut.nChannels
        self.__nHeis = dataOut.nHeights

        self.mode = mode
        # Power, azimuth, elevation and Doppler accumulators
        self.__buffer = []
        self.__buffer2 = []
        self.__buffer3 = []
        self.__buffer4 = []

    def putData(self,data,mode):
        '''
        Append one profile (power/Doppler + pedestal angles) and return the
        elevation history so the caller can detect the scan direction.
        '''

        if self.mode==0:
            self.__buffer.append(data.dataPP_POWER)# first moment
        if self.mode==1:
            self.__buffer.append(data.data_pow)

        self.__buffer4.append(data.dataPP_DOP)

        self.__buffer2.append(data.azimuth)
        self.__buffer3.append(data.elevation)
        self.__profIndex += 1

        return numpy.array(self.__buffer3)

    def pushData(self,data):
        '''
        Return the accumulated block (power cube, velocity cube, profile
        count, azimuths, elevations) and clear the buffers.
        Affected : self.__profileIndex
        '''

        data_360_Power = numpy.array(self.__buffer).transpose(1,0,2)
        data_360_Velocity = numpy.array(self.__buffer4).transpose(1,0,2)
        data_p = numpy.array(self.__buffer2)
        data_e = numpy.array(self.__buffer3)
        n = self.__profIndex

        self.__buffer = []
        self.__buffer4 = []
        self.__buffer2 = []
        self.__buffer3 = []
        self.__profIndex = 0
        return data_360_Power,data_360_Velocity,n,data_p,data_e


    def byProfiles(self,dataOut):
        '''
        Accumulate one profile; when the elevation switches to ascending,
        emit the block collected during the previous descent.
        '''
        self.__dataReady = False
        data_360_Power = []
        data_360_Velocity = []
        data_p = None
        data_e = None

        elevations = self.putData(data=dataOut,mode = self.mode)

        if self.__profIndex > 1:
            case_flag = self.checkcase(elevations)

            if case_flag == 0: #Ascending

                if len(self.__buffer) == 2: #Still ascending
                    # Drop the older sample to keep comparing consecutive pairs
                    self.__buffer.pop(0) #Erase first data
                    self.__buffer2.pop(0)
                    self.__buffer3.pop(0)
                    self.__buffer4.pop(0)
                    self.__profIndex -= 1
                else: #Was descending and has started to ascend again
                    # Remove the new (ascending) sample and emit the block
                    self.__buffer.pop() #Erase last data
                    self.__buffer2.pop()
                    self.__buffer3.pop()
                    self.__buffer4.pop()
                    data_360_Power,data_360_Velocity,n,data_p,data_e = self.pushData(data=dataOut)

                    self.__dataReady = True

        return data_360_Power,data_360_Velocity,data_p,data_e


    def blockOp(self, dataOut, datatime= None):
        # Track block timing; the data outputs are empty lists until a
        # block is ready.
        if self.__initime == None:
            self.__initime = datatime
        data_360_Power,data_360_Velocity,data_p,data_e = self.byProfiles(dataOut)
        self.__lastdatatime = datatime

        avgdatatime = self.__initime
        if self.n==1:
            avgdatatime = datatime
        deltatime = datatime - self.__lastdatatime
        self.__initime = datatime
        return data_360_Power,data_360_Velocity,avgdatatime,data_p,data_e

    def checkcase(self,data_ele):
        '''Return 0 if the last two elevations are increasing (ascending), else None.'''
        start = data_ele[-2]
        end = data_ele[-1]
        diff_angle = (end-start)
        len_ang=len(data_ele)

        if diff_angle > 0: #Ascending
            return 0

    def run(self, dataOut,mode='Power',**kwargs):
        '''
        Accumulate profiles; when a full descent block is ready, publish
        the power/velocity cubes and pedestal angles on dataOut, otherwise
        flag dataOut as no-data.
        '''
        dataOut.mode = mode

        if not self.isConfig:
            self.setup(dataOut = dataOut ,mode= mode ,**kwargs)
            self.isConfig = True


        data_360_Power, data_360_Velocity, avgdatatime,data_p,data_e = self.blockOp(dataOut, dataOut.utctime)


        dataOut.flagNoData = True


        if self.__dataReady:
            dataOut.data_360_Power = data_360_Power # S
            dataOut.data_360_Velocity = data_360_Velocity
            dataOut.data_azi = data_p
            dataOut.data_ele = data_e
            dataOut.utctime = avgdatatime
            dataOut.flagNoData = False

        return dataOut
4698 4698
class Block360_vRF3(Operation):
    '''
    Accumulate profiles of a configurable dataOut attribute (default
    dataPP_POWER) together with pedestal angles, emitting the block
    collected during a descent whenever the elevation turns upward again.
    '''
    # Accumulation state (class-level defaults, reset in setup())
    isConfig = False
    __profIndex = 0
    __initime = None
    __lastdatatime = None
    __buffer = None
    __dataReady = False
    n = None
    __nch = 0
    __nHeis = 0
    index = 0
    mode = None

    def __init__(self, **kwargs):
        Operation.__init__(self, **kwargs)

    def setup(self, dataOut, attr):
        '''Reset state and remember which dataOut attribute to collect.'''
        self.__initime = None
        self.__lastdatatime = 0
        self.__dataReady = False
        self.__buffer = 0
        self.__buffer_1D = 0
        self.index = 0
        self.__nch = dataOut.nChannels
        self.__nHeis = dataOut.nHeights
        self.attr = attr
        # Data, azimuth and elevation accumulators
        self.__buffer = []
        self.__buffer2 = []
        self.__buffer3 = []

    def putData(self, data, attr):
        '''
        Append one profile (attribute value + pedestal angles) and return
        the elevation history so the caller can detect scan direction.
        '''
        samples = (getattr(data, attr), data.azimuth, data.elevation)
        for buf, value in zip((self.__buffer, self.__buffer2, self.__buffer3), samples):
            buf.append(value)
        self.__profIndex += 1

        return numpy.array(self.__buffer3)

    def pushData(self, data):
        '''
        Return the accumulated block (data cube, profile count, azimuths,
        elevations) and clear the buffers.
        Affected : self.__profIndex
        '''
        block = numpy.array(self.__buffer).transpose(1, 0, 2)
        azimuths = numpy.array(self.__buffer2)
        elevations = numpy.array(self.__buffer3)
        count = self.__profIndex

        self.__buffer = []
        self.__buffer2 = []
        self.__buffer3 = []
        self.__profIndex = 0
        return block, count, azimuths, elevations

    def byProfiles(self, dataOut):
        '''
        Accumulate one profile; when the elevation switches to ascending,
        emit the block collected during the previous descent.
        '''
        self.__dataReady = False
        data_360, data_p, data_e = [], None, None

        elevations = self.putData(data=dataOut, attr=self.attr)

        if self.__profIndex > 1 and self.checkcase(elevations) == 0:
            # Antenna is ascending
            if len(self.__buffer) == 2:
                # Still ascending: drop the older sample, keep comparing pairs
                for buf in (self.__buffer, self.__buffer2, self.__buffer3):
                    buf.pop(0)
                self.__profIndex -= 1
            else:
                # A descent just finished: drop the new ascending sample
                # and emit everything gathered on the way down
                for buf in (self.__buffer, self.__buffer2, self.__buffer3):
                    buf.pop()
                data_360, n, data_p, data_e = self.pushData(data=dataOut)
                self.__dataReady = True

        return data_360, data_p, data_e

    def blockOp(self, dataOut, datatime=None):
        '''Track block timing around the per-profile accumulation.'''
        if self.__initime is None:
            self.__initime = datatime
        data_360, data_p, data_e = self.byProfiles(dataOut)
        self.__lastdatatime = datatime

        avgdatatime = datatime if self.n == 1 else self.__initime
        self.__initime = datatime
        return data_360, avgdatatime, data_p, data_e

    def checkcase(self, data_ele):
        '''Return 0 when the last two elevations are increasing (ascending), else None.'''
        if data_ele[-1] - data_ele[-2] > 0:
            return 0

    def run(self, dataOut, attr_data='dataPP_POWER', **kwargs):
        '''
        Accumulate profiles of `attr_data`; when a full descent block is
        ready, publish the cube and pedestal angles on dataOut, otherwise
        flag dataOut as no-data.
        '''
        dataOut.attr_data = attr_data

        if not self.isConfig:
            self.setup(dataOut=dataOut, attr=attr_data, **kwargs)
            self.isConfig = True

        data_360, avgdatatime, data_p, data_e = self.blockOp(dataOut, dataOut.utctime)

        dataOut.flagNoData = True
        if self.__dataReady:
            setattr(dataOut, attr_data, data_360)
            dataOut.data_azi = data_p
            dataOut.data_ele = data_e
            dataOut.utctime = avgdatatime
            dataOut.flagNoData = False

        return dataOut
4842 4842
4843 4843 class Block360_vRF4(Operation):
4844 4844 '''
4845 4845 '''
4846 4846 isConfig = False
4847 4847 __profIndex = 0
4848 4848 __initime = None
4849 4849 __lastdatatime = None
4850 4850 __buffer = None
4851 4851 __dataReady = False
4852 4852 n = None
4853 4853 __nch = 0
4854 4854 __nHeis = 0
4855 4855 index = 0
4856 4856 mode = None
4857 4857
    def __init__(self,**kwargs):
        # Delegate construction to the base Operation class
        Operation.__init__(self,**kwargs)
4860 4860
4861 4861 def setup(self, dataOut, attr):
4862 4862 '''
4863 4863 n= Numero de PRF's de entrada
4864 4864 '''
4865 4865 self.__initime = None
4866 4866 self.__lastdatatime = 0
4867 4867 self.__dataReady = False
4868 4868 self.__buffer = 0
4869 4869 self.__buffer_1D = 0
4870 4870 self.index = 0
4871 4871 self.__nch = dataOut.nChannels
4872 4872 self.__nHeis = dataOut.nHeights
4873 4873
4874 4874 self.attr = attr
4875 4875
4876 4876 self.__buffer = []
4877 4877 self.__buffer2 = []
4878 4878 self.__buffer3 = []
4879 4879
4880 4880 def putData(self, data, attr, flagMode):
4881 4881 '''
4882 4882 Add a profile to he __buffer and increase in one the __profiel Index
4883 4883 '''
4884 4884 tmp= getattr(data, attr)
4885 if tmp.shape[0] is not 2:
4885 if tmp.shape[0] != 2:
4886 4886 size_tmp= tmp.shape[0]
4887 4887 tmp=tmp.reshape(1,size_tmp)
4888 4888
4889 4889 self.__buffer.append(tmp)
4890 4890 self.__buffer2.append(data.azimuth)
4891 4891 self.__buffer3.append(data.elevation)
4892 4892 self.__profIndex += 1
4893 4893
4894 4894 if flagMode == 1: #'AZI'
4895 4895 return numpy.array(self.__buffer2)
4896 4896 elif flagMode == 0: #'ELE'
4897 4897 return numpy.array(self.__buffer3)
4898 4898
    def pushData(self, data,flagMode,case_flag):
        '''
        Return the accumulated block (data cube, profile count, azimuths,
        elevations) and clear the buffers.

        For an azimuth scan that has just completed a turn (flagMode == 1,
        case_flag == 0) the current profile is re-inserted so it becomes
        the first sample of the next block.
        Affected : self.__profileIndex
        '''

        data_360 = numpy.array(self.__buffer).transpose(1, 0, 2)
        data_p = numpy.array(self.__buffer2)
        data_e = numpy.array(self.__buffer3)
        n = self.__profIndex

        self.__buffer = []
        self.__buffer2 = []
        self.__buffer3 = []
        self.__profIndex = 0

        if flagMode == 1 and case_flag == 0: #'AZI' scan that completed a turn
            self.putData(data=data, attr = self.attr, flagMode=flagMode)

        return data_360, n, data_p, data_e
4919 4919
4920 4920
4921 4921 def byProfiles(self,dataOut,flagMode):
4922 4922
4923 4923 self.__dataReady = False
4924 4924 data_360 = []
4925 4925 data_p = None
4926 4926 data_e = None
4927 4927
4928 4928 angles = self.putData(data=dataOut, attr = self.attr, flagMode=flagMode)
4929 4929 #print(angles)
4930 4930 if self.__profIndex > 1:
4931 4931 case_flag = self.checkcase(angles,flagMode)
4932 4932
4933 4933 if flagMode == 1: #'AZI':
4934 4934 if case_flag == 0: #Ya girΓ³
4935 4935 self.__buffer.pop() #Erase last data
4936 4936 self.__buffer2.pop()
4937 4937 self.__buffer3.pop()
4938 4938 data_360,n,data_p,data_e = self.pushData(data=dataOut,flagMode=flagMode,case_flag=case_flag)
4939 4939
4940 4940 self.__dataReady = True
4941 4941
4942 4942 elif flagMode == 0: #'ELE'
4943 4943
4944 4944 if case_flag == 0: #Subida
4945 4945
4946 4946 if len(self.__buffer) == 2: #Cuando estΓ‘ de subida
4947 4947 #Se borra el dato anterior para liberar buffer y comparar el dato actual con el siguiente
4948 4948 self.__buffer.pop(0) #Erase first data
4949 4949 self.__buffer2.pop(0)
4950 4950 self.__buffer3.pop(0)
4951 4951 self.__profIndex -= 1
4952 4952 else: #Cuando ha estado de bajada y ha vuelto a subir
4953 4953 #Se borra el ΓΊltimo dato
4954 4954 self.__buffer.pop() #Erase last data
4955 4955 self.__buffer2.pop()
4956 4956 self.__buffer3.pop()
4957 4957 data_360, n, data_p, data_e = self.pushData(data=dataOut,flagMode=flagMode,case_flag=case_flag)
4958 4958
4959 4959 self.__dataReady = True
4960 4960
4961 4961 return data_360, data_p, data_e
4962 4962
4963 4963
4964 4964 def blockOp(self, dataOut, flagMode, datatime= None):
4965 4965 if self.__initime == None:
4966 4966 self.__initime = datatime
4967 4967 data_360, data_p, data_e = self.byProfiles(dataOut,flagMode)
4968 4968 self.__lastdatatime = datatime
4969 4969
4970 4970 avgdatatime = self.__initime
4971 4971 if self.n==1:
4972 4972 avgdatatime = datatime
4973 4973 deltatime = datatime - self.__lastdatatime
4974 4974 self.__initime = datatime
4975 4975 return data_360, avgdatatime, data_p, data_e
4976 4976
4977 4977 def checkcase(self, angles, flagMode):
4978 4978
4979 4979 if flagMode == 1: #'AZI'
4980 4980 start = angles[-2]
4981 4981 end = angles[-1]
4982 4982 diff_angle = (end-start)
4983 4983
4984 4984 if diff_angle < 0: #Ya girΓ³
4985 4985 return 0
4986 4986
4987 4987 elif flagMode == 0: #'ELE'
4988 4988
4989 4989 start = angles[-2]
4990 4990 end = angles[-1]
4991 4991 diff_angle = (end-start)
4992 4992
4993 4993 if diff_angle > 0: #Subida
4994 4994 return 0
4995 4995
4996 def run(self, dataOut, attr_data='dataPP_POWER', axis=None,**kwargs):
4996 def run(self, dataOut, attr_data='dataPP_POWER', axis=None, runNextOp = False,**kwargs):
4997 4997
4998 4998 dataOut.attr_data = attr_data
4999 dataOut.runNextOp = runNextOp
4999 5000
5000 5001 dataOut.flagMode = axis[0] #Provisional, deberΓ­a venir del header
5001 5002
5002 5003 if not self.isConfig:
5003 5004 self.setup(dataOut = dataOut, attr = attr_data ,**kwargs)
5004 5005 self.isConfig = True
5005 5006
5006 5007 data_360, avgdatatime, data_p, data_e = self.blockOp(dataOut, dataOut.flagMode, dataOut.utctime)
5007 5008
5008 5009 dataOut.flagNoData = True
5009 5010
5010 5011 if self.__dataReady:
5011 5012 setattr(dataOut, attr_data, data_360 )
5012 5013 dataOut.data_azi = data_p
5013 5014 dataOut.data_ele = data_e
5014 5015 dataOut.utctime = avgdatatime
5015 5016 dataOut.flagNoData = False
5016 print("********************attr_data********************",attr_data)
5017 #print("********************attr_data********************",attr_data)
5017 5018 #print(data_360.shape)
5018 5019 #print(dataOut.heightList)
5019 5020
5020 5021 return dataOut
5021 5022
5022 5023 class MergeProc(ProcessingUnit):
5023 5024
5024 5025 def __init__(self):
5025 5026 ProcessingUnit.__init__(self)
5026 5027
5027 5028 def run(self, attr_data, mode=0):
5028 5029
5029 5030 #exit(1)
5030 5031 self.dataOut = getattr(self, self.inputs[0])
5031 5032 data_inputs = [getattr(self, attr) for attr in self.inputs]
5032 5033 #print(data_inputs)
5033 5034 #print(numpy.shape([getattr(data, attr_data) for data in data_inputs][1]))
5034 5035 #exit(1)
5035 5036 if mode==0:
5036 5037 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
5037 5038 setattr(self.dataOut, attr_data, data)
5038 5039
5039 5040 if mode==1: #Hybrid
5040 5041 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
5041 5042 #setattr(self.dataOut, attr_data, data)
5042 5043 setattr(self.dataOut, 'dataLag_spc', [getattr(data, attr_data) for data in data_inputs][0])
5043 5044 setattr(self.dataOut, 'dataLag_spc_LP', [getattr(data, attr_data) for data in data_inputs][1])
5044 5045 setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
5045 5046 setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
5046 5047 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
5047 5048 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
5048 5049 '''
5049 5050 print(self.dataOut.dataLag_spc_LP.shape)
5050 5051 print(self.dataOut.dataLag_cspc_LP.shape)
5051 5052 exit(1)
5052 5053 '''
5053 5054
5054 5055 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
5055 5056 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
5056 5057 '''
5057 5058 print("Merge")
5058 5059 print(numpy.shape(self.dataOut.dataLag_spc))
5059 5060 print(numpy.shape(self.dataOut.dataLag_spc_LP))
5060 5061 print(numpy.shape(self.dataOut.dataLag_cspc))
5061 5062 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
5062 5063 exit(1)
5063 5064 '''
5064 5065 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
5065 5066 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
5066 5067 #exit(1)
5067 5068 #print(self.dataOut.NDP)
5068 5069 #print(self.dataOut.nNoiseProfiles)
5069 5070
5070 5071 #self.dataOut.nIncohInt_LP = 128
5071 5072 self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
5072 5073 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt
5073 5074 self.dataOut.NLAG = 16
5074 5075 self.dataOut.NRANGE = 200
5075 5076 self.dataOut.NSCAN = 128
5076 5077 #print(numpy.shape(self.dataOut.data_spc))
5077 5078
5078 5079 #exit(1)
5079 5080
5080 5081 if mode==2: #HAE 2022
5081 5082 data = numpy.sum([getattr(data, attr_data) for data in data_inputs],axis=0)
5082 5083 setattr(self.dataOut, attr_data, data)
5083 5084
5084 5085 self.dataOut.nIncohInt *= 2
5085 5086 #meta = self.dataOut.getFreqRange(1)/1000.
5086 5087 self.dataOut.freqRange = self.dataOut.getFreqRange(1)/1000.
5087 5088
5088 5089 #exit(1)
5089 5090
5090 5091 if mode==7: #RM
5091 5092
5092 5093 f = [getattr(data, attr_data) for data in data_inputs][0]
5093 5094 g = [getattr(data, attr_data) for data in data_inputs][1]
5094 5095
5095 5096 data = numpy.concatenate((f,g),axis=2)
5096 5097 #print(data)
5097 5098 setattr(self.dataOut, attr_data, data)
5098 5099 #print(self.dataOut.dataPP_POWER.shape)
5099 5100 #CONSTRUIR NUEVA ALTURAS
5100 5101 #print("hei_merge",self.dataOut.heightList)
5101 5102 dh = self.dataOut.heightList[1]-self.dataOut.heightList[0]
5102 5103 heightList_2 = (self.dataOut.heightList[-1]+dh) + numpy.arange(g.shape[-1], dtype=numpy.float) * dh
5103 5104
5104 5105 self.dataOut.heightList = numpy.concatenate((self.dataOut.heightList,heightList_2))
5105 5106 #print("hei_merge_total",self.dataOut.heightList)
5106 5107 #exit(1)
@@ -1,1860 +1,1861
1 1 import sys
2 2 import numpy,math
3 3 from scipy import interpolate
4 4 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
5 5 from schainpy.model.data.jrodata import Voltage,hildebrand_sekhon
6 6 from schainpy.utils import log
7 7 from time import time
8 8
9 9
10 10
class VoltageProc(ProcessingUnit):
    '''Processing unit that forwards Voltage data (or adapts AMISR input) to dataOut.'''

    def __init__(self):

        ProcessingUnit.__init__(self)

        self.dataOut = Voltage()
        self.flip = 1
        self.setupReq = False

    def run(self):
        # AMISR input needs a field-by-field copy; Voltage input is cloned.
        if self.dataIn.type == 'AMISR':
            self.__updateObjFromAmisrInput()

        if self.dataIn.type == 'Voltage':
            self.dataOut.copy(self.dataIn)

    def __updateObjFromAmisrInput(self):
        '''Copy the relevant AMISR input fields onto the output Voltage object.'''
        src = self.dataIn
        dst = self.dataOut

        # Plain scalar/array fields copied one-to-one, in the original order.
        for name in ('timeZone', 'dstFlag', 'errorCount', 'useLocalTime',
                     'flagNoData', 'data', 'utctime', 'channelList',
                     'heightList', 'nProfiles',
                     'nCohInt', 'ippSeconds', 'frequency',
                     'azimuth', 'zenith'):
            setattr(dst, name, getattr(src, name))
        #dst.timeInterval = src.timeInterval

        # Beam sub-object fields.
        dst.beam.codeList = src.beam.codeList
        dst.beam.azimuthList = src.beam.azimuthList
        dst.beam.zenithList = src.beam.zenithList
54 54
55 55
class selectChannels(Operation):
    '''
    Keep only the channels listed in ``channelList``, dropping the rest from
    the data arrays and renumbering ``dataOut.channelList`` from zero.
    '''

    def run(self, dataOut, channelList):
        '''
        dataOut : Voltage or Spectra object, trimmed in place.
        channelList : channel ids to keep; each must exist in dataOut.channelList.
        Raises ValueError for an unknown channel.
        '''
        channelIndexList = []
        self.dataOut = dataOut
        for channel in channelList:
            if channel not in self.dataOut.channelList:
                raise ValueError("Channel %d is not in %s" %(channel, str(self.dataOut.channelList)))

            index = self.dataOut.channelList.index(channel)
            channelIndexList.append(index)
        self.selectChannelsByIndex(channelIndexList)
        return self.dataOut

    def selectChannelsByIndex(self, channelIndexList):
        """
        Select a block of data by channel index, according to channelIndexList.

        Input:
            channelIndexList : plain list of channel indexes to keep, e.g. [2,3,7]

        Affected:
            self.dataOut.data
            self.dataOut.channelIndexList
            self.dataOut.nChannels
            self.dataOut.m_ProcessingHeader.totalSpectra
            self.dataOut.systemHeaderObj.numChannels
            self.dataOut.m_ProcessingHeader.blockSize

        Return:
            None
        """

        for channelIndex in channelIndexList:
            if channelIndex not in self.dataOut.channelIndexList:
                raise ValueError("The value %d in channelIndexList is not valid" %channelIndex)

        if self.dataOut.type == 'Voltage':
            if self.dataOut.flagDataAsBlock:
                """
                Block data, dimension = [nChannels, nProfiles, nHeis]
                """
                data = self.dataOut.data[channelIndexList,:,:]
            else:
                data = self.dataOut.data[channelIndexList,:]

            self.dataOut.data = data
            # Channels are renumbered from zero after the selection.
            self.dataOut.channelList = range(len(channelIndexList))

        elif self.dataOut.type == 'Spectra':
            data_spc = self.dataOut.data_spc[channelIndexList, :]
            data_dc = self.dataOut.data_dc[channelIndexList, :]

            self.dataOut.data_spc = data_spc
            self.dataOut.data_dc = data_dc

            self.dataOut.channelList = range(len(channelIndexList))
            self.__selectPairsByChannel(channelIndexList)

        return 1

    def __selectPairsByChannel(self, channelList=None):
        # Keep only the cross-spectra pairs whose two channels both survived
        # the channel selection.
        # `is None` instead of `== None` (identity test, PEP 8).
        if channelList is None:
            return

        pairsIndexListSelected = []
        for pairIndex in self.dataOut.pairsIndexList:
            # First channel of the pair
            if self.dataOut.pairsList[pairIndex][0] not in channelList:
                continue
            # Second channel of the pair
            if self.dataOut.pairsList[pairIndex][1] not in channelList:
                continue

            pairsIndexListSelected.append(pairIndex)

        if not pairsIndexListSelected:
            self.dataOut.data_cspc = None
            self.dataOut.pairsList = []
            return

        self.dataOut.data_cspc = self.dataOut.data_cspc[pairsIndexListSelected]
        self.dataOut.pairsList = [self.dataOut.pairsList[i]
                                  for i in pairsIndexListSelected]

        return
146 146
class selectHeights(Operation):

    def run(self, dataOut, minHei=None, maxHei=None, minIndex=None, maxIndex=None):
        """
        Select a block of data based on a height range:
        minHei <= height <= maxHei.

        Input:
            minHei : minimum height to keep; 0 is a valid value
            maxHei : maximum height to keep
            minIndex, maxIndex : used directly when the height limits are not given

        Affected:
            Several attributes are changed indirectly through selectHeightsByIndex.

        Return:
            self.dataOut
        """

        self.dataOut = dataOut

        # `is not None` rather than truthiness: a minHei (or maxHei) of 0 is
        # a legitimate limit and previously fell through to the index branch
        # with minIndex/maxIndex possibly still None.
        if minHei is not None and maxHei is not None:

            # Clamp the requested range to the available heights.
            if (minHei < self.dataOut.heightList[0]):
                minHei = self.dataOut.heightList[0]

            if (maxHei > self.dataOut.heightList[-1]):
                maxHei = self.dataOut.heightList[-1]

            minIndex = 0
            maxIndex = 0
            heights = self.dataOut.heightList

            inda = numpy.where(heights >= minHei)
            indb = numpy.where(heights <= maxHei)

            try:
                minIndex = inda[0][0]
            except:
                minIndex = 0

            try:
                maxIndex = indb[0][-1]
            except:
                maxIndex = len(heights)

        self.selectHeightsByIndex(minIndex, maxIndex)

        return self.dataOut

    def selectHeightsByIndex(self, minIndex, maxIndex):
        """
        Select a block of data based on a height-index range:
        minIndex <= index <= maxIndex.

        Input:
            minIndex : minimum height index to keep
            maxIndex : maximum height index to keep

        Affected:
            self.dataOut.data
            self.dataOut.heightList

        Return:
            1 on success; raises ValueError on an invalid range
        """

        if self.dataOut.type == 'Voltage':
            if (minIndex < 0) or (minIndex > maxIndex):
                raise ValueError("Height index range (%d,%d) is not valid" % (minIndex, maxIndex))

            if (maxIndex >= self.dataOut.nHeights):
                maxIndex = self.dataOut.nHeights

            # Voltage: slice is exclusive of maxIndex
            if self.dataOut.flagDataAsBlock:
                """
                Block data, dimension = [nChannels, nProfiles, nHeis]
                """
                data = self.dataOut.data[:,:, minIndex:maxIndex]
            else:
                data = self.dataOut.data[:, minIndex:maxIndex]

            self.dataOut.data = data
            self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex]

            if self.dataOut.nHeights <= 1:
                raise ValueError("selectHeights: Too few heights. Current number of heights is %d" %(self.dataOut.nHeights))
        elif self.dataOut.type == 'Spectra':
            if (minIndex < 0) or (minIndex > maxIndex):
                raise ValueError("Error selecting heights: Index range (%d,%d) is not valid" % (
                    minIndex, maxIndex))

            if (maxIndex >= self.dataOut.nHeights):
                maxIndex = self.dataOut.nHeights - 1

            # Spectra: slice is inclusive of maxIndex
            data_spc = self.dataOut.data_spc[:, :, minIndex:maxIndex + 1]

            data_cspc = None
            if self.dataOut.data_cspc is not None:
                data_cspc = self.dataOut.data_cspc[:, :, minIndex:maxIndex + 1]

            data_dc = None
            if self.dataOut.data_dc is not None:
                data_dc = self.dataOut.data_dc[:, minIndex:maxIndex + 1]

            self.dataOut.data_spc = data_spc
            self.dataOut.data_cspc = data_cspc
            self.dataOut.data_dc = data_dc

            self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex + 1]

        return 1
262 262
263 263
class filterByHeights(Operation):
    '''
    Decimate the height axis by summing groups of ``window`` consecutive
    height samples (boxcar filter without normalization).
    '''

    def run(self, dataOut, window):
        '''
        window : number of height samples summed per output height; when None
                 it is derived from the radar controller header (txA/nBaud).
        Raises ValueError when the result would have <= 1 height.
        '''
        deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]

        # `is None` instead of `== None` (identity test, PEP 8).
        if window is None:
            window = (dataOut.radarControllerHeaderObj.txA/dataOut.radarControllerHeaderObj.nBaud) / deltaHeight

        newdelta = deltaHeight * window
        r = dataOut.nHeights % window          # trailing heights that don't fill a window are dropped
        newheights = (dataOut.nHeights-r)/window

        if newheights <= 1:
            raise ValueError("filterByHeights: Too few heights. Current number of heights is %d and window is %d" %(dataOut.nHeights, window))

        if dataOut.flagDataAsBlock:
            """
            Block data, dimension = [nChannels, nProfiles, nHeis]
            """
            buffer = dataOut.data[:, :, 0:int(dataOut.nHeights-r)]
            buffer = buffer.reshape(dataOut.nChannels, dataOut.nProfiles, int(dataOut.nHeights/window), window)
            buffer = numpy.sum(buffer,3)

        else:
            buffer = dataOut.data[:,0:int(dataOut.nHeights-r)]
            buffer = buffer.reshape(dataOut.nChannels,int(dataOut.nHeights/window),int(window))
            buffer = numpy.sum(buffer,2)

        dataOut.data = buffer
        dataOut.heightList = dataOut.heightList[0] + numpy.arange( newheights )*newdelta
        dataOut.windowOfFilter = window

        return dataOut
298 298
299 299
class setH0(Operation):
    '''Shift the height scale so it starts at ``h0``, keeping the spacing.'''

    def run(self, dataOut, h0, deltaHeight=None):
        # Derive the spacing from the current height list when none is given
        # (truthiness test kept: a falsy 0 also triggers the recomputation,
        # matching the original behavior).
        if not deltaHeight:
            deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]

        dataOut.heightList = h0 + numpy.arange(dataOut.nHeights) * deltaHeight

        return dataOut
314 314
315 315
class deFlip(Operation):
    '''
    Undo a +1/-1 flip code by multiplying alternate profiles by a running
    sign, which is kept in ``self.flip`` so it carries over between blocks.
    '''
    # NOTE(review): self.flip was read below without ever being initialized
    # in this class (no __init__), which raises AttributeError on the first
    # run unless the base class happens to set it — define the starting sign
    # as a class attribute so the first block begins with +1.
    flip = 1

    def run(self, dataOut, channelList = []):
        '''
        channelList : channels to de-flip; an empty list means all channels.
        '''
        data = dataOut.data.copy()

        if dataOut.flagDataAsBlock:
            flip = self.flip
            profileList = list(range(dataOut.nProfiles))

            if not channelList:
                for thisProfile in profileList:
                    data[:,thisProfile,:] = data[:,thisProfile,:]*flip
                    flip *= -1.0
            else:
                # NOTE(review): the sign keeps advancing across channels,
                # so later channels start with a different sign than the
                # first one — preserved as-is.
                for thisChannel in channelList:
                    if thisChannel not in dataOut.channelList:
                        continue

                    for thisProfile in profileList:
                        data[thisChannel,thisProfile,:] = data[thisChannel,thisProfile,:]*flip
                        flip *= -1.0

            self.flip = flip

        else:
            if not channelList:
                data[:,:] = data[:,:]*self.flip
            else:
                for thisChannel in channelList:
                    if thisChannel not in dataOut.channelList:
                        continue

                    data[thisChannel,:] = data[thisChannel,:]*self.flip

            # One sign change per (non-block) profile.
            self.flip *= -1.

        dataOut.data = data

        return dataOut
356 356
357 357
class setAttribute(Operation):
    '''
    Set an arbitrary attribute(s) to dataOut
    '''

    def __init__(self):

        Operation.__init__(self)
        self._ready = False

    def run(self, dataOut, **kwargs):
        # Every keyword argument becomes an attribute on dataOut.
        for name in kwargs:
            setattr(dataOut, name, kwargs[name])

        return dataOut
374 374
375 375
@MPDecorator
class printAttribute(Operation):
    '''
    Print an arbitrary attribute of dataOut
    '''

    def __init__(self):

        Operation.__init__(self)

    def run(self, dataOut, attributes):
        # Accept a single attribute name as well as a list of names;
        # silently skip names dataOut does not have.
        names = [attributes] if isinstance(attributes, str) else attributes
        for name in names:
            if hasattr(dataOut, name):
                log.log(getattr(dataOut, name), name)
393 393
394 394
class interpolateHeights(Operation):
    '''
    Replace the samples in the height band [botLim, topLim] (inclusive) with
    values interpolated from the neighbouring heights, e.g. to blank out a
    contaminated height range.
    '''

    def run(self, dataOut, topLim, botLim):
        # 69 to 72 for julia
        # 82-84 for meteors
        if len(numpy.shape(dataOut.data))==2:
            # 2-D data [channels, heights]: fill the band with the average of
            # the samples just below and just above it.
            sampInterp = (dataOut.data[:,botLim-1] + dataOut.data[:,topLim+1])/2
            sampInterp = numpy.transpose(numpy.tile(sampInterp,(topLim-botLim + 1,1)))
            #dataOut.data[:,botLim:limSup+1] = sampInterp
            dataOut.data[:,botLim:topLim+1] = sampInterp
        else:
            # 3-D data [channels, profiles, heights]: linear interpolation
            # along the height axis, fitted on the samples outside the band.
            nHeights = dataOut.data.shape[2]
            x = numpy.hstack((numpy.arange(botLim),numpy.arange(topLim+1,nHeights)))
            y = dataOut.data[:,:,list(range(botLim))+list(range(topLim+1,nHeights))]
            f = interpolate.interp1d(x, y, axis = 2)
            xnew = numpy.arange(botLim,topLim+1)
            ynew = f(xnew)
            dataOut.data[:,:,botLim:topLim+1] = ynew

        return dataOut
415 415
416 416
class CohInt(Operation):
    '''
    Coherent integration: accumulate ``n`` consecutive profiles (or every
    profile received within ``timeInterval`` seconds) into a single summed
    profile. Supports overlapping integration, strided integration and
    data read as blocks.
    '''

    isConfig = False
    __profIndex = 0
    __byTime = False
    __initime = None
    __lastdatatime = None
    __integrationtime = None
    __buffer = None
    __bufferStride = []
    __dataReady = False
    __profIndexStride = 0
    __dataToPutStride = False
    n = None

    def __init__(self, **kwargs):

        Operation.__init__(self, **kwargs)

    def setup(self, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False):
        """
        Set the parameters of the integration class.

        Inputs:

            n : Number of coherent integrations
            timeInterval : Time of integration. If the parameter "n" is selected this one does not work
            overlapping : integrate over a sliding window instead of disjoint groups
        """

        self.__initime = None
        self.__lastdatatime = 0
        self.__buffer = None
        self.__dataReady = False
        self.byblock = byblock
        self.stride = stride

        # Identity comparison with None (PEP 8); `== None` can misfire on
        # objects with a broad __eq__ such as numpy arrays.
        if n is None and timeInterval is None:
            raise ValueError("n or timeInterval should be specified ...")

        if n is not None:
            self.n = n
            self.__byTime = False
        else:
            self.__integrationtime = timeInterval #* 60. #if (type(timeInterval)!=integer) -> change this line
            self.n = 9999            # effectively unbounded: time decides
            self.__byTime = True

        if overlapping:
            self.__withOverlapping = True
            self.__buffer = None
        else:
            self.__withOverlapping = False
            self.__buffer = 0

        self.__profIndex = 0

    def putData(self, data):
        """
        Add a profile to the __buffer and increase in one the __profileIndex
        """

        if not self.__withOverlapping:
            # Disjoint integration: plain running sum.
            self.__buffer += data.copy()
            self.__profIndex += 1
            return

        # Overlapping data: keep the last n profiles stacked.
        nChannels, nHeis = data.shape
        data = numpy.reshape(data, (1, nChannels, nHeis))

        # If the buffer is empty then it takes the data value
        if self.__buffer is None:
            self.__buffer = data
            self.__profIndex += 1
            return

        # If the buffer length is lower than n then keep stacking
        if self.__profIndex < self.n:
            self.__buffer = numpy.vstack((self.__buffer, data))
            self.__profIndex += 1
            return

        # Buffer is full: drop the oldest profile, append the newest
        self.__buffer = numpy.roll(self.__buffer, -1, axis=0)
        self.__buffer[self.n-1] = data
        self.__profIndex = self.n
        return

    def pushData(self):
        """
        Return the sum of the last profiles and the profiles used in the sum.

        Affected:

        self.__profileIndex
        """

        if not self.__withOverlapping:
            data = self.__buffer
            n = self.__profIndex

            self.__buffer = 0
            self.__profIndex = 0

            return data, n

        # Integration with overlapping: sum the stacked window without
        # resetting it.
        data = numpy.sum(self.__buffer, axis=0)
        n = self.__profIndex

        return data, n

    def byProfiles(self, data):
        # Integrate by profile count: a result is ready every n profiles.
        self.__dataReady = False
        avgdata = None

        self.putData(data)

        if self.__profIndex == self.n:
            avgdata, n = self.pushData()
            self.__dataReady = True

        return avgdata

    def byTime(self, data, datatime):
        # Integrate by elapsed time: a result is ready once the integration
        # interval has passed; self.n is updated to the achieved count.
        self.__dataReady = False
        avgdata = None
        n = None

        self.putData(data)

        if (datatime - self.__initime) >= self.__integrationtime:
            avgdata, n = self.pushData()
            self.n = n
            self.__dataReady = True

        return avgdata

    def integrateByStride(self, data, datatime):
        # Accumulate n*stride profiles, then emit the `stride` interleaved
        # sums (profiles i, i+stride, i+2*stride, ...) on successive calls.
        if self.__profIndex == 0:
            self.__buffer = [[data.copy(), datatime]]
        else:
            self.__buffer.append([data.copy(),datatime])
        self.__profIndex += 1
        self.__dataReady = False

        if self.__profIndex == self.n * self.stride :
            self.__dataToPutStride = True
            self.__profIndexStride = 0
            self.__profIndex = 0
            self.__bufferStride = []
            for i in range(self.stride):
                current = self.__buffer[i::self.stride]
                data = numpy.sum([t[0] for t in current], axis=0)
                avgdatatime = numpy.average([t[1] for t in current])
                self.__bufferStride.append((data, avgdatatime))

        if self.__dataToPutStride:
            self.__dataReady = True
            self.__profIndexStride += 1
            if self.__profIndexStride == self.stride:
                self.__dataToPutStride = False
            return self.__bufferStride[self.__profIndexStride - 1]

        return None, None

    def integrate(self, data, datatime=None):
        # Dispatch to time- or profile-based integration and track the
        # timestamp assigned to the integrated profile.
        if self.__initime is None:
            self.__initime = datatime

        if self.__byTime:
            avgdata = self.byTime(data, datatime)
        else:
            avgdata = self.byProfiles(data)

        self.__lastdatatime = datatime

        if avgdata is None:
            return None, None

        avgdatatime = self.__initime

        # NOTE(review): __lastdatatime was just set to datatime, so deltatime
        # is always 0 here; kept as-is to preserve the original behavior.
        deltatime = datatime - self.__lastdatatime

        if not self.__withOverlapping:
            self.__initime = datatime
        else:
            self.__initime += deltatime

        return avgdata, avgdatatime

    def integrateByBlock(self, dataOut):
        # Sum disjoint groups of n profiles inside a single data block.
        times = int(dataOut.data.shape[1]/self.n)
        # `complex` instead of the numpy.complex alias removed in NumPy 1.24.
        avgdata = numpy.zeros((dataOut.nChannels, times, dataOut.nHeights), dtype=complex)

        id_min = 0
        id_max = self.n

        for i in range(times):
            junk = dataOut.data[:,id_min:id_max,:]
            avgdata[:,i,:] = junk.sum(axis=1)
            id_min += self.n
            id_max += self.n

        timeInterval = dataOut.ippSeconds*self.n
        avgdatatime = (times - 1) * timeInterval + dataOut.utctime
        self.__dataReady = True
        return avgdata, avgdatatime

    def run(self, dataOut, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False, **kwargs):
        """
        Entry point: integrate dataOut.data and publish the result.
        dataOut.flagNoData stays True until a full integration is available.
        """
        if not self.isConfig:
            self.setup(n=n, stride=stride, timeInterval=timeInterval, overlapping=overlapping, byblock=byblock, **kwargs)
            self.isConfig = True

        if dataOut.flagDataAsBlock:
            """
            Block data: dimension = [nChannels, nProfiles, nHeis]
            """
            avgdata, avgdatatime = self.integrateByBlock(dataOut)
            # NOTE(review): true division — nProfiles becomes a float here.
            dataOut.nProfiles /= self.n
        else:
            if stride is None:
                avgdata, avgdatatime = self.integrate(dataOut.data, dataOut.utctime)
            else:
                avgdata, avgdatatime = self.integrateByStride(dataOut.data, dataOut.utctime)

        dataOut.flagNoData = True

        if self.__dataReady:
            dataOut.data = avgdata
            if not dataOut.flagCohInt:
                # Scale nCohInt only once, even if several CohInt stages run.
                dataOut.nCohInt *= self.n
                dataOut.flagCohInt = True
            # NOTE(review): the source diff showed this assignment both active
            # and commented out; the standard CohInt behavior — stamping the
            # integrated profile with the integration start time — is kept.
            dataOut.utctime = avgdatatime
            # dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt
            dataOut.flagNoData = False
        return dataOut
678 678
679 679 class Decoder(Operation):
680 680
681 681 isConfig = False
682 682 __profIndex = 0
683 683
684 684 code = None
685 685
686 686 nCode = None
687 687 nBaud = None
688 688
689 689 def __init__(self, **kwargs):
690 690
691 691 Operation.__init__(self, **kwargs)
692 692
693 693 self.times = None
694 694 self.osamp = None
695 695 # self.__setValues = False
696 696 self.isConfig = False
697 697 self.setupReq = False
698 698 def setup(self, code, osamp, dataOut):
699 699
700 700 self.__profIndex = 0
701 701
702 702 self.code = code
703 703
704 704 self.nCode = len(code)
705 705 self.nBaud = len(code[0])
706 706
707 707 if (osamp != None) and (osamp >1):
708 708 self.osamp = osamp
709 709 self.code = numpy.repeat(code, repeats=self.osamp, axis=1)
710 710 self.nBaud = self.nBaud*self.osamp
711 711
712 712 self.__nChannels = dataOut.nChannels
713 713 self.__nProfiles = dataOut.nProfiles
714 714 self.__nHeis = dataOut.nHeights
715 715
716 716 if self.__nHeis < self.nBaud:
717 717 raise ValueError('Number of heights (%d) should be greater than number of bauds (%d)' %(self.__nHeis, self.nBaud))
718 718
719 719 #Frequency
720 720 __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex)
721 721
722 722 __codeBuffer[:,0:self.nBaud] = self.code
723 723
724 724 self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1))
725 725
726 726 if dataOut.flagDataAsBlock:
727 727
728 728 self.ndatadec = self.__nHeis #- self.nBaud + 1
729 729
730 730 self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex)
731 731
732 732 else:
733 733
734 734 #Time
735 735 self.ndatadec = self.__nHeis #- self.nBaud + 1
736 736
737 737 self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex)
738 738
739 739 def __convolutionInFreq(self, data):
740 740
741 741 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
742 742
743 743 fft_data = numpy.fft.fft(data, axis=1)
744 744
745 745 conv = fft_data*fft_code
746 746
747 747 data = numpy.fft.ifft(conv,axis=1)
748 748
749 749 return data
750 750
751 751 def __convolutionInFreqOpt(self, data):
752 752
753 753 raise NotImplementedError
754 754
755 755 def __convolutionInTime(self, data):
756 756
757 757 code = self.code[self.__profIndex]
758 758 for i in range(self.__nChannels):
759 759 self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:]
760 760
761 761 return self.datadecTime
762 762
763 763 def __convolutionByBlockInTime(self, data):
764 764
765 765 repetitions = int(self.__nProfiles / self.nCode)
766 766 junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize))
767 767 junk = junk.flatten()
768 768 code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud))
769 769 profilesList = range(self.__nProfiles)
770 770
771 771 for i in range(self.__nChannels):
772 772 for j in profilesList:
773 773 self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:]
774 774 return self.datadecTime
775 775
776 776 def __convolutionByBlockInFreq(self, data):
777 777
778 778 raise NotImplementedError("Decoder by frequency fro Blocks not implemented")
779 779
780 780
781 781 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
782 782
783 783 fft_data = numpy.fft.fft(data, axis=2)
784 784
785 785 conv = fft_data*fft_code
786 786
787 787 data = numpy.fft.ifft(conv,axis=2)
788 788
789 789 return data
790 790
791 791
    def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None):
        """Decode the voltage data with the phase code.

        Parameters
        ----------
        dataOut : data object whose ``data`` attribute is decoded in place.
        code : array-like, optional
            Phase code; when omitted, ``dataOut.code`` is used.
        nCode, nBaud : int, optional
            Dimensions used to reshape an explicitly given ``code``.
        mode : int
            0 -> time-domain correlation, 1 -> frequency-domain product,
            2 -> optimized frequency domain (not implemented).
            mode 3 is accepted but mapped back to 0 with a warning.
        osamp : int, optional
            Oversampling factor applied to the code in setup().
        times : deprecated, ignored (a warning is printed if given).

        Returns
        -------
        dataOut with decoded ``data``, trimmed ``heightList`` and
        ``flagDecodeData`` set, or None when no code is defined.
        """

        if dataOut.flagDecodeData:
            print("This data is already decoded, recoding again ...")

        # one-time configuration: resolve the code and build FFT tables
        if not self.isConfig:

            if code is None:
                if dataOut.code is None:
                    raise ValueError("Code could not be read from %s instance. Enter a value in Code parameter" %dataOut.type)

                code = dataOut.code
            else:
                code = numpy.array(code).reshape(nCode,nBaud)
            self.setup(code, osamp, dataOut)

            self.isConfig = True

            if mode == 3:
                sys.stderr.write("Decoder Warning: mode=%d is not valid, using mode=0\n" %mode)

            if times != None:
                sys.stderr.write("Decoder Warning: Argument 'times' in not used anymore\n")

        if self.code is None:
            print("Fail decoding: Code is not defined.")
            return

        self.__nProfiles = dataOut.nProfiles
        datadec = None

        # mode 3 is a legacy alias for mode 0
        if mode == 3:
            mode = 0

        if dataOut.flagDataAsBlock:
            """
            Decoding when data have been read as block,
            """

            if mode == 0:
                datadec = self.__convolutionByBlockInTime(dataOut.data)
            if mode == 1:
                datadec = self.__convolutionByBlockInFreq(dataOut.data)
        else:
            """
            Decoding when data have been read profile by profile
            """
            if mode == 0:
                datadec = self.__convolutionInTime(dataOut.data)

            if mode == 1:
                datadec = self.__convolutionInFreq(dataOut.data)

            if mode == 2:
                datadec = self.__convolutionInFreqOpt(dataOut.data)

        if datadec is None:
            raise ValueError("Codification mode selected is not valid: mode=%d. Try selecting 0 or 1" %mode)

        dataOut.code = self.code
        dataOut.nCode = self.nCode
        dataOut.nBaud = self.nBaud

        dataOut.data = datadec

        # decoded output may be shorter than the input; trim heights to match
        dataOut.heightList = dataOut.heightList[0:datadec.shape[-1]]

        dataOut.flagDecodeData = True  # assume the data is now decoded

        # advance the per-profile code index, wrapping after the last code
        if self.__profIndex == self.nCode-1:
            self.__profIndex = 0
            return dataOut

        self.__profIndex += 1

        return dataOut
868 868 # dataOut.flagDeflipData = True #asumo q la data no esta sin flip
869 869
870 870
class ProfileConcat(Operation):
    """Concatenate ``m`` consecutive profiles along the height axis.

    Profiles are accumulated into an internal buffer; once ``m`` of them
    have been collected the buffer is emitted as a single profile that is
    m times longer, and the height list and IPP are rescaled to match.
    Only works profile-by-profile (``flagDataAsBlock`` must be False).
    """

    isConfig = False
    buffer = None

    def __init__(self, **kwargs):
        Operation.__init__(self, **kwargs)
        self.profileIndex = 0

    def reset(self):
        """Clear the accumulation buffer and restart the counters."""
        self.buffer = numpy.zeros_like(self.buffer)
        self.start_index = 0
        self.times = 1

    def setup(self, data, m, n=1):
        """Allocate a (channels, heights*m) buffer matching the input dtype."""
        n_channels, n_heights = data.shape
        self.buffer = numpy.zeros((n_channels, n_heights * m), dtype=type(data[0, 0]))
        self.nHeights = n_heights
        self.start_index = 0
        self.times = 1

    def concat(self, data):
        """Copy one profile into the next free slice of the buffer."""
        stop = self.nHeights * self.times
        self.buffer[:, self.start_index:stop] = data.copy()
        self.start_index += self.nHeights

    def run(self, dataOut, m):
        dataOut.flagNoData = True

        if not self.isConfig:
            self.setup(dataOut.data, m, 1)
            self.isConfig = True

        if dataOut.flagDataAsBlock:
            raise ValueError("ProfileConcat can only be used when voltage have been read profile by profile, getBlock = False")

        self.concat(dataOut.data)
        self.times += 1

        if self.times > m:
            # a full group of m profiles is ready: emit it and rescale axes
            dataOut.data = self.buffer
            self.reset()
            dataOut.flagNoData = False
            deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
            stop_height = dataOut.heightList[0] + dataOut.nHeights * deltaHeight * m
            dataOut.heightList = numpy.arange(dataOut.heightList[0], stop_height, deltaHeight)
            dataOut.ippSeconds *= m

        return dataOut
920 920
class ProfileSelector(Operation):
    """Select a subset of profiles from block or per-profile voltage data.

    The selection may be an explicit index list (``profileList``), a single
    (min, max) range (``profileRangeList``) or several ranges
    (``rangeList``).  ``beam`` is only meaningful for AMISR data.
    """

    profileIndex = None
    # total number of profiles in the current selection
    nProfiles = None

    def __init__(self, **kwargs):

        Operation.__init__(self, **kwargs)
        self.profileIndex = 0

    def incProfileIndex(self):
        """Advance the running profile counter, wrapping at ``nProfiles``."""
        self.profileIndex += 1

        if self.profileIndex >= self.nProfiles:
            self.profileIndex = 0

    def isThisProfileInRange(self, profileIndex, minIndex, maxIndex):
        """Return True when minIndex <= profileIndex <= maxIndex."""
        return minIndex <= profileIndex <= maxIndex

    def isThisProfileInList(self, profileIndex, profileList):
        """Return True when profileIndex is one of the selected indexes."""
        return profileIndex in profileList

    def run(self, dataOut, profileList=None, profileRangeList=None, beam=None, byblock=False, rangeList=None, nProfiles=None):
        """
        ProfileSelector:

        Inputs:
            profileList : Index of profiles selected. Example: profileList = (0,1,2,7,8)

            profileRangeList : Minimum and maximum profile indexes. Example: profileRangeList = (4, 30)

            rangeList : List of profile ranges. Example: rangeList = ((4, 30), (32, 64), (128, 256))

            beam : AMISR beam key (profile must belong to dataOut.beamRangeDict[beam]).

            nProfiles : unused; kept for backward compatibility.
        """

        if rangeList is not None:
            # allow a single (min, max) pair as shorthand for [(min, max)]
            if type(rangeList[0]) not in (tuple, list):
                rangeList = [rangeList]

        dataOut.flagNoData = True

        if dataOut.flagDataAsBlock:
            """
            data dimension = [nChannels, nProfiles, nHeis]
            """
            if profileList is not None:
                dataOut.data = dataOut.data[:, profileList, :]

            if profileRangeList is not None:
                minIndex = profileRangeList[0]
                maxIndex = profileRangeList[1]
                profileList = list(range(minIndex, maxIndex + 1))

                dataOut.data = dataOut.data[:, minIndex:maxIndex + 1, :]

            if rangeList is not None:

                profileList = []

                for thisRange in rangeList:
                    minIndex = thisRange[0]
                    maxIndex = thisRange[1]

                    profileList.extend(list(range(minIndex, maxIndex + 1)))

                dataOut.data = dataOut.data[:, profileList, :]

            if profileList is None:
                # previously this fell through to ``len(None)`` (TypeError);
                # raise the intended, explicit error instead
                raise ValueError("ProfileSelector needs profileList, profileRangeList or rangeList parameter")

            dataOut.nProfiles = len(profileList)
            dataOut.profileIndex = dataOut.nProfiles - 1
            dataOut.flagNoData = False

            return dataOut

        """
        data dimension = [nChannels, nHeis]
        """

        if profileList is not None:

            if self.isThisProfileInList(dataOut.profileIndex, profileList):

                self.nProfiles = len(profileList)
                dataOut.nProfiles = self.nProfiles
                dataOut.profileIndex = self.profileIndex
                dataOut.flagNoData = False

                self.incProfileIndex()
                return dataOut

        if profileRangeList is not None:

            minIndex = profileRangeList[0]
            maxIndex = profileRangeList[1]

            if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):

                self.nProfiles = maxIndex - minIndex + 1
                dataOut.nProfiles = self.nProfiles
                dataOut.profileIndex = self.profileIndex
                dataOut.flagNoData = False

                self.incProfileIndex()
                return dataOut

        if rangeList is not None:

            nProfiles = 0

            # first pass: total number of selected profiles across all ranges
            for thisRange in rangeList:
                minIndex = thisRange[0]
                maxIndex = thisRange[1]

                nProfiles += maxIndex - minIndex + 1

            # second pass: accept the profile if it falls inside any range
            for thisRange in rangeList:

                minIndex = thisRange[0]
                maxIndex = thisRange[1]

                if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):

                    self.nProfiles = nProfiles
                    dataOut.nProfiles = self.nProfiles
                    dataOut.profileIndex = self.profileIndex
                    dataOut.flagNoData = False

                    self.incProfileIndex()

                    break

            return dataOut


        if beam is not None:  # beam is only for AMISR data
            if self.isThisProfileInList(dataOut.profileIndex, dataOut.beamRangeDict[beam]):
                dataOut.flagNoData = False
                dataOut.profileIndex = self.profileIndex

                self.incProfileIndex()

            return dataOut

        raise ValueError("ProfileSelector needs profileList, profileRangeList or rangeList parameter")
1078 1078
1079 1079
class Reshaper(Operation):
    """Reshape voltage data, trading profiles for heights.

    Either an explicit target ``shape`` or a transmission factor ``nTxs``
    may be given; ``nTxs`` < 1 in profile-by-profile mode means several
    read profiles are merged into one longer profile.
    """

    def __init__(self, **kwargs):

        Operation.__init__(self, **kwargs)

        self.__buffer = None
        self.__nitems = 0

    def __appendProfile(self, dataOut, nTxs):
        """Append one profile to the merge buffer; return profiles emitted so far."""
        if self.__buffer is None:
            shape = (dataOut.nChannels, int(dataOut.nHeights / nTxs))
            self.__buffer = numpy.empty(shape, dtype=dataOut.data.dtype)

        ini = dataOut.nHeights * self.__nitems
        end = ini + dataOut.nHeights

        self.__buffer[:, ini:end] = dataOut.data

        self.__nitems += 1

        return int(self.__nitems * nTxs)

    def __getBuffer(self):
        """Return a copy of the merged profile when complete, else None."""
        if self.__nitems == int(1. / self.__nTxs):

            self.__nitems = 0

            return self.__buffer.copy()

        return None

    def __checkInputs(self, dataOut, shape, nTxs):
        """Validate ``shape``/``nTxs`` and return (target shape, nTxs)."""
        if shape is None and nTxs is None:
            raise ValueError("Reshaper: shape of factor should be defined")

        if nTxs:
            # was ``nTxs < 0``: nTxs == 0 slipped through into a division by zero
            if nTxs <= 0:
                raise ValueError("nTxs should be greater than 0")

            if nTxs < 1 and dataOut.nProfiles % (1. / nTxs) != 0:
                raise ValueError("nProfiles= %d is not divisibled by (1./nTxs) = %f" % (dataOut.nProfiles, (1. / nTxs)))

            # integer dimensions: float sizes break numpy.reshape on Python 3
            shape = [dataOut.nChannels, int(dataOut.nProfiles * nTxs), int(dataOut.nHeights / nTxs)]

            return shape, nTxs

        if len(shape) != 2 and len(shape) != 3:
            raise ValueError("shape dimension should be equal to 2 or 3. shape = (nProfiles, nHeis) or (nChannels, nProfiles, nHeis). Actually shape = (%d, %d, %d)" % (dataOut.nChannels, dataOut.nProfiles, dataOut.nHeights))

        if len(shape) == 2:
            shape_tuple = [dataOut.nChannels]
            shape_tuple.extend(shape)
        else:
            shape_tuple = list(shape)

        nTxs = 1.0 * shape_tuple[1] / dataOut.nProfiles

        return shape_tuple, nTxs

    def run(self, dataOut, shape=None, nTxs=None):
        """Reshape ``dataOut`` in place according to ``shape`` or ``nTxs``."""
        shape_tuple, self.__nTxs = self.__checkInputs(dataOut, shape, nTxs)

        dataOut.flagNoData = True
        profileIndex = None

        if dataOut.flagDataAsBlock:

            dataOut.data = numpy.reshape(dataOut.data, shape_tuple)
            dataOut.flagNoData = False

            profileIndex = int(dataOut.nProfiles * self.__nTxs) - 1

        else:

            if self.__nTxs < 1:

                self.__appendProfile(dataOut, self.__nTxs)
                new_data = self.__getBuffer()

                if new_data is not None:
                    dataOut.data = new_data
                    dataOut.flagNoData = False

                # BUG FIX: the old code used the raw ``nTxs`` argument here,
                # which is None when only ``shape`` was given (TypeError)
                profileIndex = dataOut.profileIndex * self.__nTxs

            else:
                raise ValueError("nTxs should be greater than 0 and lower than 1, or use VoltageReader(..., getblock=True)")

        deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]

        dataOut.heightList = numpy.arange(dataOut.nHeights / self.__nTxs) * deltaHeight + dataOut.heightList[0]

        dataOut.nProfiles = int(dataOut.nProfiles * self.__nTxs)

        dataOut.profileIndex = profileIndex

        dataOut.ippSeconds /= self.__nTxs

        return dataOut
1184 1184
class SplitProfiles(Operation):
    """Split every profile into ``n`` shorter profiles.

    Reshapes (channels, profiles, samples) so the sample axis is divided
    by ``n`` and the profile axis multiplied by ``n``; height list,
    profile count and IPP are rescaled to match.  Block mode only.
    """

    def __init__(self, **kwargs):

        Operation.__init__(self, **kwargs)

    def run(self, dataOut, n):
        """Split profiles of ``dataOut`` in place and return it.

        Raises ValueError when data is not in block mode or the number of
        samples is not divisible by ``n``.
        """
        dataOut.flagNoData = True
        profileIndex = None

        if dataOut.flagDataAsBlock:

            # nchannels, nprofiles, nsamples
            shape = dataOut.data.shape

            if shape[2] % n != 0:
                # fixed message: the sample count must be a multiple of n,
                # not the other way around as the old text implied
                raise ValueError("Could not split the data, number of samples (%d) has to be multiple of n=%d" % (shape[2], n))

            new_shape = shape[0], shape[1] * n, int(shape[2] / n)

            dataOut.data = numpy.reshape(dataOut.data, new_shape)
            dataOut.flagNoData = False

            profileIndex = int(dataOut.nProfiles / n) - 1

        else:

            raise ValueError("Could not split the data when is read Profile by Profile. Use VoltageReader(..., getblock=True)")

        deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]

        dataOut.heightList = numpy.arange(dataOut.nHeights / n) * deltaHeight + dataOut.heightList[0]

        dataOut.nProfiles = int(dataOut.nProfiles * n)

        dataOut.profileIndex = profileIndex

        dataOut.ippSeconds /= n

        return dataOut
1226 1226
class CombineProfiles(Operation):
    """Merge ``n`` consecutive profiles into one longer profile.

    In block mode this is a single reshape; profile-by-profile, incoming
    profiles are concatenated along the height axis until ``n`` have been
    collected.
    """

    def __init__(self, **kwargs):

        Operation.__init__(self, **kwargs)

        self.__remData = None
        self.__profileIndex = 0

    def run(self, dataOut, n):

        dataOut.flagNoData = True
        profileIndex = None

        if dataOut.flagDataAsBlock:

            # nchannels, nprofiles, nsamples
            shape = dataOut.data.shape

            # validate before computing the new shape (old code validated after)
            if shape[1] % n != 0:
                # fixed message: the profile count must be a multiple of n
                raise ValueError("Could not combine the data, number of profiles (%d) has to be multiple of n=%d" % (shape[1], n))

            # integer division: shape[1]/n is a float on Python 3 and
            # numpy.reshape rejects float dimensions
            new_shape = shape[0], shape[1] // n, shape[2] * n

            dataOut.data = numpy.reshape(dataOut.data, new_shape)
            dataOut.flagNoData = False

            profileIndex = int(dataOut.nProfiles * n) - 1

        else:

            # nchannels, nsamples: accumulate until n profiles are gathered
            if self.__remData is None:
                newData = dataOut.data
            else:
                newData = numpy.concatenate((self.__remData, dataOut.data), axis=1)

            self.__profileIndex += 1

            if self.__profileIndex < n:
                self.__remData = newData
                # group not complete yet; keep the legacy bare return
                return

            self.__profileIndex = 0
            self.__remData = None

            dataOut.data = newData
            dataOut.flagNoData = False

            # integer index (old code produced a float via "/")
            profileIndex = dataOut.profileIndex // n


        deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]

        dataOut.heightList = numpy.arange(dataOut.nHeights * n) * deltaHeight + dataOut.heightList[0]

        dataOut.nProfiles = int(dataOut.nProfiles / n)

        dataOut.profileIndex = profileIndex

        dataOut.ippSeconds *= n

        return dataOut
1289 1289
class PulsePair(Operation):
    '''
    Function PulsePair(Signal Power, Velocity)
    The real component of Lag[0] provides Intensity Information
    The imag component of Lag[1] Phase provides Velocity Information

    Configuration Parameters:
    nPRF = Number of Several PRF
    theta = Degree Azimuth angel Boundaries

    Input:
    self.dataOut
    lag[N]
    Affected:
    self.dataOut.spc
    '''
    isConfig = False
    __profIndex = 0        # profiles accumulated so far in __buffer
    __initime = None       # unix time of the first profile of the current group
    __lastdatatime = None  # unix time of the last profile processed
    __buffer = None        # (nch, n, nHeis) complex accumulation buffer
    noise = None           # per-channel noise estimate (Hildebrand-Sekhon)
    __dataReady = False    # True when a full group of n profiles was reduced
    n = None               # number of profiles (PRFs) per estimate
    __nch = 0
    __nHeis = 0
    removeDC = False
    ipp = None
    lambda_ = 0            # radar wavelength [m], set in setup()

    def __init__(self,**kwargs):
        Operation.__init__(self,**kwargs)

    def setup(self, dataOut, n = None, removeDC=False):
        '''
        n = number of input PRFs (profiles accumulated per estimate)
        '''
        self.__initime = None
        ####print("[INICIO]-setup del METODO PULSE PAIR")
        self.__lastdatatime = 0
        self.__dataReady = False
        self.__buffer = 0
        self.__profIndex = 0
        self.noise = None
        self.__nch = dataOut.nChannels
        self.__nHeis = dataOut.nHeights
        self.removeDC = removeDC
        # wavelength for a 9345 MHz radar (c / f)
        self.lambda_ = 3.0e8/(9345.0e6)
        self.ippSec = dataOut.ippSeconds
        self.nCohInt = dataOut.nCohInt
        ####print("IPPseconds",dataOut.ippSeconds)
        ####print("ELVALOR DE n es:", n)
        if n == None:
            raise ValueError("n should be specified.")

        # NOTE(review): message says "greater than 2" but the check allows n == 2
        if n != None:
            if n<2:
                raise ValueError("n should be greater than 2")

        self.n = n
        self.__nProf = n

        self.__buffer = numpy.zeros((dataOut.nChannels,
                                    n,
                                    dataOut.nHeights),
                                    dtype='complex')

    def putData(self,data):
        '''
        Add one profile to __buffer and advance __profIndex by one.
        '''
        self.__buffer[:,self.__profIndex,:]= data
        self.__profIndex += 1
        return

    def pushData(self,dataOut):
        '''
        Reduce the buffered group to pulse-pair estimates and return them
        together with the number of profiles used.
        Affected : self.__profileIndex (reset to 0), self.__buffer (cleared)
        '''
        #----------------- Remove DC-----------------------------------
        if self.removeDC==True:
            mean = numpy.mean(self.__buffer,1)
            tmp = mean.reshape(self.__nch,1,self.__nHeis)
            dc= numpy.tile(tmp,[1,self.__nProf,1])
            self.__buffer = self.__buffer - dc
        #------------------ Power computation (lag 0) -----------------
        pair0 = self.__buffer*numpy.conj(self.__buffer)
        pair0 = pair0.real
        lag_0 = numpy.sum(pair0,1)
        #----------------- Cross product of channels 0 and 1 ---------- New
        cspc_pair01 = self.__buffer[0]*self.__buffer[1]
        #------------------ Noise estimate per channel ----------------
        self.noise = numpy.zeros(self.__nch)
        for i in range(self.__nch):
            daux = numpy.sort(pair0[i,:,:],axis= None)
            self.noise[i]=hildebrand_sekhon( daux ,self.nCohInt)

        self.noise = self.noise.reshape(self.__nch,1)
        self.noise = numpy.tile(self.noise,[1,self.__nHeis])
        noise_buffer = self.noise.reshape(self.__nch,1,self.__nHeis)
        noise_buffer = numpy.tile(noise_buffer,[1,self.__nProf,1])
        #------------------ Received power = P, signal power = S, noise = N --
        #------------------ P= S+N ,P=lag_0/N ---------------------------------
        #-------------------- Power --------------------------------------------------
        data_power = lag_0/(self.n*self.nCohInt)
        #--------------------CCF------------------------------------------------------
        data_ccf =numpy.sum(cspc_pair01,axis=0)/(self.n*self.nCohInt)
        #------------------ Signal --------------------------------------------------
        # NOTE(review): signal is multiplied by (n*nCohInt) here while power is
        # divided by it above — looks intentional per the commented alternative,
        # but worth confirming the scaling
        data_intensity = pair0 - noise_buffer
        data_intensity = numpy.sum(data_intensity,axis=1)*(self.n*self.nCohInt)#*self.nCohInt)
        #data_intensity = (lag_0-self.noise*self.n)*(self.n*self.nCohInt)
        # clip negative intensities to a small positive value
        for i in range(self.__nch):
            for j in range(self.__nHeis):
                if data_intensity[i][j] < 0:
                    data_intensity[i][j] = numpy.min(numpy.absolute(data_intensity[i][j]))

        #----------------- Doppler frequency and velocity (lag 1) ----------
        pair1 = self.__buffer[:,:-1,:]*numpy.conjugate(self.__buffer[:,1:,:])
        lag_1 = numpy.sum(pair1,1)
        data_freq = (-1/(2.0*math.pi*self.ippSec*self.nCohInt))*numpy.angle(lag_1)
        data_velocity = (self.lambda_/2.0)*data_freq

        #---------------- Estimated mean signal power ----------------------
        lag_0 = lag_0/self.n
        S = lag_0-self.noise

        #---------------- Mean Doppler lag-1 magnitude ---------------------
        lag_1 = lag_1/(self.n-1)
        R1 = numpy.abs(lag_1)

        #---------------- SNR computation ----------------------------------
        data_snrPP = S/self.noise
        for i in range(self.__nch):
            for j in range(self.__nHeis):
                if data_snrPP[i][j] < 1.e-20:
                    data_snrPP[i][j] = 1.e-20

        #----------------- Spectral width ----------------------------------
        L = S/R1
        L = numpy.where(L<0,1,L)
        L = numpy.log(L)
        tmp = numpy.sqrt(numpy.absolute(L))
        data_specwidth = (self.lambda_/(2*math.sqrt(2)*math.pi*self.ippSec*self.nCohInt))*tmp*numpy.sign(L)
        n = self.__profIndex

        # reset the accumulation state for the next group
        self.__buffer = numpy.zeros((self.__nch, self.__nProf,self.__nHeis), dtype='complex')
        self.__profIndex = 0
        return data_power,data_intensity,data_velocity,data_snrPP,data_specwidth,data_ccf,n


    def pulsePairbyProfiles(self,dataOut):
        """Accumulate one profile; return estimates once n profiles are buffered,
        otherwise all-None placeholders."""
        self.__dataReady = False
        data_power = None
        data_intensity = None
        data_velocity = None
        data_specwidth = None
        data_snrPP = None
        data_ccf = None
        self.putData(data=dataOut.data)
        if self.__profIndex == self.n:
            data_power,data_intensity, data_velocity,data_snrPP,data_specwidth,data_ccf, n = self.pushData(dataOut=dataOut)
            self.__dataReady = True

        return data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf


    def pulsePairOp(self, dataOut, datatime= None):
        """Drive the accumulation and track the timestamp of each group.

        Returns the six estimates plus the averaged timestamp, or seven
        Nones while a group is still being accumulated.
        """
        if self.__initime == None:
            self.__initime = datatime
        data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf = self.pulsePairbyProfiles(dataOut)
        self.__lastdatatime = datatime

        if data_power is None:
            return None, None, None,None,None,None,None

        avgdatatime = self.__initime
        # NOTE(review): deltatime is computed but never used
        deltatime = datatime - self.__lastdatatime
        self.__initime = datatime

        return data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf, avgdatatime

    def run(self, dataOut,n = None,removeDC= False, overlapping= False,**kwargs):
        """Accumulate profiles and publish pulse-pair products on dataOut
        (dataPP_POW, dataPP_POWER, dataPP_DOP, dataPP_SNR, dataPP_WIDTH,
        dataPP_CCF) every n profiles."""
        #print("hey")
        #print(dataOut.data.shape)
        #exit(1)
        #print(self.__profIndex)
        if not self.isConfig:
            self.setup(dataOut = dataOut, n = n , removeDC=removeDC , **kwargs)
            self.isConfig = True
        data_power, data_intensity, data_velocity,data_snrPP,data_specwidth,data_ccf, avgdatatime = self.pulsePairOp(dataOut, dataOut.utctime)
        dataOut.flagNoData = True

        if self.__dataReady:
            ###print("READY ----------------------------------")
            dataOut.nCohInt *= self.n
            dataOut.dataPP_POW = data_intensity # S
            dataOut.dataPP_POWER = data_power # P, the power-moment value
            dataOut.dataPP_DOP = data_velocity
            dataOut.dataPP_SNR = data_snrPP
            dataOut.dataPP_WIDTH = data_specwidth
            dataOut.dataPP_CCF = data_ccf
            dataOut.PRFbyAngle = self.n  # number of PRFs per rotated angle, i.e. per time step
            dataOut.nProfiles = int(dataOut.nProfiles/n)
            dataOut.utctime = avgdatatime
            dataOut.flagNoData = False
        return dataOut
1499 1499
1500 1500 class PulsePair_vRF(Operation):
1501 1501 '''
1502 1502 Function PulsePair(Signal Power, Velocity)
1503 1503 The real component of Lag[0] provides Intensity Information
1504 1504 The imag component of Lag[1] Phase provides Velocity Information
1505 1505
1506 1506 Configuration Parameters:
1507 1507 nPRF = Number of Several PRF
1508 1508 theta = Degree Azimuth angel Boundaries
1509 1509
1510 1510 Input:
1511 1511 self.dataOut
1512 1512 lag[N]
1513 1513 Affected:
1514 1514 self.dataOut.spc
1515 1515 '''
1516 1516 isConfig = False
1517 1517 __profIndex = 0
1518 1518 __initime = None
1519 1519 __lastdatatime = None
1520 1520 __buffer = None
1521 1521 noise = None
1522 1522 __dataReady = False
1523 1523 n = None
1524 1524 __nch = 0
1525 1525 __nHeis = 0
1526 1526 removeDC = False
1527 1527 ipp = None
1528 1528 lambda_ = 0
1529 1529
1530 1530 def __init__(self,**kwargs):
1531 1531 Operation.__init__(self,**kwargs)
1532 1532
1533 1533 def setup(self, dataOut, n = None, removeDC=False):
1534 1534 '''
1535 1535 n= Numero de PRF's de entrada
1536 1536 '''
1537 1537 self.__initime = None
1538 1538 ####print("[INICIO]-setup del METODO PULSE PAIR")
1539 1539 self.__lastdatatime = 0
1540 1540 self.__dataReady = False
1541 1541 self.__buffer = 0
1542 1542 self.__profIndex = 0
1543 1543 self.noise = None
1544 1544 self.__nch = dataOut.nChannels
1545 1545 self.__nHeis = dataOut.nHeights
1546 1546 self.removeDC = removeDC
1547 1547 self.lambda_ = 3.0e8/(9345.0e6)
1548 1548 self.ippSec = dataOut.ippSeconds
1549 1549 self.nCohInt = dataOut.nCohInt
1550 1550 ####print("IPPseconds",dataOut.ippSeconds)
1551 1551 ####print("ELVALOR DE n es:", n)
1552 1552 if n == None:
1553 1553 raise ValueError("n should be specified.")
1554 1554
1555 1555 if n != None:
1556 1556 if n<2:
1557 1557 raise ValueError("n should be greater than 2")
1558 1558
1559 1559 self.n = n
1560 1560 self.__nProf = n
1561 1561
1562 1562 self.__buffer = numpy.zeros((dataOut.nChannels,
1563 1563 n,
1564 1564 dataOut.nHeights),
1565 1565 dtype='complex')
1566 1566
1567 1567 def putData(self,data):
1568 1568 '''
1569 1569 Add a profile to he __buffer and increase in one the __profiel Index
1570 1570 '''
1571 1571 self.__buffer[:,self.__profIndex,:]= data
1572 1572 self.__profIndex += 1
1573 1573 return
1574 1574
1575 1575 def putDataByBlock(self,data,n):
1576 1576 '''
1577 1577 Add a profile to he __buffer and increase in one the __profiel Index
1578 1578 '''
1579 1579 self.__buffer[:]= data
1580 1580 self.__profIndex = n
1581 1581 return
1582 1582
1583 1583 def pushData(self,dataOut):
1584 1584 '''
1585 1585 Return the PULSEPAIR and the profiles used in the operation
1586 1586 Affected : self.__profileIndex
1587 1587 '''
1588 1588 #----------------- Remove DC-----------------------------------
1589 1589 if self.removeDC==True:
1590 1590 mean = numpy.mean(self.__buffer,1)
1591 1591 tmp = mean.reshape(self.__nch,1,self.__nHeis)
1592 1592 dc= numpy.tile(tmp,[1,self.__nProf,1])
1593 1593 self.__buffer = self.__buffer - dc
1594 1594 #------------------Calculo de Potencia ------------------------
1595 1595 pair0 = self.__buffer*numpy.conj(self.__buffer)
1596 1596 pair0 = pair0.real
1597 1597 lag_0 = numpy.sum(pair0,1)
1598 1598 #-----------------Calculo de Cscp------------------------------ New
1599 1599 cspc_pair01 = self.__buffer[0]*self.__buffer[1]
1600 1600 #------------------Calculo de Ruido x canal--------------------
1601 1601 self.noise = numpy.zeros(self.__nch)
1602 1602 for i in range(self.__nch):
1603 1603 daux = numpy.sort(pair0[i,:,:],axis= None)
1604 1604 self.noise[i]=hildebrand_sekhon( daux ,self.nCohInt)
1605 1605
1606 1606 self.noise = self.noise.reshape(self.__nch,1)
1607 1607 self.noise = numpy.tile(self.noise,[1,self.__nHeis])
1608 1608 noise_buffer = self.noise.reshape(self.__nch,1,self.__nHeis)
1609 1609 noise_buffer = numpy.tile(noise_buffer,[1,self.__nProf,1])
1610 1610 #------------------ Potencia recibida= P , Potencia senal = S , Ruido= N--
1611 1611 #------------------ P= S+N ,P=lag_0/N ---------------------------------
1612 1612 #-------------------- Power --------------------------------------------------
1613 1613 data_power = lag_0/(self.n*self.nCohInt)
1614 1614 #--------------------CCF------------------------------------------------------
1615 1615 data_ccf =numpy.sum(cspc_pair01,axis=0)/(self.n*self.nCohInt)
1616 1616 #------------------ Senal --------------------------------------------------
1617 1617 data_intensity = pair0 - noise_buffer
1618 1618 data_intensity = numpy.sum(data_intensity,axis=1)*(self.n*self.nCohInt)#*self.nCohInt)
1619 1619 #data_intensity = (lag_0-self.noise*self.n)*(self.n*self.nCohInt)
1620 1620 for i in range(self.__nch):
1621 1621 for j in range(self.__nHeis):
1622 1622 if data_intensity[i][j] < 0:
1623 1623 data_intensity[i][j] = numpy.min(numpy.absolute(data_intensity[i][j]))
1624 1624
1625 1625 #----------------- Calculo de Frecuencia y Velocidad doppler--------
1626 1626 pair1 = self.__buffer[:,:-1,:]*numpy.conjugate(self.__buffer[:,1:,:])
1627 1627 lag_1 = numpy.sum(pair1,1)
1628 1628 data_freq = (-1/(2.0*math.pi*self.ippSec*self.nCohInt))*numpy.angle(lag_1)
1629 1629 data_velocity = (self.lambda_/2.0)*data_freq
1630 1630
1631 1631 #---------------- Potencia promedio estimada de la Senal-----------
1632 1632 lag_0 = lag_0/self.n
1633 1633 S = lag_0-self.noise
1634 1634
1635 1635 #---------------- Frecuencia Doppler promedio ---------------------
1636 1636 lag_1 = lag_1/(self.n-1)
1637 1637 R1 = numpy.abs(lag_1)
1638 1638
1639 1639 #---------------- Calculo del SNR----------------------------------
1640 1640 data_snrPP = S/self.noise
1641 1641 for i in range(self.__nch):
1642 1642 for j in range(self.__nHeis):
1643 1643 if data_snrPP[i][j] < 1.e-20:
1644 1644 data_snrPP[i][j] = 1.e-20
1645 1645
1646 1646 #----------------- Calculo del ancho espectral ----------------------
1647 1647 L = S/R1
1648 1648 L = numpy.where(L<0,1,L)
1649 1649 L = numpy.log(L)
1650 1650 tmp = numpy.sqrt(numpy.absolute(L))
1651 1651 data_specwidth = (self.lambda_/(2*math.sqrt(2)*math.pi*self.ippSec*self.nCohInt))*tmp*numpy.sign(L)
1652 1652 n = self.__profIndex
1653 1653
1654 1654 self.__buffer = numpy.zeros((self.__nch, self.__nProf,self.__nHeis), dtype='complex')
1655 1655 self.__profIndex = 0
1656 1656 return data_power,data_intensity,data_velocity,data_snrPP,data_specwidth,data_ccf,n
1657 1657
1658 1658
1659 1659 def pulsePairbyProfiles(self,dataOut,n):
1660 1660
1661 1661 self.__dataReady = False
1662 1662 data_power = None
1663 1663 data_intensity = None
1664 1664 data_velocity = None
1665 1665 data_specwidth = None
1666 1666 data_snrPP = None
1667 1667 data_ccf = None
1668 1668
1669 1669 if dataOut.flagDataAsBlock:
1670 1670 self.putDataByBlock(data=dataOut.data,n=n)
1671 1671 else:
1672 1672 self.putData(data=dataOut.data)
1673 1673 if self.__profIndex == self.n:
1674 1674 data_power,data_intensity, data_velocity,data_snrPP,data_specwidth,data_ccf, n = self.pushData(dataOut=dataOut)
1675 1675 self.__dataReady = True
1676 1676
1677 1677 return data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf
1678 1678
1679 1679
1680 1680 def pulsePairOp(self, dataOut, n, datatime= None):
1681 1681
1682 1682 if self.__initime == None:
1683 1683 self.__initime = datatime
1684 1684 data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf = self.pulsePairbyProfiles(dataOut,n)
1685 1685 self.__lastdatatime = datatime
1686 1686
1687 1687 if data_power is None:
1688 1688 return None, None, None,None,None,None,None
1689 1689
1690 1690 avgdatatime = self.__initime
1691 1691 deltatime = datatime - self.__lastdatatime
1692 1692 self.__initime = datatime
1693 1693
1694 1694 return data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf, avgdatatime
1695 1695
1696 1696 def run(self, dataOut,n = None,removeDC= False, overlapping= False,**kwargs):
1697 1697
1698 1698 if dataOut.flagDataAsBlock:
1699 n = dataOut.nProfiles
1699 n = int(dataOut.nProfiles)
1700 #print("n",n)
1700 1701
1701 1702 if not self.isConfig:
1702 1703 self.setup(dataOut = dataOut, n = n , removeDC=removeDC , **kwargs)
1703 1704 self.isConfig = True
1704 1705
1705 1706
1706 1707 data_power, data_intensity, data_velocity,data_snrPP,data_specwidth,data_ccf, avgdatatime = self.pulsePairOp(dataOut, n, dataOut.utctime)
1707 1708
1708 1709
1709 1710 dataOut.flagNoData = True
1710 1711
1711 1712 if self.__dataReady:
1712 1713 ###print("READY ----------------------------------")
1713 1714 dataOut.nCohInt *= self.n
1714 1715 dataOut.dataPP_POW = data_intensity # S
1715 1716 dataOut.dataPP_POWER = data_power # P valor que corresponde a POTENCIA MOMENTO
1716 1717 dataOut.dataPP_DOP = data_velocity
1717 1718 dataOut.dataPP_SNR = data_snrPP
1718 1719 dataOut.dataPP_WIDTH = data_specwidth
1719 1720 dataOut.dataPP_CCF = data_ccf
1720 1721 dataOut.PRFbyAngle = self.n #numero de PRF*cada angulo rotado que equivale a un tiempo.
1721 1722 dataOut.nProfiles = int(dataOut.nProfiles/n)
1722 1723 dataOut.utctime = avgdatatime
1723 1724 dataOut.flagNoData = False
1724 1725 return dataOut
1725 1726
1726 1727 # import collections
1727 1728 # from scipy.stats import mode
1728 1729 #
1729 1730 # class Synchronize(Operation):
1730 1731 #
1731 1732 # isConfig = False
1732 1733 # __profIndex = 0
1733 1734 #
1734 1735 # def __init__(self, **kwargs):
1735 1736 #
1736 1737 # Operation.__init__(self, **kwargs)
1737 1738 # # self.isConfig = False
1738 1739 # self.__powBuffer = None
1739 1740 # self.__startIndex = 0
1740 1741 # self.__pulseFound = False
1741 1742 #
1742 1743 # def __findTxPulse(self, dataOut, channel=0, pulse_with = None):
1743 1744 #
1744 1745 # #Read data
1745 1746 #
1746 1747 # powerdB = dataOut.getPower(channel = channel)
1747 1748 # noisedB = dataOut.getNoise(channel = channel)[0]
1748 1749 #
1749 1750 # self.__powBuffer.extend(powerdB.flatten())
1750 1751 #
1751 1752 # dataArray = numpy.array(self.__powBuffer)
1752 1753 #
1753 1754 # filteredPower = numpy.correlate(dataArray, dataArray[0:self.__nSamples], "same")
1754 1755 #
1755 1756 # maxValue = numpy.nanmax(filteredPower)
1756 1757 #
1757 1758 # if maxValue < noisedB + 10:
1758 1759 # #No se encuentra ningun pulso de transmision
1759 1760 # return None
1760 1761 #
1761 1762 # maxValuesIndex = numpy.where(filteredPower > maxValue - 0.1*abs(maxValue))[0]
1762 1763 #
1763 1764 # if len(maxValuesIndex) < 2:
1764 1765 # #Solo se encontro un solo pulso de transmision de un baudio, esperando por el siguiente TX
1765 1766 # return None
1766 1767 #
1767 1768 # phasedMaxValuesIndex = maxValuesIndex - self.__nSamples
1768 1769 #
1769 1770 # #Seleccionar solo valores con un espaciamiento de nSamples
1770 1771 # pulseIndex = numpy.intersect1d(maxValuesIndex, phasedMaxValuesIndex)
1771 1772 #
1772 1773 # if len(pulseIndex) < 2:
1773 1774 # #Solo se encontro un pulso de transmision con ancho mayor a 1
1774 1775 # return None
1775 1776 #
1776 1777 # spacing = pulseIndex[1:] - pulseIndex[:-1]
1777 1778 #
1778 1779 # #remover senales que se distancien menos de 10 unidades o muestras
1779 1780 # #(No deberian existir IPP menor a 10 unidades)
1780 1781 #
1781 1782 # realIndex = numpy.where(spacing > 10 )[0]
1782 1783 #
1783 1784 # if len(realIndex) < 2:
1784 1785 # #Solo se encontro un pulso de transmision con ancho mayor a 1
1785 1786 # return None
1786 1787 #
1787 1788 # #Eliminar pulsos anchos (deja solo la diferencia entre IPPs)
1788 1789 # realPulseIndex = pulseIndex[realIndex]
1789 1790 #
1790 1791 # period = mode(realPulseIndex[1:] - realPulseIndex[:-1])[0][0]
1791 1792 #
1792 1793 # print "IPP = %d samples" %period
1793 1794 #
1794 1795 # self.__newNSamples = dataOut.nHeights #int(period)
1795 1796 # self.__startIndex = int(realPulseIndex[0])
1796 1797 #
1797 1798 # return 1
1798 1799 #
1799 1800 #
1800 1801 # def setup(self, nSamples, nChannels, buffer_size = 4):
1801 1802 #
1802 1803 # self.__powBuffer = collections.deque(numpy.zeros( buffer_size*nSamples,dtype=numpy.float),
1803 1804 # maxlen = buffer_size*nSamples)
1804 1805 #
1805 1806 # bufferList = []
1806 1807 #
1807 1808 # for i in range(nChannels):
1808 1809 # bufferByChannel = collections.deque(numpy.zeros( buffer_size*nSamples, dtype=numpy.complex) + numpy.NAN,
1809 1810 # maxlen = buffer_size*nSamples)
1810 1811 #
1811 1812 # bufferList.append(bufferByChannel)
1812 1813 #
1813 1814 # self.__nSamples = nSamples
1814 1815 # self.__nChannels = nChannels
1815 1816 # self.__bufferList = bufferList
1816 1817 #
1817 1818 # def run(self, dataOut, channel = 0):
1818 1819 #
1819 1820 # if not self.isConfig:
1820 1821 # nSamples = dataOut.nHeights
1821 1822 # nChannels = dataOut.nChannels
1822 1823 # self.setup(nSamples, nChannels)
1823 1824 # self.isConfig = True
1824 1825 #
1825 1826 # #Append new data to internal buffer
1826 1827 # for thisChannel in range(self.__nChannels):
1827 1828 # bufferByChannel = self.__bufferList[thisChannel]
1828 1829 # bufferByChannel.extend(dataOut.data[thisChannel])
1829 1830 #
1830 1831 # if self.__pulseFound:
1831 1832 # self.__startIndex -= self.__nSamples
1832 1833 #
1833 1834 # #Finding Tx Pulse
1834 1835 # if not self.__pulseFound:
1835 1836 # indexFound = self.__findTxPulse(dataOut, channel)
1836 1837 #
1837 1838 # if indexFound == None:
1838 1839 # dataOut.flagNoData = True
1839 1840 # return
1840 1841 #
1841 1842 # self.__arrayBuffer = numpy.zeros((self.__nChannels, self.__newNSamples), dtype = numpy.complex)
1842 1843 # self.__pulseFound = True
1843 1844 # self.__startIndex = indexFound
1844 1845 #
1845 1846 # #If pulse was found ...
1846 1847 # for thisChannel in range(self.__nChannels):
1847 1848 # bufferByChannel = self.__bufferList[thisChannel]
1848 1849 # #print self.__startIndex
1849 1850 # x = numpy.array(bufferByChannel)
1850 1851 # self.__arrayBuffer[thisChannel] = x[self.__startIndex:self.__startIndex+self.__newNSamples]
1851 1852 #
1852 1853 # deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1853 1854 # dataOut.heightList = numpy.arange(self.__newNSamples)*deltaHeight
1854 1855 # # dataOut.ippSeconds = (self.__newNSamples / deltaHeight)/1e6
1855 1856 #
1856 1857 # dataOut.data = self.__arrayBuffer
1857 1858 #
1858 1859 # self.__startIndex += self.__newNSamples
1859 1860 #
1860 1861 # return
General Comments 0
You need to be logged in to leave comments. Login now