Now we can merge ProcUnits for Double Pulse Experiments
rflores
r1452:d596eb625435
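The change below (in Project.createObjects) lets a processing unit take its input from several upstream units at once: when a configuration's inputId is a list, setInput now receives the objects of all referenced units. A minimal sketch of how a double pulse project might merge two ProcUnits this way; the import path, datatypes, paths and parameter values are illustrative assumptions, not taken from this changeset:

from schainpy.controller import Project

project = Project()
project.setup(id='100', name='double_pulse', description='merge two ProcUnits')

# hypothetical reader feeding two parallel processing units
reader = project.addReadUnit(datatype='VoltageReader', path='/data/dp',
                             startDate='2021/01/15', endDate='2021/01/15',
                             startTime='00:00:00', endTime='23:59:59')
proc_a = project.addProcUnit(datatype='VoltageProc', inputId=reader.getId())
proc_b = project.addProcUnit(datatype='VoltageProc', inputId=reader.getId())

# new in this revision: inputId may be a list, so both units feed a single ProcUnit
merged = project.addProcUnit(datatype='ParametersProc',
                             inputId=[proc_a.getId(), proc_b.getId()])

project.start()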
@@ -1,662 +1,665
1 1 # Copyright (c) 2012-2020 Jicamarca Radio Observatory
2 2 # All rights reserved.
3 3 #
4 4 # Distributed under the terms of the BSD 3-clause license.
5 5 """API to create signal chain projects
6 6
7 7 The API is provided through the class: Project
8 8 """
9 9 import os
10 10 import re
11 11 import sys
12 12 import ast
13 13 import datetime
14 14 import traceback
15 15 import time
16 16 import multiprocessing
17 17 from multiprocessing import Process, Queue
18 18 from threading import Thread
19 19 from xml.etree.ElementTree import ElementTree, Element, SubElement
20 20
21 21 from schainpy.admin import Alarm, SchainWarning
22 22 from schainpy.model import *
23 23 from schainpy.utils import log
24 24
25 25 if 'darwin' in sys.platform and sys.version_info[0] == 3 and sys.version_info[1] > 7:
26 26 multiprocessing.set_start_method('fork')
27 27
28 28 class ConfBase():
29 29
30 30 def __init__(self):
31 31
32 32 self.id = '0'
33 33 self.name = None
34 34 self.priority = None
35 35 self.parameters = {}
36 36 self.object = None
37 37 self.operations = []
38 38
39 39 def getId(self):
40 40
41 41 return self.id
42 42
43 43 def getNewId(self):
44 44
45 45 return int(self.id) * 10 + len(self.operations) + 1
46 46
47 47 def updateId(self, new_id):
48 48
49 49 self.id = str(new_id)
50 50
51 51 n = 1
52 52 for conf in self.operations:
53 53 conf_id = str(int(new_id) * 10 + n)
54 54 conf.updateId(conf_id)
55 55 n += 1
56 56
57 57 def getKwargs(self):
58 58
59 59 params = {}
60 60
61 61 for key, value in self.parameters.items():
62 62 if value not in (None, '', ' '):
63 63 params[key] = value
64 64
65 65 return params
66 66
67 67 def update(self, **kwargs):
68 68
69 69 fmt = kwargs.pop('format', None)
70 70
71 71 for key, value in kwargs.items():
72 72 self.addParameter(name=key, value=value, format=fmt)
73 73
74 74 def addParameter(self, name, value, format=None):
75 75 '''
76 76 '''
77 77 if format is not None:
78 78 self.parameters[name] = eval(format)(value)
79 79 elif isinstance(value, str) and re.search(r'(\d+/\d+/\d+)', value):
80 80 self.parameters[name] = datetime.date(*[int(x) for x in value.split('/')])
81 81 elif isinstance(value, str) and re.search(r'(\d+:\d+:\d+)', value):
82 82 self.parameters[name] = datetime.time(*[int(x) for x in value.split(':')])
83 83 else:
84 84 try:
85 85 self.parameters[name] = ast.literal_eval(value)
86 86 except:
87 87 if isinstance(value, str) and ',' in value:
88 88 self.parameters[name] = value.split(',')
89 89 else:
90 90 self.parameters[name] = value
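# For reference (not part of this changeset), a sketch of how addParameter above
# interprets raw string values; parameter names and inputs here are illustrative:
#     addParameter('startDate', '2021/01/15')  -> datetime.date(2021, 1, 15)
#     addParameter('startTime', '05:30:00')    -> datetime.time(5, 30, 0)
#     addParameter('channelList', '0,1,2')     -> (0, 1, 2)          via ast.literal_eval
#     addParameter('code', 'a,b,c')            -> ['a', 'b', 'c']    comma-split fallback
#     addParameter('nAvg', '8', format='int')  -> 8                  explicit format wins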
91 91
92 92 def getParameters(self):
93 93
94 94 params = {}
95 95 for key, value in self.parameters.items():
96 96 s = type(value).__name__
97 97 if s == 'date':
98 98 params[key] = value.strftime('%Y/%m/%d')
99 99 elif s == 'time':
100 100 params[key] = value.strftime('%H:%M:%S')
101 101 else:
102 102 params[key] = str(value)
103 103
104 104 return params
105 105
106 106 def makeXml(self, element):
107 107
108 108 xml = SubElement(element, self.ELEMENTNAME)
109 109 for label in self.xml_labels:
110 110 xml.set(label, str(getattr(self, label)))
111 111
112 112 for key, value in self.getParameters().items():
113 113 xml_param = SubElement(xml, 'Parameter')
114 114 xml_param.set('name', key)
115 115 xml_param.set('value', value)
116 116
117 117 for conf in self.operations:
118 118 conf.makeXml(xml)
119 119
120 120 def __str__(self):
121 121
122 122 if self.ELEMENTNAME == 'Operation':
123 123 s = ' {}[id={}]\n'.format(self.name, self.id)
124 124 else:
125 125 s = '{}[id={}, inputId={}]\n'.format(self.name, self.id, self.inputId)
126 126
127 127 for key, value in self.parameters.items():
128 128 if self.ELEMENTNAME == 'Operation':
129 129 s += ' {}: {}\n'.format(key, value)
130 130 else:
131 131 s += ' {}: {}\n'.format(key, value)
132 132
133 133 for conf in self.operations:
134 134 s += str(conf)
135 135
136 136 return s
137 137
138 138 class OperationConf(ConfBase):
139 139
140 140 ELEMENTNAME = 'Operation'
141 141 xml_labels = ['id', 'name']
142 142
143 143 def setup(self, id, name, priority, project_id, err_queue):
144 144
145 145 self.id = str(id)
146 146 self.project_id = project_id
147 147 self.name = name
148 148 self.type = 'other'
149 149 self.err_queue = err_queue
150 150
151 151 def readXml(self, element, project_id, err_queue):
152 152
153 153 self.id = element.get('id')
154 154 self.name = element.get('name')
155 155 self.type = 'other'
156 156 self.project_id = str(project_id)
157 157 self.err_queue = err_queue
158 158
159 159 for elm in element.iter('Parameter'):
160 160 self.addParameter(elm.get('name'), elm.get('value'))
161 161
162 162 def createObject(self):
163 163
164 164 className = eval(self.name)
165 165
166 166 if 'Plot' in self.name or 'Writer' in self.name or 'Send' in self.name or 'print' in self.name:
167 167 kwargs = self.getKwargs()
168 168 opObj = className(self.id, self.id, self.project_id, self.err_queue, **kwargs)
169 169 opObj.start()
170 170 self.type = 'external'
171 171 else:
172 172 opObj = className()
173 173
174 174 self.object = opObj
175 175 return opObj
176 176
177 177 class ProcUnitConf(ConfBase):
178 178
179 179 ELEMENTNAME = 'ProcUnit'
180 180 xml_labels = ['id', 'inputId', 'name']
181 181
182 182 def setup(self, project_id, id, name, datatype, inputId, err_queue):
183 183 '''
184 184 '''
185 185
186 186 if datatype == None and name == None:
187 187 raise ValueError('datatype or name should be defined')
188 188
189 189 if name == None:
190 190 if 'Proc' in datatype:
191 191 name = datatype
192 192 else:
193 193 name = '%sProc' % (datatype)
194 194
195 195 if datatype == None:
196 196 datatype = name.replace('Proc', '')
197 197
198 198 self.id = str(id)
199 199 self.project_id = project_id
200 200 self.name = name
201 201 self.datatype = datatype
202 202 self.inputId = inputId
203 203 self.err_queue = err_queue
204 204 self.operations = []
205 205 self.parameters = {}
206 206
207 207 def removeOperation(self, id):
208 208
209 209 i = [1 if x.id==id else 0 for x in self.operations]
210 210 self.operations.pop(i.index(1))
211 211
212 212 def getOperation(self, id):
213 213
214 214 for conf in self.operations:
215 215 if conf.id == id:
216 216 return conf
217 217
218 218 def addOperation(self, name, optype='self'):
219 219 '''
220 220 '''
221 221
222 222 id = self.getNewId()
223 223 conf = OperationConf()
224 224 conf.setup(id, name=name, priority='0', project_id=self.project_id, err_queue=self.err_queue)
225 225 self.operations.append(conf)
226 226
227 227 return conf
228 228
229 229 def readXml(self, element, project_id, err_queue):
230 230
231 231 self.id = element.get('id')
232 232 self.name = element.get('name')
233 233 self.inputId = None if element.get('inputId') == 'None' else element.get('inputId')
234 234 self.datatype = element.get('datatype', self.name.replace(self.ELEMENTNAME.replace('Unit', ''), ''))
235 235 self.project_id = str(project_id)
236 236 self.err_queue = err_queue
237 237 self.operations = []
238 238 self.parameters = {}
239 239
240 240 for elm in element:
241 241 if elm.tag == 'Parameter':
242 242 self.addParameter(elm.get('name'), elm.get('value'))
243 243 elif elm.tag == 'Operation':
244 244 conf = OperationConf()
245 245 conf.readXml(elm, project_id, err_queue)
246 246 self.operations.append(conf)
247 247
248 248 def createObjects(self):
249 249 '''
250 250 Instantiate the processing units.
251 251 '''
252 252
253 253 className = eval(self.name)
254 254 kwargs = self.getKwargs()
255 255 procUnitObj = className()
256 256 procUnitObj.name = self.name
257 257 log.success('creating process...', self.name)
258 258
259 259 for conf in self.operations:
260 260
261 261 opObj = conf.createObject()
262 262
263 263 log.success('adding operation: {}, type:{}'.format(
264 264 conf.name,
265 265 conf.type), self.name)
266 266
267 267 procUnitObj.addOperation(conf, opObj)
268 268
269 269 self.object = procUnitObj
270 270
271 271 def run(self):
272 272 '''
273 273 '''
274 274
275 275 return self.object.call(**self.getKwargs())
276 276
277 277
278 278 class ReadUnitConf(ProcUnitConf):
279 279
280 280 ELEMENTNAME = 'ReadUnit'
281 281
282 282 def __init__(self):
283 283
284 284 self.id = None
285 285 self.datatype = None
286 286 self.name = None
287 287 self.inputId = None
288 288 self.operations = []
289 289 self.parameters = {}
290 290
291 291 def setup(self, project_id, id, name, datatype, err_queue, path='', startDate='', endDate='',
292 292 startTime='', endTime='', server=None, **kwargs):
293 293
294 294 if datatype == None and name == None:
295 295 raise ValueError('datatype or name should be defined')
296 296 if name == None:
297 297 if 'Reader' in datatype:
298 298 name = datatype
299 299 datatype = name.replace('Reader','')
300 300 else:
301 301 name = '{}Reader'.format(datatype)
302 302 if datatype == None:
303 303 if 'Reader' in name:
304 304 datatype = name.replace('Reader','')
305 305 else:
306 306 datatype = name
307 307 name = '{}Reader'.format(name)
308 308
309 309 self.id = id
310 310 self.project_id = project_id
311 311 self.name = name
312 312 self.datatype = datatype
313 313 self.err_queue = err_queue
314 314
315 315 self.addParameter(name='path', value=path, format='str')
316 316 self.addParameter(name='startDate', value=startDate)
317 317 self.addParameter(name='endDate', value=endDate)
318 318 self.addParameter(name='startTime', value=startTime)
319 319 self.addParameter(name='endTime', value=endTime)
320 320
321 321 for key, value in kwargs.items():
322 322 self.addParameter(name=key, value=value)
323 323
324 324
325 325 class Project(Process):
326 326 """API to create signal chain projects"""
327 327
328 328 ELEMENTNAME = 'Project'
329 329
330 330 def __init__(self, name=''):
331 331
332 332 Process.__init__(self)
333 333 self.id = '1'
334 334 if name:
335 335 self.name = '{} ({})'.format(Process.__name__, name)
336 336 self.filename = None
337 337 self.description = None
338 338 self.email = None
339 339 self.alarm = []
340 340 self.configurations = {}
341 341 # self.err_queue = Queue()
342 342 self.err_queue = None
343 343 self.started = False
344 344
345 345 def getNewId(self):
346 346
347 347 idList = list(self.configurations.keys())
348 348 id = int(self.id) * 10
349 349
350 350 while True:
351 351 id += 1
352 352
353 353 if str(id) in idList:
354 354 continue
355 355
356 356 break
357 357
358 358 return str(id)
359 359
360 360 def updateId(self, new_id):
361 361
362 362 self.id = str(new_id)
363 363
364 364 keyList = list(self.configurations.keys())
365 365 keyList.sort()
366 366
367 367 n = 1
368 368 new_confs = {}
369 369
370 370 for procKey in keyList:
371 371
372 372 conf = self.configurations[procKey]
373 373 idProcUnit = str(int(self.id) * 10 + n)
374 374 conf.updateId(idProcUnit)
375 375 new_confs[idProcUnit] = conf
376 376 n += 1
377 377
378 378 self.configurations = new_confs
379 379
380 380 def setup(self, id=1, name='', description='', email=None, alarm=[]):
381 381
382 382 self.id = str(id)
383 383 self.description = description
384 384 self.email = email
385 385 self.alarm = alarm
386 386 if name:
387 387 self.name = '{} ({})'.format(Process.__name__, name)
388 388
389 389 def update(self, **kwargs):
390 390
391 391 for key, value in kwargs.items():
392 392 setattr(self, key, value)
393 393
394 394 def clone(self):
395 395
396 396 p = Project()
397 397 p.id = self.id
398 398 p.name = self.name
399 399 p.description = self.description
400 400 p.configurations = self.configurations.copy()
401 401
402 402 return p
403 403
404 404 def addReadUnit(self, id=None, datatype=None, name=None, **kwargs):
405 405
406 406 '''
407 407 '''
408 408
409 409 if id is None:
410 410 idReadUnit = self.getNewId()
411 411 else:
412 412 idReadUnit = str(id)
413 413
414 414 conf = ReadUnitConf()
415 415 conf.setup(self.id, idReadUnit, name, datatype, self.err_queue, **kwargs)
416 416 self.configurations[conf.id] = conf
417 417
418 418 return conf
419 419
420 420 def addProcUnit(self, id=None, inputId='0', datatype=None, name=None):
421 421
422 422 '''
423 423 '''
424 424
425 425 if id is None:
426 426 idProcUnit = self.getNewId()
427 427 else:
428 428 idProcUnit = id
429 429
430 430 conf = ProcUnitConf()
431 431 conf.setup(self.id, idProcUnit, name, datatype, inputId, self.err_queue)
432 432 self.configurations[conf.id] = conf
433 433
434 434 return conf
435 435
436 436 def removeProcUnit(self, id):
437 437
438 438 if id in self.configurations:
439 439 self.configurations.pop(id)
440 440
441 441 def getReadUnit(self):
442 442
443 443 for obj in list(self.configurations.values()):
444 444 if obj.ELEMENTNAME == 'ReadUnit':
445 445 return obj
446 446
447 447 return None
448 448
449 449 def getProcUnit(self, id):
450 450
451 451 return self.configurations[id]
452 452
453 453 def getUnits(self):
454 454
455 455 keys = list(self.configurations)
456 456 keys.sort()
457 457
458 458 for key in keys:
459 459 yield self.configurations[key]
460 460
461 461 def updateUnit(self, id, **kwargs):
462 462
463 463 conf = self.configurations[id].update(**kwargs)
464 464
465 465 def makeXml(self):
466 466
467 467 xml = Element('Project')
468 468 xml.set('id', str(self.id))
469 469 xml.set('name', self.name)
470 470 xml.set('description', self.description)
471 471
472 472 for conf in self.configurations.values():
473 473 conf.makeXml(xml)
474 474
475 475 self.xml = xml
476 476
477 477 def writeXml(self, filename=None):
478 478
479 479 if filename == None:
480 480 if self.filename:
481 481 filename = self.filename
482 482 else:
483 483 filename = 'schain.xml'
484 484
485 485 if not filename:
486 486 print('filename has not been defined. Use setFilename(filename) to do it.')
487 487 return 0
488 488
489 489 abs_file = os.path.abspath(filename)
490 490
491 491 if not os.access(os.path.dirname(abs_file), os.W_OK):
492 492 print('No write permission on %s' % os.path.dirname(abs_file))
493 493 return 0
494 494
495 495 if os.path.isfile(abs_file) and not(os.access(abs_file, os.W_OK)):
496 496 print('File %s already exists and cannot be overwritten' % abs_file)
497 497 return 0
498 498
499 499 self.makeXml()
500 500
501 501 ElementTree(self.xml).write(abs_file, method='xml')
502 502
503 503 self.filename = abs_file
504 504
505 505 return 1
506 506
507 507 def readXml(self, filename):
508 508
509 509 abs_file = os.path.abspath(filename)
510 510
511 511 self.configurations = {}
512 512
513 513 try:
514 514 self.xml = ElementTree().parse(abs_file)
515 515 except:
516 516 log.error('Error reading %s, verify file format' % filename)
517 517 return 0
518 518
519 519 self.id = self.xml.get('id')
520 520 self.name = self.xml.get('name')
521 521 self.description = self.xml.get('description')
522 522
523 523 for element in self.xml:
524 524 if element.tag == 'ReadUnit':
525 525 conf = ReadUnitConf()
526 526 conf.readXml(element, self.id, self.err_queue)
527 527 self.configurations[conf.id] = conf
528 528 elif element.tag == 'ProcUnit':
529 529 conf = ProcUnitConf()
530 530 input_proc = self.configurations[element.get('inputId')]
531 531 conf.readXml(element, self.id, self.err_queue)
532 532 self.configurations[conf.id] = conf
533 533
534 534 self.filename = abs_file
535 535
536 536 return 1
537 537
538 538 def __str__(self):
539 539
540 540 text = '\nProject[id=%s, name=%s, description=%s]\n\n' % (
541 541 self.id,
542 542 self.name,
543 543 self.description,
544 544 )
545 545
546 546 for conf in self.configurations.values():
547 547 text += '{}'.format(conf)
548 548
549 549 return text
550 550
551 551 def createObjects(self):
552 552
553 553 keys = list(self.configurations.keys())
554 554 keys.sort()
555 555 for key in keys:
556 556 conf = self.configurations[key]
557 557 conf.createObjects()
558 558 if conf.inputId is not None:
559 conf.object.setInput(self.configurations[conf.inputId].object)
559 if isinstance(conf.inputId, list):
560 conf.object.setInput([self.configurations[x].object for x in conf.inputId])
561 else:
562 conf.object.setInput([self.configurations[conf.inputId].object])
560 563
561 564 def monitor(self):
562 565
563 566 t = Thread(target=self._monitor, args=(self.err_queue, self.ctx))
564 567 t.start()
565 568
566 569 def _monitor(self, queue, ctx):
567 570
568 571 import socket
569 572
570 573 procs = 0
571 574 err_msg = ''
572 575
573 576 while True:
574 577 msg = queue.get()
575 578 if '#_start_#' in msg:
576 579 procs += 1
577 580 elif '#_end_#' in msg:
578 581 procs -=1
579 582 else:
580 583 err_msg = msg
581 584
582 585 if procs == 0 or 'Traceback' in err_msg:
583 586 break
584 587 time.sleep(0.1)
585 588
586 589 if '|' in err_msg:
587 590 name, err = err_msg.split('|')
588 591 if 'SchainWarning' in err:
589 592 log.warning(err.split('SchainWarning:')[-1].split('\n')[0].strip(), name)
590 593 elif 'SchainError' in err:
591 594 log.error(err.split('SchainError:')[-1].split('\n')[0].strip(), name)
592 595 else:
593 596 log.error(err, name)
594 597 else:
595 598 name, err = self.name, err_msg
596 599
597 600 time.sleep(1)
598 601
599 602 ctx.term()
600 603
601 604 message = ''.join(err)
602 605
603 606 if err_msg:
604 607 subject = 'SChain v%s: Error running %s\n' % (
605 608 schainpy.__version__, self.name)
606 609
607 610 subtitle = 'Hostname: %s\n' % socket.gethostbyname(
608 611 socket.gethostname())
609 612 subtitle += 'Working directory: %s\n' % os.path.abspath('./')
610 613 subtitle += 'Configuration file: %s\n' % self.filename
611 614 subtitle += 'Time: %s\n' % str(datetime.datetime.now())
612 615
613 616 readUnitConfObj = self.getReadUnit()
614 617 if readUnitConfObj:
615 618 subtitle += '\nInput parameters:\n'
616 619 subtitle += '[Data path = %s]\n' % readUnitConfObj.parameters['path']
617 620 subtitle += '[Start date = %s]\n' % readUnitConfObj.parameters['startDate']
618 621 subtitle += '[End date = %s]\n' % readUnitConfObj.parameters['endDate']
619 622 subtitle += '[Start time = %s]\n' % readUnitConfObj.parameters['startTime']
620 623 subtitle += '[End time = %s]\n' % readUnitConfObj.parameters['endTime']
621 624
622 625 a = Alarm(
623 626 modes=self.alarm,
624 627 email=self.email,
625 628 message=message,
626 629 subject=subject,
627 630 subtitle=subtitle,
628 631 filename=self.filename
629 632 )
630 633
631 634 a.start()
632 635
633 636 def setFilename(self, filename):
634 637
635 638 self.filename = filename
636 639
637 640 def runProcs(self):
638 641
639 642 err = False
640 643 n = len(self.configurations)
641 644
642 645 while not err:
643 646 for conf in self.getUnits():
644 647 ok = conf.run()
645 648 if ok == 'Error':
646 649 n -= 1
647 650 continue
648 651 elif not ok:
649 652 break
650 653 if n == 0:
651 654 err = True
652 655
653 656 def run(self):
654 657
655 658 log.success('\nStarting Project {} [id={}]'.format(self.name, self.id), tag='')
656 659 self.started = True
657 660 self.start_time = time.time()
658 661 self.createObjects()
659 662 self.runProcs()
660 663 log.success('{} Done (Time: {:4.2f}s)'.format(
661 664 self.name,
662 665 time.time()-self.start_time), '')
@@ -1,1788 +1,1788
1 1 import os
2 2 import datetime
3 3 import numpy
4 4 from mpl_toolkits.axisartist.grid_finder import FixedLocator, DictFormatter
5 5 from matplotlib.patches import Polygon
6 6 from schainpy.model.graphics.jroplot_base import Plot, plt
7 7 from schainpy.model.graphics.jroplot_spectra import SpectraPlot, RTIPlot, CoherencePlot, SpectraCutPlot
8 8 from schainpy.utils import log
9 9 # libreria wradlib
10 10 import wradlib as wrl
11 11
12 12 EARTH_RADIUS = 6.3710e3
13 13
14 14
15 15 def ll2xy(lat1, lon1, lat2, lon2):
16 16
17 17 p = 0.017453292519943295
18 18 a = 0.5 - numpy.cos((lat2 - lat1) * p)/2 + numpy.cos(lat1 * p) * \
19 19 numpy.cos(lat2 * p) * (1 - numpy.cos((lon2 - lon1) * p)) / 2
20 20 r = 12742 * numpy.arcsin(numpy.sqrt(a))
21 21 theta = numpy.arctan2(numpy.sin((lon2-lon1)*p)*numpy.cos(lat2*p), numpy.cos(lat1*p)
22 22 * numpy.sin(lat2*p)-numpy.sin(lat1*p)*numpy.cos(lat2*p)*numpy.cos((lon2-lon1)*p))
23 23 theta = -theta + numpy.pi/2
24 24 return r*numpy.cos(theta), r*numpy.sin(theta)
25 25
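# Quick sanity check of ll2xy above (illustrative, not part of this changeset): the
# first point acts as the reference site and the return values are the eastward and
# northward offsets in km, e.g.
#     x, y = ll2xy(-12.0, -77.0, -11.0, -77.0)
# one degree of latitude due north gives x close to 0 and y close to 111 km.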
26 26
27 27 def km2deg(km):
28 28 '''
29 29 Convert distance in km to degrees
30 30 '''
31 31
32 32 return numpy.rad2deg(km/EARTH_RADIUS)
33 33
34 34
35 35
36 36 class SpectralMomentsPlot(SpectraPlot):
37 37 '''
38 38 Plot for Spectral Moments
39 39 '''
40 40 CODE = 'spc_moments'
41 41 # colormap = 'jet'
42 42 # plot_type = 'pcolor'
43 43
44 44 class DobleGaussianPlot(SpectraPlot):
45 45 '''
46 46 Plot for Double Gaussian fit
47 47 '''
48 48 CODE = 'gaussian_fit'
49 49 # colormap = 'jet'
50 50 # plot_type = 'pcolor'
51 51
52 52 class DoubleGaussianSpectraCutPlot(SpectraCutPlot):
53 53 '''
54 54 Plot SpectraCut with Double Gaussian Fit
55 55 '''
56 56 CODE = 'cut_gaussian_fit'
57 57
58 58 class SnrPlot(RTIPlot):
59 59 '''
60 60 Plot for SNR Data
61 61 '''
62 62
63 63 CODE = 'snr'
64 64 colormap = 'jet'
65 65
66 66 def update(self, dataOut):
67 67
68 68 data = {
69 69 'snr': 10*numpy.log10(dataOut.data_snr)
70 70 }
71 71
72 72 return data, {}
73 73
74 74 class DopplerPlot(RTIPlot):
75 75 '''
76 76 Plot for DOPPLER Data (1st moment)
77 77 '''
78 78
79 79 CODE = 'dop'
80 80 colormap = 'jet'
81 81
82 82 def update(self, dataOut):
83 83
84 84 data = {
85 85 'dop': 10*numpy.log10(dataOut.data_dop)
86 86 }
87 87
88 88 return data, {}
89 89
90 90 class PowerPlot(RTIPlot):
91 91 '''
92 92 Plot for Power Data (0 moment)
93 93 '''
94 94
95 95 CODE = 'pow'
96 96 colormap = 'jet'
97 97
98 98 def update(self, dataOut):
99 99 data = {
100 100 'pow': 10*numpy.log10(dataOut.data_pow/dataOut.normFactor)
101 101 }
102 102 return data, {}
103 103
104 104 class SpectralWidthPlot(RTIPlot):
105 105 '''
106 106 Plot for Spectral Width Data (2nd moment)
107 107 '''
108 108
109 109 CODE = 'width'
110 110 colormap = 'jet'
111 111
112 112 def update(self, dataOut):
113 113
114 114 data = {
115 115 'width': dataOut.data_width
116 116 }
117 117
118 118 return data, {}
119 119
120 120 class SkyMapPlot(Plot):
121 121 '''
122 122 Plot for meteor detection data
123 123 '''
124 124
125 125 CODE = 'param'
126 126
127 127 def setup(self):
128 128
129 129 self.ncols = 1
130 130 self.nrows = 1
131 131 self.width = 7.2
132 132 self.height = 7.2
133 133 self.nplots = 1
134 134 self.xlabel = 'Zonal Zenith Angle (deg)'
135 135 self.ylabel = 'Meridional Zenith Angle (deg)'
136 136 self.polar = True
137 137 self.ymin = -180
138 138 self.ymax = 180
139 139 self.colorbar = False
140 140
141 141 def plot(self):
142 142
143 143 arrayParameters = numpy.concatenate(self.data['param'])
144 144 error = arrayParameters[:, -1]
145 145 indValid = numpy.where(error == 0)[0]
146 146 finalMeteor = arrayParameters[indValid, :]
147 147 finalAzimuth = finalMeteor[:, 3]
148 148 finalZenith = finalMeteor[:, 4]
149 149
150 150 x = finalAzimuth * numpy.pi / 180
151 151 y = finalZenith
152 152
153 153 ax = self.axes[0]
154 154
155 155 if ax.firsttime:
156 156 ax.plot = ax.plot(x, y, 'bo', markersize=5)[0]
157 157 else:
158 158 ax.plot.set_data(x, y)
159 159
160 160 dt1 = self.getDateTime(self.data.min_time).strftime('%y/%m/%d %H:%M:%S')
161 161 dt2 = self.getDateTime(self.data.max_time).strftime('%y/%m/%d %H:%M:%S')
162 162 title = 'Meteor Detection Sky Map\n %s - %s \n Number of events: %5.0f\n' % (dt1,
163 163 dt2,
164 164 len(x))
165 165 self.titles[0] = title
166 166
167 167
168 168 class GenericRTIPlot(Plot):
169 169 '''
170 170 Plot for data_xxxx object
171 171 '''
172 172
173 173 CODE = 'param'
174 174 colormap = 'viridis'
175 175 plot_type = 'pcolorbuffer'
176 176
177 177 def setup(self):
178 178 self.xaxis = 'time'
179 179 self.ncols = 1
180 180 self.nrows = self.data.shape('param')[0]
181 181 self.nplots = self.nrows
182 182 self.plots_adjust.update({'hspace':0.8, 'left': 0.1, 'bottom': 0.08, 'right':0.95, 'top': 0.95})
183 183
184 184 if not self.xlabel:
185 185 self.xlabel = 'Time'
186 186
187 187 self.ylabel = 'Range [km]'
188 188 if not self.titles:
189 189 self.titles = ['Param {}'.format(x) for x in range(self.nrows)]
190 190
191 191 def update(self, dataOut):
192 192
193 193 data = {
194 194 'param' : numpy.concatenate([getattr(dataOut, attr) for attr in self.attr_data], axis=0)
195 195 }
196 196
197 197 meta = {}
198 198
199 199 return data, meta
200 200
201 201 def plot(self):
202 202 # self.data.normalize_heights()
203 203 self.x = self.data.times
204 204 self.y = self.data.yrange
205 205 self.z = self.data['param']
206 206 self.z = 10*numpy.log10(self.z)
207 207 self.z = numpy.ma.masked_invalid(self.z)
208 208
209 209 if self.decimation is None:
210 210 x, y, z = self.fill_gaps(self.x, self.y, self.z)
211 211 else:
212 212 x, y, z = self.fill_gaps(*self.decimate())
213 213
214 214 for n, ax in enumerate(self.axes):
215 215
216 216 self.zmax = self.zmax if self.zmax is not None else numpy.max(
217 217 self.z[n])
218 218 self.zmin = self.zmin if self.zmin is not None else numpy.min(
219 219 self.z[n])
220 220
221 221 if ax.firsttime:
222 222 if self.zlimits is not None:
223 223 self.zmin, self.zmax = self.zlimits[n]
224 224
225 225 ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n],
226 226 vmin=self.zmin,
227 227 vmax=self.zmax,
228 228 cmap=self.cmaps[n]
229 229 )
230 230 else:
231 231 if self.zlimits is not None:
232 232 self.zmin, self.zmax = self.zlimits[n]
233 233 ax.collections.remove(ax.collections[0])
234 234 ax.plt = ax.pcolormesh(x, y, z[n].T * self.factors[n],
235 235 vmin=self.zmin,
236 236 vmax=self.zmax,
237 237 cmap=self.cmaps[n]
238 238 )
239 239
240 240
241 241 class PolarMapPlot(Plot):
242 242 '''
243 243 Plot for weather radar
244 244 '''
245 245
246 246 CODE = 'param'
247 247 colormap = 'seismic'
248 248
249 249 def setup(self):
250 250 self.ncols = 1
251 251 self.nrows = 1
252 252 self.width = 9
253 253 self.height = 8
254 254 self.mode = self.data.meta['mode']
255 255 if self.channels is not None:
256 256 self.nplots = len(self.channels)
257 257 self.nrows = len(self.channels)
258 258 else:
259 259 self.nplots = self.data.shape(self.CODE)[0]
260 260 self.nrows = self.nplots
261 261 self.channels = list(range(self.nplots))
262 262 if self.mode == 'E':
263 263 self.xlabel = 'Longitude'
264 264 self.ylabel = 'Latitude'
265 265 else:
266 266 self.xlabel = 'Range (km)'
267 267 self.ylabel = 'Height (km)'
268 268 self.bgcolor = 'white'
269 269 self.cb_labels = self.data.meta['units']
270 270 self.lat = self.data.meta['latitude']
271 271 self.lon = self.data.meta['longitude']
272 272 self.xmin, self.xmax = float(
273 273 km2deg(self.xmin) + self.lon), float(km2deg(self.xmax) + self.lon)
274 274 self.ymin, self.ymax = float(
275 275 km2deg(self.ymin) + self.lat), float(km2deg(self.ymax) + self.lat)
276 276 # self.polar = True
277 277
278 278 def plot(self):
279 279
280 280 for n, ax in enumerate(self.axes):
281 281 data = self.data['param'][self.channels[n]]
282 282
283 283 zeniths = numpy.linspace(
284 284 0, self.data.meta['max_range'], data.shape[1])
285 285 if self.mode == 'E':
286 286 azimuths = -numpy.radians(self.data.yrange)+numpy.pi/2
287 287 r, theta = numpy.meshgrid(zeniths, azimuths)
288 288 x, y = r*numpy.cos(theta)*numpy.cos(numpy.radians(self.data.meta['elevation'])), r*numpy.sin(
289 289 theta)*numpy.cos(numpy.radians(self.data.meta['elevation']))
290 290 x = km2deg(x) + self.lon
291 291 y = km2deg(y) + self.lat
292 292 else:
293 293 azimuths = numpy.radians(self.data.yrange)
294 294 r, theta = numpy.meshgrid(zeniths, azimuths)
295 295 x, y = r*numpy.cos(theta), r*numpy.sin(theta)
296 296 self.y = zeniths
297 297
298 298 if ax.firsttime:
299 299 if self.zlimits is not None:
300 300 self.zmin, self.zmax = self.zlimits[n]
301 301 ax.plt = ax.pcolormesh( # r, theta, numpy.ma.array(data, mask=numpy.isnan(data)),
302 302 x, y, numpy.ma.array(data, mask=numpy.isnan(data)),
303 303 vmin=self.zmin,
304 304 vmax=self.zmax,
305 305 cmap=self.cmaps[n])
306 306 else:
307 307 if self.zlimits is not None:
308 308 self.zmin, self.zmax = self.zlimits[n]
309 309 ax.collections.remove(ax.collections[0])
310 310 ax.plt = ax.pcolormesh( # r, theta, numpy.ma.array(data, mask=numpy.isnan(data)),
311 311 x, y, numpy.ma.array(data, mask=numpy.isnan(data)),
312 312 vmin=self.zmin,
313 313 vmax=self.zmax,
314 314 cmap=self.cmaps[n])
315 315
316 316 if self.mode == 'A':
317 317 continue
318 318
319 319 # plot district names
320 320 f = open('/data/workspace/schain_scripts/distrito.csv')
321 321 for line in f:
322 322 label, lon, lat = [s.strip() for s in line.split(',') if s]
323 323 lat = float(lat)
324 324 lon = float(lon)
325 325 # ax.plot(lon, lat, '.b', ms=2)
326 326 ax.text(lon, lat, label, ha='center',
327 327 va='bottom', size='8', color='black')
328 328
329 329 # plot boundaries
330 330 limites = []
331 331 tmp = []
332 332 for line in open('/data/workspace/schain_scripts/lima.csv'):
333 333 if '#' in line:
334 334 if tmp:
335 335 limites.append(tmp)
336 336 tmp = []
337 337 continue
338 338 values = line.strip().split(',')
339 339 tmp.append((float(values[0]), float(values[1])))
340 340 for points in limites:
341 341 ax.add_patch(
342 342 Polygon(points, ec='k', fc='none', ls='--', lw=0.5))
343 343
344 344 # plot river basins
345 345 for cuenca in ('rimac', 'lurin', 'mala', 'chillon', 'chilca', 'chancay-huaral'):
346 346 f = open('/data/workspace/schain_scripts/{}.csv'.format(cuenca))
347 347 values = [line.strip().split(',') for line in f]
348 348 points = [(float(s[0]), float(s[1])) for s in values]
349 349 ax.add_patch(Polygon(points, ec='b', fc='none'))
350 350
351 351 # plot grid
352 352 for r in (15, 30, 45, 60):
353 353 ax.add_artist(plt.Circle((self.lon, self.lat),
354 354 km2deg(r), color='0.6', fill=False, lw=0.2))
355 355 ax.text(
356 356 self.lon + (km2deg(r))*numpy.cos(60*numpy.pi/180),
357 357 self.lat + (km2deg(r))*numpy.sin(60*numpy.pi/180),
358 358 '{}km'.format(r),
359 359 ha='center', va='bottom', size='8', color='0.6', weight='heavy')
360 360
361 361 if self.mode == 'E':
362 362 title = 'El={}$^\circ$'.format(self.data.meta['elevation'])
363 363 label = 'E{:02d}'.format(int(self.data.meta['elevation']))
364 364 else:
365 365 title = 'Az={}$^\circ$'.format(self.data.meta['azimuth'])
366 366 label = 'A{:02d}'.format(int(self.data.meta['azimuth']))
367 367
368 368 self.save_labels = ['{}-{}'.format(lbl, label) for lbl in self.labels]
369 369 self.titles = ['{} {}'.format(
370 370 self.data.parameters[x], title) for x in self.channels]
371 371
372 372 class WeatherPlot(Plot):
373 373 CODE = 'weather'
374 374 plot_name = 'weather'
375 375 plot_type = 'ppistyle'
376 376 buffering = False
377 377
378 378 def setup(self):
379 379 self.ncols = 1
380 380 self.nrows = 1
381 381 self.width =8
382 382 self.height =8
383 383 self.nplots= 1
384 384 self.ylabel= 'Range [Km]'
385 385 self.titles= ['Weather']
386 386 self.colorbar=False
387 387 self.ini =0
388 388 self.len_azi =0
389 389 self.buffer_ini = None
390 390 self.buffer_azi = None
391 391 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
392 392 self.flag =0
393 393 self.indicador= 0
394 394 self.last_data_azi = None
395 395 self.val_mean = None
396 396
397 397 def update(self, dataOut):
398 398
399 399 data = {}
400 400 meta = {}
401 401 if hasattr(dataOut, 'dataPP_POWER'):
402 402 factor = 1
403 403 if hasattr(dataOut, 'nFFTPoints'):
404 404 factor = dataOut.normFactor
405 405 #print("DIME EL SHAPE PORFAVOR",dataOut.data_360.shape)
406 406 data['weather'] = 10*numpy.log10(dataOut.data_360[1]/(factor))
407 407 data['azi'] = dataOut.data_azi
408 408 data['ele'] = dataOut.data_ele
409 409 return data, meta
410 410
411 411 def get2List(self,angulos):
412 412 list1=[]
413 413 list2=[]
414 414 for i in reversed(range(len(angulos))):
415 415 diff_ = angulos[i]-angulos[i-1]
416 416 if diff_ >1.5:
417 417 list1.append(i-1)
418 418 list2.append(diff_)
419 419 return list(reversed(list1)),list(reversed(list2))
420 420
421 421 def fixData360(self,list_,ang_):
422 422 if list_[0]==-1:
423 423 vec = numpy.where(ang_<ang_[0])
424 424 ang_[vec] = ang_[vec]+360
425 425 return ang_
426 426 return ang_
427 427
428 428 def fixData360HL(self,angulos):
429 429 vec = numpy.where(angulos>=360)
430 430 angulos[vec]=angulos[vec]-360
431 431 return angulos
432 432
433 433 def search_pos(self,pos,list_):
434 434 for i in range(len(list_)):
435 435 if pos == list_[i]:
436 436 return True,i
437 437 i=None
438 438 return False,i
439 439
440 440 def fixDataComp(self,ang_,list1_,list2_):
441 441 size = len(ang_)
442 442 size2 = 0
443 443 for i in range(len(list2_)):
444 444 size2=size2+round(list2_[i])-1
445 445 new_size= size+size2
446 446 ang_new = numpy.zeros(new_size)
447 447 ang_new2 = numpy.zeros(new_size)
448 448
449 449 tmp = 0
450 450 c = 0
451 451 for i in range(len(ang_)):
452 452 ang_new[tmp +c] = ang_[i]
453 453 ang_new2[tmp+c] = ang_[i]
454 454 condition , value = self.search_pos(i,list1_)
455 455 if condition:
456 456 pos = tmp + c + 1
457 457 for k in range(round(list2_[value])-1):
458 458 ang_new[pos+k] = ang_new[pos+k-1]+1
459 459 ang_new2[pos+k] = numpy.nan
460 460 tmp = pos +k
461 461 c = 0
462 462 c=c+1
463 463 return ang_new,ang_new2
464 464
465 465 def globalCheckPED(self,angulos):
466 466 l1,l2 = self.get2List(angulos)
467 467 if len(l1)>0:
468 468 angulos2 = self.fixData360(list_=l1,ang_=angulos)
469 469 l1,l2 = self.get2List(angulos2)
470 470
471 471 ang1_,ang2_ = self.fixDataComp(ang_=angulos2,list1_=l1,list2_=l2)
472 472 ang1_ = self.fixData360HL(ang1_)
473 473 ang2_ = self.fixData360HL(ang2_)
474 474 else:
475 475 ang1_= angulos
476 476 ang2_= angulos
477 477 return ang1_,ang2_
478 478
479 479 def analizeDATA(self,data_azi):
480 480 list1 = []
481 481 list2 = []
482 482 dat = data_azi
483 483 for i in reversed(range(1,len(dat))):
484 484 if dat[i]>dat[i-1]:
485 485 diff = int(dat[i])-int(dat[i-1])
486 486 else:
487 487 diff = 360+int(dat[i])-int(dat[i-1])
488 488 if diff > 1:
489 489 list1.append(i-1)
490 490 list2.append(diff-1)
491 491 return list1,list2
492 492
493 493 def fixDATANEW(self,data_azi,data_weather):
494 494 list1,list2 = self.analizeDATA(data_azi)
495 495 if len(list1)== 0:
496 496 return data_azi,data_weather
497 497 else:
498 498 resize = 0
499 499 for i in range(len(list2)):
500 500 resize= resize + list2[i]
501 501 new_data_azi = numpy.resize(data_azi,resize)
502 502 new_data_weather = numpy.resize(data_weather, resize)
503 503
504 504 for i in range(len(list2)):
505 505 j=0
506 506 position=list1[i]+1
507 507 for j in range(list2[i]):
508 508 new_data_azi[position+j]=new_data_azi[position+j-1]+1
509 509 return new_data_azi
510 510
511 511 def fixDATA(self,data_azi):
512 512 data=data_azi
513 513 for i in range(len(data)):
514 514 if numpy.isnan(data[i]):
515 515 data[i]=data[i-1]+1
516 516 return data
517 517
518 518 def replaceNAN(self,data_weather,data_azi,val):
519 519 data= data_azi
520 520 data_T= data_weather
521 521 if data.shape[0]> data_T.shape[0]:
522 522 data_N = numpy.ones( [data.shape[0],data_T.shape[1]])
523 523 c = 0
524 524 for i in range(len(data)):
525 525 if numpy.isnan(data[i]):
526 526 data_N[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
527 527 else:
528 528 data_N[i,:]=data_T[c,:]
529 529 c=c+1
530 530 return data_N
531 531 else:
532 532 for i in range(len(data)):
533 533 if numpy.isnan(data[i]):
534 534 data_T[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
535 535 return data_T
536 536
537 537 def const_ploteo(self,data_weather,data_azi,step,res):
538 538 if self.ini==0:
539 539 #-------
540 540 n = (360/res)-len(data_azi)
541 541 #--------------------- new -------------------------
542 542 data_azi_new ,data_azi_old= self.globalCheckPED(data_azi)
543 543 #------------------------
544 544 start = data_azi_new[-1] + res
545 545 end = data_azi_new[0] - res
546 546 #------ new
547 547 self.last_data_azi = end
548 548 if start>end:
549 549 end = end + 360
550 550 azi_vacia = numpy.linspace(start,end,int(n))
551 551 azi_vacia = numpy.where(azi_vacia>360,azi_vacia-360,azi_vacia)
552 552 data_azi = numpy.hstack((data_azi_new,azi_vacia))
553 553 # RADAR
554 554 val_mean = numpy.mean(data_weather[:,-1])
555 555 self.val_mean = val_mean
556 556 data_weather_cmp = numpy.ones([(360-data_weather.shape[0]),data_weather.shape[1]])*val_mean
557 557 data_weather = self.replaceNAN(data_weather=data_weather,data_azi=data_azi_old,val=self.val_mean)
558 558 data_weather = numpy.vstack((data_weather,data_weather_cmp))
559 559 else:
560 560 # azimuth
561 561 flag=0
562 562 start_azi = self.res_azi[0]
563 563 #-----------new------------
564 564 data_azi ,data_azi_old= self.globalCheckPED(data_azi)
565 565 data_weather = self.replaceNAN(data_weather=data_weather,data_azi=data_azi_old,val=self.val_mean)
566 566 #--------------------------
567 567 start = data_azi[0]
568 568 end = data_azi[-1]
569 569 self.last_data_azi= end
570 570 if start< start_azi:
571 571 start = start +360
572 572 if end <start_azi:
573 573 end = end +360
574 574
575 575 pos_ini = int((start-start_azi)/res)
576 576 len_azi = len(data_azi)
577 577 if (360-pos_ini)<len_azi:
578 578 if pos_ini+1==360:
579 579 pos_ini=0
580 580 else:
581 581 flag=1
582 582 dif= 360-pos_ini
583 583 comp= len_azi-dif
584 584 #-----------------
585 585 if flag==0:
586 586 # AZIMUTH
587 587 self.res_azi[pos_ini:pos_ini+len_azi] = data_azi
588 588 # RADAR
589 589 self.res_weather[pos_ini:pos_ini+len_azi,:] = data_weather
590 590 else:
591 591 # AZIMUTH
592 592 self.res_azi[pos_ini:pos_ini+dif] = data_azi[0:dif]
593 593 self.res_azi[0:comp] = data_azi[dif:]
594 594 # RADAR
595 595 self.res_weather[pos_ini:pos_ini+dif,:] = data_weather[0:dif,:]
596 596 self.res_weather[0:comp,:] = data_weather[dif:,:]
597 597 flag=0
598 598 data_azi = self.res_azi
599 599 data_weather = self.res_weather
600 600
601 601 return data_weather,data_azi
602 602
603 603 def plot(self):
604 604 thisDatetime = datetime.datetime.utcfromtimestamp(self.data.times[-1]).strftime('%Y-%m-%d %H:%M:%S')
605 605 data = self.data[-1]
606 606 r = self.data.yrange
607 607 delta_height = r[1]-r[0]
608 608 r_mask = numpy.where(r>=0)[0]
609 609 r = numpy.arange(len(r_mask))*delta_height
610 610 self.y = 2*r
611 611 # RADAR
612 612 #data_weather = data['weather']
613 613 # PEDESTAL
614 614 #data_azi = data['azi']
615 615 res = 1
616 616 # STEP
617 617 step = (360/(res*data['weather'].shape[0]))
618 618
619 619 self.res_weather, self.res_azi = self.const_ploteo(data_weather=data['weather'][:,r_mask],data_azi=data['azi'],step=step,res=res)
620 620 self.res_ele = numpy.mean(data['ele'])
621 621 ################# PLOTTING ###################
622 622 for i,ax in enumerate(self.axes):
623 623 self.zmin = self.zmin if self.zmin else 20
624 624 self.zmax = self.zmax if self.zmax else 80
625 625 if ax.firsttime:
626 626 plt.clf()
627 627 cgax, pm = wrl.vis.plot_ppi(self.res_weather,r=r,az=self.res_azi,fig=self.figures[0], proj='cg', vmin=self.zmin, vmax=self.zmax)
628 628 else:
629 629 plt.clf()
630 630 cgax, pm = wrl.vis.plot_ppi(self.res_weather,r=r,az=self.res_azi,fig=self.figures[0], proj='cg', vmin=self.zmin, vmax=self.zmax)
631 631 caax = cgax.parasites[0]
632 632 paax = cgax.parasites[1]
633 633 cbar = plt.gcf().colorbar(pm, pad=0.075)
634 634 caax.set_xlabel('x_range [km]')
635 635 caax.set_ylabel('y_range [km]')
636 636 plt.text(1.0, 1.05, 'Azimuth '+str(thisDatetime)+" Step "+str(self.ini)+ " EL: "+str(round(self.res_ele, 1)), transform=caax.transAxes, va='bottom',ha='right')
637 637
638 638 self.ini= self.ini+1
639 639
640 640
641 641 class WeatherRHIPlot(Plot):
642 642 CODE = 'weather'
643 643 plot_name = 'weather'
644 644 plot_type = 'rhistyle'
645 645 buffering = False
646 646 data_ele_tmp = None
647 647
648 648 def setup(self):
649 649 print("********************")
650 650 print("********************")
651 651 print("********************")
652 652 print("SETUP WEATHER PLOT")
653 653 self.ncols = 1
654 654 self.nrows = 1
655 655 self.nplots= 1
656 656 self.ylabel= 'Range [Km]'
657 657 self.titles= ['Weather']
658 658 if self.channels is not None:
659 659 self.nplots = len(self.channels)
660 660 self.nrows = len(self.channels)
661 661 else:
662 662 self.nplots = self.data.shape(self.CODE)[0]
663 663 self.nrows = self.nplots
664 664 self.channels = list(range(self.nplots))
665 665 print("channels",self.channels)
666 666 print("que saldra", self.data.shape(self.CODE)[0])
667 667 self.titles = ['{} Channel {}'.format(self.CODE.upper(), x) for x in range(self.nrows)]
668 668 print("self.titles",self.titles)
669 669 self.colorbar=False
670 670 self.width =12
671 671 self.height =8
672 672 self.ini =0
673 673 self.len_azi =0
674 674 self.buffer_ini = None
675 675 self.buffer_ele = None
676 676 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
677 677 self.flag =0
678 678 self.indicador= 0
679 679 self.last_data_ele = None
680 680 self.val_mean = None
681 681
682 682 def update(self, dataOut):
683 683
684 684 data = {}
685 685 meta = {}
686 686 if hasattr(dataOut, 'dataPP_POWER'):
687 687 factor = 1
688 688 if hasattr(dataOut, 'nFFTPoints'):
689 689 factor = dataOut.normFactor
690 690 print("dataOut",dataOut.data_360.shape)
691 691 #
692 692 data['weather'] = 10*numpy.log10(dataOut.data_360/(factor))
693 693 #
694 694 #data['weather'] = 10*numpy.log10(dataOut.data_360[1]/(factor))
695 695 data['azi'] = dataOut.data_azi
696 696 data['ele'] = dataOut.data_ele
697 697 #print("UPDATE")
698 698 #print("data[weather]",data['weather'].shape)
699 699 #print("data[azi]",data['azi'])
700 700 return data, meta
701 701
702 702 def get2List(self,angulos):
703 703 list1=[]
704 704 list2=[]
705 705 for i in reversed(range(len(angulos))):
706 706 if not i==0: # the i=0 case would compare the first element of the list with the last, which is not relevant
707 707 diff_ = angulos[i]-angulos[i-1]
708 708 if abs(diff_) >1.5:
709 709 list1.append(i-1)
710 710 list2.append(diff_)
711 711 return list(reversed(list1)),list(reversed(list2))
712 712
713 713 def fixData90(self,list_,ang_):
714 714 if list_[0]==-1:
715 715 vec = numpy.where(ang_<ang_[0])
716 716 ang_[vec] = ang_[vec]+90
717 717 return ang_
718 718 return ang_
719 719
720 720 def fixData90HL(self,angulos):
721 721 vec = numpy.where(angulos>=90)
722 722 angulos[vec]=angulos[vec]-90
723 723 return angulos
724 724
725 725
726 726 def search_pos(self,pos,list_):
727 727 for i in range(len(list_)):
728 728 if pos == list_[i]:
729 729 return True,i
730 730 i=None
731 731 return False,i
732 732
733 733 def fixDataComp(self,ang_,list1_,list2_,tipo_case):
734 734 size = len(ang_)
735 735 size2 = 0
736 736 for i in range(len(list2_)):
737 737 size2=size2+round(abs(list2_[i]))-1
738 738 new_size= size+size2
739 739 ang_new = numpy.zeros(new_size)
740 740 ang_new2 = numpy.zeros(new_size)
741 741
742 742 tmp = 0
743 743 c = 0
744 744 for i in range(len(ang_)):
745 745 ang_new[tmp +c] = ang_[i]
746 746 ang_new2[tmp+c] = ang_[i]
747 747 condition , value = self.search_pos(i,list1_)
748 748 if condition:
749 749 pos = tmp + c + 1
750 750 for k in range(round(abs(list2_[value]))-1):
751 751 if tipo_case==0 or tipo_case==3: # ascending
752 752 ang_new[pos+k] = ang_new[pos+k-1]+1
753 753 ang_new2[pos+k] = numpy.nan
754 754 elif tipo_case==1 or tipo_case==2: # descending
755 755 ang_new[pos+k] = ang_new[pos+k-1]-1
756 756 ang_new2[pos+k] = numpy.nan
757 757
758 758 tmp = pos +k
759 759 c = 0
760 760 c=c+1
761 761 return ang_new,ang_new2
762 762
763 763 def globalCheckPED(self,angulos,tipo_case):
764 764 l1,l2 = self.get2List(angulos)
765 765 ##print("l1",l1)
766 766 ##print("l2",l2)
767 767 if len(l1)>0:
768 768 #angulos2 = self.fixData90(list_=l1,ang_=angulos)
769 769 #l1,l2 = self.get2List(angulos2)
770 770 ang1_,ang2_ = self.fixDataComp(ang_=angulos,list1_=l1,list2_=l2,tipo_case=tipo_case)
771 771 #ang1_ = self.fixData90HL(ang1_)
772 772 #ang2_ = self.fixData90HL(ang2_)
773 773 else:
774 774 ang1_= angulos
775 775 ang2_= angulos
776 776 return ang1_,ang2_
777 777
778 778
779 779 def replaceNAN(self,data_weather,data_ele,val):
780 780 data= data_ele
781 781 data_T= data_weather
782 782 if data.shape[0]> data_T.shape[0]:
783 783 data_N = numpy.ones( [data.shape[0],data_T.shape[1]])
784 784 c = 0
785 785 for i in range(len(data)):
786 786 if numpy.isnan(data[i]):
787 787 data_N[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
788 788 else:
789 789 data_N[i,:]=data_T[c,:]
790 790 c=c+1
791 791 return data_N
792 792 else:
793 793 for i in range(len(data)):
794 794 if numpy.isnan(data[i]):
795 795 data_T[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
796 796 return data_T
797 797
798 798 def check_case(self,data_ele,ang_max,ang_min):
799 799 start = data_ele[0]
800 800 end = data_ele[-1]
801 801 number = (end-start)
802 802 len_ang=len(data_ele)
803 803 print("start",start)
804 804 print("end",end)
805 805 print("number",number)
806 806
807 807 print("len_ang",len_ang)
808 808
809 809 #exit(1)
810 810
811 811 if start<end and (round(abs(number)+1)>=len_ang or (numpy.argmin(data_ele)==0)): # ascending case
812 812 return 0
813 813 #elif start>end and (round(abs(number)+1)>=len_ang or(numpy.argmax(data_ele)==0)): # descending case
814 814 # return 1
815 815 elif round(abs(number)+1)>=len_ang and (start>end or(numpy.argmax(data_ele)==0)): # descending case
816 816 return 1
817 817 elif round(abs(number)+1)<len_ang and data_ele[-2]>data_ele[-1]: # descending case, max angle changed
818 818 return 2
819 819 elif round(abs(number)+1)<len_ang and data_ele[-2]<data_ele[-1] : # ascending case, min angle changed
820 820 return 3
821 821
822 822
823 823 def const_ploteo(self,val_ch,data_weather,data_ele,step,res,ang_max,ang_min):
824 824 ang_max= ang_max
825 825 ang_min= ang_min
826 826 data_weather=data_weather
827 827 val_ch=val_ch
828 828 ##print("*********************DATA WEATHER**************************************")
829 829 ##print(data_weather)
830 830 if self.ini==0:
831 831 '''
832 832 print("**********************************************")
833 833 print("**********************************************")
834 834 print("***************ini**************")
835 835 print("**********************************************")
836 836 print("**********************************************")
837 837 '''
838 838 #print("data_ele",data_ele)
839 839 #----------------------------------------------------------
840 840 tipo_case = self.check_case(data_ele,ang_max,ang_min)
841 841 print("check_case",tipo_case)
842 842 #exit(1)
843 843 #--------------------- new -------------------------
844 844 data_ele_new ,data_ele_old= self.globalCheckPED(data_ele,tipo_case)
845 845
846 846 #------------------------- RHI CHANGES ---------------------------------
847 847 start= ang_min
848 848 end = ang_max
849 849 n= (ang_max-ang_min)/res
850 850 #------ new
851 851 self.start_data_ele = data_ele_new[0]
852 852 self.end_data_ele = data_ele_new[-1]
853 853 if tipo_case==0 or tipo_case==3: # ASCENDING
854 854 n1= round(self.start_data_ele)- start
855 855 n2= end - round(self.end_data_ele)
856 856 print(self.start_data_ele)
857 857 print(self.end_data_ele)
858 858 if n1>0:
859 859 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
860 860 ele1_nan= numpy.ones(n1)*numpy.nan
861 861 data_ele = numpy.hstack((ele1,data_ele_new))
862 862 print("ele1_nan",ele1_nan.shape)
863 863 print("data_ele_old",data_ele_old.shape)
864 864 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
865 865 if n2>0:
866 866 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
867 867 ele2_nan= numpy.ones(n2)*numpy.nan
868 868 data_ele = numpy.hstack((data_ele,ele2))
869 869 print("ele2_nan",ele2_nan.shape)
870 870 print("data_ele_old",data_ele_old.shape)
871 871 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
872 872
873 873 if tipo_case==1 or tipo_case==2: # DESCENDING
874 874 data_ele_new = data_ele_new[::-1] # reversed
875 875 data_ele_old = data_ele_old[::-1] # reversed
876 876 data_weather = data_weather[::-1,:] # reversed
877 877 vec= numpy.where(data_ele_new<ang_max)
878 878 data_ele_new = data_ele_new[vec]
879 879 data_ele_old = data_ele_old[vec]
880 880 data_weather = data_weather[vec[0]]
881 881 vec2= numpy.where(0<data_ele_new)
882 882 data_ele_new = data_ele_new[vec2]
883 883 data_ele_old = data_ele_old[vec2]
884 884 data_weather = data_weather[vec2[0]]
885 885 self.start_data_ele = data_ele_new[0]
886 886 self.end_data_ele = data_ele_new[-1]
887 887
888 888 n1= round(self.start_data_ele)- start
889 889 n2= end - round(self.end_data_ele)-1
890 890 print(self.start_data_ele)
891 891 print(self.end_data_ele)
892 892 if n1>0:
893 893 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
894 894 ele1_nan= numpy.ones(n1)*numpy.nan
895 895 data_ele = numpy.hstack((ele1,data_ele_new))
896 896 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
897 897 if n2>0:
898 898 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
899 899 ele2_nan= numpy.ones(n2)*numpy.nan
900 900 data_ele = numpy.hstack((data_ele,ele2))
901 901 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
902 902 # RADAR
903 903 # NOTE: data_ele and data_weather are the variables that get returned
904 904 val_mean = numpy.mean(data_weather[:,-1])
905 905 self.val_mean = val_mean
906 906 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
907 907 self.data_ele_tmp[val_ch]= data_ele_old
908 908 else:
909 909 #print("**********************************************")
910 910 #print("****************VARIABLE**********************")
911 911 #------------------------- RHI CHANGES ---------------------------------
912 912 #---------------------------------------------------------------------
913 913 ##print("INPUT data_ele",data_ele)
914 914 flag=0
915 915 start_ele = self.res_ele[0]
916 916 tipo_case = self.check_case(data_ele,ang_max,ang_min)
917 917 #print("TIPO DE DATA",tipo_case)
918 918 #-----------new------------
919 919 data_ele ,data_ele_old = self.globalCheckPED(data_ele,tipo_case)
920 920 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
921 921
922 922 #-------------------------------NEW RHI ITERATIVO-------------------------
923 923
924 924 if tipo_case==0 : # ASCENDING
925 925 vec = numpy.where(data_ele<ang_max)
926 926 data_ele = data_ele[vec]
927 927 data_ele_old = data_ele_old[vec]
928 928 data_weather = data_weather[vec[0]]
929 929
930 930 vec2 = numpy.where(0<data_ele)
931 931 data_ele= data_ele[vec2]
932 932 data_ele_old= data_ele_old[vec2]
933 933 ##print(data_ele_new)
934 934 data_weather= data_weather[vec2[0]]
935 935
936 936 new_i_ele = int(round(data_ele[0]))
937 937 new_f_ele = int(round(data_ele[-1]))
938 938 #print(new_i_ele)
939 939 #print(new_f_ele)
940 940 #print(data_ele,len(data_ele))
941 941 #print(data_ele_old,len(data_ele_old))
942 942 if new_i_ele< 2:
943 943 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
944 944 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
945 945 self.data_ele_tmp[val_ch][new_i_ele:new_i_ele+len(data_ele)]=data_ele_old
946 946 self.res_ele[new_i_ele:new_i_ele+len(data_ele)]= data_ele
947 947 self.res_weather[val_ch][new_i_ele:new_i_ele+len(data_ele),:]= data_weather
948 948 data_ele = self.res_ele
949 949 data_weather = self.res_weather[val_ch]
950 950
951 951 elif tipo_case==1 : # DESCENDING
952 952 data_ele = data_ele[::-1] # reversed
953 953 data_ele_old = data_ele_old[::-1] # reversed
954 954 data_weather = data_weather[::-1,:] # reversed
955 955 vec= numpy.where(data_ele<ang_max)
956 956 data_ele = data_ele[vec]
957 957 data_ele_old = data_ele_old[vec]
958 958 data_weather = data_weather[vec[0]]
959 959 vec2= numpy.where(0<data_ele)
960 960 data_ele = data_ele[vec2]
961 961 data_ele_old = data_ele_old[vec2]
962 962 data_weather = data_weather[vec2[0]]
963 963
964 964
965 965 new_i_ele = int(round(data_ele[0]))
966 966 new_f_ele = int(round(data_ele[-1]))
967 967 #print(data_ele)
968 968 #print(ang_max)
969 969 #print(data_ele_old)
970 970 if new_i_ele <= 1:
971 971 new_i_ele = 1
972 972 if round(data_ele[-1])>=ang_max-1:
973 973 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
974 974 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
975 975 self.data_ele_tmp[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1]=data_ele_old
976 976 self.res_ele[new_i_ele-1:new_i_ele+len(data_ele)-1]= data_ele
977 977 self.res_weather[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1,:]= data_weather
978 978 data_ele = self.res_ele
979 979 data_weather = self.res_weather[val_ch]
980 980
981 981 elif tipo_case==2: # descending
982 982 vec = numpy.where(data_ele<ang_max)
983 983 data_ele = data_ele[vec]
984 984 data_weather= data_weather[vec[0]]
985 985
986 986 len_vec = len(vec)
987 987 data_ele_new = data_ele[::-1] # reversa
988 988 data_weather = data_weather[::-1,:]
989 989 new_i_ele = int(data_ele_new[0])
990 990 new_f_ele = int(data_ele_new[-1])
991 991
992 992 n1= new_i_ele- ang_min
993 993 n2= ang_max - new_f_ele-1
994 994 if n1>0:
995 995 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
996 996 ele1_nan= numpy.ones(n1)*numpy.nan
997 997 data_ele = numpy.hstack((ele1,data_ele_new))
998 998 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
999 999 if n2>0:
1000 1000 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
1001 1001 ele2_nan= numpy.ones(n2)*numpy.nan
1002 1002 data_ele = numpy.hstack((data_ele,ele2))
1003 1003 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1004 1004
1005 1005 self.data_ele_tmp[val_ch] = data_ele_old
1006 1006 self.res_ele = data_ele
1007 1007 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1008 1008 data_ele = self.res_ele
1009 1009 data_weather = self.res_weather[val_ch]
1010 1010
1011 1011 elif tipo_case==3: # ascending
1012 1012 vec = numpy.where(0<data_ele)
1013 1013 data_ele= data_ele[vec]
1014 1014 data_ele_new = data_ele
1015 1015 data_ele_old= data_ele_old[vec]
1016 1016 data_weather= data_weather[vec[0]]
1017 1017 pos_ini = numpy.argmin(data_ele)
1018 1018 if pos_ini>0:
1019 1019 len_vec= len(data_ele)
1020 1020 vec3 = numpy.linspace(pos_ini,len_vec-1,len_vec-pos_ini).astype(int)
1021 1021 #print(vec3)
1022 1022 data_ele= data_ele[vec3]
1023 1023 data_ele_new = data_ele
1024 1024 data_ele_old= data_ele_old[vec3]
1025 1025 data_weather= data_weather[vec3]
1026 1026
1027 1027 new_i_ele = int(data_ele_new[0])
1028 1028 new_f_ele = int(data_ele_new[-1])
1029 1029 n1= new_i_ele- ang_min
1030 1030 n2= ang_max - new_f_ele-1
1031 1031 if n1>0:
1032 1032 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
1033 1033 ele1_nan= numpy.ones(n1)*numpy.nan
1034 1034 data_ele = numpy.hstack((ele1,data_ele_new))
1035 1035 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
1036 1036 if n2>0:
1037 1037 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
1038 1038 ele2_nan= numpy.ones(n2)*numpy.nan
1039 1039 data_ele = numpy.hstack((data_ele,ele2))
1040 1040 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1041 1041
1042 1042 self.data_ele_tmp[val_ch] = data_ele_old
1043 1043 self.res_ele = data_ele
1044 1044 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1045 1045 data_ele = self.res_ele
1046 1046 data_weather = self.res_weather[val_ch]
1047 1047 #print("self.data_ele_tmp",self.data_ele_tmp)
1048 1048 return data_weather,data_ele
1049 1049
1050 1050
1051 1051 def plot(self):
1052 1052 thisDatetime = datetime.datetime.utcfromtimestamp(self.data.times[-1]).strftime('%Y-%m-%d %H:%M:%S')
1053 1053 data = self.data[-1]
1054 1054 r = self.data.yrange
1055 1055 delta_height = r[1]-r[0]
1056 1056 r_mask = numpy.where(r>=0)[0]
1057 1057 ##print("delta_height",delta_height)
1058 1058 #print("r_mask",r_mask,len(r_mask))
1059 1059 r = numpy.arange(len(r_mask))*delta_height
1060 1060 self.y = 2*r
1061 1061 res = 1
1062 1062 ###print("data['weather'].shape[0]",data['weather'].shape[0])
1063 1063 ang_max = self.ang_max
1064 1064 ang_min = self.ang_min
1065 1065 var_ang =ang_max - ang_min
1066 1066 step = (int(var_ang)/(res*data['weather'].shape[0]))
1067 1067 ###print("step",step)
1068 1068 #--------------------------------------------------------
1069 1069 ##print('weather',data['weather'].shape)
1070 1070 ##print('ele',data['ele'].shape)
1071 1071
1072 1072 ###self.res_weather, self.res_ele = self.const_ploteo(data_weather=data['weather'][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min)
1073 1073 ###self.res_azi = numpy.mean(data['azi'])
1074 1074 ###print("self.res_ele",self.res_ele)
1075 1075 plt.clf()
1076 1076 subplots = [121, 122]
1077 1077 cg={'angular_spacing': 20.}
1078 1078 if self.ini==0:
1079 1079 self.data_ele_tmp = numpy.ones([self.nplots,int(var_ang)])*numpy.nan
1080 1080 self.res_weather= numpy.ones([self.nplots,int(var_ang),len(r_mask)])*numpy.nan
1081 1081 print("SHAPE",self.data_ele_tmp.shape)
1082 1082
1083 1083 for i,ax in enumerate(self.axes):
1084 1084 self.res_weather[i], self.res_ele = self.const_ploteo(val_ch=i, data_weather=data['weather'][i][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min)
1085 1085 self.res_azi = numpy.mean(data['azi'])
1086 1086 if i==0:
1087 1087 print("*****************************************************************************to plot**************************",self.res_weather[i].shape)
1088 1088 self.zmin = self.zmin if self.zmin else 20
1089 1089 self.zmax = self.zmax if self.zmax else 80
1090 1090 if ax.firsttime:
1091 1091 #plt.clf()
1092 1092 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj=cg,vmin=self.zmin, vmax=self.zmax)
1093 1093 #fig=self.figures[0]
1094 1094 else:
1095 1095 #plt.clf()
1096 1096 if i==0:
1097 1097 print(self.res_weather[i])
1098 1098 print(self.res_ele)
1099 1099 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj=cg,vmin=self.zmin, vmax=self.zmax)
1100 1100 caax = cgax.parasites[0]
1101 1101 paax = cgax.parasites[1]
1102 1102 cbar = plt.gcf().colorbar(pm, pad=0.075)
1103 1103 caax.set_xlabel('x_range [km]')
1104 1104 caax.set_ylabel('y_range [km]')
1105 1105 plt.text(1.0, 1.05, 'Elevacion '+str(thisDatetime)+" Step "+str(self.ini)+ " Azi: "+str(round(self.res_azi,2)), transform=caax.transAxes, va='bottom',ha='right')
1106 1106 print("***************************self.ini****************************",self.ini)
1107 1107 self.ini= self.ini+1
1108 1108
1109 1109 class Weather_vRF_Plot(Plot):
1110 1110 CODE = 'PPI'
1111 1111 plot_name = 'PPI'
1112 1112 #plot_type = 'ppistyle'
1113 1113 buffering = False
1114 1114
1115 1115 def setup(self):
1116 1116
1117 1117 self.ncols = 1
1118 1118 self.nrows = 1
1119 1119 self.width =8
1120 1120 self.height =8
1121 1121 self.nplots= 1
1122 1122 self.ylabel= 'Range [Km]'
1123 1123 self.xlabel= 'Range [Km]'
1124 1124 self.titles= ['PPI']
1125 1125 self.polar = True
1126 1126 if self.channels is not None:
1127 1127 self.nplots = len(self.channels)
1128 1128 self.nrows = len(self.channels)
1129 1129 else:
1130 1130 self.nplots = self.data.shape(self.CODE)[0]
1131 1131 self.nrows = self.nplots
1132 1132 self.channels = list(range(self.nplots))
1133 1133
1134 1134 if self.CODE == 'POWER':
1135 1135 self.cb_label = r'Power (dB)'
1136 1136 elif self.CODE == 'DOPPLER':
1137 1137 self.cb_label = r'Velocity (m/s)'
1138 1138 self.colorbar=True
1139 1139 self.width = 9
1140 1140 self.height =8
1141 1141 self.ini =0
1142 1142 self.len_azi =0
1143 1143 self.buffer_ini = None
1144 1144 self.buffer_ele = None
1145 1145 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.15, 'right': 0.9, 'bottom': 0.08})
1146 1146 self.flag =0
1147 1147 self.indicador= 0
1148 1148 self.last_data_ele = None
1149 1149 self.val_mean = None
1150 1150
1151 1151 def update(self, dataOut):
1152 1152
1153 1153 data = {}
1154 1154 meta = {}
1155 1155 if hasattr(dataOut, 'dataPP_POWER'):
1156 1156 factor = 1
1157 1157 if hasattr(dataOut, 'nFFTPoints'):
1158 1158 factor = dataOut.normFactor
1159 1159
1160 1160 if 'pow' in self.attr_data[0].lower():
1161 1161 data['data'] = 10*numpy.log10(getattr(dataOut, self.attr_data[0])/(factor))
1162 1162 else:
1163 1163 data['data'] = getattr(dataOut, self.attr_data[0])/(factor)
1164 1164
1165 1165 data['azi'] = dataOut.data_azi
1166 1166 data['ele'] = dataOut.data_ele
1167 1167
1168 1168 return data, meta
1169 1169
1170 1170 def plot(self):
1171 1171 data = self.data[-1]
1172 1172 r = self.data.yrange
1173 1173 delta_height = r[1]-r[0]
1174 1174 r_mask = numpy.where(r>=0)[0]
1175 1175 self.r_mask = r_mask
1176 1176 r = numpy.arange(len(r_mask))*delta_height
1177 1177 self.y = 2*r
1178 1178
1179 1179 z = data['data'][self.channels[0]][:,r_mask]
1180 1180
1181 1181 self.titles = []
1182 1182
1183 1183 self.ymax = self.ymax if self.ymax else numpy.nanmax(r)
1184 1184 self.ymin = self.ymin if self.ymin else numpy.nanmin(r)
1185 1185 self.zmax = self.zmax if self.zmax else numpy.nanmax(z)
1186 1186 self.zmin = self.zmin if self.zmin else numpy.nanmin(z)
1187 1187 self.ang_min = self.ang_min if self.ang_min else 0
1188 1188 self.ang_max = self.ang_max if self.ang_max else 360
1189 1189
1190 1190 r, theta = numpy.meshgrid(r, numpy.radians(data['azi']) )
1191 1191
1192 1192 for i,ax in enumerate(self.axes):
1193 1193
1194 1194 if ax.firsttime:
1195 1195 ax.set_xlim(numpy.radians(self.ang_min),numpy.radians(self.ang_max))
1196 1196 ax.plt = ax.pcolormesh(theta, r, z, cmap=self.colormap, vmin=self.zmin, vmax=self.zmax)
1197 1197
1198 1198 else:
1199 1199 ax.set_xlim(numpy.radians(self.ang_min),numpy.radians(self.ang_max))
1200 1200 ax.plt = ax.pcolormesh(theta, r, z, cmap=self.colormap, vmin=self.zmin, vmax=self.zmax)
1201
1201
1202 1202 ax.grid(True)
1203
1203
1204 1204 if len(self.channels) !=1:
1205 1205 self.titles = ['PPI {} at EL: {} Channel {}'.format(self.labels[x], str(round(numpy.mean(data['ele']),1)), x) for x in range(self.nrows)]
1206 1206 else:
1207 1207 self.titles = ['PPI {} at EL: {} Channel {}'.format(self.labels[0], str(round(numpy.mean(data['ele']),1)), self.channels[0])]
1208 1208
1209 1209 class WeatherRHI_vRF2_Plot(Plot):
1210 1210 CODE = 'weather'
1211 1211 plot_name = 'weather'
1212 1212 plot_type = 'rhistyle'
1213 1213 buffering = False
1214 1214 data_ele_tmp = None
1215 1215
1216 1216 def setup(self):
1217 1217 print("********************")
1218 1218 print("********************")
1219 1219 print("********************")
1220 1220 print("SETUP WEATHER PLOT")
1221 1221 self.ncols = 1
1222 1222 self.nrows = 1
1223 1223 self.nplots= 1
1224 1224 self.ylabel= 'Range [Km]'
1225 1225 self.titles= ['Weather']
1226 1226 if self.channels is not None:
1227 1227 self.nplots = len(self.channels)
1228 1228 self.nrows = len(self.channels)
1229 1229 else:
1230 1230 self.nplots = self.data.shape(self.CODE)[0]
1231 1231 self.nrows = self.nplots
1232 1232 self.channels = list(range(self.nplots))
1233 1233 print("channels",self.channels)
1234 1234 print("que saldra", self.data.shape(self.CODE)[0])
1235 1235 self.titles = ['{} Channel {}'.format(self.CODE.upper(), x) for x in range(self.nrows)]
1236 1236 print("self.titles",self.titles)
1237 1237 self.colorbar=False
1238 1238 self.width =8
1239 1239 self.height =8
1240 1240 self.ini =0
1241 1241 self.len_azi =0
1242 1242 self.buffer_ini = None
1243 1243 self.buffer_ele = None
1244 1244 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
1245 1245 self.flag =0
1246 1246 self.indicador= 0
1247 1247 self.last_data_ele = None
1248 1248 self.val_mean = None
1249 1249
1250 1250 def update(self, dataOut):
1251 1251
1252 1252 data = {}
1253 1253 meta = {}
1254 1254 if hasattr(dataOut, 'dataPP_POWER'):
1255 1255 factor = 1
1256 1256 if hasattr(dataOut, 'nFFTPoints'):
1257 1257 factor = dataOut.normFactor
1258 1258 print("dataOut",dataOut.data_360.shape)
1259 1259 #
1260 1260 data['weather'] = 10*numpy.log10(dataOut.data_360/(factor))
1261 1261 #
1262 1262 #data['weather'] = 10*numpy.log10(dataOut.data_360[1]/(factor))
1263 1263 data['azi'] = dataOut.data_azi
1264 1264 data['ele'] = dataOut.data_ele
1265 1265 data['case_flag'] = dataOut.case_flag
1266 1266 #print("UPDATE")
1267 1267 #print("data[weather]",data['weather'].shape)
1268 1268 #print("data[azi]",data['azi'])
1269 1269 return data, meta
1270 1270
1271 1271 def get2List(self,angulos):
1272 1272 list1=[]
1273 1273 list2=[]
1274 1274 for i in reversed(range(len(angulos))):
1275 1275 if not i==0: # the i=0 case would compare the first element of the list with the last one, which is not relevant
1276 1276 diff_ = angulos[i]-angulos[i-1]
1277 1277 if abs(diff_) >1.5:
1278 1278 list1.append(i-1)
1279 1279 list2.append(diff_)
1280 1280 return list(reversed(list1)),list(reversed(list2))
1281 1281
1282 1282 def fixData90(self,list_,ang_):
1283 1283 if list_[0]==-1:
1284 1284 vec = numpy.where(ang_<ang_[0])
1285 1285 ang_[vec] = ang_[vec]+90
1286 1286 return ang_
1287 1287 return ang_
1288 1288
1289 1289 def fixData90HL(self,angulos):
1290 1290 vec = numpy.where(angulos>=90)
1291 1291 angulos[vec]=angulos[vec]-90
1292 1292 return angulos
1293 1293
1294 1294
1295 1295 def search_pos(self,pos,list_):
1296 1296 for i in range(len(list_)):
1297 1297 if pos == list_[i]:
1298 1298 return True,i
1299 1299 i=None
1300 1300 return False,i
1301 1301
1302 1302 def fixDataComp(self,ang_,list1_,list2_,tipo_case):
1303 1303 size = len(ang_)
1304 1304 size2 = 0
1305 1305 for i in range(len(list2_)):
1306 1306 size2=size2+round(abs(list2_[i]))-1
1307 1307 new_size= size+size2
1308 1308 ang_new = numpy.zeros(new_size)
1309 1309 ang_new2 = numpy.zeros(new_size)
1310 1310
1311 1311 tmp = 0
1312 1312 c = 0
1313 1313 for i in range(len(ang_)):
1314 1314 ang_new[tmp +c] = ang_[i]
1315 1315 ang_new2[tmp+c] = ang_[i]
1316 1316 condition , value = self.search_pos(i,list1_)
1317 1317 if condition:
1318 1318 pos = tmp + c + 1
1319 1319 for k in range(round(abs(list2_[value]))-1):
1320 1320 if tipo_case==0 or tipo_case==3: # ascending
1321 1321 ang_new[pos+k] = ang_new[pos+k-1]+1
1322 1322 ang_new2[pos+k] = numpy.nan
1323 1323 elif tipo_case==1 or tipo_case==2: # descending
1324 1324 ang_new[pos+k] = ang_new[pos+k-1]-1
1325 1325 ang_new2[pos+k] = numpy.nan
1326 1326
1327 1327 tmp = pos +k
1328 1328 c = 0
1329 1329 c=c+1
1330 1330 return ang_new,ang_new2
1331 1331
1332 1332 def globalCheckPED(self,angulos,tipo_case):
1333 1333 l1,l2 = self.get2List(angulos)
1334 1334 ##print("l1",l1)
1335 1335 ##print("l2",l2)
1336 1336 if len(l1)>0:
1337 1337 #angulos2 = self.fixData90(list_=l1,ang_=angulos)
1338 1338 #l1,l2 = self.get2List(angulos2)
1339 1339 ang1_,ang2_ = self.fixDataComp(ang_=angulos,list1_=l1,list2_=l2,tipo_case=tipo_case)
1340 1340 #ang1_ = self.fixData90HL(ang1_)
1341 1341 #ang2_ = self.fixData90HL(ang2_)
1342 1342 else:
1343 1343 ang1_= angulos
1344 1344 ang2_= angulos
1345 1345 return ang1_,ang2_
1346 1346
1347 1347
1348 1348 def replaceNAN(self,data_weather,data_ele,val):
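# Aligns the weather matrix with the (gap-filled) elevation axis: rows whose elevation is NaN are
# filled with NaN, and when data_ele is longer than data_weather the existing rows are re-packed
# in order. Note that the 'val' argument is currently unused.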
1349 1349 data= data_ele
1350 1350 data_T= data_weather
1351 1351 if data.shape[0]> data_T.shape[0]:
1352 1352 data_N = numpy.ones( [data.shape[0],data_T.shape[1]])
1353 1353 c = 0
1354 1354 for i in range(len(data)):
1355 1355 if numpy.isnan(data[i]):
1356 1356 data_N[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
1357 1357 else:
1358 1358 data_N[i,:]=data_T[c,:]
1359 1359 c=c+1
1360 1360 return data_N
1361 1361 else:
1362 1362 for i in range(len(data)):
1363 1363 if numpy.isnan(data[i]):
1364 1364 data_T[i,:]=numpy.ones(data_T.shape[1])*numpy.nan
1365 1365 return data_T
1366 1366
1367 1367 def check_case(self,data_ele,ang_max,ang_min):
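# Classifies the elevation sweep from its first/last samples: 0 = full ascending, 1 = full descending,
# 2 = descending with a change of the maximum angle, 3 = ascending with a change of the minimum angle
# (returns None if no condition matches).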
1368 1368 start = data_ele[0]
1369 1369 end = data_ele[-1]
1370 1370 number = (end-start)
1371 1371 len_ang=len(data_ele)
1372 1372 print("start",start)
1373 1373 print("end",end)
1374 1374 print("number",number)
1375 1375
1376 1376 print("len_ang",len_ang)
1377 1377
1378 1378 #exit(1)
1379 1379
1380 1380 if start<end and (round(abs(number)+1)>=len_ang or (numpy.argmin(data_ele)==0)):# ascending case
1381 1381 return 0
1382 1382 #elif start>end and (round(abs(number)+1)>=len_ang or(numpy.argmax(data_ele)==0)):# descending case
1383 1383 # return 1
1384 1384 elif round(abs(number)+1)>=len_ang and (start>end or(numpy.argmax(data_ele)==0)):# descending case
1385 1385 return 1
1386 1386 elif round(abs(number)+1)<len_ang and data_ele[-2]>data_ele[-1]:# descending case, change of max angle
1387 1387 return 2
1388 1388 elif round(abs(number)+1)<len_ang and data_ele[-2]<data_ele[-1]:# ascending case, change of min angle
1389 1389 return 3
1390 1390
1391 1391
1392 1392 def const_ploteo(self,val_ch,data_weather,data_ele,step,res,ang_max,ang_min,case_flag):
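# Builds a fixed elevation grid between ang_min and ang_max for channel val_ch: the measured elevations
# are gap-filled (globalCheckPED), padded with NaN where no data exists, and accumulated across
# successive calls in self.res_ele / self.res_weather[val_ch].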
1393 1393 ang_max= ang_max
1394 1394 ang_min= ang_min
1395 1395 data_weather=data_weather
1396 1396 val_ch=val_ch
1397 1397 ##print("*********************DATA WEATHER**************************************")
1398 1398 ##print(data_weather)
1399 1399 if self.ini==0:
1400 1400 '''
1401 1401 print("**********************************************")
1402 1402 print("**********************************************")
1403 1403 print("***************ini**************")
1404 1404 print("**********************************************")
1405 1405 print("**********************************************")
1406 1406 '''
1407 1407 #print("data_ele",data_ele)
1408 1408 #----------------------------------------------------------
1409 1409 tipo_case = case_flag[-1]
1410 1410 #tipo_case = self.check_case(data_ele,ang_max,ang_min)
1411 1411 print("check_case",tipo_case)
1412 1412 #exit(1)
1413 1413 #--------------------- new -------------------------
1414 1414 data_ele_new ,data_ele_old= self.globalCheckPED(data_ele,tipo_case)
1415 1415
1416 1416 #------------------------- RHI CHANGES ---------------------------------
1417 1417 start= ang_min
1418 1418 end = ang_max
1419 1419 n= (ang_max-ang_min)/res
1420 1420 #------ new
1421 1421 self.start_data_ele = data_ele_new[0]
1422 1422 self.end_data_ele = data_ele_new[-1]
1423 1423 if tipo_case==0 or tipo_case==3: # ASCENDING
1424 1424 n1= round(self.start_data_ele)- start
1425 1425 n2= end - round(self.end_data_ele)
1426 1426 print(self.start_data_ele)
1427 1427 print(self.end_data_ele)
1428 1428 if n1>0:
1429 1429 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
1430 1430 ele1_nan= numpy.ones(n1)*numpy.nan
1431 1431 data_ele = numpy.hstack((ele1,data_ele_new))
1432 1432 print("ele1_nan",ele1_nan.shape)
1433 1433 print("data_ele_old",data_ele_old.shape)
1434 1434 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
1435 1435 if n2>0:
1436 1436 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
1437 1437 ele2_nan= numpy.ones(n2)*numpy.nan
1438 1438 data_ele = numpy.hstack((data_ele,ele2))
1439 1439 print("ele2_nan",ele2_nan.shape)
1440 1440 print("data_ele_old",data_ele_old.shape)
1441 1441 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1442 1442
1443 1443 if tipo_case==1 or tipo_case==2: # DESCENDING
1444 1444 data_ele_new = data_ele_new[::-1] # reverse
1445 1445 data_ele_old = data_ele_old[::-1] # reverse
1446 1446 data_weather = data_weather[::-1,:] # reverse
1447 1447 vec= numpy.where(data_ele_new<ang_max)
1448 1448 data_ele_new = data_ele_new[vec]
1449 1449 data_ele_old = data_ele_old[vec]
1450 1450 data_weather = data_weather[vec[0]]
1451 1451 vec2= numpy.where(0<data_ele_new)
1452 1452 data_ele_new = data_ele_new[vec2]
1453 1453 data_ele_old = data_ele_old[vec2]
1454 1454 data_weather = data_weather[vec2[0]]
1455 1455 self.start_data_ele = data_ele_new[0]
1456 1456 self.end_data_ele = data_ele_new[-1]
1457 1457
1458 1458 n1= round(self.start_data_ele)- start
1459 1459 n2= end - round(self.end_data_ele)-1
1460 1460 print(self.start_data_ele)
1461 1461 print(self.end_data_ele)
1462 1462 if n1>0:
1463 1463 ele1= numpy.linspace(ang_min+1,self.start_data_ele-1,n1)
1464 1464 ele1_nan= numpy.ones(n1)*numpy.nan
1465 1465 data_ele = numpy.hstack((ele1,data_ele_new))
1466 1466 data_ele_old = numpy.hstack((ele1_nan,data_ele_old))
1467 1467 if n2>0:
1468 1468 ele2= numpy.linspace(self.end_data_ele+1,end,n2)
1469 1469 ele2_nan= numpy.ones(n2)*numpy.nan
1470 1470 data_ele = numpy.hstack((data_ele,ele2))
1471 1471 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1472 1472 # RADAR
1473 1473 # NOTE: data_ele and data_weather are the variables this method returns
1474 1474 val_mean = numpy.mean(data_weather[:,-1])
1475 1475 self.val_mean = val_mean
1476 1476 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1477 1477 print("eleold",data_ele_old)
1478 1478 print(self.data_ele_tmp[val_ch])
1479 1479 print(data_ele_old.shape[0])
1480 1480 print(self.data_ele_tmp[val_ch].shape[0])
1481 1481 if (data_ele_old.shape[0]==91 or self.data_ele_tmp[val_ch].shape[0]==91):
1482 1482 import sys
1483 1483 print("EXIT",self.ini)
1484 1484
1485 1485 sys.exit(1)
1486 1486 self.data_ele_tmp[val_ch]= data_ele_old
1487 1487 else:
1488 1488 #print("**********************************************")
1489 1489 #print("****************VARIABLE**********************")
1490 1490 #------------------------- RHI CHANGES ---------------------------------
1491 1491 #---------------------------------------------------------------------
1492 1492 ##print("INPUT data_ele",data_ele)
1493 1493 flag=0
1494 1494 start_ele = self.res_ele[0]
1495 1495 #tipo_case = self.check_case(data_ele,ang_max,ang_min)
1496 1496 tipo_case = case_flag[-1]
1497 1497 #print("TIPO DE DATA",tipo_case)
1498 1498 #-----------new------------
1499 1499 data_ele ,data_ele_old = self.globalCheckPED(data_ele,tipo_case)
1500 1500 data_weather = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1501 1501
1502 1502 #-------------------------------NEW RHI ITERATIVO-------------------------
1503 1503
1504 1504 if tipo_case==0 : # ASCENDING
1505 1505 vec = numpy.where(data_ele<ang_max)
1506 1506 data_ele = data_ele[vec]
1507 1507 data_ele_old = data_ele_old[vec]
1508 1508 data_weather = data_weather[vec[0]]
1509 1509
1510 1510 vec2 = numpy.where(0<data_ele)
1511 1511 data_ele= data_ele[vec2]
1512 1512 data_ele_old= data_ele_old[vec2]
1513 1513 ##print(data_ele_new)
1514 1514 data_weather= data_weather[vec2[0]]
1515 1515
1516 1516 new_i_ele = int(round(data_ele[0]))
1517 1517 new_f_ele = int(round(data_ele[-1]))
1518 1518 #print(new_i_ele)
1519 1519 #print(new_f_ele)
1520 1520 #print(data_ele,len(data_ele))
1521 1521 #print(data_ele_old,len(data_ele_old))
1522 1522 if new_i_ele< 2:
1523 1523 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
1524 1524 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
1525 1525 self.data_ele_tmp[val_ch][new_i_ele:new_i_ele+len(data_ele)]=data_ele_old
1526 1526 self.res_ele[new_i_ele:new_i_ele+len(data_ele)]= data_ele
1527 1527 self.res_weather[val_ch][new_i_ele:new_i_ele+len(data_ele),:]= data_weather
1528 1528 data_ele = self.res_ele
1529 1529 data_weather = self.res_weather[val_ch]
1530 1530
1531 1531 elif tipo_case==1 : # DESCENDING
1532 1532 data_ele = data_ele[::-1] # reverse
1533 1533 data_ele_old = data_ele_old[::-1] # reverse
1534 1534 data_weather = data_weather[::-1,:] # reverse
1535 1535 vec= numpy.where(data_ele<ang_max)
1536 1536 data_ele = data_ele[vec]
1537 1537 data_ele_old = data_ele_old[vec]
1538 1538 data_weather = data_weather[vec[0]]
1539 1539 vec2= numpy.where(0<data_ele)
1540 1540 data_ele = data_ele[vec2]
1541 1541 data_ele_old = data_ele_old[vec2]
1542 1542 data_weather = data_weather[vec2[0]]
1543 1543
1544 1544
1545 1545 new_i_ele = int(round(data_ele[0]))
1546 1546 new_f_ele = int(round(data_ele[-1]))
1547 1547 #print(data_ele)
1548 1548 #print(ang_max)
1549 1549 #print(data_ele_old)
1550 1550 if new_i_ele <= 1:
1551 1551 new_i_ele = 1
1552 1552 if round(data_ele[-1])>=ang_max-1:
1553 1553 self.data_ele_tmp[val_ch] = numpy.ones(ang_max-ang_min)*numpy.nan
1554 1554 self.res_weather[val_ch] = self.replaceNAN(data_weather=self.res_weather[val_ch],data_ele=self.data_ele_tmp[val_ch],val=self.val_mean)
1555 1555 self.data_ele_tmp[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1]=data_ele_old
1556 1556 self.res_ele[new_i_ele-1:new_i_ele+len(data_ele)-1]= data_ele
1557 1557 self.res_weather[val_ch][new_i_ele-1:new_i_ele+len(data_ele)-1,:]= data_weather
1558 1558 data_ele = self.res_ele
1559 1559 data_weather = self.res_weather[val_ch]
1560 1560
1561 1561 elif tipo_case==2: # descending
1562 1562 vec = numpy.where(data_ele<ang_max)
1563 1563 data_ele = data_ele[vec]
1564 1564 data_weather= data_weather[vec[0]]
1565 1565
1566 1566 len_vec = len(vec)
1567 1567 data_ele_new = data_ele[::-1] # reverse
1568 1568 data_weather = data_weather[::-1,:]
1569 1569 new_i_ele = int(data_ele_new[0])
1570 1570 new_f_ele = int(data_ele_new[-1])
1571 1571
1572 1572 n1= new_i_ele- ang_min
1573 1573 n2= ang_max - new_f_ele-1
1574 1574 if n1>0:
1575 1575 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
1576 1576 ele1_nan= numpy.ones(n1)*numpy.nan
1577 1577 data_ele = numpy.hstack((ele1,data_ele_new))
1578 1578 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
1579 1579 if n2>0:
1580 1580 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
1581 1581 ele2_nan= numpy.ones(n2)*numpy.nan
1582 1582 data_ele = numpy.hstack((data_ele,ele2))
1583 1583 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1584 1584
1585 1585 self.data_ele_tmp[val_ch] = data_ele_old
1586 1586 self.res_ele = data_ele
1587 1587 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1588 1588 data_ele = self.res_ele
1589 1589 data_weather = self.res_weather[val_ch]
1590 1590
1591 1591 elif tipo_case==3: # ascending
1592 1592 vec = numpy.where(0<data_ele)
1593 1593 data_ele= data_ele[vec]
1594 1594 data_ele_new = data_ele
1595 1595 data_ele_old= data_ele_old[vec]
1596 1596 data_weather= data_weather[vec[0]]
1597 1597 pos_ini = numpy.argmin(data_ele)
1598 1598 if pos_ini>0:
1599 1599 len_vec= len(data_ele)
1600 1600 vec3 = numpy.linspace(pos_ini,len_vec-1,len_vec-pos_ini).astype(int)
1601 1601 #print(vec3)
1602 1602 data_ele= data_ele[vec3]
1603 1603 data_ele_new = data_ele
1604 1604 data_ele_old= data_ele_old[vec3]
1605 1605 data_weather= data_weather[vec3]
1606 1606
1607 1607 new_i_ele = int(data_ele_new[0])
1608 1608 new_f_ele = int(data_ele_new[-1])
1609 1609 n1= new_i_ele- ang_min
1610 1610 n2= ang_max - new_f_ele-1
1611 1611 if n1>0:
1612 1612 ele1= numpy.linspace(ang_min+1,new_i_ele-1,n1)
1613 1613 ele1_nan= numpy.ones(n1)*numpy.nan
1614 1614 data_ele = numpy.hstack((ele1,data_ele_new))
1615 1615 data_ele_old = numpy.hstack((ele1_nan,data_ele_new))
1616 1616 if n2>0:
1617 1617 ele2= numpy.linspace(new_f_ele+1,ang_max,n2)
1618 1618 ele2_nan= numpy.ones(n2)*numpy.nan
1619 1619 data_ele = numpy.hstack((data_ele,ele2))
1620 1620 data_ele_old = numpy.hstack((data_ele_old,ele2_nan))
1621 1621
1622 1622 self.data_ele_tmp[val_ch] = data_ele_old
1623 1623 self.res_ele = data_ele
1624 1624 self.res_weather[val_ch] = self.replaceNAN(data_weather=data_weather,data_ele=data_ele_old,val=self.val_mean)
1625 1625 data_ele = self.res_ele
1626 1626 data_weather = self.res_weather[val_ch]
1627 1627 #print("self.data_ele_tmp",self.data_ele_tmp)
1628 1628 return data_weather,data_ele
1629 1629
1630 1630
1631 1631 def plot(self):
1632 1632 thisDatetime = datetime.datetime.utcfromtimestamp(self.data.times[-1]).strftime('%Y-%m-%d %H:%M:%S')
1633 1633 data = self.data[-1]
1634 1634 r = self.data.yrange
1635 1635 delta_height = r[1]-r[0]
1636 1636 r_mask = numpy.where(r>=0)[0]
1637 1637 ##print("delta_height",delta_height)
1638 1638 #print("r_mask",r_mask,len(r_mask))
1639 1639 r = numpy.arange(len(r_mask))*delta_height
1640 1640 self.y = 2*r
1641 1641 res = 1
1642 1642 ###print("data['weather'].shape[0]",data['weather'].shape[0])
1643 1643 ang_max = self.ang_max
1644 1644 ang_min = self.ang_min
1645 1645 var_ang =ang_max - ang_min
1646 1646 step = (int(var_ang)/(res*data['weather'].shape[0]))
1647 1647 ###print("step",step)
1648 1648 #--------------------------------------------------------
1649 1649 ##print('weather',data['weather'].shape)
1650 1650 ##print('ele',data['ele'].shape)
1651 1651
1652 1652 ###self.res_weather, self.res_ele = self.const_ploteo(data_weather=data['weather'][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min)
1653 1653 ###self.res_azi = numpy.mean(data['azi'])
1654 1654 ###print("self.res_ele",self.res_ele)
1655 1655 plt.clf()
1656 1656 subplots = [121, 122]
1657 1657 try:
1658 1658 if self.data[-2]['ele'].max()<data['ele'].max():
1659 1659 self.ini=0
1660 1660 except:
1661 1661 pass
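# The try/except above restarts the accumulation (self.ini = 0) when the current frame reaches a
# higher maximum elevation than the previous one, i.e. a new sweep has started; on the very first
# frame there is no previous entry, so the check is skipped.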
1662 1662 if self.ini==0:
1663 1663 self.data_ele_tmp = numpy.ones([self.nplots,int(var_ang)])*numpy.nan
1664 1664 self.res_weather= numpy.ones([self.nplots,int(var_ang),len(r_mask)])*numpy.nan
1665 1665 print("SHAPE",self.data_ele_tmp.shape)
1666 1666
1667 1667 for i,ax in enumerate(self.axes):
1668 1668 self.res_weather[i], self.res_ele = self.const_ploteo(val_ch=i, data_weather=data['weather'][i][:,r_mask],data_ele=data['ele'],step=step,res=res,ang_max=ang_max,ang_min=ang_min,case_flag=self.data['case_flag'])
1669 1669 self.res_azi = numpy.mean(data['azi'])
1670 1670
1671 1671 if ax.firsttime:
1672 1672 #plt.clf()
1673 1673 print("First Plot")
1674 1674 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj='cg',vmin=20, vmax=80)
1675 1675 #fig=self.figures[0]
1676 1676 else:
1677 1677 #plt.clf()
1678 1678 print("ELSE PLOT")
1679 1679 cgax, pm = wrl.vis.plot_rhi(self.res_weather[i],r=r,th=self.res_ele,ax=subplots[i], proj='cg',vmin=20, vmax=80)
1680 1680 caax = cgax.parasites[0]
1681 1681 paax = cgax.parasites[1]
1682 1682 cbar = plt.gcf().colorbar(pm, pad=0.075)
1683 1683 caax.set_xlabel('x_range [km]')
1684 1684 caax.set_ylabel('y_range [km]')
1685 1685 plt.text(1.0, 1.05, 'Elevacion '+str(thisDatetime)+" Step "+str(self.ini)+ " Azi: "+str(round(self.res_azi,2)), transform=caax.transAxes, va='bottom',ha='right')
1686 1686 print("***************************self.ini****************************",self.ini)
1687 1687 self.ini= self.ini+1
1688 1688
1689 1689
1690 1690
1691 1691
1692 1692
1693 1693 class WeatherRHI_vRF4_Plot(Plot):
1694 1694 CODE = 'RHI'
1695 1695 plot_name = 'RHI'
1696 1696 #plot_type = 'rhistyle'
1697 1697 buffering = False
1698 1698
1699 1699 def setup(self):
1700 1700
1701 1701 self.ncols = 1
1702 1702 self.nrows = 1
1703 1703 self.nplots= 1
1704 1704 self.ylabel= 'Range [Km]'
1705 1705 self.xlabel= 'Range [Km]'
1706 1706 self.titles= ['RHI']
1707 1707 self.polar = True
1708 1708 self.grid = True
1709 1709 if self.channels is not None:
1710 1710 self.nplots = len(self.channels)
1711 1711 self.nrows = len(self.channels)
1712 1712 else:
1713 1713 self.nplots = self.data.shape(self.CODE)[0]
1714 1714 self.nrows = self.nplots
1715 1715 self.channels = list(range(self.nplots))
1716 1716
1717 1717 if self.CODE == 'Power':
1718 1718 self.cb_label = r'Power (dB)'
1719 1719 elif self.CODE == 'Doppler':
1720 1720 self.cb_label = r'Velocity (m/s)'
1721 1721 self.colorbar=True
1722 1722 self.width =8
1723 1723 self.height =8
1724 1724 self.ini =0
1725 1725 self.len_azi =0
1726 1726 self.buffer_ini = None
1727 1727 self.buffer_ele = None
1728 1728 self.plots_adjust.update({'wspace': 0.4, 'hspace':0.4, 'left': 0.1, 'right': 0.9, 'bottom': 0.08})
1729 1729 self.flag =0
1730 1730 self.indicador= 0
1731 1731 self.last_data_ele = None
1732 1732 self.val_mean = None
1733 1733
1734 1734 def update(self, dataOut):
1735 1735
1736 1736 data = {}
1737 1737 meta = {}
1738 1738 if hasattr(dataOut, 'dataPP_POWER'):
1739 1739 factor = 1
1740 1740 if hasattr(dataOut, 'nFFTPoints'):
1741 1741 factor = dataOut.normFactor
1742 1742
1743 1743 if 'pow' in self.attr_data[0].lower():
1744 1744 data['data'] = 10*numpy.log10(getattr(dataOut, self.attr_data[0])/(factor))
1745 1745 else:
1746 1746 data['data'] = getattr(dataOut, self.attr_data[0])/(factor)
1747 1747
1748 1748 data['azi'] = dataOut.data_azi
1749 1749 data['ele'] = dataOut.data_ele
1750 1750
1751 1751 return data, meta
1752 1752
1753 1753 def plot(self):
1754 1754 data = self.data[-1]
1755 1755 r = self.data.yrange
1756 1756 delta_height = r[1]-r[0]
1757 1757 r_mask = numpy.where(r>=0)[0]
1758 1758 self.r_mask =r_mask
1759 1759 r = numpy.arange(len(r_mask))*delta_height
1760 1760 self.y = 2*r
1761 1761
1762 1762 z = data['data'][self.channels[0]][:,r_mask]
1763 1763
1764 1764 self.titles = []
1765 1765
1766 1766 self.ymax = self.ymax if self.ymax else numpy.nanmax(r)
1767 1767 self.ymin = self.ymin if self.ymin else numpy.nanmin(r)
1768 1768 self.zmax = self.zmax if self.zmax else numpy.nanmax(z)
1769 1769 self.zmin = self.zmin if self.zmin else numpy.nanmin(z)
1770 1770 self.ang_min = self.ang_min if self.ang_min else 0
1771 1771 self.ang_max = self.ang_max if self.ang_max else 90
1772 1772
1773 1773 r, theta = numpy.meshgrid(r, numpy.radians(data['ele']) )
1774 1774
1775 1775 for i,ax in enumerate(self.axes):
1776 1776
1777 1777 if ax.firsttime:
1778 1778 ax.set_xlim(numpy.radians(self.ang_min),numpy.radians(self.ang_max))
1779 1779 ax.plt = ax.pcolormesh(theta, r, z, cmap=self.colormap, vmin=self.zmin, vmax=self.zmax)
1780 1780
1781 1781 else:
1782 1782 ax.set_xlim(numpy.radians(self.ang_min),numpy.radians(self.ang_max))
1783 1783 ax.plt = ax.pcolormesh(theta, r, z, cmap=self.colormap, vmin=self.zmin, vmax=self.zmax)
1784 1784 ax.grid(True)
1785 1785 if len(self.channels) !=1:
1786 1786 self.titles = ['RHI {} at AZ: {} Channel {}'.format(self.labels[x], str(round(numpy.mean(data['azi']),1)), x) for x in range(self.nrows)]
1787 1787 else:
1788 1788 self.titles = ['RHI {} at AZ: {} Channel {}'.format(self.labels[0], str(round(numpy.mean(data['azi']),1)), self.channels[0])]
@@ -1,208 +1,226
1 1 '''
2 2 Base classes to create Processing Units and Operations; the MPDecorator
3 3 must be used in plotting and writing operations to allow them to run as
4 4 external processes.
5 5 '''
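# A minimal subclassing sketch (illustrative only; the class names and data attributes below are
# hypothetical, not part of this module). Units build/refresh their own dataOut in run(); operations
# receive dataOut, modify it, and return it:
#
#     class MyUnit(ProcessingUnit):
#         def run(self, scale=1.0):
#             self.dataOut = self.dataIn            # a real unit would update its own dataOut object
#             self.dataOut.flagNoData = False
#
#     class MyOperation(Operation):
#         def run(self, dataOut, offset=0.0):
#             dataOut.data = dataOut.data + offset
#             return dataOut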
6 6
7 7 import os
8 8 import inspect
9 9 import zmq
10 10 import time
11 11 import pickle
12 12 import traceback
13 13 from threading import Thread
14 14 from multiprocessing import Process, Queue
15 15 from schainpy.utils import log
16 16
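# Maximum number of dataOut objects buffered for each external (plotting/writing) process;
# it can be overridden with the QUEUE_MAX_SIZE environment variable.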
17 17 QUEUE_SIZE = int(os.environ.get('QUEUE_MAX_SIZE', '10'))
18 18
19 19 class ProcessingUnit(object):
20 20 '''
21 21 Base class to create Signal Chain Units
22 22 '''
23 23
24 24 proc_type = 'processing'
25 25
26 26 def __init__(self):
27 27
28 28 self.dataIn = None
29 29 self.dataOut = None
30 30 self.isConfig = False
31 31 self.operations = []
32 self.name = 'Test'
33 self.inputs = []
32 34
33 35 def setInput(self, unit):
34 36
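# 'unit' is expected to be an iterable of upstream processing units: the first one is exposed as
# self.dataIn and the remaining ones as dataIn1, dataIn2, ...; the attribute names are collected in
# self.inputs so that several units can feed a single unit.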
35 self.dataIn = unit.dataOut
37 attr = 'dataIn'
38 for i, u in enumerate(unit):
39 if i==0:
40 self.dataIn = u.dataOut
41 self.inputs.append('dataIn')
42 else:
43 setattr(self, 'dataIn{}'.format(i), u.dataOut)
44 self.inputs.append('dataIn{}'.format(i))
36 45
37 46 def getAllowedArgs(self):
38 47 if hasattr(self, '__attrs__'):
39 48 return self.__attrs__
40 49 else:
41 50 return inspect.getargspec(self.run).args
42 51
43 52 def addOperation(self, conf, operation):
44 53 '''
45 54 '''
46 55
47 56 self.operations.append((operation, conf.type, conf.getKwargs()))
48 57
49 58 def getOperationObj(self, objId):
50 59
51 60 if objId not in list(self.operations.keys()):
52 61 return None
53 62
54 63 return self.operations[objId]
55 64
56 65 def call(self, **kwargs):
57 66 '''
58 67 '''
59 68
60 69 try:
61 70 if self.dataIn is not None and self.dataIn.flagNoData and not self.dataIn.error:
62 71 return self.dataIn.isReady()
63 72 elif self.dataIn is None or not self.dataIn.error:
64 73 self.run(**kwargs)
65 74 elif self.dataIn.error:
66 75 self.dataOut.error = self.dataIn.error
67 76 self.dataOut.flagNoData = True
68 77 except:
69 78 err = traceback.format_exc()
70 79 if 'SchainWarning' in err:
71 80 log.warning(err.split('SchainWarning:')[-1].split('\n')[0].strip(), self.name)
72 81 elif 'SchainError' in err:
73 82 log.error(err.split('SchainError:')[-1].split('\n')[0].strip(), self.name)
74 83 else:
75 84 log.error(err, self.name)
76 85 self.dataOut.error = True
77 86 ##### correction of the dataOut handling
78 87 for op, optype, opkwargs in self.operations:
79 88 aux = self.dataOut.copy()
80 89 if optype == 'other' and not self.dataOut.flagNoData:
81 90 self.dataOut = op.run(self.dataOut, **opkwargs)
82 91 elif optype == 'external' and not self.dataOut.flagNoData:
83 92 #op.queue.put(self.dataOut)
84 93 op.queue.put(aux)
85 94 elif optype == 'external' and self.dataOut.error:
86 95 #op.queue.put(self.dataOut)
87 96 op.queue.put(aux)
88 97
89 return 'Error' if self.dataOut.error else self.dataOut.isReady()
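# Honour dataOut.runNextUnit when the data object defines it; otherwise fall back to isReady().
# The try/except keeps data objects that lack the attribute working as before.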
98 try:
99 if self.dataOut.runNextUnit:
100 runNextUnit = self.dataOut.runNextUnit
101
102 else:
103 runNextUnit = self.dataOut.isReady()
104 except:
105 runNextUnit = self.dataOut.isReady()
106
107 return 'Error' if self.dataOut.error else runNextUnit
90 108
91 109 def setup(self):
92 110
93 111 raise NotImplementedError
94 112
95 113 def run(self):
96 114
97 115 raise NotImplementedError
98 116
99 117 def close(self):
100 118
101 119 return
102 120
103 121
104 122 class Operation(object):
105 123
106 124 '''
107 125 '''
108 126
109 127 proc_type = 'operation'
110 128
111 129 def __init__(self):
112 130
113 131 self.id = None
114 132 self.isConfig = False
115 133
116 134 if not hasattr(self, 'name'):
117 135 self.name = self.__class__.__name__
118 136
119 137 def getAllowedArgs(self):
120 138 if hasattr(self, '__attrs__'):
121 139 return self.__attrs__
122 140 else:
123 141 return inspect.getargspec(self.run).args
124 142
125 143 def setup(self):
126 144
127 145 self.isConfig = True
128 146
129 147 raise NotImplementedError
130 148
131 149 def run(self, dataIn, **kwargs):
132 150 """
133 151 Performs the required operations on dataIn.data and updates the
134 152 attributes of the dataIn object.
135 153
136 154 Input:
137 155
138 156 dataIn : JROData-type object
139 157
140 158 Return:
141 159
142 160 None
143 161
144 162 Affected:
145 163 __buffer : data reception buffer.
146 164
147 165 """
148 166 if not self.isConfig:
149 167 self.setup(**kwargs)
150 168
151 169 raise NotImplementedError
152 170
153 171 def close(self):
154 172
155 173 return
156 174
157 175
158 176 def MPDecorator(BaseClass):
159 177 """
160 178 Multiprocessing class decorator
161 179
162 180 This function adds multiprocessing features to a BaseClass.
163 181 """
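# Hypothetical usage sketch (names are illustrative; in practice the Project/framework instantiates
# the wrapped class and supplies its positional args, including the error queue read from args[3]):
#
#     WrappedPlot = MPDecorator(SomePlotClass)
#     proc = WrappedPlot(arg0, arg1, arg2, err_queue)
#     proc.start()                # the run() loop executes in its own Process
#     proc.queue.put(dataOut)     # upstream ProcessingUnit.call() feeds copies of dataOut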
164 182
165 183 class MPClass(BaseClass, Process):
166 184
167 185 def __init__(self, *args, **kwargs):
168 186 super(MPClass, self).__init__()
169 187 Process.__init__(self)
170 188
171 189 self.args = args
172 190 self.kwargs = kwargs
173 191 self.t = time.time()
174 192 self.op_type = 'external'
175 193 self.name = BaseClass.__name__
176 194 self.__doc__ = BaseClass.__doc__
177 195
178 196 if 'plot' in self.name.lower() and not self.name.endswith('_'):
179 197 self.name = '{}{}'.format(self.CODE.upper(), 'Plot')
180 198
181 199 self.start_time = time.time()
182 200 self.err_queue = args[3]
183 201 self.queue = Queue(maxsize=QUEUE_SIZE)
184 202 self.myrun = BaseClass.run
185 203
186 204 def run(self):
187 205
188 206 while True:
189 207
190 208 dataOut = self.queue.get()
191 209
192 210 if not dataOut.error:
193 211 try:
194 212 BaseClass.run(self, dataOut, **self.kwargs)
195 213 except:
196 214 err = traceback.format_exc()
197 215 log.error(err, self.name)
198 216 else:
199 217 break
200 218
201 219 self.close()
202 220
203 221 def close(self):
204 222
205 223 BaseClass.close(self)
206 224 log.success('Done...(Time:{:4.2f} secs)'.format(time.time()-self.start_time), self.name)
207 225
208 226 return MPClass
@@ -1,4999 +1,5088
1 1
2 2 import os
3 3 import time
4 4 import math
5 5
6 6 import re
7 7 import datetime
8 8 import copy
9 9 import sys
10 10 import importlib
11 11 import itertools
12 12
13 13 from multiprocessing import Pool, TimeoutError
14 14 from multiprocessing.pool import ThreadPool
15 15 import numpy
16 16 import glob
17 17 import scipy
18 18 import h5py
19 19 from scipy.optimize import fmin_l_bfgs_b #optimize with bounds on state parameters
20 20 from .jroproc_base import ProcessingUnit, Operation, MPDecorator
21 21 from schainpy.model.data.jrodata import Parameters, hildebrand_sekhon
22 22 from scipy import asarray as ar,exp
23 23 from scipy.optimize import curve_fit
24 24 from schainpy.utils import log
25 25 import schainpy.admin
26 26 import warnings
27 27 from scipy import optimize, interpolate, signal, stats, ndimage
28 28 from scipy.optimize.optimize import OptimizeWarning
29 29 warnings.filterwarnings('ignore')
30 30
31 31
32 32 SPEED_OF_LIGHT = 299792458
33 33
34 34 '''solving pickling issue'''
35 35
36 36 def _pickle_method(method):
37 37 func_name = method.__func__.__name__
38 38 obj = method.__self__
39 39 cls = method.__self__.__class__
40 40 return _unpickle_method, (func_name, obj, cls)
41 41
42 42 def _unpickle_method(func_name, obj, cls):
43 43 for cls in cls.mro():
44 44 try:
45 45 func = cls.__dict__[func_name]
46 46 except KeyError:
47 47 pass
48 48 else:
49 49 break
50 50 return func.__get__(obj, cls)
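# These helpers are intended to be registered via copyreg (e.g.
# copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)) so that bound methods can be
# pickled and sent to a multiprocessing Pool; the registration itself is assumed to happen elsewhere.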
51 51
52 52 def isNumber(str):
53 53 try:
54 54 float(str)
55 55 return True
56 56 except:
57 57 return False
58 58
59 59 class ParametersProc(ProcessingUnit):
60 60
61 61 METHODS = {}
62 62 nSeconds = None
63 63
64 64 def __init__(self):
65 65 ProcessingUnit.__init__(self)
66 66
67 67 # self.objectDict = {}
68 68 self.buffer = None
69 69 self.firstdatatime = None
70 70 self.profIndex = 0
71 71 self.dataOut = Parameters()
72 72 self.setupReq = False # To be added to every processing unit
73 73
74 74 def __updateObjFromInput(self):
75 75
76 76 self.dataOut.inputUnit = self.dataIn.type
77 77
78 78 self.dataOut.timeZone = self.dataIn.timeZone
79 79 self.dataOut.dstFlag = self.dataIn.dstFlag
80 80 self.dataOut.errorCount = self.dataIn.errorCount
81 81 self.dataOut.useLocalTime = self.dataIn.useLocalTime
82 82
83 83 self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
84 84 self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
85 85 self.dataOut.channelList = self.dataIn.channelList
86 86 self.dataOut.heightList = self.dataIn.heightList
87 87 self.dataOut.dtype = numpy.dtype([('real','<f4'),('imag','<f4')])
88 88 # self.dataOut.nHeights = self.dataIn.nHeights
89 89 # self.dataOut.nChannels = self.dataIn.nChannels
90 90 # self.dataOut.nBaud = self.dataIn.nBaud
91 91 # self.dataOut.nCode = self.dataIn.nCode
92 92 # self.dataOut.code = self.dataIn.code
93 93 # self.dataOut.nProfiles = self.dataOut.nFFTPoints
94 94 self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
95 95 # self.dataOut.utctime = self.firstdatatime
96 96 self.dataOut.utctime = self.dataIn.utctime
97 97 self.dataOut.flagDecodeData = self.dataIn.flagDecodeData # assume the data is already decoded
98 98 self.dataOut.flagDeflipData = self.dataIn.flagDeflipData # assume the data has no flip applied
99 99 self.dataOut.nCohInt = self.dataIn.nCohInt
100 100 # self.dataOut.nIncohInt = 1
101 101 # self.dataOut.ippSeconds = self.dataIn.ippSeconds
102 102 # self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
103 103 self.dataOut.timeInterval1 = self.dataIn.timeInterval
104 104 self.dataOut.heightList = self.dataIn.heightList
105 105 self.dataOut.frequency = self.dataIn.frequency
106 106 # self.dataOut.noise = self.dataIn.noise
107 self.dataOut.runNextUnit = self.dataIn.runNextUnit
107 108
108 def run(self):
109
109 def run(self, runNextUnit = 0):
110 110
111 self.dataIn.runNextUnit = runNextUnit
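# runNextUnit is stored on dataIn (and copied to dataOut in __updateObjFromInput) so that
# ProcessingUnit.call() can decide whether to trigger the next unit even when isReady() is False.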
111 112 #print("HOLA MUNDO SOY YO")
112 113 #---------------------- Voltage Data ---------------------------
113 114
114 115 if self.dataIn.type == "Voltage":
115 116
116 117 self.__updateObjFromInput()
117 118 self.dataOut.data_pre = self.dataIn.data.copy()
118 119 self.dataOut.flagNoData = False
119 120 self.dataOut.utctimeInit = self.dataIn.utctime
120 121 self.dataOut.paramInterval = self.dataIn.nProfiles*self.dataIn.nCohInt*self.dataIn.ippSeconds
121 122
122 123 if hasattr(self.dataIn, 'flagDataAsBlock'):
123 124 self.dataOut.flagDataAsBlock = self.dataIn.flagDataAsBlock
124 125
125 126 if hasattr(self.dataIn, 'profileIndex'):
126 127 self.dataOut.profileIndex = self.dataIn.profileIndex
127 128
128 129 if hasattr(self.dataIn, 'dataPP_POW'):
129 130 self.dataOut.dataPP_POW = self.dataIn.dataPP_POW
130 131
131 132 if hasattr(self.dataIn, 'dataPP_POWER'):
132 133 self.dataOut.dataPP_POWER = self.dataIn.dataPP_POWER
133 134
134 135 if hasattr(self.dataIn, 'dataPP_DOP'):
135 136 self.dataOut.dataPP_DOP = self.dataIn.dataPP_DOP
136 137
137 138 if hasattr(self.dataIn, 'dataPP_SNR'):
138 139 self.dataOut.dataPP_SNR = self.dataIn.dataPP_SNR
139 140
140 141 if hasattr(self.dataIn, 'dataPP_WIDTH'):
141 142 self.dataOut.dataPP_WIDTH = self.dataIn.dataPP_WIDTH
142 143 return
143 144
144 145 #---------------------- Spectra Data ---------------------------
145 146
146 147 if self.dataIn.type == "Spectra":
147 148 #print("que paso en spectra")
148 149 self.dataOut.data_pre = [self.dataIn.data_spc, self.dataIn.data_cspc]
149 150 self.dataOut.data_spc = self.dataIn.data_spc
150 151 self.dataOut.data_cspc = self.dataIn.data_cspc
151 152 self.dataOut.nProfiles = self.dataIn.nProfiles
152 153 self.dataOut.nIncohInt = self.dataIn.nIncohInt
153 154 self.dataOut.nFFTPoints = self.dataIn.nFFTPoints
154 155 self.dataOut.ippFactor = self.dataIn.ippFactor
155 156 self.dataOut.abscissaList = self.dataIn.getVelRange(1)
156 157 self.dataOut.spc_noise = self.dataIn.getNoise()
157 158 self.dataOut.spc_range = (self.dataIn.getFreqRange(1) , self.dataIn.getAcfRange(1) , self.dataIn.getVelRange(1))
158 159 # self.dataOut.normFactor = self.dataIn.normFactor
159 160 self.dataOut.pairsList = self.dataIn.pairsList
160 161 self.dataOut.groupList = self.dataIn.pairsList
161 162 self.dataOut.flagNoData = False
162 163
163 164 if hasattr(self.dataIn, 'flagDataAsBlock'):
164 165 self.dataOut.flagDataAsBlock = self.dataIn.flagDataAsBlock
165 166
166 167 if hasattr(self.dataIn, 'ChanDist'): #Distances of receiver channels
167 168 self.dataOut.ChanDist = self.dataIn.ChanDist
168 169 else: self.dataOut.ChanDist = None
169 170
170 171 #if hasattr(self.dataIn, 'VelRange'): #Velocities range
171 172 # self.dataOut.VelRange = self.dataIn.VelRange
172 173 #else: self.dataOut.VelRange = None
173 174
174 175 if hasattr(self.dataIn, 'RadarConst'): #Radar Constant
175 176 self.dataOut.RadarConst = self.dataIn.RadarConst
176 177
177 178 if hasattr(self.dataIn, 'NPW'): #NPW
178 179 self.dataOut.NPW = self.dataIn.NPW
179 180
180 181 if hasattr(self.dataIn, 'COFA'): #COFA
181 182 self.dataOut.COFA = self.dataIn.COFA
182 183
183 184
184 185
185 186 #---------------------- Correlation Data ---------------------------
186 187
187 188 if self.dataIn.type == "Correlation":
188 189 acf_ind, ccf_ind, acf_pairs, ccf_pairs, data_acf, data_ccf = self.dataIn.splitFunctions()
189 190
190 191 self.dataOut.data_pre = (self.dataIn.data_cf[acf_ind,:], self.dataIn.data_cf[ccf_ind,:,:])
191 192 self.dataOut.normFactor = (self.dataIn.normFactor[acf_ind,:], self.dataIn.normFactor[ccf_ind,:])
192 193 self.dataOut.groupList = (acf_pairs, ccf_pairs)
193 194
194 195 self.dataOut.abscissaList = self.dataIn.lagRange
195 196 self.dataOut.noise = self.dataIn.noise
196 197 self.dataOut.data_snr = self.dataIn.SNR
197 198 self.dataOut.flagNoData = False
198 199 self.dataOut.nAvg = self.dataIn.nAvg
199 200
200 201 #---------------------- Parameters Data ---------------------------
201 202
202 203 if self.dataIn.type == "Parameters":
203 204 self.dataOut.copy(self.dataIn)
204 205 self.dataOut.flagNoData = False
205 206 #print("yo si entre")
206 207
207 208 return True
208 209
209 210 self.__updateObjFromInput()
210 211 #print("yo si entre2")
211 212
212 213 self.dataOut.utctimeInit = self.dataIn.utctime
213 214 self.dataOut.paramInterval = self.dataIn.timeInterval
214 215 #print("soy spectra ",self.dataOut.utctimeInit)
215 216 return
216 217
217 218
218 219 def target(tups):
219 220
220 221 obj, args = tups
221 222
222 223 return obj.FitGau(args)
223 224
224 225 class RemoveWideGC(Operation):
225 226 ''' This class removes the wide ground clutter and replaces it with simple interpolated points.
226 227 This mainly applies to the CLAIRE radar
227 228
228 229 ClutterWidth : Width to look for the clutter peak
229 230
230 231 Input:
231 232
232 233 self.dataOut.data_pre : SPC and CSPC
233 234 self.dataOut.spc_range : To select wind and rainfall velocities
234 235
235 236 Affected:
236 237
237 238 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
238 239
239 240 Written by D. Scipión 25.02.2021
240 241 '''
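# Outline of run(): per channel and range gate, estimate the noise floor with hildebrand_sekhon,
# clip the spectrum at that floor, locate the ground-clutter peak around zero velocity, find the
# valleys on either side of it, and linearly interpolate the spectrum across that span (numpy.interp).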
241 242 def __init__(self):
242 243 Operation.__init__(self)
243 244 self.i = 0
244 245 self.ich = 0
245 246 self.ir = 0
246 247
247 248 def run(self, dataOut, ClutterWidth=2.5):
248 249 # print ('Entering RemoveWideGC ... ')
249 250
250 251 self.spc = dataOut.data_pre[0].copy()
251 252 self.spc_out = dataOut.data_pre[0].copy()
252 253 self.Num_Chn = self.spc.shape[0]
253 254 self.Num_Hei = self.spc.shape[2]
254 255 VelRange = dataOut.spc_range[2][:-1]
255 256 dv = VelRange[1]-VelRange[0]
256 257
257 258 # Find the velocities that corresponds to zero
258 259 gc_values = numpy.squeeze(numpy.where(numpy.abs(VelRange) <= ClutterWidth))
259 260
260 261 # Removing novalid data from the spectra
261 262 for ich in range(self.Num_Chn) :
262 263 for ir in range(self.Num_Hei) :
263 264 # Estimate the noise at each range
264 265 HSn = hildebrand_sekhon(self.spc[ich,:,ir],dataOut.nIncohInt)
265 266
266 267 # Removing the noise floor at each range
267 268 novalid = numpy.where(self.spc[ich,:,ir] < HSn)
268 269 self.spc[ich,novalid,ir] = HSn
269 270
270 271 junk = numpy.append(numpy.insert(numpy.squeeze(self.spc[ich,gc_values,ir]),0,HSn),HSn)
271 272 j1index = numpy.squeeze(numpy.where(numpy.diff(junk)>0))
272 273 j2index = numpy.squeeze(numpy.where(numpy.diff(junk)<0))
273 274 if ((numpy.size(j1index)<=1) | (numpy.size(j2index)<=1)) :
274 275 continue
275 276 junk3 = numpy.squeeze(numpy.diff(j1index))
276 277 junk4 = numpy.squeeze(numpy.diff(j2index))
277 278
278 279 valleyindex = j2index[numpy.where(junk4>1)]
279 280 peakindex = j1index[numpy.where(junk3>1)]
280 281
281 282 isvalid = numpy.squeeze(numpy.where(numpy.abs(VelRange[gc_values[peakindex]]) <= 2.5*dv))
282 283 if numpy.size(isvalid) == 0 :
283 284 continue
284 285 if numpy.size(isvalid) >1 :
285 286 vindex = numpy.argmax(self.spc[ich,gc_values[peakindex[isvalid]],ir])
286 287 isvalid = isvalid[vindex]
287 288
288 289 # clutter peak
289 290 gcpeak = peakindex[isvalid]
290 291 vl = numpy.where(valleyindex < gcpeak)
291 292 if numpy.size(vl) == 0:
292 293 continue
293 294 gcvl = valleyindex[vl[0][-1]]
294 295 vr = numpy.where(valleyindex > gcpeak)
295 296 if numpy.size(vr) == 0:
296 297 continue
297 298 gcvr = valleyindex[vr[0][0]]
298 299
299 300 # Removing the clutter
300 301 interpindex = numpy.array([gc_values[gcvl], gc_values[gcvr]])
301 302 gcindex = gc_values[gcvl+1:gcvr-1]
302 303 self.spc_out[ich,gcindex,ir] = numpy.interp(VelRange[gcindex],VelRange[interpindex],self.spc[ich,interpindex,ir])
303 304
304 305 dataOut.data_pre[0] = self.spc_out
305 306 #print ('Leaving RemoveWideGC ... ')
306 307 return dataOut
307 308
308 309 class SpectralFilters(Operation):
309 310 ''' This class replaces the invalid (novalid) values with noise for each channel
310 311 This applies to CLAIRE RADAR
311 312
312 313 PositiveLimit : RightLimit of novalid data
313 314 NegativeLimit : LeftLimit of novalid data
314 315
315 316 Input:
316 317
317 318 self.dataOut.data_pre : SPC and CSPC
318 319 self.dataOut.spc_range : To select wind and rainfall velocities
319 320
320 321 Affected:
321 322
322 323 self.dataOut.data_pre : It is used for the new SPC and CSPC ranges of wind
323 324
324 325 Written by D. Scipión 29.01.2021
325 326 '''
326 327 def __init__(self):
327 328 Operation.__init__(self)
328 329 self.i = 0
329 330
330 331 def run(self, dataOut, NegativeLimit=-1.5, PositiveLimit=1.5):
331 332
332 333 self.spc = dataOut.data_pre[0].copy()
333 334 self.Num_Chn = self.spc.shape[0]
334 335 VelRange = dataOut.spc_range[2]
335 336
336 337 # novalid corresponds to data within the Negative and PositiveLimit
# NegativeLimit/PositiveLimit are the limits named in the class docstring, exposed here as run()
# arguments; the default values are assumed placeholders (in m/s), not taken from the source.
# The last point of VelRange is dropped so the indices match the spectral bins (as in RemoveWideGC).
novalid = numpy.where(numpy.logical_and(VelRange[:-1] > NegativeLimit, VelRange[:-1] < PositiveLimit))[0]
337 338
338 339
339 340 # Removing novalid data from the spectra
340 341 for i in range(self.Num_Chn):
341 342 self.spc[i,novalid,:] = dataOut.noise[i]
342 343 dataOut.data_pre[0] = self.spc
343 344 return dataOut
344 345
345 346 class GaussianFit(Operation):
346 347
347 348 '''
348 349 Function that fits one or two generalized Gaussians (gg) based
349 350 on the PSD shape across a "power band" identified from a cumsum of
350 351 the measured spectrum minus the noise.
351 352
352 353 Input:
353 354 self.dataOut.data_pre : SelfSpectra
354 355
355 356 Output:
356 357 self.dataOut.SPCparam : SPC_ch1, SPC_ch2
357 358
358 359 '''
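# Each fitted peak follows a generalized Gaussian, S(v) = N + A * exp(-0.5 * |(v - v0)/w|**p),
# so DGauFitParams holds (noise, amplitude, shift, width, power) per channel, height and peak.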
359 360 def __init__(self):
360 361 Operation.__init__(self)
361 362 self.i=0
362 363
363 364
364 365 # def run(self, dataOut, num_intg=7, pnoise=1., SNRlimit=-9): #num_intg: Incoherent integrations, pnoise: Noise, vel_arr: range of velocities, similar to the ftt points
365 366 def run(self, dataOut, SNRdBlimit=-9, method='generalized'):
366 367 """This routine will find a couple of generalized Gaussians to a power spectrum
367 368 methods: generalized, squared
368 369 input: spc
369 370 output:
370 371 noise, amplitude0,shift0,width0,p0,Amplitude1,shift1,width1,p1
371 372 """
372 373 print ('Entering ',method,' double Gaussian fit')
373 374 self.spc = dataOut.data_pre[0].copy()
374 375 self.Num_Hei = self.spc.shape[2]
375 376 self.Num_Bin = self.spc.shape[1]
376 377 self.Num_Chn = self.spc.shape[0]
377 378
378 379 start_time = time.time()
379 380
380 381 pool = Pool(processes=self.Num_Chn)
381 382 args = [(dataOut.spc_range[2], ich, dataOut.spc_noise[ich], dataOut.nIncohInt, SNRdBlimit) for ich in range(self.Num_Chn)]
382 383 objs = [self for __ in range(self.Num_Chn)]
383 384 attrs = list(zip(objs, args))
384 385 DGauFitParam = pool.map(target, attrs)
385 386 # Parameters:
386 387 # 0. Noise, 1. Amplitude, 2. Shift, 3. Width 4. Power
387 388 dataOut.DGauFitParams = numpy.asarray(DGauFitParam)
388 389
389 390 # Double Gaussian Curves
390 391 gau0 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
391 392 gau0[:] = numpy.NaN
392 393 gau1 = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
393 394 gau1[:] = numpy.NaN
394 395 x_mtr = numpy.transpose(numpy.tile(dataOut.getVelRange(1)[:-1], (self.Num_Hei,1)))
395 396 for iCh in range(self.Num_Chn):
396 397 N0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,0]] * self.Num_Bin))
397 398 N1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][0,:,1]] * self.Num_Bin))
398 399 A0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,0]] * self.Num_Bin))
399 400 A1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][1,:,1]] * self.Num_Bin))
400 401 v0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,0]] * self.Num_Bin))
401 402 v1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][2,:,1]] * self.Num_Bin))
402 403 s0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,0]] * self.Num_Bin))
403 404 s1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][3,:,1]] * self.Num_Bin))
404 405 if method == 'generalized':
405 406 p0 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,0]] * self.Num_Bin))
406 407 p1 = numpy.transpose(numpy.transpose([dataOut.DGauFitParams[iCh][4,:,1]] * self.Num_Bin))
407 408 elif method == 'squared':
408 409 p0 = 2.
409 410 p1 = 2.
410 411 gau0[iCh] = A0*numpy.exp(-0.5*numpy.abs((x_mtr-v0)/s0)**p0)+N0
411 412 gau1[iCh] = A1*numpy.exp(-0.5*numpy.abs((x_mtr-v1)/s1)**p1)+N1
412 413 dataOut.GaussFit0 = gau0
413 414 dataOut.GaussFit1 = gau1
414 415
415 416 print('Leaving ',method ,' double Gaussian fit')
416 417 return dataOut
417 418
418 419 def FitGau(self, X):
419 420 # print('Entering FitGau')
420 421 # Assigning the variables
421 422 Vrange, ch, wnoise, num_intg, SNRlimit = X
422 423 # Noise Limits
423 424 noisebl = wnoise * 0.9
424 425 noisebh = wnoise * 1.1
425 426 # Radar Velocity
426 427 Va = max(Vrange)
427 428 deltav = Vrange[1] - Vrange[0]
428 429 x = numpy.arange(self.Num_Bin)
429 430
430 431 # print ('stop 0')
431 432
432 433 # 5 parameters, 2 Gaussians
433 434 DGauFitParam = numpy.zeros([5, self.Num_Hei,2])
434 435 DGauFitParam[:] = numpy.NaN
435 436
436 437 # SPCparam = []
437 438 # SPC_ch1 = numpy.zeros([self.Num_Bin,self.Num_Hei])
438 439 # SPC_ch2 = numpy.zeros([self.Num_Bin,self.Num_Hei])
439 440 # SPC_ch1[:] = 0 #numpy.NaN
440 441 # SPC_ch2[:] = 0 #numpy.NaN
441 442 # print ('stop 1')
442 443 for ht in range(self.Num_Hei):
443 444 # print (ht)
444 445 # print ('stop 2')
445 446 # Spectra at each range
446 447 spc = numpy.asarray(self.spc)[ch,:,ht]
447 448 snr = ( spc.mean() - wnoise ) / wnoise
448 449 snrdB = 10.*numpy.log10(snr)
449 450
450 451 #print ('stop 3')
451 452 if snrdB < SNRlimit :
452 453 # snr = numpy.NaN
453 454 # SPC_ch1[:,ht] = 0#numpy.NaN
454 455 # SPC_ch1[:,ht] = 0#numpy.NaN
455 456 # SPCparam = (SPC_ch1,SPC_ch2)
456 457 # print ('SNR less than SNRth')
457 458 continue
458 459 # wnoise = hildebrand_sekhon(spc,num_intg)
459 460 # print ('stop 2.01')
460 461 #############################################
461 462 # normalizing spc and noise
462 463 # This part differs from gg1
463 464 # spc_norm_max = max(spc) #commented by D. Scipión 19.03.2021
464 465 #spc = spc / spc_norm_max
465 466 # pnoise = pnoise #/ spc_norm_max #commented by D. Scipión 19.03.2021
466 467 #############################################
467 468
468 469 # print ('stop 2.1')
469 470 fatspectra=1.0
470 471 # noise per channel.... we might want to use the noise at each range
471 472
472 473 # wnoise = noise_ #/ spc_norm_max #commented by D. Scipión 19.03.2021
473 474 #wnoise,stdv,i_max,index =enoise(spc,num_intg) #noise estimate using Hildebrand Sekhon, only wnoise is used
474 475 #if wnoise>1.1*pnoise: # to be tested later
475 476 # wnoise=pnoise
476 477 # noisebl = wnoise*0.9
477 478 # noisebh = wnoise*1.1
478 479 spc = spc - wnoise # signal
479 480
480 481 # print ('stop 2.2')
481 482 minx = numpy.argmin(spc)
482 483 #spcs=spc.copy()
483 484 spcs = numpy.roll(spc,-minx)
484 485 cum = numpy.cumsum(spcs)
485 486 # tot_noise = wnoise * self.Num_Bin #64;
486 487
487 488 # print ('stop 2.3')
488 489 # snr = sum(spcs) / tot_noise
489 490 # snrdB = 10.*numpy.log10(snr)
490 491 #print ('stop 3')
491 492 # if snrdB < SNRlimit :
492 493 # snr = numpy.NaN
493 494 # SPC_ch1[:,ht] = 0#numpy.NaN
494 495 # SPC_ch1[:,ht] = 0#numpy.NaN
495 496 # SPCparam = (SPC_ch1,SPC_ch2)
496 497 # print ('SNR less than SNRth')
497 498 # continue
498 499
499 500
500 501 #if snrdB<-18 or numpy.isnan(snrdB) or num_intg<4:
501 502 # return [None,]*4,[None,]*4,None,snrdB,None,None,[None,]*5,[None,]*9,None
502 503 # print ('stop 4')
503 504 cummax = max(cum)
504 505 epsi = 0.08 * fatspectra # cumsum to narrow down the energy region
505 506 cumlo = cummax * epsi
506 507 cumhi = cummax * (1-epsi)
507 508 powerindex = numpy.array(numpy.where(numpy.logical_and(cum>cumlo, cum<cumhi))[0])
508 509
509 510 # print ('stop 5')
510 511 if len(powerindex) < 1:# case for powerindex 0
511 512 # print ('powerindex < 1')
512 513 continue
513 514 powerlo = powerindex[0]
514 515 powerhi = powerindex[-1]
515 516 powerwidth = powerhi-powerlo
516 517 if powerwidth <= 1:
517 518 # print('powerwidth <= 1')
518 519 continue
519 520
520 521 # print ('stop 6')
521 522 firstpeak = powerlo + powerwidth/10.# first gaussian energy location
522 523 secondpeak = powerhi - powerwidth/10. #second gaussian energy location
523 524 midpeak = (firstpeak + secondpeak)/2.
524 525 firstamp = spcs[int(firstpeak)]
525 526 secondamp = spcs[int(secondpeak)]
526 527 midamp = spcs[int(midpeak)]
527 528
528 529 y_data = spc + wnoise
529 530
530 531 ''' single Gaussian '''
531 532 shift0 = numpy.mod(midpeak+minx, self.Num_Bin )
532 533 width0 = powerwidth/4.#Initialization entire power of spectrum divided by 4
533 534 power0 = 2.
534 535 amplitude0 = midamp
535 536 state0 = [shift0,width0,amplitude0,power0,wnoise]
536 537 bnds = ((0,self.Num_Bin-1),(1,powerwidth),(0,None),(0.5,3.),(noisebl,noisebh))
537 538 lsq1 = fmin_l_bfgs_b(self.misfit1, state0, args=(y_data,x,num_intg), bounds=bnds, approx_grad=True)
538 539 # print ('stop 7.1')
539 540 # print (bnds)
540 541
541 542 chiSq1=lsq1[1]
542 543
543 544 # print ('stop 8')
544 545 if fatspectra<1.0 and powerwidth<4:
545 546 choice=0
546 547 Amplitude0=lsq1[0][2]
547 548 shift0=lsq1[0][0]
548 549 width0=lsq1[0][1]
549 550 p0=lsq1[0][3]
550 551 Amplitude1=0.
551 552 shift1=0.
552 553 width1=0.
553 554 p1=0.
554 555 noise=lsq1[0][4]
555 556 #return (numpy.array([shift0,width0,Amplitude0,p0]),
556 557 # numpy.array([shift1,width1,Amplitude1,p1]),noise,snrdB,chiSq1,6.,sigmas1,[None,]*9,choice)
557 558
558 559 # print ('stop 9')
559 560 ''' two Gaussians '''
560 561 #shift0=numpy.mod(firstpeak+minx,64); shift1=numpy.mod(secondpeak+minx,64)
561 562 shift0 = numpy.mod(firstpeak+minx, self.Num_Bin )
562 563 shift1 = numpy.mod(secondpeak+minx, self.Num_Bin )
563 564 width0 = powerwidth/6.
564 565 width1 = width0
565 566 power0 = 2.
566 567 power1 = power0
567 568 amplitude0 = firstamp
568 569 amplitude1 = secondamp
569 570 state0 = [shift0,width0,amplitude0,power0,shift1,width1,amplitude1,power1,wnoise]
570 571 #bnds=((0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(0,63),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
571 572 bnds=((0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(0,self.Num_Bin-1),(1,powerwidth/2.),(0,None),(0.5,3.),(noisebl,noisebh))
572 573 #bnds=(( 0,(self.Num_Bin-1) ),(1,powerwidth/2.),(0,None),(0.5,3.),( 0,(self.Num_Bin-1)),(1,powerwidth/2.),(0,None),(0.5,3.),(0.1,0.5))
573 574
574 575 # print ('stop 10')
575 576 lsq2 = fmin_l_bfgs_b( self.misfit2 , state0 , args=(y_data,x,num_intg) , bounds=bnds , approx_grad=True )
576 577
577 578 # print ('stop 11')
578 579 chiSq2 = lsq2[1]
579 580
580 581 # print ('stop 12')
581 582
582 583 oneG = (chiSq1<5 and chiSq1/chiSq2<2.0) and (abs(lsq2[0][0]-lsq2[0][4])<(lsq2[0][1]+lsq2[0][5])/3. or abs(lsq2[0][0]-lsq2[0][4])<10)
583 584
584 585 # print ('stop 13')
585 586 if snrdB>-12: # when SNR is strong pick the peak with least shift (LOS velocity) error
586 587 if oneG:
587 588 choice = 0
588 589 else:
589 590 w1 = lsq2[0][1]; w2 = lsq2[0][5]
590 591 a1 = lsq2[0][2]; a2 = lsq2[0][6]
591 592 p1 = lsq2[0][3]; p2 = lsq2[0][7]
592 593 s1 = (2**(1+1./p1))*scipy.special.gamma(1./p1)/p1
593 594 s2 = (2**(1+1./p2))*scipy.special.gamma(1./p2)/p2
594 595 gp1 = a1*w1*s1; gp2 = a2*w2*s2 # power content of each gaussian with proper p scaling
595 596
596 597 if gp1>gp2:
597 598 if a1>0.7*a2:
598 599 choice = 1
599 600 else:
600 601 choice = 2
601 602 elif gp2>gp1:
602 603 if a2>0.7*a1:
603 604 choice = 2
604 605 else:
605 606 choice = 1
606 607 else:
607 608 choice = numpy.argmax([a1,a2])+1
608 609 #else:
609 610 #choice=argmin([std2a,std2b])+1
610 611
611 612 else: # with low SNR go to the most energetic peak
612 613 choice = numpy.argmax([lsq1[0][2]*lsq1[0][1],lsq2[0][2]*lsq2[0][1],lsq2[0][6]*lsq2[0][5]])
613 614
614 615 # print ('stop 14')
615 616 shift0 = lsq2[0][0]
616 617 vel0 = Vrange[0] + shift0 * deltav
617 618 shift1 = lsq2[0][4]
618 619 # vel1=Vrange[0] + shift1 * deltav
619 620
620 621 # max_vel = 1.0
621 622 # Va = max(Vrange)
622 623 # deltav = Vrange[1]-Vrange[0]
623 624 # print ('stop 15')
624 625 #first peak will be 0, second peak will be 1
625 626 # if vel0 > -1.0 and vel0 < max_vel : #first peak is in the correct range # Commented by D.Scipión 19.03.2021
626 627 if vel0 > -Va and vel0 < Va : #first peak is in the correct range
627 628 shift0 = lsq2[0][0]
628 629 width0 = lsq2[0][1]
629 630 Amplitude0 = lsq2[0][2]
630 631 p0 = lsq2[0][3]
631 632
632 633 shift1 = lsq2[0][4]
633 634 width1 = lsq2[0][5]
634 635 Amplitude1 = lsq2[0][6]
635 636 p1 = lsq2[0][7]
636 637 noise = lsq2[0][8]
637 638 else:
638 639 shift1 = lsq2[0][0]
639 640 width1 = lsq2[0][1]
640 641 Amplitude1 = lsq2[0][2]
641 642 p1 = lsq2[0][3]
642 643
643 644 shift0 = lsq2[0][4]
644 645 width0 = lsq2[0][5]
645 646 Amplitude0 = lsq2[0][6]
646 647 p0 = lsq2[0][7]
647 648 noise = lsq2[0][8]
648 649
649 650 if Amplitude0<0.05: # in case the peak is noise
650 651 shift0,width0,Amplitude0,p0 = 4*[numpy.NaN]
651 652 if Amplitude1<0.05:
652 653 shift1,width1,Amplitude1,p1 = 4*[numpy.NaN]
653 654
654 655 # print ('stop 16 ')
655 656 # SPC_ch1[:,ht] = noise + Amplitude0*numpy.exp(-0.5*(abs(x-shift0)/width0)**p0)
656 657 # SPC_ch2[:,ht] = noise + Amplitude1*numpy.exp(-0.5*(abs(x-shift1)/width1)**p1)
657 658 # SPCparam = (SPC_ch1,SPC_ch2)
658 659
659 660 DGauFitParam[0,ht,0] = noise
660 661 DGauFitParam[0,ht,1] = noise
661 662 DGauFitParam[1,ht,0] = Amplitude0
662 663 DGauFitParam[1,ht,1] = Amplitude1
663 664 DGauFitParam[2,ht,0] = Vrange[0] + shift0 * deltav
664 665 DGauFitParam[2,ht,1] = Vrange[0] + shift1 * deltav
665 666 DGauFitParam[3,ht,0] = width0 * deltav
666 667 DGauFitParam[3,ht,1] = width1 * deltav
667 668 DGauFitParam[4,ht,0] = p0
668 669 DGauFitParam[4,ht,1] = p1
669 670
670 671 # print (DGauFitParam.shape)
671 672 # print ('Leaving FitGau')
672 673 return DGauFitParam
673 674 # return SPCparam
674 675 # return GauSPC
675 676
676 677 def y_model1(self,x,state):
677 678 shift0, width0, amplitude0, power0, noise = state
678 679 model0 = amplitude0*numpy.exp(-0.5*abs((x - shift0)/width0)**power0)
679 680 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
680 681 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
681 682 return model0 + model0u + model0d + noise
682 683
683 684 def y_model2(self,x,state): #Equation for two generalized Gaussians with Nyquist
684 685 shift0, width0, amplitude0, power0, shift1, width1, amplitude1, power1, noise = state
685 686 model0 = amplitude0*numpy.exp(-0.5*abs((x-shift0)/width0)**power0)
686 687 model0u = amplitude0*numpy.exp(-0.5*abs((x - shift0 - self.Num_Bin)/width0)**power0)
687 688 model0d = amplitude0*numpy.exp(-0.5*abs((x - shift0 + self.Num_Bin)/width0)**power0)
688 689
689 690 model1 = amplitude1*numpy.exp(-0.5*abs((x - shift1)/width1)**power1)
690 691 model1u = amplitude1*numpy.exp(-0.5*abs((x - shift1 - self.Num_Bin)/width1)**power1)
691 692 model1d = amplitude1*numpy.exp(-0.5*abs((x - shift1 + self.Num_Bin)/width1)**power1)
692 693 return model0 + model0u + model0d + model1 + model1u + model1d + noise
693 694
694 695 def misfit1(self,state,y_data,x,num_intg): # This function compares how close the real data is to the model data; the closer it is, the better.
695 696
696 697 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model1(x,state)))**2)#/(64-5.) # /(64-5.) can be commented
697 698
698 699 def misfit2(self,state,y_data,x,num_intg):
699 700 return num_intg*sum((numpy.log(y_data)-numpy.log(self.y_model2(x,state)))**2)#/(64-9.)
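# Illustrative note (not part of the original processing chain): the models above are
# generalized Gaussians with aliased replicas at +/- Num_Bin,
#     S(x) = A * exp(-0.5 * |(x - x0)/w|**p) + noise,
# and misfit1/misfit2 score a candidate state in the log domain. A minimal sketch,
# assuming a FitGau-like instance `op` with op.Num_Bin set and a measured spectrum
# `y_data` (the variable names here are hypothetical):
#
#   x = numpy.arange(op.Num_Bin)
#   state = [shift0, width0, amplitude0, power0, wnoise]
#   model = op.y_model1(x, state)                  # spectrum + aliased replicas + noise
#   cost = op.misfit1(state, y_data, x, num_intg)  # num_intg-weighted log least squares
#
# fmin_l_bfgs_b then minimizes `cost` over `state` within the bounds built in FitGau.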
700 701
701 702
702 703
703 704 class PrecipitationProc(Operation):
704 705
705 706 '''
706 707 Operator that estimates the reflectivity factor (Z) and the rainfall rate (R)
707 708
708 709 Input:
709 710 self.dataOut.data_pre : SelfSpectra
710 711
711 712 Output:
712 713
713 714 self.dataOut.data_output : Reflectivity factor, rainfall Rate
714 715
715 716
716 717 Parameters affected:
717 718 '''
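# Processing outline (a summary of run() below, not an alternative implementation):
#   Pr  = mean over channels of (spc - noise) / normFactor
#   ETA = RadarConstant * ExpConstant * Pr * r**2            # reflectivity per velocity bin
#   D   = -1.667 * log(0.9369 - 0.097087 * V/delv_z)         # drop diameter [mm], valid for D >= 0.16 mm
#   DSD = ETA / (Km2 * (D*1e-3)**6 * pi**5 / Lambda**4)      # drop size distribution
#   Ze  = nansum(ETA * Lambda**4) / (1e-18 * pi**5 * Km2)    # equivalent reflectivity [mm^6/m^3]
#   RR  = 6e-4 * pi * nansum(D**3 * DSD * V)                 # rain rate [mm/h]
# data_param then holds [10*log10(Ze), -W, RR] per height.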
718 719
719 720 def __init__(self):
720 721 Operation.__init__(self)
721 722 self.i=0
722 723
723 724 def run(self, dataOut, radar=None, Pt=5000, Gt=295.1209, Gr=70.7945, Lambda=0.6741, aL=2.5118,
724 725 tauW=4e-06, ThetaT=0.1656317, ThetaR=0.36774087, Km2 = 0.93, Altitude=3350,SNRdBlimit=-30):
725 726
726 727 # print ('Entering PrecipitationProc ... ')
727 728
728 729 if radar == "MIRA35C" :
729 730
730 731 self.spc = dataOut.data_pre[0].copy()
731 732 self.Num_Hei = self.spc.shape[2]
732 733 self.Num_Bin = self.spc.shape[1]
733 734 self.Num_Chn = self.spc.shape[0]
734 735 Ze = self.dBZeMODE2(dataOut)
735 736
736 737 else:
737 738
738 739 self.spc = dataOut.data_pre[0].copy()
739 740
740 741 #NOTE: the range gates covered by the TX pulse must be removed
741 742 self.spc[:,:,0:7]= numpy.NaN
742 743
743 744 self.Num_Hei = self.spc.shape[2]
744 745 self.Num_Bin = self.spc.shape[1]
745 746 self.Num_Chn = self.spc.shape[0]
746 747
747 748 VelRange = dataOut.spc_range[2]
748 749
749 750 ''' The radar constant is obtained '''
750 751
751 752 self.Pt = Pt
752 753 self.Gt = Gt
753 754 self.Gr = Gr
754 755 self.Lambda = Lambda
755 756 self.aL = aL
756 757 self.tauW = tauW
757 758 self.ThetaT = ThetaT
758 759 self.ThetaR = ThetaR
759 760 self.GSys = 10**(36.63/10) # LNA gain 36.63 dB
760 761 self.lt = 10**(1.67/10) # Tx cable loss 1.67 dB
761 762 self.lr = 10**(5.73/10) # Rx cable loss 5.73 dB
762 763
763 764 Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
764 765 Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * tauW * numpy.pi * ThetaT * ThetaR)
765 766 RadarConstant = 10e-26 * Numerator / Denominator #
766 767 ExpConstant = 10**(40/10) #Experimental constant
767 768
768 769 SignalPower = numpy.zeros([self.Num_Chn,self.Num_Bin,self.Num_Hei])
769 770 for i in range(self.Num_Chn):
770 771 SignalPower[i,:,:] = self.spc[i,:,:] - dataOut.noise[i]
771 772 SignalPower[numpy.where(SignalPower < 0)] = 1e-20
772 773
773 774 SPCmean = numpy.mean(SignalPower, 0)
774 775 Pr = SPCmean[:,:]/dataOut.normFactor
775 776
776 777 # Declaring auxiliary variables
777 778 Range = dataOut.heightList*1000. #Range in m
778 779 # replicate the heightlist to obtain a matrix [Num_Bin,Num_Hei]
779 780 rMtrx = numpy.transpose(numpy.transpose([dataOut.heightList*1000.] * self.Num_Bin))
780 781 zMtrx = rMtrx+Altitude
781 782 # replicate the VelRange to obtain a matrix [Num_Bin,Num_Hei]
782 783 VelMtrx = numpy.transpose(numpy.tile(VelRange[:-1], (self.Num_Hei,1)))
783 784
784 785 # height dependence to air density Foote and Du Toit (1969)
785 786 delv_z = 1 + 3.68e-5 * zMtrx + 1.71e-9 * zMtrx**2
786 787 VMtrx = VelMtrx / delv_z #Normalized velocity
787 788 VMtrx[numpy.where(VMtrx> 9.6)] = numpy.NaN
788 789 # Diameter is related to the fall speed of falling drops
789 790 D_Vz = -1.667 * numpy.log( 0.9369 - 0.097087 * VMtrx ) # D in [mm]
790 791 # Only valid for D>= 0.16 mm
791 792 D_Vz[numpy.where(D_Vz < 0.16)] = numpy.NaN
792 793
793 794 #Calculate Radar Reflectivity ETAn
794 795 ETAn = (RadarConstant *ExpConstant) * Pr * rMtrx**2 #Reflectivity (ETA)
795 796 ETAd = ETAn * 6.18 * numpy.exp( -0.6 * D_Vz ) * delv_z
796 797 # Radar Cross Section
797 798 sigmaD = Km2 * (D_Vz * 1e-3 )**6 * numpy.pi**5 / Lambda**4
798 799 # Drop Size Distribution
799 800 DSD = ETAn / sigmaD
800 801 # Equivalent reflectivity
801 802 Ze_eqn = numpy.nansum( DSD * D_Vz**6 ,axis=0)
802 803 Ze_org = numpy.nansum(ETAn * Lambda**4, axis=0) / (1e-18*numpy.pi**5 * Km2) # [mm^6 /m^3]
803 804 # RainFall Rate
804 805 RR = 0.0006*numpy.pi * numpy.nansum( D_Vz**3 * DSD * VelMtrx ,0) #mm/hr
805 806
806 807 # Censoring the data
807 808 # Removing data below SNRth; the SNR should be considered per channel
808 809 SNRth = 10**(SNRdBlimit/10) #-30dB
809 810 novalid = numpy.where((dataOut.data_snr[0,:] <SNRth) | (dataOut.data_snr[1,:] <SNRth) | (dataOut.data_snr[2,:] <SNRth)) # AND condition. Maybe OR condition better
810 811 W = numpy.nanmean(dataOut.data_dop,0)
811 812 W[novalid] = numpy.NaN
812 813 Ze_org[novalid] = numpy.NaN
813 814 RR[novalid] = numpy.NaN
814 815
815 816 dataOut.data_output = RR[8]
816 817 dataOut.data_param = numpy.ones([3,self.Num_Hei])
817 818 dataOut.channelList = [0,1,2]
818 819
819 820 dataOut.data_param[0]=10*numpy.log10(Ze_org)
820 821 dataOut.data_param[1]=-W
821 822 dataOut.data_param[2]=RR
822 823
823 824 # print ('Leaving PrecipitationProc ... ')
824 825 return dataOut
825 826
826 827 def dBZeMODE2(self, dataOut): # Processing for MIRA35C
827 828
828 829 NPW = dataOut.NPW
829 830 COFA = dataOut.COFA
830 831
831 832 SNR = numpy.array([self.spc[0,:,:] / NPW[0]]) #, self.spc[1,:,:] / NPW[1]])
832 833 RadarConst = dataOut.RadarConst
833 834 #frequency = 34.85*10**9
834 835
835 836 ETA = numpy.zeros(([self.Num_Chn ,self.Num_Hei]))
836 837 data_output = numpy.ones([self.Num_Chn , self.Num_Hei])*numpy.NaN
837 838
838 839 ETA = numpy.sum(SNR,1)
839 840
840 841 ETA = numpy.where(ETA != 0. , ETA, numpy.NaN)
841 842
842 843 Ze = numpy.ones([self.Num_Chn, self.Num_Hei] )
843 844
844 845 for r in range(self.Num_Hei):
845 846
846 847 Ze[0,r] = ( ETA[0,r] ) * COFA[0,r][0] * RadarConst * ((r/5000.)**2)
847 848 #Ze[1,r] = ( ETA[1,r] ) * COFA[1,r][0] * RadarConst * ((r/5000.)**2)
848 849
849 850 return Ze
850 851
851 852 # def GetRadarConstant(self):
852 853 #
853 854 # """
854 855 # Constants:
855 856 #
856 857 # Pt: Transmission Power dB 5kW 5000
857 858 # Gt: Transmission Gain dB 24.7 dB 295.1209
858 859 # Gr: Reception Gain dB 18.5 dB 70.7945
859 860 # Lambda: Wavelength m 0.6741 m 0.6741
860 861 # aL: Attenuation losses dB 4dB 2.5118
861 862 # tauW: Width of transmission pulse s 4us 4e-6
862 863 # ThetaT: Transmission antenna beam angle rad 0.1656317 rad 0.1656317
863 864 # ThetaR: Reception antenna beam angle rad 0.36774087 rad 0.36774087
864 865 #
865 866 # """
866 867 #
867 868 # Numerator = ( (4*numpy.pi)**3 * aL**2 * 16 * numpy.log(2) )
868 869 # Denominator = ( Pt * Gt * Gr * Lambda**2 * SPEED_OF_LIGHT * TauW * numpy.pi * ThetaT * TheraR)
869 870 # RadarConstant = Numerator / Denominator
870 871 #
871 872 # return RadarConstant
872 873
873 874
874 875
875 876 class FullSpectralAnalysis(Operation):
876 877
877 878 """
878 879 Function that implements Full Spectral Analysis technique.
879 880
880 881 Input:
881 882 self.dataOut.data_pre : SelfSpectra and CrossSpectra data
882 883 self.dataOut.groupList : Pairlist of channels
883 884 self.dataOut.ChanDist : Physical distance between receivers
884 885
885 886
886 887 Output:
887 888
888 889 self.dataOut.data_output : Zonal wind, Meridional wind, and Vertical wind
889 890
890 891
891 892 Parameters affected: Winds, height range, SNR
892 893
893 894 """
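# Solution outline (as implemented in WindEstimation below, following Briggs & Vincent 1992):
# after Gaussian fits to the mean self-spectrum and to |CSPC| of each pair, the
# phase-vs-frequency slopes give F and G, the fitted widths give A, B and H, and the
# horizontal wind follows from the 2x2 linear system
#     [[A, H], [H, B]] . [Vmer, Vzon]^T = [-F, -G]^T
# while the vertical wind is taken from the first spectral moment,
#     Vver = -m1 * c / (2 * radar_frequency).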
894 895 def run(self, dataOut, Xi01=None, Xi02=None, Xi12=None, Eta01=None, Eta02=None, Eta12=None, SNRdBlimit=-30,
895 896 minheight=None, maxheight=None, NegativeLimit=None, PositiveLimit=None):
896 897
897 898 spc = dataOut.data_pre[0].copy()
898 899 cspc = dataOut.data_pre[1]
899 900 nHeights = spc.shape[2]
900 901
901 902 # first_height = 0.75 #km (ref: data header 20170822)
902 903 # resolution_height = 0.075 #km
903 904 '''
904 905 finding height range. check this when radar parameters are changed!
905 906 '''
906 907 if maxheight is not None:
907 908 # range_max = math.ceil((maxheight - first_height) / resolution_height) # theoretical
908 909 range_max = math.ceil(13.26 * maxheight - 3) # empirical, works better
909 910 else:
910 911 range_max = nHeights
911 912 if minheight is not None:
912 913 # range_min = int((minheight - first_height) / resolution_height) # theoretical
913 914 range_min = int(13.26 * minheight - 5) # empirical, works better
914 915 if range_min < 0:
915 916 range_min = 0
916 917 else:
917 918 range_min = 0
918 919
919 920 pairsList = dataOut.groupList
920 921 if dataOut.ChanDist is not None :
921 922 ChanDist = dataOut.ChanDist
922 923 else:
923 924 ChanDist = numpy.array([[Xi01, Eta01],[Xi02,Eta02],[Xi12,Eta12]])
924 925
925 926 # 4 variables: zonal, meridional, vertical, and average SNR
926 927 data_param = numpy.zeros([4,nHeights]) * numpy.NaN
927 928 velocityX = numpy.zeros([nHeights]) * numpy.NaN
928 929 velocityY = numpy.zeros([nHeights]) * numpy.NaN
929 930 velocityZ = numpy.zeros([nHeights]) * numpy.NaN
930 931
931 932 dbSNR = 10*numpy.log10(numpy.average(dataOut.data_snr,0))
932 933
933 934 '''***********************************************WIND ESTIMATION**************************************'''
934 935 for Height in range(nHeights):
935 936
936 937 if Height >= range_min and Height < range_max:
937 938 # error_code will be useful in future analysis
938 939 [Vzon,Vmer,Vver, error_code] = self.WindEstimation(spc[:,:,Height], cspc[:,:,Height], pairsList,
939 940 ChanDist, Height, dataOut.noise, dataOut.spc_range, dbSNR[Height], SNRdBlimit, NegativeLimit, PositiveLimit,dataOut.frequency)
940 941
941 942 if abs(Vzon) < 100. and abs(Vmer) < 100.:
942 943 velocityX[Height] = Vzon
943 944 velocityY[Height] = -Vmer
944 945 velocityZ[Height] = Vver
945 946
946 947 # Censoring data with SNR threshold
947 948 dbSNR [dbSNR < SNRdBlimit] = numpy.NaN
948 949
949 950 data_param[0] = velocityX
950 951 data_param[1] = velocityY
951 952 data_param[2] = velocityZ
952 953 data_param[3] = dbSNR
953 954 dataOut.data_param = data_param
954 955 return dataOut
955 956
956 957 def moving_average(self,x, N=2):
957 958 """ convolution for smoothing data. note that the last N-1 values are a convolution with zeros """
958 959 return numpy.convolve(x, numpy.ones((N,))/N)[(N-1):]
959 960
960 961 def gaus(self,xSamples,Amp,Mu,Sigma):
961 962 return Amp * numpy.exp(-0.5*((xSamples - Mu)/Sigma)**2)
962 963
963 964 def Moments(self, ySamples, xSamples):
964 965 Power = numpy.nanmean(ySamples) # Power, 0th Moment
965 966 yNorm = ySamples / numpy.nansum(ySamples)
966 967 RadVel = numpy.nansum(xSamples * yNorm) # Radial Velocity, 1st Moment
967 968 Sigma2 = numpy.nansum(yNorm * (xSamples - RadVel)**2) # Spectral Width, 2nd Moment
968 969 StdDev = numpy.sqrt(numpy.abs(Sigma2)) # Standard deviation, spectral width
969 970 return numpy.array([Power,RadVel,StdDev])
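# Worked example (illustrative only): for ySamples = [1, 2, 4, 2, 1] and
# xSamples = [-2, -1, 0, 1, 2],
#   Power  = mean(y)          = 2.0
#   yNorm  = y / sum(y)       -> [0.1, 0.2, 0.4, 0.2, 0.1]
#   RadVel = sum(x * yNorm)   = 0.0
#   Sigma2 = sum(yNorm * x^2) = 1.2,  StdDev = sqrt(1.2) ~= 1.095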
970 971
971 972 def StopWindEstimation(self, error_code):
972 973 Vzon = numpy.NaN
973 974 Vmer = numpy.NaN
974 975 Vver = numpy.NaN
975 976 return Vzon, Vmer, Vver, error_code
976 977
977 978 def AntiAliasing(self, interval, maxstep):
978 979 """
979 980 function to prevent errors from aliased values when computing phaseslope
980 981 """
981 982 antialiased = numpy.zeros(len(interval))
982 983 copyinterval = interval.copy()
983 984
984 985 antialiased[0] = copyinterval[0]
985 986
986 987 for i in range(1,len(antialiased)):
987 988 step = interval[i] - interval[i-1]
988 989 if step > maxstep:
989 990 copyinterval -= 2*numpy.pi
990 991 antialiased[i] = copyinterval[i]
991 992 elif step < maxstep*(-1):
992 993 copyinterval += 2*numpy.pi
993 994 antialiased[i] = copyinterval[i]
994 995 else:
995 996 antialiased[i] = copyinterval[i].copy()
996 997
997 998 return antialiased
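# Worked example (illustrative only): with interval = [3.0, -3.0, -2.8] and
# maxstep = 4.5, the step 3.0 -> -3.0 equals -6.0 < -maxstep, so 2*pi is added to
# the remaining samples and the returned phases are approximately
# [3.0, 3.283, 3.483], i.e. the aliased jump is removed before the linear
# phase-slope fit.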
998 999
999 1000 def WindEstimation(self, spc, cspc, pairsList, ChanDist, Height, noise, AbbsisaRange, dbSNR, SNRlimit, NegativeLimit, PositiveLimit, radfreq):
1000 1001 """
1001 1002 Function that Calculates Zonal, Meridional and Vertical wind velocities.
1002 1003 Initial Version by E. Bocanegra updated by J. Zibell until Nov. 2019.
1003 1004
1004 1005 Input:
1005 1006 spc, cspc : self spectra and cross spectra data. In Briggs notation something like S_i*(S_i)_conj, (S_j)_conj respectively.
1006 1007 pairsList : Pairlist of channels
1007 1008 ChanDist : array of xi_ij and eta_ij
1008 1009 Height : height at which data is processed
1009 1010 noise : noise in [channels] format for specific height
1010 1011 Abbsisarange : range of the frequencies or velocities
1011 1012 dbSNR, SNRlimit : signal to noise ratio in db, lower limit
1012 1013
1013 1014 Output:
1014 1015 Vzon, Vmer, Vver : wind velocities
1015 1016 error_code : int that states where code is terminated
1016 1017
1017 1018 0 : no error detected
1018 1019 1 : Gaussian of mean spc exceeds widthlimit
1019 1020 2 : no Gaussian of mean spc found
1020 1021 3 : SNR too low or velocity too high -> e.g. precipitation
1021 1022 4 : at least one Gaussian of cspc exceeds widthlimit
1022 1023 5 : zero out of three cspc Gaussian fits converged
1023 1024 6 : phase slope fit could not be found
1024 1025 7 : arrays used to fit phase have different length
1025 1026 8 : frequency range is either too short (len <= 5) or very long (> 30% of cspc)
1026 1027
1027 1028 """
1028 1029
1029 1030 error_code = 0
1030 1031
1031 1032 nChan = spc.shape[0]
1032 1033 nProf = spc.shape[1]
1033 1034 nPair = cspc.shape[0]
1034 1035
1035 1036 SPC_Samples = numpy.zeros([nChan, nProf]) # for normalized spc values for one height
1036 1037 CSPC_Samples = numpy.zeros([nPair, nProf], dtype=numpy.complex_) # for normalized cspc values
1037 1038 phase = numpy.zeros([nPair, nProf]) # phase between channels
1038 1039 PhaseSlope = numpy.zeros(nPair) # slope of the phases, channelwise
1039 1040 PhaseInter = numpy.zeros(nPair) # intercept to the slope of the phases, channelwise
1040 1041 xFrec = AbbsisaRange[0][:-1] # frequency range
1041 1042 xVel = AbbsisaRange[2][:-1] # velocity range
1042 1043 xSamples = xFrec # the frequency range is taken
1043 1044 delta_x = xSamples[1] - xSamples[0] # delta_f or delta_x
1044 1045
1045 1046 # only consider velocities within NegativeLimit and PositiveLimit
1046 1047 if (NegativeLimit is None):
1047 1048 NegativeLimit = numpy.min(xVel)
1048 1049 if (PositiveLimit is None):
1049 1050 PositiveLimit = numpy.max(xVel)
1050 1051 xvalid = numpy.where((xVel > NegativeLimit) & (xVel < PositiveLimit))
1051 1052 xSamples_zoom = xSamples[xvalid]
1052 1053
1053 1054 '''Getting Eij and Nij'''
1054 1055 Xi01, Xi02, Xi12 = ChanDist[:,0]
1055 1056 Eta01, Eta02, Eta12 = ChanDist[:,1]
1056 1057
1057 1058 # spwd limit - updated by D. Scipión 30.03.2021
1058 1059 widthlimit = 10
1059 1060 '''************************* SPC is normalized ********************************'''
1060 1061 spc_norm = spc.copy()
1061 1062 # For each channel
1062 1063 for i in range(nChan):
1063 1064 spc_sub = spc_norm[i,:] - noise[i] # only the signal power
1064 1065 SPC_Samples[i] = spc_sub / (numpy.nansum(spc_sub) * delta_x)
1065 1066
1066 1067 '''********************** FITTING MEAN SPC GAUSSIAN **********************'''
1067 1068
1068 1069 """ the gaussian of the mean: first subtract noise, then normalize. this is legal because
1069 1070 you only fit the curve and don't need the absolute value of height for calculation,
1070 1071 only for estimation of width. for normalization of cross spectra, you need initial,
1071 1072 unnormalized self-spectra with noise.
1072 1073
1073 1074 Technically, you don't even need to normalize the self-spectra, as you only need the
1074 1075 width of the peak. However, it was left this way. Note that the normalization has a flaw:
1075 1076 due to subtraction of the noise, some values are below zero. Raw "spc" values should be
1076 1077 >= 0, as it is the modulus squared of the signal (the complex signal times its conjugate)
1077 1078 """
1078 1079 # initial conditions
1079 1080 popt = [1e-10,0,1e-10]
1080 1081 # Spectra average
1081 1082 SPCMean = numpy.average(SPC_Samples,0)
1082 1083 # Moments in frequency
1083 1084 SPCMoments = self.Moments(SPCMean[xvalid], xSamples_zoom)
1084 1085
1085 1086 # Gauss Fit SPC in frequency domain
1086 1087 if dbSNR > SNRlimit: # only if SNR > SNRth
1087 1088 try:
1088 1089 popt,pcov = curve_fit(self.gaus,xSamples_zoom,SPCMean[xvalid],p0=SPCMoments)
1089 1090 if popt[2] <= 0 or popt[2] > widthlimit: # CONDITION
1090 1091 return self.StopWindEstimation(error_code = 1)
1091 1092 FitGauss = self.gaus(xSamples_zoom,*popt)
1092 1093 except :#RuntimeError:
1093 1094 return self.StopWindEstimation(error_code = 2)
1094 1095 else:
1095 1096 return self.StopWindEstimation(error_code = 3)
1096 1097
1097 1098 '''***************************** CSPC Normalization *************************
1098 1099 The SPC spectra are used to normalize the cross spectra. Peaks from precipitation
1099 1100 influence the norm which is not desired. First, a range is identified where the
1100 1101 wind peak is estimated -> sum_wind is sum of those frequencies. Next, the area
1101 1102 around it gets cut off and values replaced by mean determined by the boundary
1102 1103 data -> sum_noise (spc is not normalized here, that's why the noise is important)
1103 1104
1104 1105 The sums are then added and multiplied by range/datapoints, because you need
1105 1106 an integral and not a sum for normalization.
1106 1107
1107 1108 A norm is found according to Briggs 92.
1108 1109 '''
1109 1110 # for each pair
1110 1111 for i in range(nPair):
1111 1112 cspc_norm = cspc[i,:].copy()
1112 1113 chan_index0 = pairsList[i][0]
1113 1114 chan_index1 = pairsList[i][1]
1114 1115 CSPC_Samples[i] = cspc_norm / (numpy.sqrt(numpy.nansum(spc_norm[chan_index0])*numpy.nansum(spc_norm[chan_index1])) * delta_x)
1115 1116 phase[i] = numpy.arctan2(CSPC_Samples[i].imag, CSPC_Samples[i].real)
1116 1117
1117 1118 CSPCmoments = numpy.vstack([self.Moments(numpy.abs(CSPC_Samples[0,xvalid]), xSamples_zoom),
1118 1119 self.Moments(numpy.abs(CSPC_Samples[1,xvalid]), xSamples_zoom),
1119 1120 self.Moments(numpy.abs(CSPC_Samples[2,xvalid]), xSamples_zoom)])
1120 1121
1121 1122 popt01, popt02, popt12 = [1e-10,0,1e-10], [1e-10,0,1e-10] ,[1e-10,0,1e-10]
1122 1123 FitGauss01, FitGauss02, FitGauss12 = numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples)), numpy.zeros(len(xSamples))
1123 1124
1124 1125 '''*******************************FIT GAUSS CSPC************************************'''
1125 1126 try:
1126 1127 popt01,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[0][xvalid]),p0=CSPCmoments[0])
1127 1128 if popt01[2] > widthlimit: # CONDITION
1128 1129 return self.StopWindEstimation(error_code = 4)
1129 1130 popt02,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[1][xvalid]),p0=CSPCmoments[1])
1130 1131 if popt02[2] > widthlimit: # CONDITION
1131 1132 return self.StopWindEstimation(error_code = 4)
1132 1133 popt12,pcov = curve_fit(self.gaus,xSamples_zoom,numpy.abs(CSPC_Samples[2][xvalid]),p0=CSPCmoments[2])
1133 1134 if popt12[2] > widthlimit: # CONDITION
1134 1135 return self.StopWindEstimation(error_code = 4)
1135 1136
1136 1137 FitGauss01 = self.gaus(xSamples_zoom, *popt01)
1137 1138 FitGauss02 = self.gaus(xSamples_zoom, *popt02)
1138 1139 FitGauss12 = self.gaus(xSamples_zoom, *popt12)
1139 1140 except:
1140 1141 return self.StopWindEstimation(error_code = 5)
1141 1142
1142 1143
1143 1144 '''************* Getting Fij ***************'''
1144 1145 # x-axis point of the gaussian where the center is located from GaussFit of spectra
1145 1146 GaussCenter = popt[1]
1146 1147 ClosestCenter = xSamples_zoom[numpy.abs(xSamples_zoom-GaussCenter).argmin()]
1147 1148 PointGauCenter = numpy.where(xSamples_zoom==ClosestCenter)[0][0]
1148 1149
1149 1150 # Point where e^-1 is located in the gaussian
1150 1151 PeMinus1 = numpy.max(FitGauss) * numpy.exp(-1)
1151 1152 FijClosest = FitGauss[numpy.abs(FitGauss-PeMinus1).argmin()] # The closest point to"Peminus1" in "FitGauss"
1152 1153 PointFij = numpy.where(FitGauss==FijClosest)[0][0]
1153 1154 Fij = numpy.abs(xSamples_zoom[PointFij] - xSamples_zoom[PointGauCenter])
1154 1155
1155 1156 '''********** Taking frequency ranges from mean SPCs **********'''
1156 1157 GauWidth = popt[2] * 3/2 # Bandwidth of Gau01
1157 1158 Range = numpy.empty(2)
1158 1159 Range[0] = GaussCenter - GauWidth
1159 1160 Range[1] = GaussCenter + GauWidth
1160 1161 # Point in x-axis where the bandwidth is located (min:max)
1161 1162 ClosRangeMin = xSamples_zoom[numpy.abs(xSamples_zoom-Range[0]).argmin()]
1162 1163 ClosRangeMax = xSamples_zoom[numpy.abs(xSamples_zoom-Range[1]).argmin()]
1163 1164 PointRangeMin = numpy.where(xSamples_zoom==ClosRangeMin)[0][0]
1164 1165 PointRangeMax = numpy.where(xSamples_zoom==ClosRangeMax)[0][0]
1165 1166 Range = numpy.array([ PointRangeMin, PointRangeMax ])
1166 1167 FrecRange = xSamples_zoom[ Range[0] : Range[1] ]
1167 1168
1168 1169 '''************************** Getting Phase Slope ***************************'''
1169 1170 for i in range(nPair):
1170 1171 if len(FrecRange) > 5:
1171 1172 PhaseRange = phase[i, xvalid[0][Range[0]:Range[1]]].copy()
1172 1173 mask = ~numpy.isnan(FrecRange) & ~numpy.isnan(PhaseRange)
1173 1174 if len(FrecRange) == len(PhaseRange):
1174 1175 try:
1175 1176 slope, intercept, _, _, _ = stats.linregress(FrecRange[mask], self.AntiAliasing(PhaseRange[mask], 4.5))
1176 1177 PhaseSlope[i] = slope
1177 1178 PhaseInter[i] = intercept
1178 1179 except:
1179 1180 return self.StopWindEstimation(error_code = 6)
1180 1181 else:
1181 1182 return self.StopWindEstimation(error_code = 7)
1182 1183 else:
1183 1184 return self.StopWindEstimation(error_code = 8)
1184 1185
1185 1186 '''*** Constants A-H correspond to the convention as in Briggs and Vincent 1992 ***'''
1186 1187
1187 1188 '''Getting constant C'''
1188 1189 cC=(Fij*numpy.pi)**2
1189 1190
1190 1191 '''****** Getting constants F and G ******'''
1191 1192 MijEijNij = numpy.array([[Xi02,Eta02], [Xi12,Eta12]])
1192 1193 # MijEijNij = numpy.array([[Xi01,Eta01], [Xi02,Eta02], [Xi12,Eta12]])
1193 1194 # MijResult0 = (-PhaseSlope[0] * cC) / (2*numpy.pi)
1194 1195 MijResult1 = (-PhaseSlope[1] * cC) / (2*numpy.pi)
1195 1196 MijResult2 = (-PhaseSlope[2] * cC) / (2*numpy.pi)
1196 1197 # MijResults = numpy.array([MijResult0, MijResult1, MijResult2])
1197 1198 MijResults = numpy.array([MijResult1, MijResult2])
1198 1199 (cF,cG) = numpy.linalg.solve(MijEijNij, MijResults)
1199 1200
1200 1201 '''****** Getting constants A, B and H ******'''
1201 1202 W01 = numpy.nanmax( FitGauss01 )
1202 1203 W02 = numpy.nanmax( FitGauss02 )
1203 1204 W12 = numpy.nanmax( FitGauss12 )
1204 1205
1205 1206 WijResult01 = ((cF * Xi01 + cG * Eta01)**2)/cC - numpy.log(W01 / numpy.sqrt(numpy.pi / cC))
1206 1207 WijResult02 = ((cF * Xi02 + cG * Eta02)**2)/cC - numpy.log(W02 / numpy.sqrt(numpy.pi / cC))
1207 1208 WijResult12 = ((cF * Xi12 + cG * Eta12)**2)/cC - numpy.log(W12 / numpy.sqrt(numpy.pi / cC))
1208 1209 WijResults = numpy.array([WijResult01, WijResult02, WijResult12])
1209 1210
1210 1211 WijEijNij = numpy.array([ [Xi01**2, Eta01**2, 2*Xi01*Eta01] , [Xi02**2, Eta02**2, 2*Xi02*Eta02] , [Xi12**2, Eta12**2, 2*Xi12*Eta12] ])
1211 1212 (cA,cB,cH) = numpy.linalg.solve(WijEijNij, WijResults)
1212 1213
1213 1214 VxVy = numpy.array([[cA,cH],[cH,cB]])
1214 1215 VxVyResults = numpy.array([-cF,-cG])
1215 1216 (Vmer,Vzon) = numpy.linalg.solve(VxVy, VxVyResults)
1216 1217 Vver = -SPCMoments[1]*SPEED_OF_LIGHT/(2*radfreq)
1217 1218 error_code = 0
1218 1219
1219 1220 return Vzon, Vmer, Vver, error_code
1220 1221
1221 1222 class SpectralMoments(Operation):
1222 1223
1223 1224 '''
1224 1225 Function SpectralMoments()
1225 1226
1226 1227 Calculates moments (power, mean, standard deviation) and SNR of the signal
1227 1228
1228 1229 Type of dataIn: Spectra
1229 1230
1230 1231 Configuration Parameters:
1231 1232
1232 1233 dirCosx : Cosine director in X axis
1233 1234 dirCosy : Cosine director in Y axis
1234 1235
1235 1236 elevation :
1236 1237 azimuth :
1237 1238
1238 1239 Input:
1239 1240 channelList : simple channel list to select e.g. [2,3,7]
1240 1241 self.dataOut.data_pre : Spectral data
1241 1242 self.dataOut.abscissaList : List of frequencies
1242 1243 self.dataOut.noise : Noise level per channel
1243 1244
1244 1245 Affected:
1245 1246 self.dataOut.moments : Parameters per channel
1246 1247 self.dataOut.data_snr : SNR per channel
1247 1248
1248 1249 '''
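# Moment definitions used in __calculateMoments below (fwindow is a spectral
# weighting window, n0 the noise level of the channel):
#   total_power = mean(S * fwindow)                                          -> data_pow
#   fd    = sum((S - n0) * f * fwindow) / sum((S - n0) * fwindow)            -> data_dop
#   w     = sqrt(sum((S - n0) * fwindow * (f - fd)^2) / sum((S - n0) * fwindow)) -> data_width
#   snr   = (mean(S) - n0) / n0                                              -> data_snr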
1249 1250
1250 1251 def run(self, dataOut):
1251 1252
1252 1253 data = dataOut.data_pre[0]
1253 1254 absc = dataOut.abscissaList[:-1]
1254 1255 noise = dataOut.noise
1255 1256 nChannel = data.shape[0]
1256 1257 data_param = numpy.zeros((nChannel, 4, data.shape[2]))
1257 1258
1258 1259 for ind in range(nChannel):
1259 1260 data_param[ind,:,:] = self.__calculateMoments( data[ind,:,:] , absc , noise[ind] )
1260 1261
1261 1262 dataOut.moments = data_param[:,1:,:]
1262 1263 dataOut.data_snr = data_param[:,0]
1263 1264 dataOut.data_pow = data_param[:,1]
1264 1265 dataOut.data_dop = data_param[:,2]
1265 1266 dataOut.data_width = data_param[:,3]
1266 1267 return dataOut
1267 1268
1268 1269 def __calculateMoments(self, oldspec, oldfreq, n0,
1269 1270 nicoh = None, graph = None, smooth = None, type1 = None, fwindow = None, snrth = None, dc = None, aliasing = None, oldfd = None, wwauto = None):
1270 1271
1271 1272 if (nicoh is None): nicoh = 1
1272 1273 if (graph is None): graph = 0
1273 1274 if (smooth is None): smooth = 0
1274 1275 elif (smooth < 3): smooth = 0
1275 1276
1276 1277 if (type1 is None): type1 = 0
1277 1278 if (fwindow is None): fwindow = numpy.zeros(oldfreq.size) + 1
1278 1279 if (snrth is None): snrth = -3
1279 1280 if (dc is None): dc = 0
1280 1281 if (aliasing is None): aliasing = 0
1281 1282 if (oldfd is None): oldfd = 0
1282 1283 if (wwauto is None): wwauto = 0
1283 1284
1284 1285 if (n0 < 1.e-20): n0 = 1.e-20
1285 1286
1286 1287 freq = oldfreq
1287 1288 vec_power = numpy.zeros(oldspec.shape[1])
1288 1289 vec_fd = numpy.zeros(oldspec.shape[1])
1289 1290 vec_w = numpy.zeros(oldspec.shape[1])
1290 1291 vec_snr = numpy.zeros(oldspec.shape[1])
1291 1292
1292 1293 # oldspec = numpy.ma.masked_invalid(oldspec)
1293 1294 for ind in range(oldspec.shape[1]):
1294 1295
1295 1296 spec = oldspec[:,ind]
1296 1297 aux = spec*fwindow
1297 1298 max_spec = aux.max()
1298 1299 m = aux.tolist().index(max_spec)
1299 1300
1300 1301 # Smooth
1301 1302 if (smooth == 0):
1302 1303 spec2 = spec
1303 1304 else:
1304 1305 spec2 = scipy.ndimage.filters.uniform_filter1d(spec,size=smooth)
1305 1306
1306 1307 # Moments Estimation
1307 1308 bb = spec2[numpy.arange(m,spec2.size)]
1308 1309 bb = (bb<n0).nonzero()
1309 1310 bb = bb[0]
1310 1311
1311 1312 ss = spec2[numpy.arange(0,m + 1)]
1312 1313 ss = (ss<n0).nonzero()
1313 1314 ss = ss[0]
1314 1315
1315 1316 if (bb.size == 0):
1316 1317 bb0 = spec.size - 1 - m
1317 1318 else:
1318 1319 bb0 = bb[0] - 1
1319 1320 if (bb0 < 0):
1320 1321 bb0 = 0
1321 1322
1322 1323 if (ss.size == 0):
1323 1324 ss1 = 1
1324 1325 else:
1325 1326 ss1 = max(ss) + 1
1326 1327
1327 1328 if (ss1 > m):
1328 1329 ss1 = m
1329 1330
1330 1331 #valid = numpy.arange(int(m + bb0 - ss1 + 1)) + ss1
1331 1332 valid = numpy.arange(1,oldspec.shape[0]) # valid: full profile, same as pulse-pair
1332 1333 signal_power = ((spec2[valid] - n0) * fwindow[valid]).mean() # D. Scipión added with correct definition
1333 1334 total_power = (spec2[valid] * fwindow[valid]).mean() # D. Scipión added with correct definition
1334 1335 power = ((spec2[valid] - n0) * fwindow[valid]).sum()
1335 1336 fd = ((spec2[valid]- n0)*freq[valid] * fwindow[valid]).sum() / power
1336 1337 w = numpy.sqrt(((spec2[valid] - n0)*fwindow[valid]*(freq[valid]- fd)**2).sum() / power)
1337 1338 snr = (spec2.mean()-n0)/n0
1338 1339 if (snr < 1.e-20) :
1339 1340 snr = 1.e-20
1340 1341
1341 1342 # vec_power[ind] = power #D. Scipión replaced with the line below
1342 1343 vec_power[ind] = total_power
1343 1344 vec_fd[ind] = fd
1344 1345 vec_w[ind] = w
1345 1346 vec_snr[ind] = snr
1346 1347
1347 1348 return numpy.vstack((vec_snr, vec_power, vec_fd, vec_w))
1348 1349
1349 1350 #------------------ Get SA Parameters --------------------------
1350 1351
1351 1352 def GetSAParameters(self):
1352 1353 #SA in the frequency domain
1353 1354 pairslist = self.dataOut.groupList
1354 1355 num_pairs = len(pairslist)
1355 1356
1356 1357 vel = self.dataOut.abscissaList
1357 1358 spectra = self.dataOut.data_pre
1358 1359 cspectra = self.dataIn.data_cspc
1359 1360 delta_v = vel[1] - vel[0]
1360 1361
1361 1362 #Calculating the power spectrum
1362 1363 spc_pow = numpy.sum(spectra, 3)*delta_v
1363 1364 #Normalizing Spectra
1364 1365 norm_spectra = spectra/spc_pow
1365 1366 #Calculating the norm_spectra at peak
1366 1367 max_spectra = numpy.max(norm_spectra, 3)
1367 1368
1368 1369 #Normalizing Cross Spectra
1369 1370 norm_cspectra = numpy.zeros(cspectra.shape)
1370 1371
1371 1372 for i in range(num_pairs):
1372 1373 norm_cspectra[i,:,:] = cspectra[i,:,:]/numpy.sqrt(spc_pow[pairslist[i][0],:]*spc_pow[pairslist[i][1],:])
1373 1374
1374 1375 max_cspectra = numpy.max(norm_cspectra,2)
1375 1376 max_cspectra_index = numpy.argmax(norm_cspectra, 2)
1376 1377
1377 1378 for i in range(num_pairs):
1378 1379 cspc_par[i,:,:] = __calculateMoments(norm_cspectra)
1379 1380 #------------------- Get Lags ----------------------------------
1380 1381
1381 1382 class SALags(Operation):
1382 1383 '''
1383 1384 Function SALags()
1384 1385
1385 1386 Input:
1386 1387 self.dataOut.data_pre
1387 1388 self.dataOut.abscissaList
1388 1389 self.dataOut.noise
1389 1390 self.dataOut.normFactor
1390 1391 self.dataOut.data_snr
1391 1392 self.dataOut.groupList
1392 1393 self.dataOut.nChannels
1393 1394
1394 1395 Affected:
1395 1396 self.dataOut.data_param
1396 1397
1397 1398 '''
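# Output layout (as built in run() below): with nPairs = len(pairs_ccf),
#   dataOut.data_param[0:nPairs, :]        -> tau_ccf, cross-correlation lag per pair
#   dataOut.data_param[nPairs:2*nPairs, :] -> tau_acf, matching autocorrelation lag
#   dataOut.data_param[-1, :]              -> phase of the mean ACF at lag 1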
1398 1399 def run(self, dataOut):
1399 1400 data_acf = dataOut.data_pre[0]
1400 1401 data_ccf = dataOut.data_pre[1]
1401 1402 normFactor_acf = dataOut.normFactor[0]
1402 1403 normFactor_ccf = dataOut.normFactor[1]
1403 1404 pairs_acf = dataOut.groupList[0]
1404 1405 pairs_ccf = dataOut.groupList[1]
1405 1406
1406 1407 nHeights = dataOut.nHeights
1407 1408 absc = dataOut.abscissaList
1408 1409 noise = dataOut.noise
1409 1410 SNR = dataOut.data_snr
1410 1411 nChannels = dataOut.nChannels
1411 1412 # pairsList = dataOut.groupList
1412 1413 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairsList, nChannels)
1413 1414
1414 1415 for l in range(len(pairs_acf)):
1415 1416 data_acf[l,:,:] = data_acf[l,:,:]/normFactor_acf[l,:]
1416 1417
1417 1418 for l in range(len(pairs_ccf)):
1418 1419 data_ccf[l,:,:] = data_ccf[l,:,:]/normFactor_ccf[l,:]
1419 1420
1420 1421 dataOut.data_param = numpy.zeros((len(pairs_ccf)*2 + 1, nHeights))
1421 1422 dataOut.data_param[:-1,:] = self.__calculateTaus(data_acf, data_ccf, absc)
1422 1423 dataOut.data_param[-1,:] = self.__calculateLag1Phase(data_acf, absc)
1423 1424 return
1424 1425
1425 1426 # def __getPairsAutoCorr(self, pairsList, nChannels):
1426 1427 #
1427 1428 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
1428 1429 #
1429 1430 # for l in range(len(pairsList)):
1430 1431 # firstChannel = pairsList[l][0]
1431 1432 # secondChannel = pairsList[l][1]
1432 1433 #
1433 1434 # #Getting the autocorrelation pairs
1434 1435 # if firstChannel == secondChannel:
1435 1436 # pairsAutoCorr[firstChannel] = int(l)
1436 1437 #
1437 1438 # pairsAutoCorr = pairsAutoCorr.astype(int)
1438 1439 #
1439 1440 # pairsCrossCorr = range(len(pairsList))
1440 1441 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
1441 1442 #
1442 1443 # return pairsAutoCorr, pairsCrossCorr
1443 1444
1444 1445 def __calculateTaus(self, data_acf, data_ccf, lagRange):
1445 1446
1446 1447 lag0 = data_acf.shape[1]//2
1447 1448 #Autocorrelation function
1448 1449 mean_acf = stats.nanmean(data_acf, axis = 0)
1449 1450
1450 1451 #Getting the TauCross index
1451 1452 ind_ccf = data_ccf.argmax(axis = 1)
1452 1453 #Getting the TauAuto index
1453 1454 ind_acf = numpy.zeros(ind_ccf.shape,dtype = 'int')
1454 1455 ccf_lag0 = data_ccf[:,lag0,:]
1455 1456
1456 1457 for i in range(ccf_lag0.shape[0]):
1457 1458 ind_acf[i,:] = numpy.abs(mean_acf - ccf_lag0[i,:]).argmin(axis = 0)
1458 1459
1459 1460 #Getting TauCross and TauAuto
1460 1461 tau_ccf = lagRange[ind_ccf]
1461 1462 tau_acf = lagRange[ind_acf]
1462 1463
1463 1464 Nan1, Nan2 = numpy.where(tau_ccf == lagRange[0])
1464 1465
1465 1466 tau_ccf[Nan1,Nan2] = numpy.nan
1466 1467 tau_acf[Nan1,Nan2] = numpy.nan
1467 1468 tau = numpy.vstack((tau_ccf,tau_acf))
1468 1469
1469 1470 return tau
1470 1471
1471 1472 def __calculateLag1Phase(self, data, lagTRange):
1472 1473 data1 = stats.nanmean(data, axis = 0)
1473 1474 lag1 = numpy.where(lagTRange == 0)[0][0] + 1
1474 1475
1475 1476 phase = numpy.angle(data1[lag1,:])
1476 1477
1477 1478 return phase
1478 1479
1479 1480 class SpectralFitting(Operation):
1480 1481 '''
1481 1482 Function SpectralFitting()
1482 1483
1483 1484 Input:
1484 1485 Output:
1485 1486 Variables modified:
1486 1487 '''
1487 1488
1488 1489 def run(self, dataOut, getSNR = True, path=None, file=None, groupList=None):
1489 1490
1490 1491
1491 1492 if path != None:
1492 1493 sys.path.append(path)
1493 1494 self.dataOut.library = importlib.import_module(file)
1494 1495
1495 1496 #To be inserted as a parameter
1496 1497 groupArray = numpy.array(groupList)
1497 1498 # groupArray = numpy.array([[0,1],[2,3]])
1498 1499 self.dataOut.groupList = groupArray
1499 1500
1500 1501 nGroups = groupArray.shape[0]
1501 1502 nChannels = self.dataIn.nChannels
1502 1503 nHeights=self.dataIn.heightList.size
1503 1504
1504 1505 #Parameters Array
1505 1506 self.dataOut.data_param = None
1506 1507
1507 1508 #Set constants
1508 1509 constants = self.dataOut.library.setConstants(self.dataIn)
1509 1510 self.dataOut.constants = constants
1510 1511 M = self.dataIn.normFactor
1511 1512 N = self.dataIn.nFFTPoints
1512 1513 ippSeconds = self.dataIn.ippSeconds
1513 1514 K = self.dataIn.nIncohInt
1514 1515 pairsArray = numpy.array(self.dataIn.pairsList)
1515 1516
1516 1517 #List of possible combinations
1517 1518 listComb = list(itertools.combinations(numpy.arange(groupArray.shape[1]),2)) # materialized so it can be iterated more than once
1518 1519 indCross = numpy.zeros(len(listComb), dtype = 'int')
1519 1520
1520 1521 if getSNR:
1521 1522 listChannels = groupArray.reshape((groupArray.size))
1522 1523 listChannels.sort()
1523 1524 noise = self.dataIn.getNoise()
1524 1525 self.dataOut.data_snr = self.__getSNR(self.dataIn.data_spc[listChannels,:,:], noise[listChannels])
1525 1526
1526 1527 for i in range(nGroups):
1527 1528 coord = groupArray[i,:]
1528 1529
1529 1530 #Input data array
1530 1531 data = self.dataIn.data_spc[coord,:,:]/(M*N)
1531 1532 data = data.reshape((data.shape[0]*data.shape[1],data.shape[2]))
1532 1533
1533 1534 #Cross Spectra data array for Covariance Matrices
1534 1535 ind = 0
1535 1536 for pairs in listComb:
1536 1537 pairsSel = numpy.array([coord[pairs[0]],coord[pairs[1]]])
1537 1538 indCross[ind] = int(numpy.where(numpy.all(pairsArray == pairsSel, axis = 1))[0][0])
1538 1539 ind += 1
1539 1540 dataCross = self.dataIn.data_cspc[indCross,:,:]/(M*N)
1540 1541 dataCross = dataCross**2/K
1541 1542
1542 1543 for h in range(nHeights):
1543 1544
1544 1545 #Input
1545 1546 d = data[:,h]
1546 1547
1547 1548 #Covariance Matrix
1548 1549 D = numpy.diag(d**2/K)
1549 1550 ind = 0
1550 1551 for pairs in listComb:
1551 1552 #Coordinates in Covariance Matrix
1552 1553 x = pairs[0]
1553 1554 y = pairs[1]
1554 1555 #Channel Index
1555 1556 S12 = dataCross[ind,:,h]
1556 1557 D12 = numpy.diag(S12)
1557 1558 #Completing Covariance Matrix with Cross Spectra
1558 1559 D[x*N:(x+1)*N,y*N:(y+1)*N] = D12
1559 1560 D[y*N:(y+1)*N,x*N:(x+1)*N] = D12
1560 1561 ind += 1
1561 1562 Dinv=numpy.linalg.inv(D)
1562 1563 L=numpy.linalg.cholesky(Dinv)
1563 1564 LT=L.T
1564 1565
1565 1566 dp = numpy.dot(LT,d)
1566 1567
1567 1568 #Initial values
1568 1569 data_spc = self.dataIn.data_spc[coord,:,h]
1569 1570
1570 1571 if (h>0)and(error1[3]<5):
1571 1572 p0 = self.dataOut.data_param[i,:,h-1]
1572 1573 else:
1573 1574 p0 = numpy.array(self.dataOut.library.initialValuesFunction(data_spc, constants, i))
1574 1575
1575 1576 try:
1576 1577 #Least Squares
1577 1578 minp,covp,infodict,mesg,ier = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants),full_output=True)
1578 1579 # minp,covp = optimize.leastsq(self.__residFunction,p0,args=(dp,LT,constants))
1579 1580 #Chi square error
1580 1581 error0 = numpy.sum(infodict['fvec']**2)/(2*N)
1581 1582 #Error with Jacobian
1582 1583 error1 = self.dataOut.library.errorFunction(minp,constants,LT)
1583 1584 except:
1584 1585 minp = p0*numpy.nan
1585 1586 error0 = numpy.nan
1586 1587 error1 = p0*numpy.nan
1587 1588
1588 1589 #Save
1589 1590 if self.dataOut.data_param is None:
1590 1591 self.dataOut.data_param = numpy.zeros((nGroups, p0.size, nHeights))*numpy.nan
1591 1592 self.dataOut.data_error = numpy.zeros((nGroups, p0.size + 1, nHeights))*numpy.nan
1592 1593
1593 1594 self.dataOut.data_error[i,:,h] = numpy.hstack((error0,error1))
1594 1595 self.dataOut.data_param[i,:,h] = minp
1595 1596 return
1596 1597
1597 1598 def __residFunction(self, p, dp, LT, constants):
1598 1599
1599 1600 fm = self.dataOut.library.modelFunction(p, constants)
1600 1601 fmp=numpy.dot(LT,fm)
1601 1602
1602 1603 return dp-fmp
1603 1604
1604 1605 def __getSNR(self, z, noise):
1605 1606
1606 1607 avg = numpy.average(z, axis=1)
1607 1608 SNR = (avg.T-noise)/noise
1608 1609 SNR = SNR.T
1609 1610 return SNR
1610 1611
1611 1612 def __chisq(p,chindex,hindex):
1612 1613 #similar to Resid but calculates CHI**2
1613 1614 [LT,d,fm]=setupLTdfm(p,chindex,hindex)
1614 1615 dp=numpy.dot(LT,d)
1615 1616 fmp=numpy.dot(LT,fm)
1616 1617 chisq=numpy.dot((dp-fmp).T,(dp-fmp))
1617 1618 return chisq
1618 1619
1619 1620 class WindProfiler(Operation):
1620 1621
1621 1622 __isConfig = False
1622 1623
1623 1624 __initime = None
1624 1625 __lastdatatime = None
1625 1626 __integrationtime = None
1626 1627
1627 1628 __buffer = None
1628 1629
1629 1630 __dataReady = False
1630 1631
1631 1632 __firstdata = None
1632 1633
1633 1634 n = None
1634 1635
1635 1636 def __init__(self):
1636 1637 Operation.__init__(self)
1637 1638
1638 1639 def __calculateCosDir(self, elev, azim):
1639 1640 zen = (90 - elev)*numpy.pi/180
1640 1641 azim = azim*numpy.pi/180
1641 1642 cosDirX = numpy.sqrt((1-numpy.cos(zen)**2)/((1+numpy.tan(azim)**2)))
1642 1643 cosDirY = numpy.sqrt(1-numpy.cos(zen)**2-cosDirX**2)
1643 1644
1644 1645 signX = numpy.sign(numpy.cos(azim))
1645 1646 signY = numpy.sign(numpy.sin(azim))
1646 1647
1647 1648 cosDirX = numpy.copysign(cosDirX, signX)
1648 1649 cosDirY = numpy.copysign(cosDirY, signY)
1649 1650 return cosDirX, cosDirY
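# Worked example (illustrative only): elev = 75 deg, azim = 0 deg gives
# zen = 15 deg, cosDirX = sin(zen) ~= 0.259 and cosDirY = 0, i.e. a beam
# tilted 15 deg from zenith along the x direction.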
1650 1651
1651 1652 def __calculateAngles(self, theta_x, theta_y, azimuth):
1652 1653
1653 1654 dir_cosw = numpy.sqrt(1-theta_x**2-theta_y**2)
1654 1655 zenith_arr = numpy.arccos(dir_cosw)
1655 1656 azimuth_arr = numpy.arctan2(theta_x,theta_y) + azimuth*math.pi/180
1656 1657
1657 1658 dir_cosu = numpy.sin(azimuth_arr)*numpy.sin(zenith_arr)
1658 1659 dir_cosv = numpy.cos(azimuth_arr)*numpy.sin(zenith_arr)
1659 1660
1660 1661 return azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw
1661 1662
1662 1663 def __calculateMatA(self, dir_cosu, dir_cosv, dir_cosw, horOnly):
1663 1664
1664 1665 #
1665 1666 if horOnly:
1666 1667 A = numpy.c_[dir_cosu,dir_cosv]
1667 1668 else:
1668 1669 A = numpy.c_[dir_cosu,dir_cosv,dir_cosw]
1669 1670 A = numpy.asmatrix(A)
1670 1671 A1 = numpy.linalg.inv(A.transpose()*A)*A.transpose()
1671 1672
1672 1673 return A1
1673 1674
1674 1675 def __correctValues(self, heiRang, phi, velRadial, SNR):
1675 1676 listPhi = phi.tolist()
1676 1677 maxid = listPhi.index(max(listPhi))
1677 1678 minid = listPhi.index(min(listPhi))
1678 1679
1679 1680 rango = list(range(len(phi)))
1680 1681 # rango = numpy.delete(rango,maxid)
1681 1682
1682 1683 heiRang1 = heiRang*math.cos(phi[maxid])
1683 1684 heiRangAux = heiRang*math.cos(phi[minid])
1684 1685 indOut = (heiRang1 < heiRangAux[0]).nonzero()
1685 1686 heiRang1 = numpy.delete(heiRang1,indOut)
1686 1687
1687 1688 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
1688 1689 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
1689 1690
1690 1691 for i in rango:
1691 1692 x = heiRang*math.cos(phi[i])
1692 1693 y1 = velRadial[i,:]
1693 1694 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
1694 1695
1695 1696 x1 = heiRang1
1696 1697 y11 = f1(x1)
1697 1698
1698 1699 y2 = SNR[i,:]
1699 1700 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
1700 1701 y21 = f2(x1)
1701 1702
1702 1703 velRadial1[i,:] = y11
1703 1704 SNR1[i,:] = y21
1704 1705
1705 1706 return heiRang1, velRadial1, SNR1
1706 1707
1707 1708 def __calculateVelUVW(self, A, velRadial):
1708 1709
1709 1710 #Matrix operation
1710 1711 # velUVW = numpy.zeros((velRadial.shape[1],3))
1711 1712 # for ind in range(velRadial.shape[1]):
1712 1713 # velUVW[ind,:] = numpy.dot(A,velRadial[:,ind])
1713 1714 # velUVW = velUVW.transpose()
1714 1715 velUVW = numpy.zeros((A.shape[0],velRadial.shape[1]))
1715 1716 velUVW[:,:] = numpy.dot(A,velRadial)
1716 1717
1717 1718
1718 1719 return velUVW
1719 1720
1720 1721 # def techniqueDBS(self, velRadial0, dirCosx, disrCosy, azimuth, correct, horizontalOnly, heiRang, SNR0):
1721 1722
1722 1723 def techniqueDBS(self, kwargs):
1723 1724 """
1724 1725 Function that implements Doppler Beam Swinging (DBS) technique.
1725 1726
1726 1727 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
1727 1728 Direction correction (if necessary), Ranges and SNR
1728 1729
1729 1730 Output: Winds estimation (Zonal, Meridional and Vertical)
1730 1731
1731 1732 Parameters affected: Winds, height range, SNR
1732 1733 """
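# Expected kwargs (as read below): 'velRadial', 'heightList', 'SNR',
# either ('dirCosx', 'dirCosy') or ('elevation', 'azimuth'), 'correctAzimuth',
# and optionally 'horizontalOnly', 'correctFactor' and 'channelList'.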
1733 1734 velRadial0 = kwargs['velRadial']
1734 1735 heiRang = kwargs['heightList']
1735 1736 SNR0 = kwargs['SNR']
1736 1737
1737 1738 if 'dirCosx' in kwargs and 'dirCosy' in kwargs:
1738 1739 theta_x = numpy.array(kwargs['dirCosx'])
1739 1740 theta_y = numpy.array(kwargs['dirCosy'])
1740 1741 else:
1741 1742 elev = numpy.array(kwargs['elevation'])
1742 1743 azim = numpy.array(kwargs['azimuth'])
1743 1744 theta_x, theta_y = self.__calculateCosDir(elev, azim)
1744 1745 azimuth = kwargs['correctAzimuth']
1745 1746 if 'horizontalOnly' in kwargs:
1746 1747 horizontalOnly = kwargs['horizontalOnly']
1747 1748 else: horizontalOnly = False
1748 1749 if 'correctFactor' in kwargs:
1749 1750 correctFactor = kwargs['correctFactor']
1750 1751 else: correctFactor = 1
1751 1752 if 'channelList' in kwargs:
1752 1753 channelList = kwargs['channelList']
1753 1754 if len(channelList) == 2:
1754 1755 horizontalOnly = True
1755 1756 arrayChannel = numpy.array(channelList)
1756 1757 param = param[arrayChannel,:,:]
1757 1758 theta_x = theta_x[arrayChannel]
1758 1759 theta_y = theta_y[arrayChannel]
1759 1760
1760 1761 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
1761 1762 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, zenith_arr, correctFactor*velRadial0, SNR0)
1762 1763 A = self.__calculateMatA(dir_cosu, dir_cosv, dir_cosw, horizontalOnly)
1763 1764
1764 1765 #Computing the velocity components with DBS
1765 1766 winds = self.__calculateVelUVW(A,velRadial1)
1766 1767
1767 1768 return winds, heiRang1, SNR1
1768 1769
1769 1770 def __calculateDistance(self, posx, posy, pairs_ccf, azimuth = None):
1770 1771
1771 1772 nPairs = len(pairs_ccf)
1772 1773 posx = numpy.asarray(posx)
1773 1774 posy = numpy.asarray(posy)
1774 1775
1775 1776 #Inverse rotation to align with the azimuth
1776 1777 if azimuth is not None:
1777 1778 azimuth = azimuth*math.pi/180
1778 1779 posx1 = posx*math.cos(azimuth) + posy*math.sin(azimuth)
1779 1780 posy1 = -posx*math.sin(azimuth) + posy*math.cos(azimuth)
1780 1781 else:
1781 1782 posx1 = posx
1782 1783 posy1 = posy
1783 1784
1784 1785 #Computing distances
1785 1786 distx = numpy.zeros(nPairs)
1786 1787 disty = numpy.zeros(nPairs)
1787 1788 dist = numpy.zeros(nPairs)
1788 1789 ang = numpy.zeros(nPairs)
1789 1790
1790 1791 for i in range(nPairs):
1791 1792 distx[i] = posx1[pairs_ccf[i][1]] - posx1[pairs_ccf[i][0]]
1792 1793 disty[i] = posy1[pairs_ccf[i][1]] - posy1[pairs_ccf[i][0]]
1793 1794 dist[i] = numpy.sqrt(distx[i]**2 + disty[i]**2)
1794 1795 ang[i] = numpy.arctan2(disty[i],distx[i])
1795 1796
1796 1797 return distx, disty, dist, ang
1797 1798 #Calculo de Matrices
1798 1799 # nPairs = len(pairs)
1799 1800 # ang1 = numpy.zeros((nPairs, 2, 1))
1800 1801 # dist1 = numpy.zeros((nPairs, 2, 1))
1801 1802 #
1802 1803 # for j in range(nPairs):
1803 1804 # dist1[j,0,0] = dist[pairs[j][0]]
1804 1805 # dist1[j,1,0] = dist[pairs[j][1]]
1805 1806 # ang1[j,0,0] = ang[pairs[j][0]]
1806 1807 # ang1[j,1,0] = ang[pairs[j][1]]
1807 1808 #
1808 1809 # return distx,disty, dist1,ang1
1809 1810
1810 1811
1811 1812 def __calculateVelVer(self, phase, lagTRange, _lambda):
1812 1813
1813 1814 Ts = lagTRange[1] - lagTRange[0]
1814 1815 velW = -_lambda*phase/(4*math.pi*Ts)
1815 1816
1816 1817 return velW
1817 1818
1818 1819 def __calculateVelHorDir(self, dist, tau1, tau2, ang):
1819 1820 nPairs = tau1.shape[0]
1820 1821 nHeights = tau1.shape[1]
1821 1822 vel = numpy.zeros((nPairs,3,nHeights))
1822 1823 dist1 = numpy.reshape(dist, (dist.size,1))
1823 1824
1824 1825 angCos = numpy.cos(ang)
1825 1826 angSin = numpy.sin(ang)
1826 1827
1827 1828 vel0 = dist1*tau1/(2*tau2**2)
1828 1829 vel[:,0,:] = (vel0*angCos).sum(axis = 1)
1829 1830 vel[:,1,:] = (vel0*angSin).sum(axis = 1)
1830 1831
1831 1832 ind = numpy.where(numpy.isinf(vel))
1832 1833 vel[ind] = numpy.nan
1833 1834
1834 1835 return vel
1835 1836
1836 1837 # def __getPairsAutoCorr(self, pairsList, nChannels):
1837 1838 #
1838 1839 # pairsAutoCorr = numpy.zeros(nChannels, dtype = 'int')*numpy.nan
1839 1840 #
1840 1841 # for l in range(len(pairsList)):
1841 1842 # firstChannel = pairsList[l][0]
1842 1843 # secondChannel = pairsList[l][1]
1843 1844 #
1844 1845 # #Getting the autocorrelation pairs
1845 1846 # if firstChannel == secondChannel:
1846 1847 # pairsAutoCorr[firstChannel] = int(l)
1847 1848 #
1848 1849 # pairsAutoCorr = pairsAutoCorr.astype(int)
1849 1850 #
1850 1851 # pairsCrossCorr = range(len(pairsList))
1851 1852 # pairsCrossCorr = numpy.delete(pairsCrossCorr,pairsAutoCorr)
1852 1853 #
1853 1854 # return pairsAutoCorr, pairsCrossCorr
1854 1855
1855 1856 # def techniqueSA(self, pairsSelected, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, lagTRange, correctFactor):
1856 1857 def techniqueSA(self, kwargs):
1857 1858
1858 1859 """
1859 1860 Function that implements Spaced Antenna (SA) technique.
1860 1861
1861 1862 Input: Radial velocities, Direction cosines (x and y) of the Beam, Antenna azimuth,
1862 1863 Direction correction (if necessary), Ranges and SNR
1863 1864
1864 1865 Output: Winds estimation (Zonal, Meridional and Vertical)
1865 1866
1866 1867 Parameters affected: Winds
1867 1868 """
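# Expected kwargs (as read below): 'positionX', 'positionY', 'azimuth',
# 'groupList', 'tau', '_lambda' and optionally 'correctFactor'. The vertical
# component is recovered from the lag-1 ACF phase as w = -lambda*phase/(4*pi*Ts)
# (see __calculateVelVer); the horizontal components come from the cross- and
# auto-correlation lags via __calculateVelHorDir.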
1868 1869 position_x = kwargs['positionX']
1869 1870 position_y = kwargs['positionY']
1870 1871 azimuth = kwargs['azimuth']
1871 1872
1872 1873 if 'correctFactor' in kwargs:
1873 1874 correctFactor = kwargs['correctFactor']
1874 1875 else:
1875 1876 correctFactor = 1
1876 1877
1877 1878 groupList = kwargs['groupList']
1878 1879 pairs_ccf = groupList[1]
1879 1880 tau = kwargs['tau']
1880 1881 _lambda = kwargs['_lambda']
1881 1882
1882 1883 #Cross Correlation pairs obtained
1883 1884 # pairsAutoCorr, pairsCrossCorr = self.__getPairsAutoCorr(pairssList, nChannels)
1884 1885 # pairsArray = numpy.array(pairsList)[pairsCrossCorr]
1885 1886 # pairsSelArray = numpy.array(pairsSelected)
1886 1887 # pairs = []
1887 1888 #
1888 1889 # #Wind estimation pairs obtained
1889 1890 # for i in range(pairsSelArray.shape[0]/2):
1890 1891 # ind1 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i], axis = 1))[0][0]
1891 1892 # ind2 = numpy.where(numpy.all(pairsArray == pairsSelArray[2*i + 1], axis = 1))[0][0]
1892 1893 # pairs.append((ind1,ind2))
1893 1894
1894 1895 indtau = tau.shape[0]//2
1895 1896 tau1 = tau[:indtau,:]
1896 1897 tau2 = tau[indtau:-1,:]
1897 1898 # tau1 = tau1[pairs,:]
1898 1899 # tau2 = tau2[pairs,:]
1899 1900 phase1 = tau[-1,:]
1900 1901
1901 1902 #---------------------------------------------------------------------
1902 1903 #Direct method
1903 1904 distx, disty, dist, ang = self.__calculateDistance(position_x, position_y, pairs_ccf,azimuth)
1904 1905 winds = self.__calculateVelHorDir(dist, tau1, tau2, ang)
1905 1906 winds = stats.nanmean(winds, axis=0)
1906 1907 #---------------------------------------------------------------------
1907 1908 #General method
1908 1909 # distx, disty, dist = self.calculateDistance(position_x,position_y,pairsCrossCorr, pairsList, azimuth)
1909 1910 # #Computing correlation function coefficients
1910 1911 # F,G,A,B,H = self.calculateCoef(tau1,tau2,distx,disty,n)
1911 1912 # #Computing velocities
1912 1913 # winds = self.calculateVelUV(F,G,A,B,H)
1913 1914
1914 1915 #---------------------------------------------------------------------
1915 1916 winds[2,:] = self.__calculateVelVer(phase1, lagTRange, _lambda)
1916 1917 winds = correctFactor*winds
1917 1918 return winds
1918 1919
1919 1920 def __checkTime(self, currentTime, paramInterval, outputInterval):
1920 1921
1921 1922 dataTime = currentTime + paramInterval
1922 1923 deltaTime = dataTime - self.__initime
1923 1924
1924 1925 if deltaTime >= outputInterval or deltaTime < 0:
1925 1926 self.__dataReady = True
1926 1927 return
1927 1928
1928 1929 def techniqueMeteors(self, arrayMeteor, meteorThresh, heightMin, heightMax):
1929 1930 '''
1930 1931 Function that implements winds estimation technique with detected meteors.
1931 1932
1932 1933 Input: Detected meteors, Minimum meteor quantity to wind estimation
1933 1934
1934 1935 Output: Winds estimation (Zonal and Meridional)
1935 1936
1936 1937 Parameters affected: Winds
1937 1938 '''
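# For each height bin, the zonal/meridional wind is obtained by least squares
# from the meteor radial velocities: vel_radial = l*u + m*v, with
# l = sin(zen)*sin(azim) and m = sin(zen)*cos(azim), solved below through the
# normal equations (A^T A)^-1 A^T vel.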
1938 1939 #Settings
1939 1940 nInt = (heightMax - heightMin)/2
1940 1941 nInt = int(nInt)
1941 1942 winds = numpy.zeros((2,nInt))*numpy.nan
1942 1943
1943 1944 #Filter errors
1944 1945 error = numpy.where(arrayMeteor[:,-1] == 0)[0]
1945 1946 finalMeteor = arrayMeteor[error,:]
1946 1947
1947 1948 #Meteor Histogram
1948 1949 finalHeights = finalMeteor[:,2]
1949 1950 hist = numpy.histogram(finalHeights, bins = nInt, range = (heightMin,heightMax))
1950 1951 nMeteorsPerI = hist[0]
1951 1952 heightPerI = hist[1]
1952 1953
1953 1954 #Sort of meteors
1954 1955 indSort = finalHeights.argsort()
1955 1956 finalMeteor2 = finalMeteor[indSort,:]
1956 1957
1957 1958 # Calculating winds
1958 1959 ind1 = 0
1959 1960 ind2 = 0
1960 1961
1961 1962 for i in range(nInt):
1962 1963 nMet = nMeteorsPerI[i]
1963 1964 ind1 = ind2
1964 1965 ind2 = ind1 + nMet
1965 1966
1966 1967 meteorAux = finalMeteor2[ind1:ind2,:]
1967 1968
1968 1969 if meteorAux.shape[0] >= meteorThresh:
1969 1970 vel = meteorAux[:, 6]
1970 1971 zen = meteorAux[:, 4]*numpy.pi/180
1971 1972 azim = meteorAux[:, 3]*numpy.pi/180
1972 1973
1973 1974 n = numpy.cos(zen)
1974 1975 # m = (1 - n**2)/(1 - numpy.tan(azim)**2)
1975 1976 # l = m*numpy.tan(azim)
1976 1977 l = numpy.sin(zen)*numpy.sin(azim)
1977 1978 m = numpy.sin(zen)*numpy.cos(azim)
1978 1979
1979 1980 A = numpy.vstack((l, m)).transpose()
1980 1981 A1 = numpy.dot(numpy.linalg.inv( numpy.dot(A.transpose(),A) ),A.transpose())
1981 1982 windsAux = numpy.dot(A1, vel)
1982 1983
1983 1984 winds[0,i] = windsAux[0]
1984 1985 winds[1,i] = windsAux[1]
1985 1986
1986 1987 return winds, heightPerI[:-1]
1987 1988
1988 1989 def techniqueNSM_SA(self, **kwargs):
1989 1990 metArray = kwargs['metArray']
1990 1991 heightList = kwargs['heightList']
1991 1992 timeList = kwargs['timeList']
1992 1993
1993 1994 rx_location = kwargs['rx_location']
1994 1995 groupList = kwargs['groupList']
1995 1996 azimuth = kwargs['azimuth']
1996 1997 dfactor = kwargs['dfactor']
1997 1998 k = kwargs['k']
1998 1999
1999 2000 azimuth1, dist = self.__calculateAzimuth1(rx_location, groupList, azimuth)
2000 2001 d = dist*dfactor
2001 2002 #Phase calculation
2002 2003 metArray1 = self.__getPhaseSlope(metArray, heightList, timeList)
2003 2004
2004 2005 metArray1[:,-2] = metArray1[:,-2]*metArray1[:,2]*1000/(k*d[metArray1[:,1].astype(int)]) #angles into velocities
2005 2006
2006 2007 velEst = numpy.zeros((heightList.size,2))*numpy.nan
2007 2008 azimuth1 = azimuth1*numpy.pi/180
2008 2009
2009 2010 for i in range(heightList.size):
2010 2011 h = heightList[i]
2011 2012 indH = numpy.where((metArray1[:,2] == h)&(numpy.abs(metArray1[:,-2]) < 100))[0]
2012 2013 metHeight = metArray1[indH,:]
2013 2014 if metHeight.shape[0] >= 2:
2014 2015 velAux = numpy.asmatrix(metHeight[:,-2]).T #Radial Velocities
2015 2016 iazim = metHeight[:,1].astype(int)
2016 2017 azimAux = numpy.asmatrix(azimuth1[iazim]).T #Azimuths
2017 2018 A = numpy.hstack((numpy.cos(azimAux),numpy.sin(azimAux)))
2018 2019 A = numpy.asmatrix(A)
2019 2020 A1 = numpy.linalg.pinv(A.transpose()*A)*A.transpose()
2020 2021 velHor = numpy.dot(A1,velAux)
2021 2022
2022 2023 velEst[i,:] = numpy.squeeze(velHor)
2023 2024 return velEst
2024 2025
2025 2026 def __getPhaseSlope(self, metArray, heightList, timeList):
2026 2027 meteorList = []
2027 2028 #utctime sec1 height SNR velRad ph0 ph1 ph2 coh0 coh1 coh2
2028 2029 #Putting back together the meteor matrix
2029 2030 utctime = metArray[:,0]
2030 2031 uniqueTime = numpy.unique(utctime)
2031 2032
2032 2033 phaseDerThresh = 0.5
2033 2034 ippSeconds = timeList[1] - timeList[0]
2034 2035 sec = numpy.where(timeList>1)[0][0]
2035 2036 nPairs = metArray.shape[1] - 6
2036 2037 nHeights = len(heightList)
2037 2038
2038 2039 for t in uniqueTime:
2039 2040 metArray1 = metArray[utctime==t,:]
2040 2041 # phaseDerThresh = numpy.pi/4 #reduce the phase threshold
2041 2042 tmet = metArray1[:,1].astype(int)
2042 2043 hmet = metArray1[:,2].astype(int)
2043 2044
2044 2045 metPhase = numpy.zeros((nPairs, heightList.size, timeList.size - 1))
2045 2046 metPhase[:,:] = numpy.nan
2046 2047 metPhase[:,hmet,tmet] = metArray1[:,6:].T
2047 2048
2048 2049 #Delete short trails
2049 2050 metBool = ~numpy.isnan(metPhase[0,:,:])
2050 2051 heightVect = numpy.sum(metBool, axis = 1)
2051 2052 metBool[heightVect<sec,:] = False
2052 2053 metPhase[:,heightVect<sec,:] = numpy.nan
2053 2054
2054 2055 #Derivative
2055 2056 metDer = numpy.abs(metPhase[:,:,1:] - metPhase[:,:,:-1])
2056 2057 phDerAux = numpy.dstack((numpy.full((nPairs,nHeights,1), False, dtype=bool),metDer > phaseDerThresh))
2057 2058 metPhase[phDerAux] = numpy.nan
2058 2059
2059 2060 #--------------------------METEOR DETECTION -----------------------------------------
2060 2061 indMet = numpy.where(numpy.any(metBool,axis=1))[0]
2061 2062
2062 2063 for p in numpy.arange(nPairs):
2063 2064 phase = metPhase[p,:,:]
2064 2065 phDer = metDer[p,:,:]
2065 2066
2066 2067 for h in indMet:
2067 2068 height = heightList[h]
2068 2069 phase1 = phase[h,:] #82
2069 2070 phDer1 = phDer[h,:]
2070 2071
2071 2072 phase1[~numpy.isnan(phase1)] = numpy.unwrap(phase1[~numpy.isnan(phase1)]) #Unwrap
2072 2073
2073 2074 indValid = numpy.where(~numpy.isnan(phase1))[0]
2074 2075 initMet = indValid[0]
2075 2076 endMet = 0
2076 2077
2077 2078 for i in range(len(indValid)-1):
2078 2079
2079 2080 #Time difference
2080 2081 inow = indValid[i]
2081 2082 inext = indValid[i+1]
2082 2083 idiff = inext - inow
2083 2084 #Phase difference
2084 2085 phDiff = numpy.abs(phase1[inext] - phase1[inow])
2085 2086
2086 2087 if idiff>sec or phDiff>numpy.pi/4 or inext==indValid[-1]: #End of Meteor
2087 2088 sizeTrail = inow - initMet + 1
2088 2089 if sizeTrail>3*sec: #Discard too-short meteor trails
2089 2090 x = numpy.arange(initMet,inow+1)*ippSeconds
2090 2091 y = phase1[initMet:inow+1]
2091 2092 ynnan = ~numpy.isnan(y)
2092 2093 x = x[ynnan]
2093 2094 y = y[ynnan]
2094 2095 slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
2095 2096 ylin = x*slope + intercept
2096 2097 rsq = r_value**2
2097 2098 if rsq > 0.5:
2098 2099 vel = slope#*height*1000/(k*d)
2099 2100 estAux = numpy.array([t, p, height, vel, rsq]) #trail time, pair, height, radial velocity, r^2
2100 2101 meteorList.append(estAux)
2101 2102 initMet = inext
2102 2103 metArray2 = numpy.array(meteorList)
2103 2104
2104 2105 return metArray2
2105 2106
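# Hedged sketch (illustrative only): each meteor trail above is reduced to the
# slope of its unwrapped phase versus time, with r**2 as the quality figure.
# _examplePhaseSlope is a hypothetical helper assuming a 1-D phase series (rad)
# sampled every ippSeconds seconds, possibly containing NaNs.
@staticmethod
def _examplePhaseSlope(phase, ippSeconds):
    t = numpy.arange(phase.size)*ippSeconds
    valid = ~numpy.isnan(phase)
    unwrapped = numpy.unwrap(phase[valid])
    slope, intercept, r_value, p_value, std_err = stats.linregress(t[valid], unwrapped)
    return slope, r_value**2            #keep fits with r**2 > 0.5, as above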
2106 2107 def __calculateAzimuth1(self, rx_location, pairslist, azimuth0):
2107 2108
2108 2109 azimuth1 = numpy.zeros(len(pairslist))
2109 2110 dist = numpy.zeros(len(pairslist))
2110 2111
2111 2112 for i in range(len(rx_location)):
2112 2113 ch0 = pairslist[i][0]
2113 2114 ch1 = pairslist[i][1]
2114 2115
2115 2116 diffX = rx_location[ch0][0] - rx_location[ch1][0]
2116 2117 diffY = rx_location[ch0][1] - rx_location[ch1][1]
2117 2118 azimuth1[i] = numpy.arctan2(diffY,diffX)*180/numpy.pi
2118 2119 dist[i] = numpy.sqrt(diffX**2 + diffY**2)
2119 2120
2120 2121 azimuth1 -= azimuth0
2121 2122 return azimuth1, dist
2122 2123
2123 2124 def techniqueNSM_DBS(self, **kwargs):
2124 2125 metArray = kwargs['metArray']
2125 2126 heightList = kwargs['heightList']
2126 2127 timeList = kwargs['timeList']
2127 2128 azimuth = kwargs['azimuth']
2128 2129 theta_x = numpy.array(kwargs['theta_x'])
2129 2130 theta_y = numpy.array(kwargs['theta_y'])
2130 2131
2131 2132 utctime = metArray[:,0]
2132 2133 cmet = metArray[:,1].astype(int)
2133 2134 hmet = metArray[:,3].astype(int)
2134 2135 SNRmet = metArray[:,4]
2135 2136 vmet = metArray[:,5]
2136 2137 spcmet = metArray[:,6]
2137 2138
2138 2139 nChan = numpy.max(cmet) + 1
2139 2140 nHeights = len(heightList)
2140 2141
2141 2142 azimuth_arr, zenith_arr, dir_cosu, dir_cosv, dir_cosw = self.__calculateAngles(theta_x, theta_y, azimuth)
2142 2143 hmet = heightList[hmet]
2143 2144 h1met = hmet*numpy.cos(zenith_arr[cmet]) #Corrected heights
2144 2145
2145 2146 velEst = numpy.zeros((heightList.size,2))*numpy.nan
2146 2147
2147 2148 for i in range(nHeights - 1):
2148 2149 hmin = heightList[i]
2149 2150 hmax = heightList[i + 1]
2150 2151
2151 2152 thisH = (h1met>=hmin) & (h1met<hmax) & (cmet!=2) & (SNRmet>8) & (vmet<50) & (spcmet<10)
2152 2153 indthisH = numpy.where(thisH)
2153 2154
2154 2155 if numpy.size(indthisH) > 3:
2155 2156
2156 2157 vel_aux = vmet[thisH]
2157 2158 chan_aux = cmet[thisH]
2158 2159 cosu_aux = dir_cosu[chan_aux]
2159 2160 cosv_aux = dir_cosv[chan_aux]
2160 2161 cosw_aux = dir_cosw[chan_aux]
2161 2162
2162 2163 nch = numpy.size(numpy.unique(chan_aux))
2163 2164 if nch > 1:
2164 2165 A = self.__calculateMatA(cosu_aux, cosv_aux, cosw_aux, True)
2165 2166 velEst[i,:] = numpy.dot(A,vel_aux)
2166 2167
2167 2168 return velEst
2168 2169
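# Hedged sketch (illustrative only): per height bin, techniqueNSM_DBS maps the
# selected radial velocities onto (u, v) through the beam direction cosines.
# _exampleDBSSolve is a hypothetical helper showing an equivalent pseudo-inverse
# solve, assuming per-echo direction cosines and radial velocities in m/s.
@staticmethod
def _exampleDBSSolve(cosu, cosv, vrad):
    A = numpy.vstack((cosu, cosv)).transpose()
    return numpy.dot(numpy.linalg.pinv(A), vrad)    #(u, v) estimate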
2169 2170 def run(self, dataOut, technique, nHours=1, hmin=70, hmax=110, **kwargs):
2170 2171
2171 2172 param = dataOut.data_param
2172 2173 if dataOut.abscissaList is not None:
2173 2174 absc = dataOut.abscissaList[:-1]
2174 2175 # noise = dataOut.noise
2175 2176 heightList = dataOut.heightList
2176 2177 SNR = dataOut.data_snr
2177 2178
2178 2179 if technique == 'DBS':
2179 2180
2180 2181 kwargs['velRadial'] = param[:,1,:] #Radial velocity
2181 2182 kwargs['heightList'] = heightList
2182 2183 kwargs['SNR'] = SNR
2183 2184
2184 2185 dataOut.data_output, dataOut.heightList, dataOut.data_snr = self.techniqueDBS(kwargs) #DBS Function
2185 2186 dataOut.utctimeInit = dataOut.utctime
2186 2187 dataOut.outputInterval = dataOut.paramInterval
2187 2188
2188 2189 elif technique == 'SA':
2189 2190
2190 2191 #Parameters
2191 2192 # position_x = kwargs['positionX']
2192 2193 # position_y = kwargs['positionY']
2193 2194 # azimuth = kwargs['azimuth']
2194 2195 #
2195 2196 # if kwargs.has_key('crosspairsList'):
2196 2197 # pairs = kwargs['crosspairsList']
2197 2198 # else:
2198 2199 # pairs = None
2199 2200 #
2200 2201 # if kwargs.has_key('correctFactor'):
2201 2202 # correctFactor = kwargs['correctFactor']
2202 2203 # else:
2203 2204 # correctFactor = 1
2204 2205
2205 2206 # tau = dataOut.data_param
2206 2207 # _lambda = dataOut.C/dataOut.frequency
2207 2208 # pairsList = dataOut.groupList
2208 2209 # nChannels = dataOut.nChannels
2209 2210
2210 2211 kwargs['groupList'] = dataOut.groupList
2211 2212 kwargs['tau'] = dataOut.data_param
2212 2213 kwargs['_lambda'] = dataOut.C/dataOut.frequency
2213 2214 # dataOut.data_output = self.techniqueSA(pairs, pairsList, nChannels, tau, azimuth, _lambda, position_x, position_y, absc, correctFactor)
2214 2215 dataOut.data_output = self.techniqueSA(kwargs)
2215 2216 dataOut.utctimeInit = dataOut.utctime
2216 2217 dataOut.outputInterval = dataOut.timeInterval
2217 2218
2218 2219 elif technique == 'Meteors':
2219 2220 dataOut.flagNoData = True
2220 2221 self.__dataReady = False
2221 2222
2222 2223 if 'nHours' in kwargs:
2223 2224 nHours = kwargs['nHours']
2224 2225 else:
2225 2226 nHours = 1
2226 2227
2227 2228 if 'meteorsPerBin' in kwargs:
2228 2229 meteorThresh = kwargs['meteorsPerBin']
2229 2230 else:
2230 2231 meteorThresh = 6
2231 2232
2232 2233 if 'hmin' in kwargs:
2233 2234 hmin = kwargs['hmin']
2234 2235 else: hmin = 70
2235 2236 if 'hmax' in kwargs:
2236 2237 hmax = kwargs['hmax']
2237 2238 else: hmax = 110
2238 2239
2239 2240 dataOut.outputInterval = nHours*3600
2240 2241
2241 2242 if self.__isConfig == False:
2242 2243 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
2243 2244 #Get Initial LTC time
2244 2245 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
2245 2246 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
2246 2247
2247 2248 self.__isConfig = True
2248 2249
2249 2250 if self.__buffer is None:
2250 2251 self.__buffer = dataOut.data_param
2251 2252 self.__firstdata = copy.copy(dataOut)
2252 2253
2253 2254 else:
2254 2255 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
2255 2256
2256 2257 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
2257 2258
2258 2259 if self.__dataReady:
2259 2260 dataOut.utctimeInit = self.__initime
2260 2261
2261 2262 self.__initime += dataOut.outputInterval #to erase time offset
2262 2263
2263 2264 dataOut.data_output, dataOut.heightList = self.techniqueMeteors(self.__buffer, meteorThresh, hmin, hmax)
2264 2265 dataOut.flagNoData = False
2265 2266 self.__buffer = None
2266 2267
2267 2268 elif technique == 'Meteors1':
2268 2269 dataOut.flagNoData = True
2269 2270 self.__dataReady = False
2270 2271
2271 2272 if 'nMins' in kwargs:
2272 2273 nMins = kwargs['nMins']
2273 2274 else: nMins = 20
2274 2275 if 'rx_location' in kwargs:
2275 2276 rx_location = kwargs['rx_location']
2276 2277 else: rx_location = [(0,1),(1,1),(1,0)]
2277 2278 if 'azimuth' in kwargs:
2278 2279 azimuth = kwargs['azimuth']
2279 2280 else: azimuth = 51.06
2280 2281 if 'dfactor' in kwargs:
2281 2282 dfactor = kwargs['dfactor']
2282 2283 if 'mode' in kwargs:
2283 2284 mode = kwargs['mode']
2284 2285 else: mode = 'SA'
2285 2286 if 'theta_x' in kwargs:
2286 2287 theta_x = kwargs['theta_x']
2287 2288 if 'theta_y' in kwargs:
2288 2289 theta_y = kwargs['theta_y']
2289 2290
2290 2291 #TODO: remove this later
2291 2292 if dataOut.groupList is None:
2292 2293 dataOut.groupList = [(0,1),(0,2),(1,2)]
2293 2294 groupList = dataOut.groupList
2294 2295 C = 3e8
2295 2296 freq = 50e6
2296 2297 lamb = C/freq
2297 2298 k = 2*numpy.pi/lamb
2298 2299
2299 2300 timeList = dataOut.abscissaList
2300 2301 heightList = dataOut.heightList
2301 2302
2302 2303 if self.__isConfig == False:
2303 2304 dataOut.outputInterval = nMins*60
2304 2305 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
2305 2306 #Get Initial LTC time
2306 2307 initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
2307 2308 minuteAux = initime.minute
2308 2309 minuteNew = int(numpy.floor(minuteAux/nMins)*nMins)
2309 2310 self.__initime = (initime.replace(minute = minuteNew, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
2310 2311
2311 2312 self.__isConfig = True
2312 2313
2313 2314 if self.__buffer is None:
2314 2315 self.__buffer = dataOut.data_param
2315 2316 self.__firstdata = copy.copy(dataOut)
2316 2317
2317 2318 else:
2318 2319 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
2319 2320
2320 2321 self.__checkTime(dataOut.utctime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
2321 2322
2322 2323 if self.__dataReady:
2323 2324 dataOut.utctimeInit = self.__initime
2324 2325 self.__initime += dataOut.outputInterval #to erase time offset
2325 2326
2326 2327 metArray = self.__buffer
2327 2328 if mode == 'SA':
2328 2329 dataOut.data_output = self.techniqueNSM_SA(rx_location=rx_location, groupList=groupList, azimuth=azimuth, dfactor=dfactor, k=k,metArray=metArray, heightList=heightList,timeList=timeList)
2329 2330 elif mode == 'DBS':
2330 2331 dataOut.data_output = self.techniqueNSM_DBS(metArray=metArray,heightList=heightList,timeList=timeList, azimuth=azimuth, theta_x=theta_x, theta_y=theta_y)
2331 2332 dataOut.data_output = dataOut.data_output.T
2332 2333 dataOut.flagNoData = False
2333 2334 self.__buffer = None
2334 2335
2335 2336 return
2336 2337
2337 2338 class EWDriftsEstimation(Operation):
2338 2339
2339 2340 def __init__(self):
2340 2341 Operation.__init__(self)
2341 2342
2342 2343 def __correctValues(self, heiRang, phi, velRadial, SNR):
2343 2344 listPhi = phi.tolist()
2344 2345 maxid = listPhi.index(max(listPhi))
2345 2346 minid = listPhi.index(min(listPhi))
2346 2347
2347 2348 rango = list(range(len(phi)))
2348 2349 # rango = numpy.delete(rango,maxid)
2349 2350
2350 2351 heiRang1 = heiRang*math.cos(phi[maxid])
2351 2352 heiRangAux = heiRang*math.cos(phi[minid])
2352 2353 indOut = (heiRang1 < heiRangAux[0]).nonzero()
2353 2354 heiRang1 = numpy.delete(heiRang1,indOut)
2354 2355
2355 2356 velRadial1 = numpy.zeros([len(phi),len(heiRang1)])
2356 2357 SNR1 = numpy.zeros([len(phi),len(heiRang1)])
2357 2358
2358 2359 for i in rango:
2359 2360 x = heiRang*math.cos(phi[i])
2360 2361 y1 = velRadial[i,:]
2361 2362 f1 = interpolate.interp1d(x,y1,kind = 'cubic')
2362 2363
2363 2364 x1 = heiRang1
2364 2365 y11 = f1(x1)
2365 2366
2366 2367 y2 = SNR[i,:]
2367 2368 f2 = interpolate.interp1d(x,y2,kind = 'cubic')
2368 2369 y21 = f2(x1)
2369 2370
2370 2371 velRadial1[i,:] = y11
2371 2372 SNR1[i,:] = y21
2372 2373
2373 2374 return heiRang1, velRadial1, SNR1
2374 2375
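# Hedged sketch (illustrative only): __correctValues projects each oblique beam
# onto a common vertical grid (range times cos(zenith)) and re-samples with a
# cubic spline. _exampleBeamRegrid is a hypothetical helper for one profile.
@staticmethod
def _exampleBeamRegrid(heiRang, profile, zenith_rad, targetHeights):
    x = heiRang*numpy.cos(zenith_rad)               #ranges -> vertical heights
    f = interpolate.interp1d(x, profile, kind='cubic')
    return f(targetHeights)                         #profile on the common grid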
2375 2376 def run(self, dataOut, zenith, zenithCorrection):
2376 2377 heiRang = dataOut.heightList
2377 2378 velRadial = dataOut.data_param[:,3,:]
2378 2379 SNR = dataOut.data_snr
2379 2380
2380 2381 zenith = numpy.array(zenith)
2381 2382 zenith -= zenithCorrection
2382 2383 zenith *= numpy.pi/180
2383 2384
2384 2385 heiRang1, velRadial1, SNR1 = self.__correctValues(heiRang, numpy.abs(zenith), velRadial, SNR)
2385 2386
2386 2387 alp = zenith[0]
2387 2388 bet = zenith[1]
2388 2389
2389 2390 w_w = velRadial1[0,:]
2390 2391 w_e = velRadial1[1,:]
2391 2392
2392 2393 w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/(numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp))
2393 2394 u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/(numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp))
2394 2395
2395 2396 winds = numpy.vstack((u,w))
2396 2397
2397 2398 dataOut.heightList = heiRang1
2398 2399 dataOut.data_output = winds
2399 2400 dataOut.data_snr = SNR1
2400 2401
2401 2402 dataOut.utctimeInit = dataOut.utctime
2402 2403 dataOut.outputInterval = dataOut.timeInterval
2403 2404 return
2404 2405
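# Hedged sketch (illustrative only, hypothetical module-level helper): the u/w
# decomposition in EWDriftsEstimation.run combines the radial velocities of the
# west and east beams (zenith angles alp and bet, in radians) exactly as below.
def _example_ew_decomposition(w_w, w_e, alp, bet):
    den_w = numpy.cos(alp)*numpy.sin(bet) - numpy.cos(bet)*numpy.sin(alp)
    den_u = numpy.sin(alp)*numpy.cos(bet) - numpy.sin(bet)*numpy.cos(alp)
    w = (w_w*numpy.sin(bet) - w_e*numpy.sin(alp))/den_w    #vertical drift
    u = (w_w*numpy.cos(bet) - w_e*numpy.cos(alp))/den_u    #zonal drift
    return u, w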
2405 2406 #--------------- Non Specular Meteor ----------------
2406 2407
2407 2408 class NonSpecularMeteorDetection(Operation):
2408 2409
2409 2410 def run(self, dataOut, mode, SNRthresh=8, phaseDerThresh=0.5, cohThresh=0.8, allData = False):
2410 2411 data_acf = dataOut.data_pre[0]
2411 2412 data_ccf = dataOut.data_pre[1]
2412 2413 pairsList = dataOut.groupList[1]
2413 2414
2414 2415 lamb = dataOut.C/dataOut.frequency
2415 2416 tSamp = dataOut.ippSeconds*dataOut.nCohInt
2416 2417 paramInterval = dataOut.paramInterval
2417 2418
2418 2419 nChannels = data_acf.shape[0]
2419 2420 nLags = data_acf.shape[1]
2420 2421 nProfiles = data_acf.shape[2]
2421 2422 nHeights = dataOut.nHeights
2422 2423 nCohInt = dataOut.nCohInt
2423 2424 sec = numpy.round(nProfiles/dataOut.paramInterval)
2424 2425 heightList = dataOut.heightList
2425 2426 ippSeconds = dataOut.ippSeconds*dataOut.nCohInt*dataOut.nAvg
2426 2427 utctime = dataOut.utctime
2427 2428
2428 2429 dataOut.abscissaList = numpy.arange(0,paramInterval+ippSeconds,ippSeconds)
2429 2430
2430 2431 #------------------------ SNR --------------------------------------
2431 2432 power = data_acf[:,0,:,:].real
2432 2433 noise = numpy.zeros(nChannels)
2433 2434 SNR = numpy.zeros(power.shape)
2434 2435 for i in range(nChannels):
2435 2436 noise[i] = hildebrand_sekhon(power[i,:], nCohInt)
2436 2437 SNR[i] = (power[i]-noise[i])/noise[i]
2437 2438 SNRm = numpy.nanmean(SNR, axis = 0)
2438 2439 SNRdB = 10*numpy.log10(SNR)
2439 2440
2440 2441 if mode == 'SA':
2441 2442 dataOut.groupList = dataOut.groupList[1]
2442 2443 nPairs = data_ccf.shape[0]
2443 2444 #---------------------- Coherence and Phase --------------------------
2444 2445 phase = numpy.zeros(data_ccf[:,0,:,:].shape)
2445 2446 # phase1 = numpy.copy(phase)
2446 2447 coh1 = numpy.zeros(data_ccf[:,0,:,:].shape)
2447 2448
2448 2449 for p in range(nPairs):
2449 2450 ch0 = pairsList[p][0]
2450 2451 ch1 = pairsList[p][1]
2451 2452 ccf = data_ccf[p,0,:,:]/numpy.sqrt(data_acf[ch0,0,:,:]*data_acf[ch1,0,:,:])
2452 2453 phase[p,:,:] = ndimage.median_filter(numpy.angle(ccf), size = (5,1)) #median filter
2453 2454 # phase1[p,:,:] = numpy.angle(ccf) #median filter
2454 2455 coh1[p,:,:] = ndimage.median_filter(numpy.abs(ccf), 5) #median filter
2455 2456 # coh1[p,:,:] = numpy.abs(ccf) #median filter
2456 2457 coh = numpy.nanmax(coh1, axis = 0)
2457 2458 # struc = numpy.ones((5,1))
2458 2459 # coh = ndimage.morphology.grey_dilation(coh, size=(10,1))
2459 2460 #---------------------- Radial Velocity ----------------------------
2460 2461 phaseAux = numpy.mean(numpy.angle(data_acf[:,1,:,:]), axis = 0)
2461 2462 velRad = phaseAux*lamb/(4*numpy.pi*tSamp)
2462 2463
2463 2464 if allData:
2464 2465 boolMetFin = ~numpy.isnan(SNRm)
2465 2466 # coh[:-1,:] = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
2466 2467 else:
2467 2468 #------------------------ Meteor mask ---------------------------------
2468 2469 # #SNR mask
2469 2470 # boolMet = (SNRdB>SNRthresh)#|(~numpy.isnan(SNRdB))
2470 2471 #
2471 2472 # #Erase small objects
2472 2473 # boolMet1 = self.__erase_small(boolMet, 2*sec, 5)
2473 2474 #
2474 2475 # auxEEJ = numpy.sum(boolMet1,axis=0)
2475 2476 # indOver = auxEEJ>nProfiles*0.8 #Use this later
2476 2477 # indEEJ = numpy.where(indOver)[0]
2477 2478 # indNEEJ = numpy.where(~indOver)[0]
2478 2479 #
2479 2480 # boolMetFin = boolMet1
2480 2481 #
2481 2482 # if indEEJ.size > 0:
2482 2483 # boolMet1[:,indEEJ] = False #Erase heights with EEJ
2483 2484 #
2484 2485 # boolMet2 = coh > cohThresh
2485 2486 # boolMet2 = self.__erase_small(boolMet2, 2*sec,5)
2486 2487 #
2487 2488 # #Final Meteor mask
2488 2489 # boolMetFin = boolMet1|boolMet2
2489 2490
2490 2491 #Coherence mask
2491 2492 boolMet1 = coh > 0.75
2492 2493 struc = numpy.ones((30,1))
2493 2494 boolMet1 = ndimage.morphology.binary_dilation(boolMet1, structure=struc)
2494 2495
2495 2496 #Derivative mask
2496 2497 derPhase = numpy.nanmean(numpy.abs(phase[:,1:,:] - phase[:,:-1,:]),axis=0)
2497 2498 boolMet2 = derPhase < 0.2
2498 2499 # boolMet2 = ndimage.morphology.binary_opening(boolMet2)
2499 2500 # boolMet2 = ndimage.morphology.binary_closing(boolMet2, structure = numpy.ones((10,1)))
2500 2501 boolMet2 = ndimage.median_filter(boolMet2,size=5)
2501 2502 boolMet2 = numpy.vstack((boolMet2,numpy.full((1,nHeights), True, dtype=bool)))
2502 2503 # #Final mask
2503 2504 # boolMetFin = boolMet2
2504 2505 boolMetFin = boolMet1&boolMet2
2505 2506 # boolMetFin = ndimage.morphology.binary_dilation(boolMetFin)
2506 2507 #Creating data_param
2507 2508 coordMet = numpy.where(boolMetFin)
2508 2509
2509 2510 tmet = coordMet[0]
2510 2511 hmet = coordMet[1]
2511 2512
2512 2513 data_param = numpy.zeros((tmet.size, 6 + nPairs))
2513 2514 data_param[:,0] = utctime
2514 2515 data_param[:,1] = tmet
2515 2516 data_param[:,2] = hmet
2516 2517 data_param[:,3] = SNRm[tmet,hmet]
2517 2518 data_param[:,4] = velRad[tmet,hmet]
2518 2519 data_param[:,5] = coh[tmet,hmet]
2519 2520 data_param[:,6:] = phase[:,tmet,hmet].T
2520 2521
2521 2522 elif mode == 'DBS':
2522 2523 dataOut.groupList = numpy.arange(nChannels)
2523 2524
2524 2525 #Radial Velocities
2525 2526 phase = numpy.angle(data_acf[:,1,:,:])
2526 2527 # phase = ndimage.median_filter(numpy.angle(data_acf[:,1,:,:]), size = (1,5,1))
2527 2528 velRad = phase*lamb/(4*numpy.pi*tSamp)
2528 2529
2529 2530 #Spectral width
2530 2531 # acf1 = ndimage.median_filter(numpy.abs(data_acf[:,1,:,:]), size = (1,5,1))
2531 2532 # acf2 = ndimage.median_filter(numpy.abs(data_acf[:,2,:,:]), size = (1,5,1))
2532 2533 acf1 = data_acf[:,1,:,:]
2533 2534 acf2 = data_acf[:,2,:,:]
2534 2535
2535 2536 spcWidth = (lamb/(2*numpy.sqrt(6)*numpy.pi*tSamp))*numpy.sqrt(numpy.log(acf1/acf2))
2536 2537 # velRad = ndimage.median_filter(velRad, size = (1,5,1))
2537 2538 if allData:
2538 2539 boolMetFin = ~numpy.isnan(SNRdB)
2539 2540 else:
2540 2541 #SNR
2541 2542 boolMet1 = (SNRdB>SNRthresh) #SNR mask
2542 2543 boolMet1 = ndimage.median_filter(boolMet1, size=(1,5,5))
2543 2544
2544 2545 #Radial velocity
2545 2546 boolMet2 = numpy.abs(velRad) < 20
2546 2547 boolMet2 = ndimage.median_filter(boolMet2, (1,5,5))
2547 2548
2548 2549 #Spectral Width
2549 2550 boolMet3 = spcWidth < 30
2550 2551 boolMet3 = ndimage.median_filter(boolMet3, (1,5,5))
2551 2552 # boolMetFin = self.__erase_small(boolMet1, 10,5)
2552 2553 boolMetFin = boolMet1&boolMet2&boolMet3
2553 2554
2554 2555 #Creating data_param
2555 2556 coordMet = numpy.where(boolMetFin)
2556 2557
2557 2558 cmet = coordMet[0]
2558 2559 tmet = coordMet[1]
2559 2560 hmet = coordMet[2]
2560 2561
2561 2562 data_param = numpy.zeros((tmet.size, 7))
2562 2563 data_param[:,0] = utctime
2563 2564 data_param[:,1] = cmet
2564 2565 data_param[:,2] = tmet
2565 2566 data_param[:,3] = hmet
2566 2567 data_param[:,4] = SNR[cmet,tmet,hmet].T
2567 2568 data_param[:,5] = velRad[cmet,tmet,hmet].T
2568 2569 data_param[:,6] = spcWidth[cmet,tmet,hmet].T
2569 2570
2570 2571 # self.dataOut.data_param = data_int
2571 2572 if len(data_param) == 0:
2572 2573 dataOut.flagNoData = True
2573 2574 else:
2574 2575 dataOut.data_param = data_param
2575 2576
2576 2577 def __erase_small(self, binArray, threshX, threshY):
2577 2578 labarray, numfeat = ndimage.measurements.label(binArray)
2578 2579 binArray1 = numpy.copy(binArray)
2579 2580
2580 2581 for i in range(1,numfeat + 1):
2581 2582 auxBin = (labarray==i)
2582 2583 auxSize = auxBin.sum()
2583 2584
2584 2585 x,y = numpy.where(auxBin)
2585 2586 widthX = x.max() - x.min()
2586 2587 widthY = y.max() - y.min()
2587 2588
2588 2589 #width X: 3 seg -> 12.5*3
2589 2590 #width Y:
2590 2591
2591 2592 if (auxSize < 50) or (widthX < threshX) or (widthY < threshY):
2592 2593 binArray1[auxBin] = False
2593 2594
2594 2595 return binArray1
2595 2596
2596 2597 #--------------- Specular Meteor ----------------
2597 2598
2598 2599 class SMDetection(Operation):
2599 2600 '''
2600 2601 Function DetectMeteors()
2601 2602 Project developed with paper:
2602 2603 HOLDSWORTH ET AL. 2004
2603 2604
2604 2605 Input:
2605 2606 self.dataOut.data_pre
2606 2607
2607 2608 centerReceiverIndex: Index of the channel used as the center receiver
2608 2609
2609 2610 hei_ref: Height reference for the Beacon signal extraction
2610 2611 tauindex:
2611 2612 predefinedPhaseShifts: Predefined phase offset for the voltage signals
2612 2613
2613 2614 cohDetection: Whether or not to use coherent detection
2614 2615 cohDet_timeStep: Coherent Detection calculation time step
2615 2616 cohDet_thresh: Coherent Detection phase threshold to correct phases
2616 2617
2617 2618 noise_timeStep: Noise calculation time step
2618 2619 noise_multiple: Noise multiple to define signal threshold
2619 2620
2620 2621 multDet_timeLimit: Multiple Detection Removal time limit in seconds
2621 2622 multDet_rangeLimit: Multiple Detection Removal range limit in km
2622 2623
2623 2624 phaseThresh: Maximum phase difference between receivers for a detection to be considered a meteor
2624 2625 SNRThresh: Minimum SNR of the meteor signal for it to be considered a meteor
2625 2626
2626 2627 hmin: Minimum height of a meteor for it to be used in the subsequent wind estimation
2627 2628 hmax: Maximum height of a meteor for it to be used in the subsequent wind estimation
2628 2629 azimuth: Azimuth angle correction
2629 2630
2630 2631 Affected:
2631 2632 self.dataOut.data_param
2632 2633
2633 2634 Rejection Criteria (Errors):
2634 2635 0: No error; analysis OK
2635 2636 1: SNR < SNR threshold
2636 2637 2: angle of arrival (AOA) ambiguously determined
2637 2638 3: AOA estimate not feasible
2638 2639 4: Large difference in AOAs obtained from different antenna baselines
2639 2640 5: echo at start or end of time series
2640 2641 6: echo less than 5 samples long; too short for analysis
2641 2642 7: echo rise exceeds 0.3s
2642 2643 8: echo decay time less than twice rise time
2643 2644 9: large power level before echo
2644 2645 10: large power level after echo
2645 2646 11: poor fit to amplitude for estimation of decay time
2646 2647 12: poor fit to CCF phase variation for estimation of radial drift velocity
2647 2648 13: height unresolvable echo: no valid height within 70 to 110 km
2648 2649 14: height ambiguous echo: more than one possible height within 70 to 110 km
2649 2650 15: radial drift velocity or projected horizontal velocity exceeds 200 m/s
2650 2651 16: oscillatory echo, indicating the event is most likely not an underdense echo
2651 2652
2652 2653 17: phase difference in meteor Reestimation
2653 2654
2654 2655 Data Storage:
2655 2656 Meteors for Wind Estimation (8):
2656 2657 Utc Time | Range Height
2657 2658 Azimuth Zenith errorCosDir
2658 2659 VelRad errorVelRad
2659 2660 Phase0 Phase1 Phase2 Phase3
2660 2661 TypeError
2661 2662
2662 2663 '''
2663 2664
2664 2665 def run(self, dataOut, hei_ref = None, tauindex = 0,
2665 2666 phaseOffsets = None,
2666 2667 cohDetection = False, cohDet_timeStep = 1, cohDet_thresh = 25,
2667 2668 noise_timeStep = 4, noise_multiple = 4,
2668 2669 multDet_timeLimit = 1, multDet_rangeLimit = 3,
2669 2670 phaseThresh = 20, SNRThresh = 5,
2670 2671 hmin = 50, hmax=150, azimuth = 0,
2671 2672 channelPositions = None) :
2672 2673
2673 2674
2674 2675 #Getting Pairslist
2675 2676 if channelPositions is None:
2676 2677 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
2677 2678 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Star (Estrella) configuration
2678 2679 meteorOps = SMOperations()
2679 2680 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
2680 2681 heiRang = dataOut.heightList
2681 2682 #Get Beacon signal - No Beacon signal anymore
2682 2683 # newheis = numpy.where(self.dataOut.heightList>self.dataOut.radarControllerHeaderObj.Taus[tauindex])
2683 2684 #
2684 2685 # if hei_ref != None:
2685 2686 # newheis = numpy.where(self.dataOut.heightList>hei_ref)
2686 2687 #
2687 2688
2688 2689
2689 2690 #****************REMOVING HARDWARE PHASE DIFFERENCES***************
2690 2691 # see if the user put in pre defined phase shifts
2691 2692 voltsPShift = dataOut.data_pre.copy()
2692 2693
2693 2694 # if predefinedPhaseShifts != None:
2694 2695 # hardwarePhaseShifts = numpy.array(predefinedPhaseShifts)*numpy.pi/180
2695 2696 #
2696 2697 # # elif beaconPhaseShifts:
2697 2698 # # #get hardware phase shifts using beacon signal
2698 2699 # # hardwarePhaseShifts = self.__getHardwarePhaseDiff(self.dataOut.data_pre, pairslist, newheis, 10)
2699 2700 # # hardwarePhaseShifts = numpy.insert(hardwarePhaseShifts,centerReceiverIndex,0)
2700 2701 #
2701 2702 # else:
2702 2703 # hardwarePhaseShifts = numpy.zeros(5)
2703 2704 #
2704 2705 # voltsPShift = numpy.zeros((self.dataOut.data_pre.shape[0],self.dataOut.data_pre.shape[1],self.dataOut.data_pre.shape[2]), dtype = 'complex')
2705 2706 # for i in range(self.dataOut.data_pre.shape[0]):
2706 2707 # voltsPShift[i,:,:] = self.__shiftPhase(self.dataOut.data_pre[i,:,:], hardwarePhaseShifts[i])
2707 2708
2708 2709 #******************END OF REMOVING HARDWARE PHASE DIFFERENCES*********
2709 2710
2710 2711 #Remove DC
2711 2712 voltsDC = numpy.mean(voltsPShift,1)
2712 2713 voltsDC = numpy.mean(voltsDC,1)
2713 2714 for i in range(voltsDC.shape[0]):
2714 2715 voltsPShift[i] = voltsPShift[i] - voltsDC[i]
2715 2716
2716 2717 #Don't consider the last heights; they're used to calculate the hardware phase shift
2717 2718 # voltsPShift = voltsPShift[:,:,:newheis[0][0]]
2718 2719
2719 2720 #************ FIND POWER OF DATA W/COH OR NON COH DETECTION (3.4) **********
2720 2721 #Coherent Detection
2721 2722 if cohDetection:
2722 2723 #use coherent detection to get the net power
2723 2724 cohDet_thresh = cohDet_thresh*numpy.pi/180
2724 2725 voltsPShift = self.__coherentDetection(voltsPShift, cohDet_timeStep, dataOut.timeInterval, pairslist0, cohDet_thresh)
2725 2726
2726 2727 #Non-coherent detection!
2727 2728 powerNet = numpy.nansum(numpy.abs(voltsPShift[:,:,:])**2,0)
2728 2729 #********** END OF COH/NON-COH POWER CALCULATION**********************
2729 2730
2730 2731 #********** FIND THE NOISE LEVEL AND POSSIBLE METEORS ****************
2731 2732 #Get noise
2732 2733 noise, noise1 = self.__getNoise(powerNet, noise_timeStep, dataOut.timeInterval)
2733 2734 # noise = self.getNoise1(powerNet, noise_timeStep, self.dataOut.timeInterval)
2734 2735 #Get signal threshold
2735 2736 signalThresh = noise_multiple*noise
2736 2737 #Meteor echoes detection
2737 2738 listMeteors = self.__findMeteors(powerNet, signalThresh)
2738 2739 #******* END OF NOISE LEVEL AND POSSIBLE METEORS CALCULATION **********
2739 2740
2740 2741 #************** REMOVE MULTIPLE DETECTIONS (3.5) ***************************
2741 2742 #Parameters
2742 2743 heiRange = dataOut.heightList
2743 2744 rangeInterval = heiRange[1] - heiRange[0]
2744 2745 rangeLimit = multDet_rangeLimit/rangeInterval
2745 2746 timeLimit = multDet_timeLimit/dataOut.timeInterval
2746 2747 #Multiple detection removals
2747 2748 listMeteors1 = self.__removeMultipleDetections(listMeteors, rangeLimit, timeLimit)
2748 2749 #************ END OF REMOVE MULTIPLE DETECTIONS **********************
2749 2750
2750 2751 #********************* METEOR REESTIMATION (3.7, 3.8, 3.9, 3.10) ********************
2751 2752 #Parameters
2752 2753 phaseThresh = phaseThresh*numpy.pi/180
2753 2754 thresh = [phaseThresh, noise_multiple, SNRThresh]
2754 2755 #Meteor reestimation (Errors N 1, 6, 12, 17)
2755 2756 listMeteors2, listMeteorsPower, listMeteorsVolts = self.__meteorReestimation(listMeteors1, voltsPShift, pairslist0, thresh, noise, dataOut.timeInterval, dataOut.frequency)
2756 2757 # listMeteors2, listMeteorsPower, listMeteorsVolts = self.meteorReestimation3(listMeteors2, listMeteorsPower, listMeteorsVolts, voltsPShift, pairslist, thresh, noise)
2757 2758 #Estimation of decay times (Errors N 7, 8, 11)
2758 2759 listMeteors3 = self.__estimateDecayTime(listMeteors2, listMeteorsPower, dataOut.timeInterval, dataOut.frequency)
2759 2760 #******************* END OF METEOR REESTIMATION *******************
2760 2761
2761 2762 #********************* METEOR PARAMETERS CALCULATION (3.11, 3.12, 3.13) **************************
2762 2763 #Calculating Radial Velocity (Error N 15)
2763 2764 radialStdThresh = 10
2764 2765 listMeteors4 = self.__getRadialVelocity(listMeteors3, listMeteorsVolts, radialStdThresh, pairslist0, dataOut.timeInterval)
2765 2766
2766 2767 if len(listMeteors4) > 0:
2767 2768 #Setting New Array
2768 2769 date = dataOut.utctime
2769 2770 arrayParameters = self.__setNewArrays(listMeteors4, date, heiRang)
2770 2771
2771 2772 #Correcting phase offset
2772 2773 if phaseOffsets is not None:
2773 2774 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
2774 2775 arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
2775 2776
2776 2777 #Second Pairslist
2777 2778 pairsList = []
2778 2779 pairx = (0,1)
2779 2780 pairy = (2,3)
2780 2781 pairsList.append(pairx)
2781 2782 pairsList.append(pairy)
2782 2783
2783 2784 jph = numpy.array([0,0,0,0])
2784 2785 h = (hmin,hmax)
2785 2786 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
2786 2787
2787 2788 # #Calculate AOA (Error N 3, 4)
2788 2789 # #JONES ET AL. 1998
2789 2790 # error = arrayParameters[:,-1]
2790 2791 # AOAthresh = numpy.pi/8
2791 2792 # phases = -arrayParameters[:,9:13]
2792 2793 # arrayParameters[:,4:7], arrayParameters[:,-1] = meteorOps.getAOA(phases, pairsList, error, AOAthresh, azimuth)
2793 2794 #
2794 2795 # #Calculate Heights (Error N 13 and 14)
2795 2796 # error = arrayParameters[:,-1]
2796 2797 # Ranges = arrayParameters[:,2]
2797 2798 # zenith = arrayParameters[:,5]
2798 2799 # arrayParameters[:,3], arrayParameters[:,-1] = meteorOps.getHeights(Ranges, zenith, error, hmin, hmax)
2799 2800 # error = arrayParameters[:,-1]
2800 2801 #********************* END OF PARAMETERS CALCULATION **************************
2801 2802
2802 2803 #***************************+ PASS DATA TO NEXT STEP **********************
2803 2804 # arrayFinal = arrayParameters.reshape((1,arrayParameters.shape[0],arrayParameters.shape[1]))
2804 2805 dataOut.data_param = arrayParameters
2805 2806
2806 2807 if arrayParameters is None:
2807 2808 dataOut.flagNoData = True
2808 2809 else:
2809 2810 dataOut.flagNoData = False
2810 2811
2811 2812 return
2812 2813
2813 2814 def __getHardwarePhaseDiff(self, voltage0, pairslist, newheis, n):
2814 2815
2815 2816 minIndex = min(newheis[0])
2816 2817 maxIndex = max(newheis[0])
2817 2818
2818 2819 voltage = voltage0[:,:,minIndex:maxIndex+1]
2819 2820 nLength = voltage.shape[1]//n #integer block length so the slice bounds stay integers
2820 2821 nMin = 0
2821 2822 nMax = 0
2822 2823 phaseOffset = numpy.zeros((len(pairslist),n))
2823 2824
2824 2825 for i in range(n):
2825 2826 nMax += nLength
2826 2827 phaseCCF = -numpy.angle(self.__calculateCCF(voltage[:,nMin:nMax,:], pairslist, [0]))
2827 2828 phaseCCF = numpy.mean(phaseCCF, axis = 2)
2828 2829 phaseOffset[:,i] = phaseCCF.transpose()
2829 2830 nMin = nMax
2830 2831 # phaseDiff, phaseArrival = self.estimatePhaseDifference(voltage, pairslist)
2831 2832
2832 2833 #Remove Outliers
2833 2834 factor = 2
2834 2835 wt = phaseOffset - signal.medfilt(phaseOffset,(1,5))
2835 2836 dw = numpy.std(wt,axis = 1)
2836 2837 dw = dw.reshape((dw.size,1))
2837 2838 ind = numpy.where(numpy.logical_or(wt>dw*factor,wt<-dw*factor))
2838 2839 phaseOffset[ind] = numpy.nan
2839 2840 phaseOffset = stats.nanmean(phaseOffset, axis=1)
2840 2841
2841 2842 return phaseOffset
2842 2843
2843 2844 def __shiftPhase(self, data, phaseShift):
2844 2845 #this will shift the phase of a complex number
2845 2846 dataShifted = numpy.abs(data) * numpy.exp((numpy.angle(data)+phaseShift)*1j)
2846 2847 return dataShifted
2847 2848
2848 2849 def __estimatePhaseDifference(self, array, pairslist):
2849 2850 nChannel = array.shape[0]
2850 2851 nHeights = array.shape[2]
2851 2852 numPairs = len(pairslist)
2852 2853 # phaseCCF = numpy.zeros((nChannel, 5, nHeights))
2853 2854 phaseCCF = numpy.angle(self.__calculateCCF(array, pairslist, [-2,-1,0,1,2]))
2854 2855
2855 2856 #Correct phases
2856 2857 derPhaseCCF = phaseCCF[:,1:,:] - phaseCCF[:,0:-1,:]
2857 2858 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
2858 2859
2859 2860 if indDer[0].shape[0] > 0:
2860 2861 for i in range(indDer[0].shape[0]):
2861 2862 signo = -numpy.sign(derPhaseCCF[indDer[0][i],indDer[1][i],indDer[2][i]])
2862 2863 phaseCCF[indDer[0][i],indDer[1][i]+1:,:] += signo*2*numpy.pi
2863 2864
2864 2865 # for j in range(numSides):
2865 2866 # phaseCCFAux = self.calculateCCF(arrayCenter, arraySides[j,:,:], [-2,1,0,1,2])
2866 2867 # phaseCCF[j,:,:] = numpy.angle(phaseCCFAux)
2867 2868 #
2868 2869 #Linear
2869 2870 phaseInt = numpy.zeros((numPairs,1))
2870 2871 angAllCCF = phaseCCF[:,[0,1,3,4],0]
2871 2872 for j in range(numPairs):
2872 2873 fit = stats.linregress([-2,-1,1,2],angAllCCF[j,:])
2873 2874 phaseInt[j] = fit[1]
2874 2875 #Phase Differences
2875 2876 phaseDiff = phaseInt - phaseCCF[:,2,:]
2876 2877 phaseArrival = phaseInt.reshape(phaseInt.size)
2877 2878
2878 2879 #Dealias
2879 2880 phaseArrival = numpy.angle(numpy.exp(1j*phaseArrival))
2880 2881 # indAlias = numpy.where(phaseArrival > numpy.pi)
2881 2882 # phaseArrival[indAlias] -= 2*numpy.pi
2882 2883 # indAlias = numpy.where(phaseArrival < -numpy.pi)
2883 2884 # phaseArrival[indAlias] += 2*numpy.pi
2884 2885
2885 2886 return phaseDiff, phaseArrival
2886 2887
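# Hedged sketch (illustrative only): the arrival phase above is the intercept of
# a linear fit of CCF phase versus lag over lags [-2,-1,1,2], wrapped back into
# (-pi, pi]. _exampleArrivalPhase is a hypothetical helper for a single pair.
@staticmethod
def _exampleArrivalPhase(ccfPhases):
    #ccfPhases: CCF phases at lags [-2, -1, 1, 2] for one pair and height
    fit = stats.linregress([-2, -1, 1, 2], ccfPhases)
    return numpy.angle(numpy.exp(1j*fit[1]))        #dealiased lag-0 phase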
2887 2888 def __coherentDetection(self, volts, timeSegment, timeInterval, pairslist, thresh):
2888 2889 #this function will run the coherent detection used in Holdsworth et al. 2004 and return the net power
2889 2890 #find the phase shifts of each channel over 1 second intervals
2890 2891 #only look at ranges below the beacon signal
2891 2892 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
2892 2893 numBlocks = int(volts.shape[1]/numProfPerBlock)
2893 2894 numHeights = volts.shape[2]
2894 2895 nChannel = volts.shape[0]
2895 2896 voltsCohDet = volts.copy()
2896 2897
2897 2898 pairsarray = numpy.array(pairslist)
2898 2899 indSides = pairsarray[:,1]
2899 2900 # indSides = numpy.array(range(nChannel))
2900 2901 # indSides = numpy.delete(indSides, indCenter)
2901 2902 #
2902 2903 # listCenter = numpy.array_split(volts[indCenter,:,:], numBlocks, 0)
2903 2904 listBlocks = numpy.array_split(volts, numBlocks, 1)
2904 2905
2905 2906 startInd = 0
2906 2907 endInd = 0
2907 2908
2908 2909 for i in range(numBlocks):
2909 2910 startInd = endInd
2910 2911 endInd = endInd + listBlocks[i].shape[1]
2911 2912
2912 2913 arrayBlock = listBlocks[i]
2913 2914 # arrayBlockCenter = listCenter[i]
2914 2915
2915 2916 #Estimate the Phase Difference
2916 2917 phaseDiff, aux = self.__estimatePhaseDifference(arrayBlock, pairslist)
2917 2918 #Phase Difference RMS
2918 2919 arrayPhaseRMS = numpy.abs(phaseDiff)
2919 2920 phaseRMSaux = numpy.sum(arrayPhaseRMS < thresh,0)
2920 2921 indPhase = numpy.where(phaseRMSaux==4)
2921 2922 #Shifting
2922 2923 if indPhase[0].shape[0] > 0:
2923 2924 for j in range(indSides.size):
2924 2925 arrayBlock[indSides[j],:,indPhase] = self.__shiftPhase(arrayBlock[indSides[j],:,indPhase], phaseDiff[j,indPhase].transpose())
2925 2926 voltsCohDet[:,startInd:endInd,:] = arrayBlock
2926 2927
2927 2928 return voltsCohDet
2928 2929
2929 2930 def __calculateCCF(self, volts, pairslist ,laglist):
2930 2931
2931 2932 nHeights = volts.shape[2]
2932 2933 nPoints = volts.shape[1]
2933 2934 voltsCCF = numpy.zeros((len(pairslist), len(laglist), nHeights),dtype = 'complex')
2934 2935
2935 2936 for i in range(len(pairslist)):
2936 2937 volts1 = volts[pairslist[i][0]]
2937 2938 volts2 = volts[pairslist[i][1]]
2938 2939
2939 2940 for t in range(len(laglist)):
2940 2941 idxT = laglist[t]
2941 2942 if idxT >= 0:
2942 2943 vStacked = numpy.vstack((volts2[idxT:,:],
2943 2944 numpy.zeros((idxT, nHeights),dtype='complex')))
2944 2945 else:
2945 2946 vStacked = numpy.vstack((numpy.zeros((-idxT, nHeights),dtype='complex'),
2946 2947 volts2[:(nPoints + idxT),:]))
2947 2948 voltsCCF[i,t,:] = numpy.sum((numpy.conjugate(volts1)*vStacked),axis=0)
2948 2949
2949 2950 vStacked = None
2950 2951 return voltsCCF
2951 2952
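# Hedged sketch (illustrative only): __calculateCCF is a lagged conjugate product
# summed over profiles, with the shifted series zero-padded. _exampleLagCCF is a
# hypothetical helper for two 1-D complex series and a single lag.
@staticmethod
def _exampleLagCCF(v1, v2, lag):
    if lag >= 0:
        v2s = numpy.concatenate((v2[lag:], numpy.zeros(lag, dtype='complex')))
    else:
        v2s = numpy.concatenate((numpy.zeros(-lag, dtype='complex'), v2[:lag]))
    return numpy.sum(numpy.conjugate(v1)*v2s)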
2952 2953 def __getNoise(self, power, timeSegment, timeInterval):
2953 2954 numProfPerBlock = numpy.ceil(timeSegment/timeInterval)
2954 2955 numBlocks = int(power.shape[0]/numProfPerBlock)
2955 2956 numHeights = power.shape[1]
2956 2957
2957 2958 listPower = numpy.array_split(power, numBlocks, 0)
2958 2959 noise = numpy.zeros((power.shape[0], power.shape[1]))
2959 2960 noise1 = numpy.zeros((power.shape[0], power.shape[1]))
2960 2961
2961 2962 startInd = 0
2962 2963 endInd = 0
2963 2964
2964 2965 for i in range(numBlocks): #loop over time blocks
2965 2966 startInd = endInd
2966 2967 endInd = endInd + listPower[i].shape[0]
2967 2968
2968 2969 arrayBlock = listPower[i]
2969 2970 noiseAux = numpy.mean(arrayBlock, 0)
2970 2971 # noiseAux = numpy.median(noiseAux)
2971 2972 # noiseAux = numpy.mean(arrayBlock)
2972 2973 noise[startInd:endInd,:] = noise[startInd:endInd,:] + noiseAux
2973 2974
2974 2975 noiseAux1 = numpy.mean(arrayBlock)
2975 2976 noise1[startInd:endInd,:] = noise1[startInd:endInd,:] + noiseAux1
2976 2977
2977 2978 return noise, noise1
2978 2979
2979 2980 def __findMeteors(self, power, thresh):
2980 2981 nProf = power.shape[0]
2981 2982 nHeights = power.shape[1]
2982 2983 listMeteors = []
2983 2984
2984 2985 for i in range(nHeights):
2985 2986 powerAux = power[:,i]
2986 2987 threshAux = thresh[:,i]
2987 2988
2988 2989 indUPthresh = numpy.where(powerAux > threshAux)[0]
2989 2990 indDNthresh = numpy.where(powerAux <= threshAux)[0]
2990 2991
2991 2992 j = 0
2992 2993
2993 2994 while (j < indUPthresh.size - 2):
2994 2995 if (indUPthresh[j + 2] == indUPthresh[j] + 2):
2995 2996 indDNAux = numpy.where(indDNthresh > indUPthresh[j])
2996 2997 indDNthresh = indDNthresh[indDNAux]
2997 2998
2998 2999 if (indDNthresh.size > 0):
2999 3000 indEnd = indDNthresh[0] - 1
3000 3001 indInit = indUPthresh[j]
3001 3002
3002 3003 meteor = powerAux[indInit:indEnd + 1]
3003 3004 indPeak = meteor.argmax() + indInit
3004 3005 FLA = sum(numpy.conj(meteor)*numpy.hstack((meteor[1:],0)))
3005 3006
3006 3007 listMeteors.append(numpy.array([i,indInit,indPeak,indEnd,FLA])) #CHECK THIS!!!
3007 3008 j = numpy.where(indUPthresh == indEnd)[0] + 1
3008 3009 else: j+=1
3009 3010 else: j+=1
3010 3011
3011 3012 return listMeteors
3012 3013
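# Hedged sketch (illustrative only): __findMeteors looks for runs where the net
# power stays above the signal threshold for at least three consecutive profiles
# and keeps the start/end of each run. _exampleFindRuns is a hypothetical compact
# equivalent of that run-finding step for one height.
@staticmethod
def _exampleFindRuns(powerAux, threshAux, minLen=3):
    above = powerAux > threshAux
    edges = numpy.diff(above.astype(int))
    starts = numpy.where(edges == 1)[0] + 1
    ends = numpy.where(edges == -1)[0]
    if above[0]: starts = numpy.insert(starts, 0, 0)
    if above[-1]: ends = numpy.append(ends, above.size - 1)
    return [(s, e) for s, e in zip(starts, ends) if e - s + 1 >= minLen]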
3013 3014 def __removeMultipleDetections(self,listMeteors, rangeLimit, timeLimit):
3014 3015
3015 3016 arrayMeteors = numpy.asarray(listMeteors)
3016 3017 listMeteors1 = []
3017 3018
3018 3019 while arrayMeteors.shape[0] > 0:
3019 3020 FLAs = arrayMeteors[:,4]
3020 3021 maxFLA = FLAs.argmax()
3021 3022 listMeteors1.append(arrayMeteors[maxFLA,:])
3022 3023
3023 3024 MeteorInitTime = arrayMeteors[maxFLA,1]
3024 3025 MeteorEndTime = arrayMeteors[maxFLA,3]
3025 3026 MeteorHeight = arrayMeteors[maxFLA,0]
3026 3027
3027 3028 #Check neighborhood
3028 3029 maxHeightIndex = MeteorHeight + rangeLimit
3029 3030 minHeightIndex = MeteorHeight - rangeLimit
3030 3031 minTimeIndex = MeteorInitTime - timeLimit
3031 3032 maxTimeIndex = MeteorEndTime + timeLimit
3032 3033
3033 3034 #Check Heights
3034 3035 indHeight = numpy.logical_and(arrayMeteors[:,0] >= minHeightIndex, arrayMeteors[:,0] <= maxHeightIndex)
3035 3036 indTime = numpy.logical_and(arrayMeteors[:,3] >= minTimeIndex, arrayMeteors[:,1] <= maxTimeIndex)
3036 3037 indBoth = numpy.where(numpy.logical_and(indTime,indHeight))
3037 3038
3038 3039 arrayMeteors = numpy.delete(arrayMeteors, indBoth, axis = 0)
3039 3040
3040 3041 return listMeteors1
3041 3042
3042 3043 def __meteorReestimation(self, listMeteors, volts, pairslist, thresh, noise, timeInterval,frequency):
3043 3044 numHeights = volts.shape[2]
3044 3045 nChannel = volts.shape[0]
3045 3046
3046 3047 thresholdPhase = thresh[0]
3047 3048 thresholdNoise = thresh[1]
3048 3049 thresholdDB = float(thresh[2])
3049 3050
3050 3051 thresholdDB1 = 10**(thresholdDB/10)
3051 3052 pairsarray = numpy.array(pairslist)
3052 3053 indSides = pairsarray[:,1]
3053 3054
3054 3055 pairslist1 = list(pairslist)
3055 3056 pairslist1.append((0,1))
3056 3057 pairslist1.append((3,4))
3057 3058
3058 3059 listMeteors1 = []
3059 3060 listPowerSeries = []
3060 3061 listVoltageSeries = []
3061 3062 #volts has the raw data
3062 3063
3063 3064 if frequency == 30e6:
3064 3065 timeLag = 45*10**-3
3065 3066 else:
3066 3067 timeLag = 15*10**-3
3067 3068 lag = int(numpy.ceil(timeLag/timeInterval)) #integer so it can be used as a slice offset
3068 3069
3069 3070 for i in range(len(listMeteors)):
3070 3071
3071 3072 ###################### 3.6 - 3.7 PARAMETERS REESTIMATION #########################
3072 3073 meteorAux = numpy.zeros(16)
3073 3074
3074 3075 #Loading meteor Data (mHeight, mStart, mPeak, mEnd)
3075 3076 mHeight = listMeteors[i][0]
3076 3077 mStart = listMeteors[i][1]
3077 3078 mPeak = listMeteors[i][2]
3078 3079 mEnd = listMeteors[i][3]
3079 3080
3080 3081 #get the volt data between the start and end times of the meteor
3081 3082 meteorVolts = volts[:,mStart:mEnd+1,mHeight]
3082 3083 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
3083 3084
3084 3085 #3.6. Phase Difference estimation
3085 3086 phaseDiff, aux = self.__estimatePhaseDifference(meteorVolts, pairslist)
3086 3087
3087 3088 #3.7. Phase difference removal & meteor start, peak and end times reestimated
3088 3089 #meteorVolts0.- all Channels, all Profiles
3089 3090 meteorVolts0 = volts[:,:,mHeight]
3090 3091 meteorThresh = noise[:,mHeight]*thresholdNoise
3091 3092 meteorNoise = noise[:,mHeight]
3092 3093 meteorVolts0[indSides,:] = self.__shiftPhase(meteorVolts0[indSides,:], phaseDiff) #Phase Shifting
3093 3094 powerNet0 = numpy.nansum(numpy.abs(meteorVolts0)**2, axis = 0) #Power
3094 3095
3095 3096 #Times reestimation
3096 3097 mStart1 = numpy.where(powerNet0[:mPeak] < meteorThresh[:mPeak])[0]
3097 3098 if mStart1.size > 0:
3098 3099 mStart1 = mStart1[-1] + 1
3099 3100
3100 3101 else:
3101 3102 mStart1 = mPeak
3102 3103
3103 3104 mEnd1 = numpy.where(powerNet0[mPeak:] < meteorThresh[mPeak:])[0][0] + mPeak - 1
3104 3105 mEndDecayTime1 = numpy.where(powerNet0[mPeak:] < meteorNoise[mPeak:])[0]
3105 3106 if mEndDecayTime1.size == 0:
3106 3107 mEndDecayTime1 = powerNet0.size
3107 3108 else:
3108 3109 mEndDecayTime1 = mEndDecayTime1[0] + mPeak - 1
3109 3110 # mPeak1 = meteorVolts0[mStart1:mEnd1 + 1].argmax()
3110 3111
3111 3112 #meteorVolts1.- all Channels, from start to end
3112 3113 meteorVolts1 = meteorVolts0[:,mStart1:mEnd1 + 1]
3113 3114 meteorVolts2 = meteorVolts0[:,mPeak + lag:mEnd1 + 1]
3114 3115 if meteorVolts2.shape[1] == 0:
3115 3116 meteorVolts2 = meteorVolts0[:,mPeak:mEnd1 + 1]
3116 3117 meteorVolts1 = meteorVolts1.reshape(meteorVolts1.shape[0], meteorVolts1.shape[1], 1)
3117 3118 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1], 1)
3118 3119 ##################### END PARAMETERS REESTIMATION #########################
3119 3120
3120 3121 ##################### 3.8 PHASE DIFFERENCE REESTIMATION ########################
3121 3122 # if mEnd1 - mStart1 > 4: #Error Number 6: echo less than 5 samples long; too short for analysis
3122 3123 if meteorVolts2.shape[1] > 0:
3123 3124 #Phase Difference re-estimation
3124 3125 phaseDiff1, phaseDiffint = self.__estimatePhaseDifference(meteorVolts2, pairslist1) #Phase Difference Estimation
3125 3126 # phaseDiff1, phaseDiffint = self.estimatePhaseDifference(meteorVolts2, pairslist)
3126 3127 meteorVolts2 = meteorVolts2.reshape(meteorVolts2.shape[0], meteorVolts2.shape[1])
3127 3128 phaseDiff11 = numpy.reshape(phaseDiff1, (phaseDiff1.shape[0],1))
3128 3129 meteorVolts2[indSides,:] = self.__shiftPhase(meteorVolts2[indSides,:], phaseDiff11[0:4]) #Phase Shifting
3129 3130
3130 3131 #Phase Difference RMS
3131 3132 phaseRMS1 = numpy.sqrt(numpy.mean(numpy.square(phaseDiff1)))
3132 3133 powerNet1 = numpy.nansum(numpy.abs(meteorVolts1[:,:])**2,0)
3133 3134 #Data from Meteor
3134 3135 mPeak1 = powerNet1.argmax() + mStart1
3135 3136 mPeakPower1 = powerNet1.max()
3136 3137 noiseAux = sum(noise[mStart1:mEnd1 + 1,mHeight])
3137 3138 mSNR1 = (sum(powerNet1)-noiseAux)/noiseAux
3138 3139 Meteor1 = numpy.array([mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1])
3139 3140 Meteor1 = numpy.hstack((Meteor1,phaseDiffint))
3140 3141 PowerSeries = powerNet0[mStart1:mEndDecayTime1 + 1]
3141 3142 #Vectorize
3142 3143 meteorAux[0:7] = [mHeight, mStart1, mPeak1, mEnd1, mPeakPower1, mSNR1, phaseRMS1]
3143 3144 meteorAux[7:11] = phaseDiffint[0:4]
3144 3145
3145 3146 #Rejection criteria
3146 3147 if phaseRMS1 > thresholdPhase: #Error Number 17: Phase variation
3147 3148 meteorAux[-1] = 17
3148 3149 elif mSNR1 < thresholdDB1: #Error Number 1: SNR < threshold dB
3149 3150 meteorAux[-1] = 1
3150 3151
3151 3152
3152 3153 else:
3153 3154 meteorAux[0:4] = [mHeight, mStart, mPeak, mEnd]
3154 3155 meteorAux[-1] = 6 #Error Number 6: echo less than 5 samples long; too short for analysis
3155 3156 PowerSeries = 0
3156 3157
3157 3158 listMeteors1.append(meteorAux)
3158 3159 listPowerSeries.append(PowerSeries)
3159 3160 listVoltageSeries.append(meteorVolts1)
3160 3161
3161 3162 return listMeteors1, listPowerSeries, listVoltageSeries
3162 3163
3163 3164 def __estimateDecayTime(self, listMeteors, listPower, timeInterval, frequency):
3164 3165
3165 3166 threshError = 10
3166 3167 #Depending on whether it is 30 or 50 MHz
3167 3168 if frequency == 30e6:
3168 3169 timeLag = 45*10**-3
3169 3170 else:
3170 3171 timeLag = 15*10**-3
3171 3172 lag = int(numpy.ceil(timeLag/timeInterval)) #integer so indlag can be used as an index
3172 3173
3173 3174 listMeteors1 = []
3174 3175
3175 3176 for i in range(len(listMeteors)):
3176 3177 meteorPower = listPower[i]
3177 3178 meteorAux = listMeteors[i]
3178 3179
3179 3180 if meteorAux[-1] == 0:
3180 3181
3181 3182 try:
3182 3183 indmax = meteorPower.argmax()
3183 3184 indlag = indmax + lag
3184 3185
3185 3186 y = meteorPower[indlag:]
3186 3187 x = numpy.arange(0, y.size)*timeLag
3187 3188
3188 3189 #first guess
3189 3190 a = y[0]
3190 3191 tau = timeLag
3191 3192 #exponential fit
3192 3193 popt, pcov = optimize.curve_fit(self.__exponential_function, x, y, p0 = [a, tau])
3193 3194 y1 = self.__exponential_function(x, *popt)
3194 3195 #error estimation
3195 3196 error = sum((y - y1)**2)/(numpy.var(y)*(y.size - popt.size))
3196 3197
3197 3198 decayTime = popt[1]
3198 3199 riseTime = indmax*timeInterval
3199 3200 meteorAux[11:13] = [decayTime, error]
3200 3201
3201 3202 #Table items 7, 8 and 11
3202 3203 if (riseTime > 0.3): #Number 7: Echo rise exceeds 0.3s
3203 3204 meteorAux[-1] = 7
3204 3205 elif (decayTime < 2*riseTime) : #Number 8: Echo decay time less than twice the rise time
3205 3206 meteorAux[-1] = 8
3206 3207 if (error > threshError): #Number 11: Poor fit to amplitude for estimation of decay time
3207 3208 meteorAux[-1] = 11
3208 3209
3209 3210
3210 3211 except:
3211 3212 meteorAux[-1] = 11
3212 3213
3213 3214
3214 3215 listMeteors1.append(meteorAux)
3215 3216
3216 3217 return listMeteors1
3217 3218
3218 3219 #Exponential Function
3219 3220
3220 3221 def __exponential_function(self, x, a, tau):
3221 3222 y = a*numpy.exp(-x/tau)
3222 3223 return y
3223 3224
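# Hedged sketch (illustrative only): the decay time above comes from fitting
# a*exp(-t/tau) to the post-peak power and scoring the fit with the normalized
# residual. _exampleDecayFit is a hypothetical helper assuming a 1-D power
# series starting at the lag point and its sampling interval in seconds.
@staticmethod
def _exampleDecayFit(power, timeInterval):
    t = numpy.arange(power.size)*timeInterval
    popt, pcov = optimize.curve_fit(lambda x, a, tau: a*numpy.exp(-x/tau),
                                    t, power, p0=[power[0], timeInterval])
    fitted = popt[0]*numpy.exp(-t/popt[1])
    error = sum((power - fitted)**2)/(numpy.var(power)*(power.size - popt.size))
    return popt[1], error                           #decay time (s), fit error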
3224 3225 def __getRadialVelocity(self, listMeteors, listVolts, radialStdThresh, pairslist, timeInterval):
3225 3226
3226 3227 pairslist1 = list(pairslist)
3227 3228 pairslist1.append((0,1))
3228 3229 pairslist1.append((3,4))
3229 3230 numPairs = len(pairslist1)
3230 3231 #Time Lag
3231 3232 timeLag = 45*10**-3
3232 3233 c = 3e8
3233 3234 lag = numpy.ceil(timeLag/timeInterval)
3234 3235 freq = 30e6
3235 3236
3236 3237 listMeteors1 = []
3237 3238
3238 3239 for i in range(len(listMeteors)):
3239 3240 meteorAux = listMeteors[i]
3240 3241 if meteorAux[-1] == 0:
3241 3242 mStart = listMeteors[i][1]
3242 3243 mPeak = listMeteors[i][2]
3243 3244 mLag = mPeak - mStart + lag
3244 3245
3245 3246 #get the volt data between the start and end times of the meteor
3246 3247 meteorVolts = listVolts[i]
3247 3248 meteorVolts = meteorVolts.reshape(meteorVolts.shape[0], meteorVolts.shape[1], 1)
3248 3249
3249 3250 #Get CCF
3250 3251 allCCFs = self.__calculateCCF(meteorVolts, pairslist1, [-2,-1,0,1,2])
3251 3252
3252 3253 #Method 2
3253 3254 slopes = numpy.zeros(numPairs)
3254 3255 time = numpy.array([-2,-1,1,2])*timeInterval
3255 3256 angAllCCF = numpy.angle(allCCFs[:,[0,1,3,4],0])
3256 3257
3257 3258 #Correct phases
3258 3259 derPhaseCCF = angAllCCF[:,1:] - angAllCCF[:,0:-1]
3259 3260 indDer = numpy.where(numpy.abs(derPhaseCCF) > numpy.pi)
3260 3261
3261 3262 if indDer[0].shape[0] > 0:
3262 3263 for k in range(indDer[0].shape[0]): #do not shadow the outer meteor index i
3263 3264 signo = -numpy.sign(derPhaseCCF[indDer[0][k],indDer[1][k]])
3264 3265 angAllCCF[indDer[0][k],indDer[1][k]+1:] += signo*2*numpy.pi
3265 3266
3266 3267 # fit = scipy.stats.linregress(numpy.array([-2,-1,1,2])*timeInterval, numpy.array([phaseLagN2s[i],phaseLagN1s[i],phaseLag1s[i],phaseLag2s[i]]))
3267 3268 for j in range(numPairs):
3268 3269 fit = stats.linregress(time, angAllCCF[j,:])
3269 3270 slopes[j] = fit[0]
3270 3271
3271 3272 #Remove Outlier
3272 3273 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
3273 3274 # slopes = numpy.delete(slopes,indOut)
3274 3275 # indOut = numpy.argmax(numpy.abs(slopes - numpy.mean(slopes)))
3275 3276 # slopes = numpy.delete(slopes,indOut)
3276 3277
3277 3278 radialVelocity = -numpy.mean(slopes)*(0.25/numpy.pi)*(c/freq)
3278 3279 radialError = numpy.std(slopes)*(0.25/numpy.pi)*(c/freq)
3279 3280 meteorAux[-2] = radialError
3280 3281 meteorAux[-3] = radialVelocity
3281 3282
3282 3283 #Setting Error
3283 3284 #Number 15: Radial Drift velocity or projected horizontal velocity exceeds 200 m/s
3284 3285 if numpy.abs(radialVelocity) > 200:
3285 3286 meteorAux[-1] = 15
3286 3287 #Number 12: Poor fit to CCF variation for estimation of radial drift velocity
3287 3288 elif radialError > radialStdThresh:
3288 3289 meteorAux[-1] = 12
3289 3290
3290 3291 listMeteors1.append(meteorAux)
3291 3292 return listMeteors1
3292 3293
3293 3294 def __setNewArrays(self, listMeteors, date, heiRang):
3294 3295
3295 3296 #New arrays
3296 3297 arrayMeteors = numpy.array(listMeteors)
3297 3298 arrayParameters = numpy.zeros((len(listMeteors), 13))
3298 3299
3299 3300 #Date inclusion
3300 3301 # date = re.findall(r'\((.*?)\)', date)
3301 3302 # date = date[0].split(',')
3302 3303 # date = map(int, date)
3303 3304 #
3304 3305 # if len(date)<6:
3305 3306 # date.append(0)
3306 3307 #
3307 3308 # date = [date[0]*10000 + date[1]*100 + date[2], date[3]*10000 + date[4]*100 + date[5]]
3308 3309 # arrayDate = numpy.tile(date, (len(listMeteors), 1))
3309 3310 arrayDate = numpy.tile(date, (len(listMeteors)))
3310 3311
3311 3312 #Meteor array
3312 3313 # arrayMeteors[:,0] = heiRang[arrayMeteors[:,0].astype(int)]
3313 3314 # arrayMeteors = numpy.hstack((arrayDate, arrayMeteors))
3314 3315
3315 3316 #Parameters Array
3316 3317 arrayParameters[:,0] = arrayDate #Date
3317 3318 arrayParameters[:,1] = heiRang[arrayMeteors[:,0].astype(int)] #Range
3318 3319 arrayParameters[:,6:8] = arrayMeteors[:,-3:-1] #Radial velocity and its error
3319 3320 arrayParameters[:,8:12] = arrayMeteors[:,7:11] #Phases
3320 3321 arrayParameters[:,-1] = arrayMeteors[:,-1] #Error
3321 3322
3322 3323
3323 3324 return arrayParameters
3324 3325
3325 3326 class CorrectSMPhases(Operation):
3326 3327
3327 3328 def run(self, dataOut, phaseOffsets, hmin = 50, hmax = 150, azimuth = 45, channelPositions = None):
3328 3329
3329 3330 arrayParameters = dataOut.data_param
3330 3331 pairsList = []
3331 3332 pairx = (0,1)
3332 3333 pairy = (2,3)
3333 3334 pairsList.append(pairx)
3334 3335 pairsList.append(pairy)
3335 3336 jph = numpy.zeros(4)
3336 3337
3337 3338 phaseOffsets = numpy.array(phaseOffsets)*numpy.pi/180
3338 3339 # arrayParameters[:,8:12] = numpy.unwrap(arrayParameters[:,8:12] + phaseOffsets)
3339 3340 arrayParameters[:,8:12] = numpy.angle(numpy.exp(1j*(arrayParameters[:,8:12] + phaseOffsets)))
3340 3341
3341 3342 meteorOps = SMOperations()
3342 3343 if channelPositions is None:
3343 3344 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
3344 3345 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Star (Estrella) configuration
3345 3346
3346 3347 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
3347 3348 h = (hmin,hmax)
3348 3349
3349 3350 arrayParameters = meteorOps.getMeteorParams(arrayParameters, azimuth, h, pairsList, distances, jph)
3350 3351
3351 3352 dataOut.data_param = arrayParameters
3352 3353 return
3353 3354
3354 3355 class SMPhaseCalibration(Operation):
3355 3356
3356 3357 __buffer = None
3357 3358
3358 3359 __initime = None
3359 3360
3360 3361 __dataReady = False
3361 3362
3362 3363 __isConfig = False
3363 3364
3364 3365 def __checkTime(self, currentTime, initTime, paramInterval, outputInterval):
3365 3366
3366 3367 dataTime = currentTime + paramInterval
3367 3368 deltaTime = dataTime - initTime
3368 3369
3369 3370 if deltaTime >= outputInterval or deltaTime < 0:
3370 3371 return True
3371 3372
3372 3373 return False
3373 3374
3374 3375 def __getGammas(self, pairs, d, phases):
3375 3376 gammas = numpy.zeros(2)
3376 3377
3377 3378 for i in range(len(pairs)):
3378 3379
3379 3380 pairi = pairs[i]
3380 3381
3381 3382 phip3 = phases[:,pairi[0]]
3382 3383 d3 = d[pairi[0]]
3383 3384 phip2 = phases[:,pairi[1]]
3384 3385 d2 = d[pairi[1]]
3385 3386 #Calculating gamma
3386 3387 # jdcos = alp1/(k*d1)
3387 3388 # jgamma = numpy.angle(numpy.exp(1j*(d0*alp1/d1 - alp0)))
3388 3389 jgamma = -phip2*d3/d2 - phip3
3389 3390 jgamma = numpy.angle(numpy.exp(1j*jgamma))
3390 3391 # jgamma[jgamma>numpy.pi] -= 2*numpy.pi
3391 3392 # jgamma[jgamma<-numpy.pi] += 2*numpy.pi
3392 3393
3393 3394 #Revised distribution
3394 3395 jgammaArray = numpy.hstack((jgamma,jgamma+0.5*numpy.pi,jgamma-0.5*numpy.pi))
3395 3396
3396 3397 #Histogram
3397 3398 nBins = 64
3398 3399 rmin = -0.5*numpy.pi
3399 3400 rmax = 0.5*numpy.pi
3400 3401 phaseHisto = numpy.histogram(jgammaArray, bins=nBins, range=(rmin,rmax))
3401 3402
3402 3403 meteorsY = phaseHisto[0]
3403 3404 phasesX = phaseHisto[1][:-1]
3404 3405 width = phasesX[1] - phasesX[0]
3405 3406 phasesX += width/2
3406 3407
3407 3408 #Gaussian approximation
3408 3409 bpeak = meteorsY.argmax()
3409 3410 peak = meteorsY.max()
3410 3411 jmin = bpeak - 5
3411 3412 jmax = bpeak + 5 + 1
3412 3413
3413 3414 if jmin<0:
3414 3415 jmin = 0
3415 3416 jmax = 6
3416 3417 elif jmax > meteorsY.size:
3417 3418 jmin = meteorsY.size - 6
3418 3419 jmax = meteorsY.size
3419 3420
3420 3421 x0 = numpy.array([peak,bpeak,50])
3421 3422 coeff = optimize.leastsq(self.__residualFunction, x0, args=(meteorsY[jmin:jmax], phasesX[jmin:jmax]))
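# optimize.leastsq minimizes __residualFunction (histogram counts minus the Gaussian
# model) over the bins around the histogram peak; coeff[0] holds the fitted
# [amplitude, center, width], so coeff[0][1] below is the peak position taken as the
# gamma phase offset.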
3422 3423
3423 3424 #Gammas
3424 3425 gammas[i] = coeff[0][1]
3425 3426
3426 3427 return gammas
3427 3428
3428 3429 def __residualFunction(self, coeffs, y, t):
3429 3430
3430 3431 return y - self.__gauss_function(t, coeffs)
3431 3432
3432 3433 def __gauss_function(self, t, coeffs):
3433 3434
3434 3435 return coeffs[0]*numpy.exp(-0.5*((t - coeffs[1]) / coeffs[2])**2)
3435 3436
3436 3437 def __getPhases(self, azimuth, h, pairsList, d, gammas, meteorsArray):
3437 3438 meteorOps = SMOperations()
3438 3439 nchan = 4
3439 3440 pairx = pairsList[0] #x es 0
3440 3441 pairy = pairsList[1] #y es 1
3441 3442 center_xangle = 0
3442 3443 center_yangle = 0
3443 3444 range_angle = numpy.array([10*numpy.pi,numpy.pi,numpy.pi/2,numpy.pi/4])
3444 3445 ntimes = len(range_angle)
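# Coarse-to-fine grid search: each pass scans an (nstepsx x nstepsy) grid of candidate
# phase offsets over a window that shrinks from 10*pi down to pi/4, re-centered on the
# offsets that maximized the number of error-free meteors in the previous pass.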
3445 3446
3446 3447 nstepsx = 20
3447 3448 nstepsy = 20
3448 3449
3449 3450 for iz in range(ntimes):
3450 3451 min_xangle = -range_angle[iz]/2 + center_xangle
3451 3452 max_xangle = range_angle[iz]/2 + center_xangle
3452 3453 min_yangle = -range_angle[iz]/2 + center_yangle
3453 3454 max_yangle = range_angle[iz]/2 + center_yangle
3454 3455
3455 3456 inc_x = (max_xangle-min_xangle)/nstepsx
3456 3457 inc_y = (max_yangle-min_yangle)/nstepsy
3457 3458
3458 3459 alpha_y = numpy.arange(nstepsy)*inc_y + min_yangle
3459 3460 alpha_x = numpy.arange(nstepsx)*inc_x + min_xangle
3460 3461 penalty = numpy.zeros((nstepsx,nstepsy))
3461 3462 jph_array = numpy.zeros((nchan,nstepsx,nstepsy))
3462 3463 jph = numpy.zeros(nchan)
3463 3464
3464 3465 # Iterations looking for the offset
3465 3466 for iy in range(int(nstepsy)):
3466 3467 for ix in range(int(nstepsx)):
3467 3468 d3 = d[pairsList[1][0]]
3468 3469 d2 = d[pairsList[1][1]]
3469 3470 d5 = d[pairsList[0][0]]
3470 3471 d4 = d[pairsList[0][1]]
3471 3472
3472 3473 alp2 = alpha_y[iy] #gamma 1
3473 3474 alp4 = alpha_x[ix] #gamma 0
3474 3475
3475 3476 alp3 = -alp2*d3/d2 - gammas[1]
3476 3477 alp5 = -alp4*d5/d4 - gammas[0]
3477 3478 # jph[pairy[1]] = alpha_y[iy]
3478 3479 # jph[pairy[0]] = -gammas[1] - alpha_y[iy]*d[pairy[1]]/d[pairy[0]]
3479 3480
3480 3481 # jph[pairx[1]] = alpha_x[ix]
3481 3482 # jph[pairx[0]] = -gammas[0] - alpha_x[ix]*d[pairx[1]]/d[pairx[0]]
3482 3483 jph[pairsList[0][1]] = alp4
3483 3484 jph[pairsList[0][0]] = alp5
3484 3485 jph[pairsList[1][0]] = alp3
3485 3486 jph[pairsList[1][1]] = alp2
3486 3487 jph_array[:,ix,iy] = jph
3487 3488 # d = [2.0,2.5,2.5,2.0]
3488 3489 #falta chequear si va a leer bien los meteoros
3489 3490 meteorsArray1 = meteorOps.getMeteorParams(meteorsArray, azimuth, h, pairsList, d, jph)
3490 3491 error = meteorsArray1[:,-1]
3491 3492 ind1 = numpy.where(error==0)[0]
3492 3493 penalty[ix,iy] = ind1.size
3493 3494
3494 3495 i,j = numpy.unravel_index(penalty.argmax(), penalty.shape)
3495 3496 phOffset = jph_array[:,i,j]
3496 3497
3497 3498 center_xangle = phOffset[pairx[1]]
3498 3499 center_yangle = phOffset[pairy[1]]
3499 3500
3500 3501 phOffset = numpy.angle(numpy.exp(1j*jph_array[:,i,j]))
3501 3502 phOffset = phOffset*180/numpy.pi
3502 3503 return phOffset
3503 3504
3504 3505
3505 3506 def run(self, dataOut, hmin, hmax, channelPositions=None, nHours = 1):
3506 3507
3507 3508 dataOut.flagNoData = True
3508 3509 self.__dataReady = False
3509 3510 dataOut.outputInterval = nHours*3600
3510 3511
3511 3512 if self.__isConfig == False:
3512 3513 # self.__initime = dataOut.datatime.replace(minute = 0, second = 0, microsecond = 03)
3513 3514 #Get Initial LTC time
3514 3515 self.__initime = datetime.datetime.utcfromtimestamp(dataOut.utctime)
3515 3516 self.__initime = (self.__initime.replace(minute = 0, second = 0, microsecond = 0) - datetime.datetime(1970, 1, 1)).total_seconds()
3516 3517
3517 3518 self.__isConfig = True
3518 3519
3519 3520 if self.__buffer is None:
3520 3521 self.__buffer = dataOut.data_param.copy()
3521 3522
3522 3523 else:
3523 3524 self.__buffer = numpy.vstack((self.__buffer, dataOut.data_param))
3524 3525
3525 3526 self.__dataReady = self.__checkTime(dataOut.utctime, self.__initime, dataOut.paramInterval, dataOut.outputInterval) #Check if the buffer is ready
3526 3527
3527 3528 if self.__dataReady:
3528 3529 dataOut.utctimeInit = self.__initime
3529 3530 self.__initime += dataOut.outputInterval #to erase time offset
3530 3531
3531 3532 freq = dataOut.frequency
3532 3533 c = dataOut.C #m/s
3533 3534 lamb = c/freq
3534 3535 k = 2*numpy.pi/lamb
3535 3536 azimuth = 0
3536 3537 h = (hmin, hmax)
3537 3538 # pairs = ((0,1),(2,3)) #Estrella
3538 3539 # pairs = ((1,0),(2,3)) #T
3539 3540
3540 3541 if channelPositions is None:
3541 3542 # channelPositions = [(2.5,0), (0,2.5), (0,0), (0,4.5), (-2,0)] #T
3542 3543 channelPositions = [(4.5,2), (2,4.5), (2,2), (2,0), (0,2)] #Estrella
3543 3544 meteorOps = SMOperations()
3544 3545 pairslist0, distances = meteorOps.getPhasePairs(channelPositions)
3545 3546
3546 3547 #Checking correct order of pairs
3547 3548 pairs = []
3548 3549 if distances[1] > distances[0]:
3549 3550 pairs.append((1,0))
3550 3551 else:
3551 3552 pairs.append((0,1))
3552 3553
3553 3554 if distances[3] > distances[2]:
3554 3555 pairs.append((3,2))
3555 3556 else:
3556 3557 pairs.append((2,3))
3557 3558 # distances1 = [-distances[0]*lamb, distances[1]*lamb, -distances[2]*lamb, distances[3]*lamb]
3558 3559
3559 3560 meteorsArray = self.__buffer
3560 3561 error = meteorsArray[:,-1]
3561 3562 boolError = (error==0)|(error==3)|(error==4)|(error==13)|(error==14)
3562 3563 ind1 = numpy.where(boolError)[0]
3563 3564 meteorsArray = meteorsArray[ind1,:]
3564 3565 meteorsArray[:,-1] = 0
3565 3566 phases = meteorsArray[:,8:12]
3566 3567
3567 3568 #Calculate Gammas
3568 3569 gammas = self.__getGammas(pairs, distances, phases)
3569 3570 # gammas = numpy.array([-21.70409463,45.76935864])*numpy.pi/180
3570 3571 #Calculate Phases
3571 3572 phasesOff = self.__getPhases(azimuth, h, pairs, distances, gammas, meteorsArray)
3572 3573 phasesOff = phasesOff.reshape((1,phasesOff.size))
3573 3574 dataOut.data_output = -phasesOff
3574 3575 dataOut.flagNoData = False
3575 3576 self.__buffer = None
3576 3577
3577 3578
3578 3579 return
3579 3580
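# --- Illustrative sketch (not used by the processing chain) ---------------------
# A minimal, standalone version of the technique used in
# SMPhaseCalibration.__getGammas: histogram a set of wrapped phases and fit a
# Gaussian around the peak with scipy.optimize.leastsq, taking the fitted center
# as the phase offset. The helper name, bin count and initial width are
# illustrative assumptions only.
def _fit_phase_histogram_peak(phases, nBins=64):
    import numpy
    from scipy import optimize
    counts, edges = numpy.histogram(phases, bins=nBins, range=(-0.5*numpy.pi, 0.5*numpy.pi))
    centers = edges[:-1] + 0.5*(edges[1] - edges[0])
    gauss = lambda t, c: c[0]*numpy.exp(-0.5*((t - c[1])/c[2])**2)
    residual = lambda c, y, t: y - gauss(t, c)
    b = int(counts.argmax())
    lo, hi = max(b - 5, 0), min(b + 6, counts.size)
    x0 = numpy.array([counts.max(), centers[b], 0.1])
    coeff, _ = optimize.leastsq(residual, x0, args=(counts[lo:hi], centers[lo:hi]))
    return coeff[1]  # fitted peak center, in radians
# ---------------------------------------------------------------------------------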
3580 3581 class SMOperations():
3581 3582
3582 3583 def __init__(self):
3583 3584
3584 3585 return
3585 3586
3586 3587 def getMeteorParams(self, arrayParameters0, azimuth, h, pairsList, distances, jph):
3587 3588
3588 3589 arrayParameters = arrayParameters0.copy()
3589 3590 hmin = h[0]
3590 3591 hmax = h[1]
3591 3592
3592 3593 #Calculate AOA (Error N 3, 4)
3593 3594 #JONES ET AL. 1998
3594 3595 AOAthresh = numpy.pi/8
3595 3596 error = arrayParameters[:,-1]
3596 3597 phases = -arrayParameters[:,8:12] + jph
3597 3598 # phases = numpy.unwrap(phases)
3598 3599 arrayParameters[:,3:6], arrayParameters[:,-1] = self.__getAOA(phases, pairsList, distances, error, AOAthresh, azimuth)
3599 3600
3600 3601 #Calculate Heights (Error N 13 and 14)
3601 3602 error = arrayParameters[:,-1]
3602 3603 Ranges = arrayParameters[:,1]
3603 3604 zenith = arrayParameters[:,4]
3604 3605 arrayParameters[:,2], arrayParameters[:,-1] = self.__getHeights(Ranges, zenith, error, hmin, hmax)
3605 3606
3606 3607 #----------------------- Get Final data ------------------------------------
3607 3608 # error = arrayParameters[:,-1]
3608 3609 # ind1 = numpy.where(error==0)[0]
3609 3610 # arrayParameters = arrayParameters[ind1,:]
3610 3611
3611 3612 return arrayParameters
3612 3613
3613 3614 def __getAOA(self, phases, pairsList, directions, error, AOAthresh, azimuth):
3614 3615
3615 3616 arrayAOA = numpy.zeros((phases.shape[0],3))
3616 3617 cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList,directions)
3617 3618
3618 3619 arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
3619 3620 cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
3620 3621 arrayAOA[:,2] = cosDirError
3621 3622
3622 3623 azimuthAngle = arrayAOA[:,0]
3623 3624 zenithAngle = arrayAOA[:,1]
3624 3625
3625 3626 #Setting Error
3626 3627 indError = numpy.where(numpy.logical_or(error == 3, error == 4))[0]
3627 3628 error[indError] = 0
3628 3629 #Number 3: AOA not feasible
3629 3630 indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
3630 3631 error[indInvalid] = 3
3631 3632 #Number 4: Large difference in AOAs obtained from different antenna baselines
3632 3633 indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
3633 3634 error[indInvalid] = 4
3634 3635 return arrayAOA, error
3635 3636
3636 3637 def __getDirectionCosines(self, arrayPhase, pairsList, distances):
3637 3638
3638 3639 #Initializing some variables
3639 3640 ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
3640 3641 ang_aux = ang_aux.reshape(1,ang_aux.size)
3641 3642
3642 3643 cosdir = numpy.zeros((arrayPhase.shape[0],2))
3643 3644 cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
3644 3645
3645 3646
3646 3647 for i in range(2):
3647 3648 ph0 = arrayPhase[:,pairsList[i][0]]
3648 3649 ph1 = arrayPhase[:,pairsList[i][1]]
3649 3650 d0 = distances[pairsList[i][0]]
3650 3651 d1 = distances[pairsList[i][1]]
3651 3652
3652 3653 ph0_aux = ph0 + ph1
3653 3654 ph0_aux = numpy.angle(numpy.exp(1j*ph0_aux))
3654 3655 # ph0_aux[ph0_aux > numpy.pi] -= 2*numpy.pi
3655 3656 # ph0_aux[ph0_aux < -numpy.pi] += 2*numpy.pi
3656 3657 #First Estimation
3657 3658 cosdir0[:,i] = (ph0_aux)/(2*numpy.pi*(d0 - d1))
3658 3659
3659 3660 #Most-Accurate Second Estimation
3660 3661 phi1_aux = ph0 - ph1
3661 3662 phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
3662 3663 #Direction Cosine 1
3663 3664 cosdir1 = (phi1_aux + ang_aux)/(2*numpy.pi*(d0 + d1))
3664 3665
3665 3666 #Searching the correct Direction Cosine
3666 3667 cosdir0_aux = cosdir0[:,i]
3667 3668 cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
3668 3669 #Minimum Distance
3669 3670 cosDiff = (cosdir1 - cosdir0_aux)**2
3670 3671 indcos = cosDiff.argmin(axis = 1)
3671 3672 #Saving Value obtained
3672 3673 cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
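# The coarse estimate cosdir0 (phase sum over the short effective baseline d0 - d1) is
# only used to pick, among the 2*pi-ambiguous candidates cosdir1 built from the phase
# difference over the long baseline (d0 + d1), the solution closest to it.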
3673 3674
3674 3675 return cosdir0, cosdir
3675 3676
3676 3677 def __calculateAOA(self, cosdir, azimuth):
3677 3678 cosdirX = cosdir[:,0]
3678 3679 cosdirY = cosdir[:,1]
3679 3680
3680 3681 zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
3681 3682 azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth#0 deg north, 90 deg east
3682 3683 angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
3683 3684
3684 3685 return angles
3685 3686
3686 3687 def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
3687 3688
3688 3689 Ramb = 375 #Ramb = c/(2*PRF)
3689 3690 Re = 6371 #Earth Radius
3690 3691 heights = numpy.zeros(Ranges.shape)
3691 3692
3692 3693 R_aux = numpy.array([0,1,2])*Ramb
3693 3694 R_aux = R_aux.reshape(1,R_aux.size)
3694 3695
3695 3696 Ranges = Ranges.reshape(Ranges.size,1)
3696 3697
3697 3698 Ri = Ranges + R_aux
3698 3699 hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
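# Law-of-cosines height for each range-aliased candidate Ri = R + k*Ramb (k = 0, 1, 2):
# h = sqrt(Re^2 + Ri^2 + 2*Re*Ri*cos(zenith)) - Re; only candidates falling inside
# (minHeight, maxHeight) are accepted below.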
3699 3700
3700 3701 #Check if there is a height between 70 and 110 km
3701 3702 h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
3702 3703 ind_h = numpy.where(h_bool == 1)[0]
3703 3704
3704 3705 hCorr = hi[ind_h, :]
3705 3706 ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
3706 3707
3707 3708 hCorr = hi[ind_hCorr][:len(ind_h)]
3708 3709 heights[ind_h] = hCorr
3709 3710
3710 3711 #Setting Error
3711 3712 #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
3712 3713 #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
3713 3714 indError = numpy.where(numpy.logical_or(error == 13, error == 14))[0]
3714 3715 error[indError] = 0
3715 3716 indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
3716 3717 error[indInvalid2] = 14
3717 3718 indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
3718 3719 error[indInvalid1] = 13
3719 3720
3720 3721 return heights, error
3721 3722
3722 3723 def getPhasePairs(self, channelPositions):
3723 3724 chanPos = numpy.array(channelPositions)
3724 3725 listOper = list(itertools.combinations(list(range(5)),2))
3725 3726
3726 3727 distances = numpy.zeros(4)
3727 3728 axisX = []
3728 3729 axisY = []
3729 3730 distX = numpy.zeros(3)
3730 3731 distY = numpy.zeros(3)
3731 3732 ix = 0
3732 3733 iy = 0
3733 3734
3734 3735 pairX = numpy.zeros((2,2))
3735 3736 pairY = numpy.zeros((2,2))
3736 3737
3737 3738 for i in range(len(listOper)):
3738 3739 pairi = listOper[i]
3739 3740
3740 3741 posDif = numpy.abs(chanPos[pairi[0],:] - chanPos[pairi[1],:])
3741 3742
3742 3743 if posDif[0] == 0:
3743 3744 axisY.append(pairi)
3744 3745 distY[iy] = posDif[1]
3745 3746 iy += 1
3746 3747 elif posDif[1] == 0:
3747 3748 axisX.append(pairi)
3748 3749 distX[ix] = posDif[0]
3749 3750 ix += 1
3750 3751
3751 3752 for i in range(2):
3752 3753 if i==0:
3753 3754 dist0 = distX
3754 3755 axis0 = axisX
3755 3756 else:
3756 3757 dist0 = distY
3757 3758 axis0 = axisY
3758 3759
3759 3760 side = numpy.argsort(dist0)[:-1]
3760 3761 axis0 = numpy.array(axis0)[side,:]
3761 3762 chanC = int(numpy.intersect1d(axis0[0,:], axis0[1,:])[0])
3762 3763 axis1 = numpy.unique(numpy.reshape(axis0,4))
3763 3764 side = axis1[axis1 != chanC]
3764 3765 diff1 = chanPos[chanC,i] - chanPos[side[0],i]
3765 3766 diff2 = chanPos[chanC,i] - chanPos[side[1],i]
3766 3767 if diff1<0:
3767 3768 chan2 = side[0]
3768 3769 d2 = numpy.abs(diff1)
3769 3770 chan1 = side[1]
3770 3771 d1 = numpy.abs(diff2)
3771 3772 else:
3772 3773 chan2 = side[1]
3773 3774 d2 = numpy.abs(diff2)
3774 3775 chan1 = side[0]
3775 3776 d1 = numpy.abs(diff1)
3776 3777
3777 3778 if i==0:
3778 3779 chanCX = chanC
3779 3780 chan1X = chan1
3780 3781 chan2X = chan2
3781 3782 distances[0:2] = numpy.array([d1,d2])
3782 3783 else:
3783 3784 chanCY = chanC
3784 3785 chan1Y = chan1
3785 3786 chan2Y = chan2
3786 3787 distances[2:4] = numpy.array([d1,d2])
3787 3788 # axisXsides = numpy.reshape(axisX[ix,:],4)
3788 3789 #
3789 3790 # channelCentX = int(numpy.intersect1d(pairX[0,:], pairX[1,:])[0])
3790 3791 # channelCentY = int(numpy.intersect1d(pairY[0,:], pairY[1,:])[0])
3791 3792 #
3792 3793 # ind25X = numpy.where(pairX[0,:] != channelCentX)[0][0]
3793 3794 # ind20X = numpy.where(pairX[1,:] != channelCentX)[0][0]
3794 3795 # channel25X = int(pairX[0,ind25X])
3795 3796 # channel20X = int(pairX[1,ind20X])
3796 3797 # ind25Y = numpy.where(pairY[0,:] != channelCentY)[0][0]
3797 3798 # ind20Y = numpy.where(pairY[1,:] != channelCentY)[0][0]
3798 3799 # channel25Y = int(pairY[0,ind25Y])
3799 3800 # channel20Y = int(pairY[1,ind20Y])
3800 3801
3801 3802 # pairslist = [(channelCentX, channel25X),(channelCentX, channel20X),(channelCentY,channel25Y),(channelCentY, channel20Y)]
3802 3803 pairslist = [(chanCX, chan1X),(chanCX, chan2X),(chanCY,chan1Y),(chanCY, chan2Y)]
3803 3804
3804 3805 return pairslist, distances
3805 3806 # def __getAOA(self, phases, pairsList, error, AOAthresh, azimuth):
3806 3807 #
3807 3808 # arrayAOA = numpy.zeros((phases.shape[0],3))
3808 3809 # cosdir0, cosdir = self.__getDirectionCosines(phases, pairsList)
3809 3810 #
3810 3811 # arrayAOA[:,:2] = self.__calculateAOA(cosdir, azimuth)
3811 3812 # cosDirError = numpy.sum(numpy.abs(cosdir0 - cosdir), axis = 1)
3812 3813 # arrayAOA[:,2] = cosDirError
3813 3814 #
3814 3815 # azimuthAngle = arrayAOA[:,0]
3815 3816 # zenithAngle = arrayAOA[:,1]
3816 3817 #
3817 3818 # #Setting Error
3818 3819 # #Number 3: AOA not feasible
3819 3820 # indInvalid = numpy.where(numpy.logical_and((numpy.logical_or(numpy.isnan(zenithAngle), numpy.isnan(azimuthAngle))),error == 0))[0]
3820 3821 # error[indInvalid] = 3
3821 3822 # #Number 4: Large difference in AOAs obtained from different antenna baselines
3822 3823 # indInvalid = numpy.where(numpy.logical_and(cosDirError > AOAthresh,error == 0))[0]
3823 3824 # error[indInvalid] = 4
3824 3825 # return arrayAOA, error
3825 3826 #
3826 3827 # def __getDirectionCosines(self, arrayPhase, pairsList):
3827 3828 #
3828 3829 # #Initializing some variables
3829 3830 # ang_aux = numpy.array([-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8])*2*numpy.pi
3830 3831 # ang_aux = ang_aux.reshape(1,ang_aux.size)
3831 3832 #
3832 3833 # cosdir = numpy.zeros((arrayPhase.shape[0],2))
3833 3834 # cosdir0 = numpy.zeros((arrayPhase.shape[0],2))
3834 3835 #
3835 3836 #
3836 3837 # for i in range(2):
3837 3838 # #First Estimation
3838 3839 # phi0_aux = arrayPhase[:,pairsList[i][0]] + arrayPhase[:,pairsList[i][1]]
3839 3840 # #Dealias
3840 3841 # indcsi = numpy.where(phi0_aux > numpy.pi)
3841 3842 # phi0_aux[indcsi] -= 2*numpy.pi
3842 3843 # indcsi = numpy.where(phi0_aux < -numpy.pi)
3843 3844 # phi0_aux[indcsi] += 2*numpy.pi
3844 3845 # #Direction Cosine 0
3845 3846 # cosdir0[:,i] = -(phi0_aux)/(2*numpy.pi*0.5)
3846 3847 #
3847 3848 # #Most-Accurate Second Estimation
3848 3849 # phi1_aux = arrayPhase[:,pairsList[i][0]] - arrayPhase[:,pairsList[i][1]]
3849 3850 # phi1_aux = phi1_aux.reshape(phi1_aux.size,1)
3850 3851 # #Direction Cosine 1
3851 3852 # cosdir1 = -(phi1_aux + ang_aux)/(2*numpy.pi*4.5)
3852 3853 #
3853 3854 # #Searching the correct Direction Cosine
3854 3855 # cosdir0_aux = cosdir0[:,i]
3855 3856 # cosdir0_aux = cosdir0_aux.reshape(cosdir0_aux.size,1)
3856 3857 # #Minimum Distance
3857 3858 # cosDiff = (cosdir1 - cosdir0_aux)**2
3858 3859 # indcos = cosDiff.argmin(axis = 1)
3859 3860 # #Saving Value obtained
3860 3861 # cosdir[:,i] = cosdir1[numpy.arange(len(indcos)),indcos]
3861 3862 #
3862 3863 # return cosdir0, cosdir
3863 3864 #
3864 3865 # def __calculateAOA(self, cosdir, azimuth):
3865 3866 # cosdirX = cosdir[:,0]
3866 3867 # cosdirY = cosdir[:,1]
3867 3868 #
3868 3869 # zenithAngle = numpy.arccos(numpy.sqrt(1 - cosdirX**2 - cosdirY**2))*180/numpy.pi
3869 3870 # azimuthAngle = numpy.arctan2(cosdirX,cosdirY)*180/numpy.pi + azimuth #0 deg north, 90 deg east
3870 3871 # angles = numpy.vstack((azimuthAngle, zenithAngle)).transpose()
3871 3872 #
3872 3873 # return angles
3873 3874 #
3874 3875 # def __getHeights(self, Ranges, zenith, error, minHeight, maxHeight):
3875 3876 #
3876 3877 # Ramb = 375 #Ramb = c/(2*PRF)
3877 3878 # Re = 6371 #Earth Radius
3878 3879 # heights = numpy.zeros(Ranges.shape)
3879 3880 #
3880 3881 # R_aux = numpy.array([0,1,2])*Ramb
3881 3882 # R_aux = R_aux.reshape(1,R_aux.size)
3882 3883 #
3883 3884 # Ranges = Ranges.reshape(Ranges.size,1)
3884 3885 #
3885 3886 # Ri = Ranges + R_aux
3886 3887 # hi = numpy.sqrt(Re**2 + Ri**2 + (2*Re*numpy.cos(zenith*numpy.pi/180)*Ri.transpose()).transpose()) - Re
3887 3888 #
3888 3889 # #Check if there is a height between 70 and 110 km
3889 3890 # h_bool = numpy.sum(numpy.logical_and(hi > minHeight, hi < maxHeight), axis = 1)
3890 3891 # ind_h = numpy.where(h_bool == 1)[0]
3891 3892 #
3892 3893 # hCorr = hi[ind_h, :]
3893 3894 # ind_hCorr = numpy.where(numpy.logical_and(hi > minHeight, hi < maxHeight))
3894 3895 #
3895 3896 # hCorr = hi[ind_hCorr]
3896 3897 # heights[ind_h] = hCorr
3897 3898 #
3898 3899 # #Setting Error
3899 3900 # #Number 13: Height unresolvable echo: not valid height within 70 to 110 km
3900 3901 # #Number 14: Height ambiguous echo: more than one possible height within 70 to 110 km
3901 3902 #
3902 3903 # indInvalid2 = numpy.where(numpy.logical_and(h_bool > 1, error == 0))[0]
3903 3904 # error[indInvalid2] = 14
3904 3905 # indInvalid1 = numpy.where(numpy.logical_and(h_bool == 0, error == 0))[0]
3905 3906 # error[indInvalid1] = 13
3906 3907 #
3907 3908 # return heights, error
3908 3909
3909 3910
3910 3911 class WeatherRadar(Operation):
3911 3912 '''
3912 3913 Function that implements Weather Radar operations.
3913 3914 Input:
3914 3915 Output:
3915 3916 Parameters affected:
3916 3917 '''
3917 3918 isConfig = False
3918 3919 variableList = None
3919 3920
3920 3921 def __init__(self):
3921 3922 Operation.__init__(self)
3922 3923
3923 3924 def setup(self,dataOut,variableList= None,Pt=0,Gt=0,Gr=0,lambda_=0, aL=0,
3924 3925 tauW= 0,thetaT=0,thetaR=0,Km =0):
3925 3926 self.nCh = dataOut.nChannels
3926 3927 self.nHeis = dataOut.nHeights
3927 3928 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
3928 3929 self.Range = numpy.arange(dataOut.nHeights)*deltaHeight + dataOut.heightList[0]
3929 3930 self.Range = self.Range.reshape(1,self.nHeis)
3930 3931 self.Range = numpy.tile(self.Range,[self.nCh,1])
3931 3932 '''----------- 1. Radar constant ----------'''
3932 3933 self.Pt = Pt
3933 3934 self.Gt = Gt
3934 3935 self.Gr = Gr
3935 3936 self.lambda_ = lambda_
3936 3937 self.aL = aL
3937 3938 self.tauW = tauW
3938 3939 self.thetaT = thetaT
3939 3940 self.thetaR = thetaR
3940 3941 self.Km = Km
3941 3942 Numerator = ((4*numpy.pi)**3 * aL**2 * 16 *numpy.log(2))
3942 3943 Denominator = (Pt * Gt * Gr * lambda_**2 * SPEED_OF_LIGHT * tauW * numpy.pi*thetaT*thetaR)
3943 3944 self.RadarConstant = Numerator/Denominator
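# Radar constant from the weather-radar equation:
# C = (4*pi)^3 * aL^2 * 16*ln(2) / (Pt*Gt*Gr*lambda_^2*c*tauW*pi*thetaT*thetaR),
# so that the received power can later be converted to reflectivity via eta = C*Pr*R^2.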
3944 3945 self.variableList= variableList
3945 3946
3946 3947 def setMoments(self,dataOut,i):
3947 3948
3948 3949 type = dataOut.inputUnit
3949 3950 nCh = dataOut.nChannels
3950 3951 nHeis = dataOut.nHeights
3951 3952 data_param = numpy.zeros((nCh,4,nHeis))
3952 3953 if type == "Voltage":
3953 3954 factor = dataOut.normFactor
3954 3955 data_param[:,0,:] = dataOut.dataPP_POW/(factor)
3955 3956 data_param[:,1,:] = dataOut.dataPP_DOP
3956 3957 data_param[:,2,:] = dataOut.dataPP_WIDTH
3957 3958 data_param[:,3,:] = dataOut.dataPP_SNR
3958 3959 if type == "Spectra":
3959 3960 data_param[:,0,:] = dataOut.data_POW
3960 3961 data_param[:,1,:] = dataOut.data_DOP
3961 3962 data_param[:,2,:] = dataOut.data_WIDTH
3962 3963 data_param[:,3,:] = dataOut.data_SNR
3963 3964
3964 3965 return data_param[:,i,:]
3965 3966
3966 3967 def getCoeficienteCorrelacionROhv_R(self,dataOut):
3967 3968 type = dataOut.inputUnit
3968 3969 nHeis = dataOut.nHeights
3969 3970 data_RhoHV_R = numpy.zeros((nHeis))
3970 3971 if type == "Voltage":
3971 3972 powa = dataOut.dataPP_POWER[0]
3972 3973 powb = dataOut.dataPP_POWER[1]
3973 3974 ccf = dataOut.dataPP_CCF
3974 3975 avgcoherenceComplex = ccf / numpy.sqrt(powa * powb)
3975 3976 data_RhoHV_R = numpy.abs(avgcoherenceComplex)
3976 3977 if type == "Spectra":
3977 3978 data_RhoHV_R = dataOut.getCoherence()
3978 3979
3979 3980 return data_RhoHV_R
3980 3981
3981 3982 def getFasediferencialPhiD_P(self,dataOut,phase= True):
3982 3983 type = dataOut.inputUnit
3983 3984 nHeis = dataOut.nHeights
3984 3985 data_PhiD_P = numpy.zeros((nHeis))
3985 3986 if type == "Voltage":
3986 3987 powa = dataOut.dataPP_POWER[0]
3987 3988 powb = dataOut.dataPP_POWER[1]
3988 3989 ccf = dataOut.dataPP_CCF
3989 3990 avgcoherenceComplex = ccf / numpy.sqrt(powa * powb)
3990 3991 if phase:
3991 3992 data_PhiD_P = numpy.arctan2(avgcoherenceComplex.imag,
3992 3993 avgcoherenceComplex.real) * 180 / numpy.pi
3993 3994 if type == "Spectra":
3994 3995 data_PhiD_P = dataOut.getCoherence(phase = phase)
3995 3996
3996 3997 return data_PhiD_P
3997 3998
3998 3999 def getReflectividad_D(self,dataOut):
3999 4000 '''----------------------------- Radar power (signal S) -----------------------------'''
4000 4001
4001 4002 Pr = self.setMoments(dataOut,0)
4002 4003
4003 4004 '''----------- 2. Radar reflectivity and reflectivity factor ------'''
4004 4005 self.n_radar = numpy.zeros((self.nCh,self.nHeis))
4005 4006 self.Z_radar = numpy.zeros((self.nCh,self.nHeis))
4006 4007 for R in range(self.nHeis):
4007 4008 self.n_radar[:,R] = self.RadarConstant*Pr[:,R]* (self.Range[:,R])**2
4008 4009
4009 4010 self.Z_radar[:,R] = self.n_radar[:,R]* self.lambda_**4/( numpy.pi**5 * self.Km**2)
4010 4011
4011 4012 '''----------- Equivalent reflectivity factor (lambda_ < 10 cm, e.g. lambda_ = 3.2 cm) -------'''
4012 4013 Zeh = self.Z_radar
4013 4014 dBZeh = 10*numpy.log10(Zeh)
4014 4015 Zdb_D = dBZeh[0] - dBZeh[1]
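# Zdb_D is the differential reflectivity (ZDR) in dB between the two channels,
# assuming channel 0 and channel 1 carry the H and V polarizations respectively.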
4015 4016 return Zdb_D
4016 4017
4017 4018 def getRadialVelocity_V(self,dataOut):
4018 4019 velRadial_V = self.setMoments(dataOut,1)
4019 4020 return velRadial_V
4020 4021
4021 4022 def getAnchoEspectral_W(self,dataOut):
4022 4023 Sigmav_W = self.setMoments(dataOut,2)
4023 4024 return Sigmav_W
4024 4025
4025 4026
4026 4027 def run(self,dataOut,variableList=None,Pt=25,Gt=200.0,Gr=50.0,lambda_=0.32, aL=2.5118,
4027 4028 tauW= 4.0e-6,thetaT=0.165,thetaR=0.367,Km =0.93):
4028 4029
4029 4030 if not self.isConfig:
4030 4031 self.setup(dataOut= dataOut,variableList=variableList,Pt=Pt,Gt=Gt,Gr=Gr,lambda_=lambda_, aL=aL,
4031 4032 tauW= tauW,thetaT=thetaT,thetaR=thetaR,Km =Km)
4032 4033 self.isConfig = True
4033 4034
4034 4035 for i in range(len(self.variableList)):
4035 4036 if self.variableList[i]=='ReflectividadDiferencial':
4036 4037 dataOut.Zdb_D =self.getReflectividad_D(dataOut=dataOut)
4037 4038 if self.variableList[i]=='FaseDiferencial':
4038 4039 dataOut.PhiD_P =self.getFasediferencialPhiD_P(dataOut=dataOut, phase=True)
4039 4040 if self.variableList[i] == "CoeficienteCorrelacion":
4040 4041 dataOut.RhoHV_R = self.getCoeficienteCorrelacionROhv_R(dataOut)
4041 4042 if self.variableList[i] =="VelocidadRadial":
4042 4043 dataOut.velRadial_V = self.getRadialVelocity_V(dataOut)
4043 4044 if self.variableList[i] =="AnchoEspectral":
4044 4045 dataOut.Sigmav_W = self.getAnchoEspectral_W(dataOut)
4045 4046 return dataOut
4046 4047
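# --- Illustrative sketch (not part of the processing chain) ---------------------
# Standalone version of the computation done by WeatherRadar.getReflectividad_D:
# received power is scaled by the radar constant and range squared to get the
# equivalent reflectivity factor, and the dB difference between two channels gives
# a differential reflectivity. Function name and arguments are illustrative only.
def _reflectivity_sketch(Pr, Range, radar_constant, lambda_, Km):
    import numpy
    eta = radar_constant * Pr * Range**2               # radar reflectivity per channel
    Ze = eta * lambda_**4 / (numpy.pi**5 * Km**2)      # equivalent reflectivity factor
    dBZe = 10 * numpy.log10(Ze)
    return dBZe[0] - dBZe[1]                           # channel 0 minus channel 1, in dB
# ---------------------------------------------------------------------------------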
4047 4048 class PedestalInformation(Operation):
4048 4049
4049 4050 def __init__(self):
4050 4051 Operation.__init__(self)
4051 4052 self.filename = False
4052 4053 self.delay = 30
4053 4054 self.nTries = 3
4054 4055
4055 4056 def find_file(self, timestamp):
4056 4057
4057 4058 dt = datetime.datetime.utcfromtimestamp(timestamp)
4058 4059 path = os.path.join(self.path, dt.strftime('%Y-%m-%dT%H-00-00'))
4059 4060
4060 4061 if not os.path.exists(path):
4061 4062 return [] # empty list keeps the caller's `if not filelist` check working
4062 4063 fileList = glob.glob(os.path.join(path, '*.h5'))
4063 4064 fileList.sort()
4064 4065 return fileList
4065 4066
4066 4067 def find_next_file(self):
4067 4068
4068 4069 while True:
4069 4070 if self.utctime < self.utcfile:
4070 4071 self.flagNoData = True
4071 4072 break
4072 4073 self.flagNoData = False
4073 4074 file_size = len(self.fp['Data']['utc'])
4074 4075 if self.utctime < self.utcfile+file_size*self.interval:
4075 4076 break
4076 4077 dt = datetime.datetime.utcfromtimestamp(self.utcfile)
4077 4078 if dt.second > 0:
4078 4079 self.utcfile -= dt.second
4079 4080 self.utcfile += self.samples*self.interval
4080 4081 dt = datetime.datetime.utcfromtimestamp(self.utctime)
4081 4082 path = os.path.join(self.path, dt.strftime('%Y-%m-%dT%H-00-00'))
4082 4083 self.filename = os.path.join(path, 'pos@{}.000.h5'.format(int(self.utcfile)))
4083
4084
4084 4085 for n in range(self.nTries):
4085 4086 ok = False
4086 4087 try:
4087 4088 if not os.path.exists(self.filename):
4088 4089 log.warning('Waiting {}s for position files...'.format(self.delay), self.name)
4089 4090 time.sleep(self.delay)
4090 4091 continue
4091 4092 self.fp.close()
4092 4093 self.fp = h5py.File(self.filename, 'r')
4093 4094 log.log('Opening file: {}'.format(self.filename), self.name)
4094 4095 ok = True
4095 4096 break
4096 4097 except:
4097 4098 log.warning('Waiting {}s for position file to be ready...'.format(self.delay), self.name)
4098 4099 time.sleep(self.delay)
4099 4100 continue
4100
4101
4101 4102 if not ok:
4102 4103 log.error('No new position files found in {}'.format(path))
4103 4104 raise IOError('No new position files found in {}'.format(path))
4104 4105
4105 4106
4106 4107 def get_values(self):
4107 4108
4108 4109 if self.flagNoData:
4109 4110 return numpy.nan, numpy.nan
4110 4111 else:
4111 4112 index = int((self.utctime-self.utcfile)/self.interval)
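# The position file holds one sample every self.interval seconds starting at
# self.utcfile, so this index selects the azimuth/elevation recorded at (or just
# before) the current radar timestamp.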
4112 4113 return self.fp['Data']['azi_pos'][index], self.fp['Data']['ele_pos'][index]
4113 4114
4114 4115 def setup(self, dataOut, path, conf, samples, interval, az_offset):
4115 4116
4116 4117 self.path = path
4117 4118 self.conf = conf
4118 4119 self.samples = samples
4119 4120 self.interval = interval
4120 4121 filelist = self.find_file(dataOut.utctime)
4121 4122
4122 4123 if not filelist:
4123 4124 log.error('No position files found in {}'.format(path), self.name)
4124 4125 raise IOError('No position files found in {}'.format(path))
4125 4126 else:
4126 4127 self.filename = filelist[0]
4127 4128 self.utcfile = int(self.filename.split('/')[-1][4:14])
4128 4129 log.log('Opening file: {}'.format(self.filename), self.name)
4129 4130 self.fp = h5py.File(self.filename, 'r')
4130 4131
4131 4132 def run(self, dataOut, path, conf=None, samples=1500, interval=0.04, az_offset=0, time_offset=0):
4132 4133
4133 4134 if not self.isConfig:
4134 4135 self.setup(dataOut, path, conf, samples, interval, az_offset)
4135 4136 self.isConfig = True
4136 4137
4137 4138 self.utctime = dataOut.utctime + time_offset
4138 4139
4139 4140 self.find_next_file()
4140 4141
4141 4142 az, el = self.get_values()
4142 4143 dataOut.flagNoData = False
4143 4144
4144 4145 if numpy.isnan(az) or numpy.isnan(el) :
4145 4146 dataOut.flagNoData = True
4146 4147 return dataOut
4147 4148
4148 4149 dataOut.azimuth = az - az_offset
4149 4150 if dataOut.azimuth < 0:
4150 4151 dataOut.azimuth += 360
4151 4152 dataOut.elevation = el
4152 4153
4153 4154 return dataOut
4154 4155
4155 4156 class Block360(Operation):
4156 4157 '''
4157 4158 '''
4158 4159 isConfig = False
4159 4160 __profIndex = 0
4160 4161 __initime = None
4161 4162 __lastdatatime = None
4162 4163 __buffer = None
4163 4164 __dataReady = False
4164 4165 n = None
4165 4166 __nch = 0
4166 4167 __nHeis = 0
4167 4168 index = 0
4168 4169 mode = 0
4169 4170
4170 4171 def __init__(self,**kwargs):
4171 4172 Operation.__init__(self,**kwargs)
4172 4173
4173 4174 def setup(self, dataOut, n = None, mode = None):
4174 4175 '''
4175 4176 n = number of input PRFs (profiles accumulated per block)
4176 4177 '''
4177 4178 self.__initime = None
4178 4179 self.__lastdatatime = 0
4179 4180 self.__dataReady = False
4180 4181 self.__buffer = 0
4181 4182 self.__buffer_1D = 0
4182 4183 self.__profIndex = 0
4183 4184 self.index = 0
4184 4185 self.__nch = dataOut.nChannels
4185 4186 self.__nHeis = dataOut.nHeights
4186 4187 ##print("ELVALOR DE n es:", n)
4187 4188 if n == None:
4188 4189 raise ValueError("n should be specified.")
4189 4190
4190 4191 if mode == None:
4191 4192 raise ValueError("mode should be specified.")
4192 4193
4193 4194 if n != None:
4194 4195 if n<1:
4195 4196 print("n should be greater than 2")
4196 4197 raise ValueError("n should be greater than 2")
4197 4198
4198 4199 self.n = n
4199 4200 self.mode = mode
4200 4201 #print("self.mode",self.mode)
4201 4202 #print("nHeights")
4202 4203 self.__buffer = numpy.zeros(( dataOut.nChannels,n, dataOut.nHeights))
4203 4204 self.__buffer2 = numpy.zeros(n)
4204 4205 self.__buffer3 = numpy.zeros(n)
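# __buffer accumulates n profiles of shape (nChannels, nHeights); __buffer2 and
# __buffer3 store the matching azimuth and elevation angles for each stored profile.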
4205 4206
4206 4207
4207 4208
4208 4209
4209 4210 def putData(self,data,mode):
4210 4211 '''
4211 4212 Add a profile to the __buffer and increase __profIndex by one
4212 4213 '''
4213 4214 #print("line 4049",data.dataPP_POW.shape,data.dataPP_POW[:10])
4214 4215 #print("line 4049",data.azimuth.shape,data.azimuth)
4215 4216 if self.mode==0:
4216 4217 self.__buffer[:,self.__profIndex,:]= data.dataPP_POWER # first moment (power)
4217 4218 if self.mode==1:
4218 4219 self.__buffer[:,self.__profIndex,:]= data.data_pow
4219 4220 #print("me casi",self.index,data.azimuth[self.index])
4220 4221 #print(self.__profIndex, self.index , data.azimuth[self.index] )
4221 4222 #print("magic",data.profileIndex)
4222 4223 #print(data.azimuth[self.index])
4223 4224 #print("index",self.index)
4224 4225
4225 4226 #####self.__buffer2[self.__profIndex] = data.azimuth[self.index]
4226 4227 self.__buffer2[self.__profIndex] = data.azimuth
4227 4228 self.__buffer3[self.__profIndex] = data.elevation
4228 4229 #print("q pasa")
4229 4230 #####self.index+=1
4230 4231 #print("index",self.index,data.azimuth[:10])
4231 4232 self.__profIndex += 1
4232 4233 return
4233 4234
4234 4235 def pushData(self,data):
4235 4236 '''
4236 4237 Return the accumulated block, the number of profiles used and the matching azimuth/elevation arrays
4237 4238 Affected : self.__profIndex
4238 4239 '''
4239 4240 #print("pushData")
4240 4241
4241 4242 data_360 = self.__buffer
4242 4243 data_p = self.__buffer2
4243 4244 data_e = self.__buffer3
4244 4245 n = self.__profIndex
4245 4246
4246 4247 self.__buffer = numpy.zeros((self.__nch, self.n,self.__nHeis))
4247 4248 self.__buffer2 = numpy.zeros(self.n)
4248 4249 self.__buffer3 = numpy.zeros(self.n)
4249 4250 self.__profIndex = 0
4250 4251 #print("pushData")
4251 4252 return data_360,n,data_p,data_e
4252 4253
4253 4254
4254 4255 def byProfiles(self,dataOut):
4255 4256
4256 4257 self.__dataReady = False
4257 4258 data_360 = None
4258 4259 data_p = None
4259 4260 data_e = None
4260 4261 #print("dataOu",dataOut.dataPP_POW)
4261 4262 self.putData(data=dataOut,mode = self.mode)
4262 4263 ##### print("profIndex",self.__profIndex)
4263 4264 if self.__profIndex == self.n:
4264 4265 data_360,n,data_p,data_e = self.pushData(data=dataOut)
4265 4266 self.__dataReady = True
4266 4267
4267 4268 return data_360,data_p,data_e
4268 4269
4269 4270
4270 4271 def blockOp(self, dataOut, datatime= None):
4271 4272 if self.__initime == None:
4272 4273 self.__initime = datatime
4273 4274 data_360,data_p,data_e = self.byProfiles(dataOut)
4274 4275 self.__lastdatatime = datatime
4275 4276
4276 4277 if data_360 is None:
4277 4278 return None, None,None,None
4278 4279
4279 4280
4280 4281 avgdatatime = self.__initime
4281 4282 if self.n==1:
4282 4283 avgdatatime = datatime
4283 4284 deltatime = datatime - self.__lastdatatime
4284 4285 self.__initime = datatime
4285 4286 #print(data_360.shape,avgdatatime,data_p.shape)
4286 4287 return data_360,avgdatatime,data_p,data_e
4287 4288
4288 4289 def run(self, dataOut,n = None,mode=None,**kwargs):
4289 4290 #print("BLOCK 360 HERE WE GO MOMENTOS")
4290 4291 print("Block 360")
4291 4292 #exit(1)
4292 4293 if not self.isConfig:
4293 4294 self.setup(dataOut = dataOut, n = n ,mode= mode ,**kwargs)
4294 4295 ####self.index = 0
4295 4296 #print("comova",self.isConfig)
4296 4297 self.isConfig = True
4297 4298 ####if self.index==dataOut.azimuth.shape[0]:
4298 4299 #### self.index=0
4299 4300 data_360, avgdatatime,data_p,data_e = self.blockOp(dataOut, dataOut.utctime)
4300 4301 dataOut.flagNoData = True
4301 4302
4302 4303 if self.__dataReady:
4303 4304 dataOut.data_360 = data_360 # S
4304 4305 #print("DATA 360")
4305 4306 #print(dataOut.data_360)
4306 4307 #print("---------------------------------------------------------------------------------")
4307 4308 print("---------------------------DATAREADY---------------------------------------------")
4308 4309 #print("---------------------------------------------------------------------------------")
4309 4310 #print("data_360",dataOut.data_360.shape)
4310 4311 dataOut.data_azi = data_p
4311 4312 dataOut.data_ele = data_e
4312 4313 ###print("azi: ",dataOut.data_azi)
4313 4314 #print("ele: ",dataOut.data_ele)
4314 4315 #print("jroproc_parameters",data_p[0],data_p[-1])#,data_360.shape,avgdatatime)
4315 4316 dataOut.utctime = avgdatatime
4316 4317 dataOut.flagNoData = False
4317 4318 return dataOut
4318 4319
4319 4320 class Block360_vRF(Operation):
4320 4321 '''
4321 4322 '''
4322 4323 isConfig = False
4323 4324 __profIndex = 0
4324 4325 __initime = None
4325 4326 __lastdatatime = None
4326 4327 __buffer = None
4327 4328 __dataReady = False
4328 4329 n = None
4329 4330 __nch = 0
4330 4331 __nHeis = 0
4331 4332 index = 0
4332 4333 mode = 0
4333 4334
4334 4335 def __init__(self,**kwargs):
4335 4336 Operation.__init__(self,**kwargs)
4336 4337
4337 4338 def setup(self, dataOut, n = None, mode = None):
4338 4339 '''
4339 4340 n = number of input PRFs (profiles accumulated per block)
4340 4341 '''
4341 4342 self.__initime = None
4342 4343 self.__lastdatatime = 0
4343 4344 self.__dataReady = False
4344 4345 self.__buffer = 0
4345 4346 self.__buffer_1D = 0
4346 4347 self.__profIndex = 0
4347 4348 self.index = 0
4348 4349 self.__nch = dataOut.nChannels
4349 4350 self.__nHeis = dataOut.nHeights
4350 4351 ##print("ELVALOR DE n es:", n)
4351 4352 if n == None:
4352 4353 raise ValueError("n should be specified.")
4353 4354
4354 4355 if mode == None:
4355 4356 raise ValueError("mode should be specified.")
4356 4357
4357 4358 if n != None:
4358 4359 if n<1:
4359 4360 print("n should be greater than 2")
4360 4361 raise ValueError("n should be greater than 2")
4361 4362
4362 4363 self.n = n
4363 4364 self.mode = mode
4364 4365 #print("self.mode",self.mode)
4365 4366 #print("nHeights")
4366 4367 self.__buffer = numpy.zeros(( dataOut.nChannels,n, dataOut.nHeights))
4367 4368 self.__buffer2 = numpy.zeros(n)
4368 4369 self.__buffer3 = numpy.zeros(n)
4369 4370
4370 4371
4371 4372
4372 4373
4373 4374 def putData(self,data,mode):
4374 4375 '''
4375 4376 Add a profile to the __buffer and increase __profIndex by one
4376 4377 '''
4377 4378 #print("line 4049",data.dataPP_POW.shape,data.dataPP_POW[:10])
4378 4379 #print("line 4049",data.azimuth.shape,data.azimuth)
4379 4380 if self.mode==0:
4380 4381 self.__buffer[:,self.__profIndex,:]= data.dataPP_POWER # first moment (power)
4381 4382 if self.mode==1:
4382 4383 self.__buffer[:,self.__profIndex,:]= data.data_pow
4383 4384 #print("me casi",self.index,data.azimuth[self.index])
4384 4385 #print(self.__profIndex, self.index , data.azimuth[self.index] )
4385 4386 #print("magic",data.profileIndex)
4386 4387 #print(data.azimuth[self.index])
4387 4388 #print("index",self.index)
4388 4389
4389 4390 #####self.__buffer2[self.__profIndex] = data.azimuth[self.index]
4390 4391 self.__buffer2[self.__profIndex] = data.azimuth
4391 4392 self.__buffer3[self.__profIndex] = data.elevation
4392 4393 #print("q pasa")
4393 4394 #####self.index+=1
4394 4395 #print("index",self.index,data.azimuth[:10])
4395 4396 self.__profIndex += 1
4396 4397 return
4397 4398
4398 4399 def pushData(self,data):
4399 4400 '''
4400 4401 Return the accumulated block, the number of profiles used and the matching azimuth/elevation arrays
4401 4402 Affected : self.__profIndex
4402 4403 '''
4403 4404 #print("pushData")
4404 4405
4405 4406 data_360 = self.__buffer
4406 4407 data_p = self.__buffer2
4407 4408 data_e = self.__buffer3
4408 4409 n = self.__profIndex
4409 4410
4410 4411 self.__buffer = numpy.zeros((self.__nch, self.n,self.__nHeis))
4411 4412 self.__buffer2 = numpy.zeros(self.n)
4412 4413 self.__buffer3 = numpy.zeros(self.n)
4413 4414 self.__profIndex = 0
4414 4415 #print("pushData")
4415 4416 return data_360,n,data_p,data_e
4416 4417
4417 4418
4418 4419 def byProfiles(self,dataOut):
4419 4420
4420 4421 self.__dataReady = False
4421 4422 data_360 = None
4422 4423 data_p = None
4423 4424 data_e = None
4424 4425 #print("dataOu",dataOut.dataPP_POW)
4425 4426 self.putData(data=dataOut,mode = self.mode)
4426 4427 ##### print("profIndex",self.__profIndex)
4427 4428 if self.__profIndex == self.n:
4428 4429 data_360,n,data_p,data_e = self.pushData(data=dataOut)
4429 4430 self.__dataReady = True
4430 4431
4431 4432 return data_360,data_p,data_e
4432 4433
4433 4434
4434 4435 def blockOp(self, dataOut, datatime= None):
4435 4436 if self.__initime == None:
4436 4437 self.__initime = datatime
4437 4438 data_360,data_p,data_e = self.byProfiles(dataOut)
4438 4439 self.__lastdatatime = datatime
4439 4440
4440 4441 if data_360 is None:
4441 4442 return None, None,None,None
4442 4443
4443 4444
4444 4445 avgdatatime = self.__initime
4445 4446 if self.n==1:
4446 4447 avgdatatime = datatime
4447 4448 deltatime = datatime - self.__lastdatatime
4448 4449 self.__initime = datatime
4449 4450 #print(data_360.shape,avgdatatime,data_p.shape)
4450 4451 return data_360,avgdatatime,data_p,data_e
4451 4452
4452 4453 def checkcase(self,data_ele):
4453 4454 start = data_ele[0]
4454 4455 end = data_ele[-1]
4455 4456 diff_angle = (end-start)
4456 4457 len_ang=len(data_ele)
4457 4458 print("start",start)
4458 4459 print("end",end)
4459 4460 print("number",diff_angle)
4460 4461
4461 4462 print("len_ang",len_ang)
4462 4463
4463 4464 aux = (data_ele<0).any(axis=0)
4464 4465
4465 4466 #exit(1)
4466 4467 if diff_angle<0 and aux!=1: #Descending
4467 4468 return 1
4468 4469 elif diff_angle<0 and aux==1: #Descending with negative angles
4469 4470 return 0
4470 4471 elif diff_angle == 0: # This case happens when the angle reaches the max_angle if n = 2
4471 4472 self.flagEraseFirstData = 1
4472 4473 print("TODO this case")
4473 4474 exit(1)
4474 4475 elif diff_angle>0: #Ascending
4475 4476 return 0
4476 4477
4477 4478 def run(self, dataOut,n = None,mode=None,**kwargs):
4478 4479 #print("BLOCK 360 HERE WE GO MOMENTOS")
4479 4480 print("Block 360")
4480 4481
4481 4482 #exit(1)
4482 4483 if not self.isConfig:
4483 4484 if n == 1:
4484 4485 print("*******************Min Value is 2. Setting n = 2*******************")
4485 4486 n = 2
4486 4487 #exit(1)
4487 4488 print(n)
4488 4489 self.setup(dataOut = dataOut, n = n ,mode= mode ,**kwargs)
4489 4490 ####self.index = 0
4490 4491 #print("comova",self.isConfig)
4491 4492 self.isConfig = True
4492 4493 ####if self.index==dataOut.azimuth.shape[0]:
4493 4494 #### self.index=0
4494 4495 data_360, avgdatatime,data_p,data_e = self.blockOp(dataOut, dataOut.utctime)
4495 4496 dataOut.flagNoData = True
4496 4497
4497 4498 if self.__dataReady:
4498 4499 dataOut.data_360 = data_360 # S
4499 4500 #print("DATA 360")
4500 4501 #print(dataOut.data_360)
4501 4502 #print("---------------------------------------------------------------------------------")
4502 4503 print("---------------------------DATAREADY---------------------------------------------")
4503 4504 #print("---------------------------------------------------------------------------------")
4504 4505 #print("data_360",dataOut.data_360.shape)
4505 4506 dataOut.data_azi = data_p
4506 4507 dataOut.data_ele = data_e
4507 4508 ###print("azi: ",dataOut.data_azi)
4508 4509 #print("ele: ",dataOut.data_ele)
4509 4510 #print("jroproc_parameters",data_p[0],data_p[-1])#,data_360.shape,avgdatatime)
4510 4511 dataOut.utctime = avgdatatime
4511 4512
4512 4513 dataOut.case_flag = self.checkcase(dataOut.data_ele)
4513 4514 if dataOut.case_flag: #If the antenna is descending, start plotting
4514 4515 print("INSIDE CASE FLAG BAJADA")
4515 4516 dataOut.flagNoData = False
4516 4517 else:
4517 4518 print("CASE SUBIDA")
4518 4519 dataOut.flagNoData = True
4519 4520
4520 4521 #dataOut.flagNoData = False
4521 4522 return dataOut
4522 4523
4523 4524 class Block360_vRF2(Operation):
4524 4525 '''
4525 4526 '''
4526 4527 isConfig = False
4527 4528 __profIndex = 0
4528 4529 __initime = None
4529 4530 __lastdatatime = None
4530 4531 __buffer = None
4531 4532 __dataReady = False
4532 4533 n = None
4533 4534 __nch = 0
4534 4535 __nHeis = 0
4535 4536 index = 0
4536 4537 mode = None
4537 4538
4538 4539 def __init__(self,**kwargs):
4539 4540 Operation.__init__(self,**kwargs)
4540 4541
4541 4542 def setup(self, dataOut, n = None, mode = None):
4542 4543 '''
4543 4544 n = number of input PRFs (profiles accumulated per block)
4544 4545 '''
4545 4546 self.__initime = None
4546 4547 self.__lastdatatime = 0
4547 4548 self.__dataReady = False
4548 4549 self.__buffer = 0
4549 4550 self.__buffer_1D = 0
4550 4551 #self.__profIndex = 0
4551 4552 self.index = 0
4552 4553 self.__nch = dataOut.nChannels
4553 4554 self.__nHeis = dataOut.nHeights
4554 4555
4555 4556 self.mode = mode
4556 4557 #print("self.mode",self.mode)
4557 4558 #print("nHeights")
4558 4559 self.__buffer = []
4559 4560 self.__buffer2 = []
4560 4561 self.__buffer3 = []
4561 4562 self.__buffer4 = []
4562 4563
4563 4564 def putData(self,data,mode):
4564 4565 '''
4565 4566 Add a profile to the __buffer and increase __profIndex by one
4566 4567 '''
4567 4568
4568 4569 if self.mode==0:
4569 4570 self.__buffer.append(data.dataPP_POWER) # first moment (power)
4570 4571 if self.mode==1:
4571 4572 self.__buffer.append(data.data_pow)
4572 4573
4573 4574 self.__buffer4.append(data.dataPP_DOP)
4574 4575
4575 4576 self.__buffer2.append(data.azimuth)
4576 4577 self.__buffer3.append(data.elevation)
4577 4578 self.__profIndex += 1
4578 4579
4579 4580 return numpy.array(self.__buffer3)
4580 4581
4581 4582 def pushData(self,data):
4582 4583 '''
4583 4584 Return the accumulated blocks, the number of profiles used and the matching azimuth/elevation arrays
4584 4585 Affected : self.__profIndex
4585 4586 '''
4586 4587
4587 4588 data_360_Power = numpy.array(self.__buffer).transpose(1,0,2)
4588 4589 data_360_Velocity = numpy.array(self.__buffer4).transpose(1,0,2)
4589 4590 data_p = numpy.array(self.__buffer2)
4590 4591 data_e = numpy.array(self.__buffer3)
4591 4592 n = self.__profIndex
4592 4593
4593 4594 self.__buffer = []
4594 4595 self.__buffer4 = []
4595 4596 self.__buffer2 = []
4596 4597 self.__buffer3 = []
4597 4598 self.__profIndex = 0
4598 4599 return data_360_Power,data_360_Velocity,n,data_p,data_e
4599 4600
4600 4601
4601 4602 def byProfiles(self,dataOut):
4602 4603
4603 4604 self.__dataReady = False
4604 4605 data_360_Power = []
4605 4606 data_360_Velocity = []
4606 4607 data_p = None
4607 4608 data_e = None
4608 4609
4609 4610 elevations = self.putData(data=dataOut,mode = self.mode)
4610 4611
4611 4612 if self.__profIndex > 1:
4612 4613 case_flag = self.checkcase(elevations)
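# checkcase returns 0 while the elevation is increasing (antenna going up). In that
# case only the two most recent samples are kept for comparison; once a buffered
# descent turns into an ascent, the whole descent block is flushed via pushData.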
4613 4614
4614 4615 if case_flag == 0: #Ascending
4615 4616
4616 4617 if len(self.__buffer) == 2: #While the antenna is still going up
4617 4618 #Drop the previous sample to free the buffer and compare the current sample with the next one
4618 4619 self.__buffer.pop(0) #Erase first data
4619 4620 self.__buffer2.pop(0)
4620 4621 self.__buffer3.pop(0)
4621 4622 self.__buffer4.pop(0)
4622 4623 self.__profIndex -= 1
4623 4624 else: #The antenna was going down and has started going up again
4624 4625 #Drop the last sample
4625 4626 self.__buffer.pop() #Erase last data
4626 4627 self.__buffer2.pop()
4627 4628 self.__buffer3.pop()
4628 4629 self.__buffer4.pop()
4629 4630 data_360_Power,data_360_Velocity,n,data_p,data_e = self.pushData(data=dataOut)
4630 4631
4631 4632 self.__dataReady = True
4632 4633
4633 4634 return data_360_Power,data_360_Velocity,data_p,data_e
4634 4635
4635 4636
4636 4637 def blockOp(self, dataOut, datatime= None):
4637 4638 if self.__initime == None:
4638 4639 self.__initime = datatime
4639 4640 data_360_Power,data_360_Velocity,data_p,data_e = self.byProfiles(dataOut)
4640 4641 self.__lastdatatime = datatime
4641 4642
4642 4643 avgdatatime = self.__initime
4643 4644 if self.n==1:
4644 4645 avgdatatime = datatime
4645 4646 deltatime = datatime - self.__lastdatatime
4646 4647 self.__initime = datatime
4647 4648 return data_360_Power,data_360_Velocity,avgdatatime,data_p,data_e
4648 4649
4649 4650 def checkcase(self,data_ele):
4650 4651 #print(data_ele)
4651 4652 start = data_ele[-2]
4652 4653 end = data_ele[-1]
4653 4654 diff_angle = (end-start)
4654 4655 len_ang=len(data_ele)
4655 4656
4656 4657 if diff_angle > 0: #Ascending
4657 4658 return 0
4658 4659
4659 4660 def run(self, dataOut,mode='Power',**kwargs):
4660 4661 #print("BLOCK 360 HERE WE GO MOMENTOS")
4661 4662 #print("Block 360")
4662 4663 dataOut.mode = mode
4663 4664
4664 4665 if not self.isConfig:
4665 4666 self.setup(dataOut = dataOut ,mode= mode ,**kwargs)
4666 4667 self.isConfig = True
4667 4668
4668 4669
4669 4670 data_360_Power, data_360_Velocity, avgdatatime,data_p,data_e = self.blockOp(dataOut, dataOut.utctime)
4670 4671
4671 4672
4672 4673 dataOut.flagNoData = True
4673 4674
4674 4675
4675 4676 if self.__dataReady:
4676 4677 dataOut.data_360_Power = data_360_Power # S
4677 4678 dataOut.data_360_Velocity = data_360_Velocity
4678 4679 dataOut.data_azi = data_p
4679 4680 dataOut.data_ele = data_e
4680 4681 dataOut.utctime = avgdatatime
4681 4682 dataOut.flagNoData = False
4682 4683
4683 4684 return dataOut
4684 4685
4685 4686 class Block360_vRF3(Operation):
4686 4687 '''
4687 4688 '''
4688 4689 isConfig = False
4689 4690 __profIndex = 0
4690 4691 __initime = None
4691 4692 __lastdatatime = None
4692 4693 __buffer = None
4693 4694 __dataReady = False
4694 4695 n = None
4695 4696 __nch = 0
4696 4697 __nHeis = 0
4697 4698 index = 0
4698 4699 mode = None
4699 4700
4700 4701 def __init__(self,**kwargs):
4701 4702 Operation.__init__(self,**kwargs)
4702 4703
4703 4704 def setup(self, dataOut, attr):
4704 4705 '''
4705 4706 attr = name of the dataOut attribute accumulated per block
4706 4707 '''
4707 4708 self.__initime = None
4708 4709 self.__lastdatatime = 0
4709 4710 self.__dataReady = False
4710 4711 self.__buffer = 0
4711 4712 self.__buffer_1D = 0
4712 4713 self.index = 0
4713 4714 self.__nch = dataOut.nChannels
4714 4715 self.__nHeis = dataOut.nHeights
4715 4716
4716 4717 self.attr = attr
4717 4718 #print("self.mode",self.mode)
4718 4719 #print("nHeights")
4719 4720 self.__buffer = []
4720 4721 self.__buffer2 = []
4721 4722 self.__buffer3 = []
4722 4723
4723 4724 def putData(self, data, attr):
4724 4725 '''
4725 4726 Add a profile to the __buffer and increase __profIndex by one
4726 4727 '''
4727 4728
4728 4729 self.__buffer.append(getattr(data, attr))
4729 4730 self.__buffer2.append(data.azimuth)
4730 4731 self.__buffer3.append(data.elevation)
4731 4732 self.__profIndex += 1
4732 4733
4733 4734 return numpy.array(self.__buffer3)
4734 4735
4735 4736 def pushData(self, data):
4736 4737 '''
4737 4738 Return the accumulated block, the number of profiles used and the matching azimuth/elevation arrays
4738 4739 Affected : self.__profIndex
4739 4740 '''
4740 4741
4741 4742 data_360 = numpy.array(self.__buffer).transpose(1, 0, 2)
4742 4743 data_p = numpy.array(self.__buffer2)
4743 4744 data_e = numpy.array(self.__buffer3)
4744 4745 n = self.__profIndex
4745 4746
4746 4747 self.__buffer = []
4747 4748 self.__buffer2 = []
4748 4749 self.__buffer3 = []
4749 4750 self.__profIndex = 0
4750 4751 return data_360, n, data_p, data_e
4751 4752
4752 4753
4753 4754 def byProfiles(self,dataOut):
4754 4755
4755 4756 self.__dataReady = False
4756 4757 data_360 = []
4757 4758 data_p = None
4758 4759 data_e = None
4759 4760
4760 4761 elevations = self.putData(data=dataOut, attr = self.attr)
4761 4762
4762 4763 if self.__profIndex > 1:
4763 4764 case_flag = self.checkcase(elevations)
4764 4765
4765 4766 if case_flag == 0: #Ascending
4766 4767
4767 4768 if len(self.__buffer) == 2: #While the antenna is still going up
4768 4769 #Drop the previous sample to free the buffer and compare the current sample with the next one
4769 4770 self.__buffer.pop(0) #Erase first data
4770 4771 self.__buffer2.pop(0)
4771 4772 self.__buffer3.pop(0)
4772 4773 self.__profIndex -= 1
4773 4774 else: #The antenna was going down and has started going up again
4774 4775 #Drop the last sample
4775 4776 self.__buffer.pop() #Erase last data
4776 4777 self.__buffer2.pop()
4777 4778 self.__buffer3.pop()
4778 4779 data_360, n, data_p, data_e = self.pushData(data=dataOut)
4779 4780
4780 4781 self.__dataReady = True
4781 4782
4782 4783 return data_360, data_p, data_e
4783 4784
4784 4785
4785 4786 def blockOp(self, dataOut, datatime= None):
4786 4787 if self.__initime == None:
4787 4788 self.__initime = datatime
4788 4789 data_360, data_p, data_e = self.byProfiles(dataOut)
4789 4790 self.__lastdatatime = datatime
4790 4791
4791 4792 avgdatatime = self.__initime
4792 4793 if self.n==1:
4793 4794 avgdatatime = datatime
4794 4795 deltatime = datatime - self.__lastdatatime
4795 4796 self.__initime = datatime
4796 4797 return data_360, avgdatatime, data_p, data_e
4797 4798
4798 4799 def checkcase(self, data_ele):
4799 4800
4800 4801 start = data_ele[-2]
4801 4802 end = data_ele[-1]
4802 4803 diff_angle = (end-start)
4803 4804 len_ang=len(data_ele)
4804 4805
4805 4806 if diff_angle > 0: #Ascending
4806 4807 return 0
4807 4808
4808 4809 def run(self, dataOut, attr_data='dataPP_POWER',**kwargs):
4809 4810
4810 4811 dataOut.attr_data = attr_data
4811 4812
4812 4813 if not self.isConfig:
4813 4814 self.setup(dataOut = dataOut, attr = attr_data ,**kwargs)
4814 4815 self.isConfig = True
4815 4816
4816 4817 data_360, avgdatatime, data_p, data_e = self.blockOp(dataOut, dataOut.utctime)
4817 4818
4818 4819 dataOut.flagNoData = True
4819 4820
4820 4821 if self.__dataReady:
4821 4822 setattr(dataOut, attr_data, data_360 )
4822 4823 dataOut.data_azi = data_p
4823 4824 dataOut.data_ele = data_e
4824 4825 dataOut.utctime = avgdatatime
4825 4826 dataOut.flagNoData = False
4826 4827
4827 4828 return dataOut
4828 4829
4829 4830 class Block360_vRF4(Operation):
4830 4831 '''
4831 4832 '''
4832 4833 isConfig = False
4833 4834 __profIndex = 0
4834 4835 __initime = None
4835 4836 __lastdatatime = None
4836 4837 __buffer = None
4837 4838 __dataReady = False
4838 4839 n = None
4839 4840 __nch = 0
4840 4841 __nHeis = 0
4841 4842 index = 0
4842 4843 mode = None
4843 4844
4844 4845 def __init__(self,**kwargs):
4845 4846 Operation.__init__(self,**kwargs)
4846 4847
4847 4848 def setup(self, dataOut, attr):
4848 4849 '''
4849 4850 attr = name of the dataOut attribute accumulated per block
4850 4851 '''
4851 4852 self.__initime = None
4852 4853 self.__lastdatatime = 0
4853 4854 self.__dataReady = False
4854 4855 self.__buffer = 0
4855 4856 self.__buffer_1D = 0
4856 4857 self.index = 0
4857 4858 self.__nch = dataOut.nChannels
4858 4859 self.__nHeis = dataOut.nHeights
4859 4860
4860 4861 self.attr = attr
4861 4862
4862 4863 self.__buffer = []
4863 4864 self.__buffer2 = []
4864 4865 self.__buffer3 = []
4865 4866
4866 4867 def putData(self, data, attr, flagMode):
4867 4868 '''
4868 4869 Add a profile to the __buffer and increase __profIndex by one
4869 4870 '''
4870 4871
4871 4872 self.__buffer.append(getattr(data, attr))
4872 4873 self.__buffer2.append(data.azimuth)
4873 4874 self.__buffer3.append(data.elevation)
4874 4875 self.__profIndex += 1
4875 4876
4876 4877 if flagMode == 1: #'AZI'
4877 4878 return numpy.array(self.__buffer2)
4878 4879 elif flagMode == 0: #'ELE'
4879 4880 return numpy.array(self.__buffer3)
4880 4881
4881 4882 def pushData(self, data,flagMode,case_flag):
4882 4883 '''
4883 4884 Return the accumulated block, the number of profiles used and the matching azimuth/elevation arrays
4884 4885 Affected : self.__profIndex
4885 4886 '''
4886 4887
4887 4888 data_360 = numpy.array(self.__buffer).transpose(1, 0, 2)
4888 4889 data_p = numpy.array(self.__buffer2)
4889 4890 data_e = numpy.array(self.__buffer3)
4890 4891 n = self.__profIndex
4891 4892
4892 4893 self.__buffer = []
4893 4894 self.__buffer2 = []
4894 4895 self.__buffer3 = []
4895 4896 self.__profIndex = 0
4896 4897
4897 4898 if flagMode == 1 and case_flag == 0: #'AZI' and a full turn was completed
4898 4899 self.putData(data=data, attr = self.attr, flagMode=flagMode)
4899 4900
4900 4901 return data_360, n, data_p, data_e
4901 4902
4902 4903
4903 4904 def byProfiles(self,dataOut,flagMode):
4904 4905
4905 4906 self.__dataReady = False
4906 4907 data_360 = []
4907 4908 data_p = None
4908 4909 data_e = None
4909 4910
4910 4911 angles = self.putData(data=dataOut, attr = self.attr, flagMode=flagMode)
4911
4912 #print(angles)
4912 4913 if self.__profIndex > 1:
4913 4914 case_flag = self.checkcase(angles,flagMode)
4914 4915
4915 4916 if flagMode == 1: #'AZI':
4916 4917 if case_flag == 0: #Ya girΓ³
4917 4918 self.__buffer.pop() #Erase last data
4918 4919 self.__buffer2.pop()
4919 4920 self.__buffer3.pop()
4920 4921 data_360,n,data_p,data_e = self.pushData(data=dataOut,flagMode=flagMode,case_flag=case_flag)
4921 4922
4922 4923 self.__dataReady = True
4923 4924
4924 4925 elif flagMode == 0: #'ELE'
4925 4926
4926 4927 if case_flag == 0: #Subida
4927 4928
4928 4929 if len(self.__buffer) == 2: #Cuando estΓ‘ de subida
4929 4930 #Se borra el dato anterior para liberar buffer y comparar el dato actual con el siguiente
4930 4931 self.__buffer.pop(0) #Erase first data
4931 4932 self.__buffer2.pop(0)
4932 4933 self.__buffer3.pop(0)
4933 4934 self.__profIndex -= 1
4934 4935 else: #Cuando ha estado de bajada y ha vuelto a subir
4935 4936 #Se borra el ΓΊltimo dato
4936 4937 self.__buffer.pop() #Erase last data
4937 4938 self.__buffer2.pop()
4938 4939 self.__buffer3.pop()
4939 4940 data_360, n, data_p, data_e = self.pushData(data=dataOut,flagMode=flagMode,case_flag=case_flag)
4940 4941
4941 4942 self.__dataReady = True
4942 4943
4943 4944 return data_360, data_p, data_e
4944 4945
4945 4946
4946 4947 def blockOp(self, dataOut, flagMode, datatime= None):
4947 4948 if self.__initime == None:
4948 4949 self.__initime = datatime
4949 4950 data_360, data_p, data_e = self.byProfiles(dataOut,flagMode)
4950 4951 self.__lastdatatime = datatime
4951 4952
4952 4953 avgdatatime = self.__initime
4953 4954 if self.n==1:
4954 4955 avgdatatime = datatime
4955 4956 deltatime = datatime - self.__lastdatatime
4956 4957 self.__initime = datatime
4957 4958 return data_360, avgdatatime, data_p, data_e
4958 4959
4959 4960 def checkcase(self, angles, flagMode):
4960 4961
4961 4962 if flagMode == 1: #'AZI'
4962 4963 start = angles[-2]
4963 4964 end = angles[-1]
4964 4965 diff_angle = (end-start)
4965 4966
4967 4968 if diff_angle < 0: #Full rotation completed
4967 4968 return 0
4968 4969
4969 4970 elif flagMode == 0: #'ELE'
4970 4971
4971 4972 start = angles[-2]
4972 4973 end = angles[-1]
4973 4974 diff_angle = (end-start)
4974 4975
4976 4977 if diff_angle > 0: #Ascending
4976 4977 return 0
4977 4978
4978 4979 def run(self, dataOut, attr_data='dataPP_POWER', axis=None,**kwargs):
4979 4980
4980 4981 dataOut.attr_data = attr_data
4981 4982
4983 4984 dataOut.flagMode = axis[0] #Provisional; this should come from the header
4983 4984
4984 4985 if not self.isConfig:
4985 4986 self.setup(dataOut = dataOut, attr = attr_data ,**kwargs)
4986 4987 self.isConfig = True
4987 4988
4988 4989 data_360, avgdatatime, data_p, data_e = self.blockOp(dataOut, dataOut.flagMode, dataOut.utctime)
4989 4990
4990 4991 dataOut.flagNoData = True
4991 4992
4992 4993 if self.__dataReady:
4993 4994 setattr(dataOut, attr_data, data_360 )
4994 4995 dataOut.data_azi = data_p
4995 4996 dataOut.data_ele = data_e
4996 4997 dataOut.utctime = avgdatatime
4997 4998 dataOut.flagNoData = False
4999 #print(data_360.shape)
5000 #print(dataOut.heightList)
4998 5001
4999 5002 return dataOut
5003
5004 class MergeProc(ProcessingUnit):
5005
5006 def __init__(self):
5007 ProcessingUnit.__init__(self)
5008
5009 def run(self, attr_data, attr_data_2=None, mode=0): # attr_data_2 is required by mode=1 (cross-spectra)
5010
5011 #exit(1)
5012 self.dataOut = getattr(self, self.inputs[0])
5013 data_inputs = [getattr(self, attr) for attr in self.inputs]
5014 #print(data_inputs)
5015 #print(numpy.shape([getattr(data, attr_data) for data in data_inputs][1]))
5016 #exit(1)
5017 if mode==0:
5018 data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs])
5019 setattr(self.dataOut, attr_data, data)
5020
5021 if mode==1: #Hybrid
5022 #data = numpy.concatenate([getattr(data, attr_data) for data in data_inputs],axis=1)
5023 #setattr(self.dataOut, attr_data, data)
5024 setattr(self.dataOut, 'dataLag_spc', [getattr(data, attr_data) for data in data_inputs][0])
5025 setattr(self.dataOut, 'dataLag_spc_LP', [getattr(data, attr_data) for data in data_inputs][1])
5026 setattr(self.dataOut, 'dataLag_cspc', [getattr(data, attr_data_2) for data in data_inputs][0])
5027 setattr(self.dataOut, 'dataLag_cspc_LP', [getattr(data, attr_data_2) for data in data_inputs][1])
5028 #setattr(self.dataOut, 'nIncohInt', [getattr(data, attr_data_3) for data in data_inputs][0])
5029 #setattr(self.dataOut, 'nIncohInt_LP', [getattr(data, attr_data_3) for data in data_inputs][1])
5030 '''
5031 print(self.dataOut.dataLag_spc_LP.shape)
5032 print(self.dataOut.dataLag_cspc_LP.shape)
5033 exit(1)
5034 '''
5035
5036 #self.dataOut.dataLag_spc_LP = numpy.transpose(self.dataOut.dataLag_spc_LP[0],(2,0,1))
5037 #self.dataOut.dataLag_cspc_LP = numpy.transpose(self.dataOut.dataLag_cspc_LP,(3,1,2,0))
5038 '''
5039 print("Merge")
5040 print(numpy.shape(self.dataOut.dataLag_spc))
5041 print(numpy.shape(self.dataOut.dataLag_spc_LP))
5042 print(numpy.shape(self.dataOut.dataLag_cspc))
5043 print(numpy.shape(self.dataOut.dataLag_cspc_LP))
5044 exit(1)
5045 '''
5046 #print(numpy.sum(self.dataOut.dataLag_spc_LP[2,:,164])/128)
5047 #print(numpy.sum(self.dataOut.dataLag_cspc_LP[0,:,30,1])/128)
5048 #exit(1)
5049 #print(self.dataOut.NDP)
5050 #print(self.dataOut.nNoiseProfiles)
5051
5052 #self.dataOut.nIncohInt_LP = 128
5053 self.dataOut.nProfiles_LP = 128#self.dataOut.nIncohInt_LP
5054 self.dataOut.nIncohInt_LP = self.dataOut.nIncohInt
5055 self.dataOut.NLAG = 16
5056 self.dataOut.NRANGE = 200
5057 self.dataOut.NSCAN = 128
5058 #print(numpy.shape(self.dataOut.data_spc))
5059
5060 #exit(1)
5061
5062 if mode==2: #HAE 2022
5063 data = numpy.sum([getattr(data, attr_data) for data in data_inputs],axis=0)
5064 setattr(self.dataOut, attr_data, data)
5065
5066 self.dataOut.nIncohInt *= 2
5067 #meta = self.dataOut.getFreqRange(1)/1000.
5068 self.dataOut.freqRange = self.dataOut.getFreqRange(1)/1000.
5069
5070 #exit(1)
5071
5072 if mode==7: #RM
5073
5074 f = [getattr(data, attr_data) for data in data_inputs][0]
5075 g = [getattr(data, attr_data) for data in data_inputs][1]
5076
5077 data = numpy.concatenate((f,g),axis=2)
5078 #print(data)
5079 setattr(self.dataOut, attr_data, data)
5080 #print(self.dataOut.dataPP_POWER.shape)
5081 #Build the new height list
5082 #print("hei_merge",self.dataOut.heightList)
5083 dh = self.dataOut.heightList[1]-self.dataOut.heightList[0]
5084 heightList_2 = (self.dataOut.heightList[-1]+dh) + numpy.arange(g.shape[-1], dtype=numpy.float) * dh
5085
5086 self.dataOut.heightList = numpy.concatenate((self.dataOut.heightList,heightList_2))
5087 #print("hei_merge_total",self.dataOut.heightList)
5088 #exit(1)
@@ -1,1862 +1,1860
1 1 import sys
2 2 import numpy,math
3 3 from scipy import interpolate
4 4 from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
5 5 from schainpy.model.data.jrodata import Voltage,hildebrand_sekhon
6 6 from schainpy.utils import log
7 7 from time import time
8 8
9 9
10 10
11 11 class VoltageProc(ProcessingUnit):
12 12
13 13 def __init__(self):
14 14
15 15 ProcessingUnit.__init__(self)
16 16
17 17 self.dataOut = Voltage()
18 18 self.flip = 1
19 19 self.setupReq = False
20 20
21 21 def run(self):
22 22
23 23 if self.dataIn.type == 'AMISR':
24 24 self.__updateObjFromAmisrInput()
25 25
26 26 if self.dataIn.type == 'Voltage':
27 27 self.dataOut.copy(self.dataIn)
28 28
29 29 def __updateObjFromAmisrInput(self):
30 30
31 31 self.dataOut.timeZone = self.dataIn.timeZone
32 32 self.dataOut.dstFlag = self.dataIn.dstFlag
33 33 self.dataOut.errorCount = self.dataIn.errorCount
34 34 self.dataOut.useLocalTime = self.dataIn.useLocalTime
35 35
36 36 self.dataOut.flagNoData = self.dataIn.flagNoData
37 37 self.dataOut.data = self.dataIn.data
38 38 self.dataOut.utctime = self.dataIn.utctime
39 39 self.dataOut.channelList = self.dataIn.channelList
40 40 #self.dataOut.timeInterval = self.dataIn.timeInterval
41 41 self.dataOut.heightList = self.dataIn.heightList
42 42 self.dataOut.nProfiles = self.dataIn.nProfiles
43 43
44 44 self.dataOut.nCohInt = self.dataIn.nCohInt
45 45 self.dataOut.ippSeconds = self.dataIn.ippSeconds
46 46 self.dataOut.frequency = self.dataIn.frequency
47 47
48 48 self.dataOut.azimuth = self.dataIn.azimuth
49 49 self.dataOut.zenith = self.dataIn.zenith
50 50
51 51 self.dataOut.beam.codeList = self.dataIn.beam.codeList
52 52 self.dataOut.beam.azimuthList = self.dataIn.beam.azimuthList
53 53 self.dataOut.beam.zenithList = self.dataIn.beam.zenithList
54 54
55 55
56 56 class selectChannels(Operation):
57 57
58 58 def run(self, dataOut, channelList):
59 59
60 60 channelIndexList = []
61 61 self.dataOut = dataOut
62 62 for channel in channelList:
63 63 if channel not in self.dataOut.channelList:
64 64 raise ValueError("Channel %d is not in %s" %(channel, str(self.dataOut.channelList)))
65 65
66 66 index = self.dataOut.channelList.index(channel)
67 67 channelIndexList.append(index)
68 68 self.selectChannelsByIndex(channelIndexList)
69 69 return self.dataOut
70 70
71 71 def selectChannelsByIndex(self, channelIndexList):
72 72 """
73 73 Select a block of data by channel according to channelIndexList
74 74
75 75 Input:
76 76 channelIndexList : plain list of channel indexes to select, e.g. [2,3,7]
77 77
78 78 Affected:
79 79 self.dataOut.data
80 80 self.dataOut.channelIndexList
81 81 self.dataOut.nChannels
82 82 self.dataOut.m_ProcessingHeader.totalSpectra
83 83 self.dataOut.systemHeaderObj.numChannels
84 84 self.dataOut.m_ProcessingHeader.blockSize
85 85
86 86 Return:
87 87 None
88 88 """
89 89
90 90 for channelIndex in channelIndexList:
91 91 if channelIndex not in self.dataOut.channelIndexList:
92 92 raise ValueError("The value %d in channelIndexList is not valid" %channelIndex)
93 93
94 94 if self.dataOut.type == 'Voltage':
95 95 if self.dataOut.flagDataAsBlock:
96 96 """
97 97 If the data was read in blocks, dimension = [nChannels, nProfiles, nHeis]
98 98 """
99 99 data = self.dataOut.data[channelIndexList,:,:]
100 100 else:
101 101 data = self.dataOut.data[channelIndexList,:]
102 102
103 103 self.dataOut.data = data
104 104 # self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
105 105 self.dataOut.channelList = range(len(channelIndexList))
106 106
107 107 elif self.dataOut.type == 'Spectra':
108 108 data_spc = self.dataOut.data_spc[channelIndexList, :]
109 109 data_dc = self.dataOut.data_dc[channelIndexList, :]
110 110
111 111 self.dataOut.data_spc = data_spc
112 112 self.dataOut.data_dc = data_dc
113 113
114 114 # self.dataOut.channelList = [self.dataOut.channelList[i] for i in channelIndexList]
115 115 self.dataOut.channelList = range(len(channelIndexList))
116 116 self.__selectPairsByChannel(channelIndexList)
117 117
118 118 return 1
119 119
120 120 def __selectPairsByChannel(self, channelList=None):
121 121
122 122 if channelList == None:
123 123 return
124 124
125 125 pairsIndexListSelected = []
126 126 for pairIndex in self.dataOut.pairsIndexList:
127 127 # First pair
128 128 if self.dataOut.pairsList[pairIndex][0] not in channelList:
129 129 continue
130 130 # Second pair
131 131 if self.dataOut.pairsList[pairIndex][1] not in channelList:
132 132 continue
133 133
134 134 pairsIndexListSelected.append(pairIndex)
135 135
136 136 if not pairsIndexListSelected:
137 137 self.dataOut.data_cspc = None
138 138 self.dataOut.pairsList = []
139 139 return
140 140
141 141 self.dataOut.data_cspc = self.dataOut.data_cspc[pairsIndexListSelected]
142 142 self.dataOut.pairsList = [self.dataOut.pairsList[i]
143 143 for i in pairsIndexListSelected]
144 144
145 145 return
146 146
147 147 class selectHeights(Operation):
148 148
149 149 def run(self, dataOut, minHei=None, maxHei=None, minIndex=None, maxIndex=None):
150 150 """
151 151 Select a block of data by a range of height values such that
152 152 minHei <= height <= maxHei
153 153
154 154 Input:
155 155 minHei : minimum height to consider
156 156 maxHei : maximum height to consider
157 157
158 158 Affected:
159 159 Several attributes are changed indirectly through the selectHeightsByIndex method
160 160
161 161 Return:
162 162 1 if the method ran successfully, otherwise 0
163 163 """
164 164
165 165 self.dataOut = dataOut
166 166
167 167 if minHei and maxHei:
168 168
169 169 if (minHei < self.dataOut.heightList[0]):
170 170 minHei = self.dataOut.heightList[0]
171 171
172 172 if (maxHei > self.dataOut.heightList[-1]):
173 173 maxHei = self.dataOut.heightList[-1]
174 174
175 175 minIndex = 0
176 176 maxIndex = 0
177 177 heights = self.dataOut.heightList
178 178
179 179 inda = numpy.where(heights >= minHei)
180 180 indb = numpy.where(heights <= maxHei)
181 181
182 182 try:
183 183 minIndex = inda[0][0]
184 184 except:
185 185 minIndex = 0
186 186
187 187 try:
188 188 maxIndex = indb[0][-1]
189 189 except:
190 190 maxIndex = len(heights)
191 191
192 192 self.selectHeightsByIndex(minIndex, maxIndex)
193 193
194 194 return self.dataOut
195 195
196 196 def selectHeightsByIndex(self, minIndex, maxIndex):
197 197 """
198 198 Select a block of data by a range of height indexes such that
199 199 minIndex <= index <= maxIndex
200 200
201 201 Input:
202 202 minIndex : minimum height index to consider
203 203 maxIndex : maximum height index to consider
204 204
205 205 Affected:
206 206 self.dataOut.data
207 207 self.dataOut.heightList
208 208
209 209 Return:
210 210 1 if the method ran successfully, otherwise 0
211 211 """
212 212
213 213 if self.dataOut.type == 'Voltage':
214 214 if (minIndex < 0) or (minIndex > maxIndex):
215 215 raise ValueError("Height index range (%d,%d) is not valid" % (minIndex, maxIndex))
216 216
217 217 if (maxIndex >= self.dataOut.nHeights):
218 218 maxIndex = self.dataOut.nHeights
219 219 #print("shapeeee",self.dataOut.data.shape)
220 220 #voltage
221 221 if self.dataOut.flagDataAsBlock:
222 222 """
223 223 If the data was read in blocks, dimension = [nChannels, nProfiles, nHeis]
224 224 """
225 225 data = self.dataOut.data[:,:, minIndex:maxIndex]
226 226 else:
227 227 data = self.dataOut.data[:, minIndex:maxIndex]
228 228
229 229 # firstHeight = self.dataOut.heightList[minIndex]
230 230
231 231 self.dataOut.data = data
232 232 self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex]
233 233
234 234 if self.dataOut.nHeights <= 1:
235 235 raise ValueError("selectHeights: Too few heights. Current number of heights is %d" %(self.dataOut.nHeights))
236 236 elif self.dataOut.type == 'Spectra':
237 237 if (minIndex < 0) or (minIndex > maxIndex):
238 238 raise ValueError("Error selecting heights: Index range (%d,%d) is not valid" % (
239 239 minIndex, maxIndex))
240 240
241 241 if (maxIndex >= self.dataOut.nHeights):
242 242 maxIndex = self.dataOut.nHeights - 1
243 243
244 244 # Spectra
245 245 data_spc = self.dataOut.data_spc[:, :, minIndex:maxIndex + 1]
246 246
247 247 data_cspc = None
248 248 if self.dataOut.data_cspc is not None:
249 249 data_cspc = self.dataOut.data_cspc[:, :, minIndex:maxIndex + 1]
250 250
251 251 data_dc = None
252 252 if self.dataOut.data_dc is not None:
253 253 data_dc = self.dataOut.data_dc[:, minIndex:maxIndex + 1]
254 254
255 255 self.dataOut.data_spc = data_spc
256 256 self.dataOut.data_cspc = data_cspc
257 257 self.dataOut.data_dc = data_dc
258 258
259 259 self.dataOut.heightList = self.dataOut.heightList[minIndex:maxIndex + 1]
260 260
261 261 return 1
262 262
263 263
264 264 class filterByHeights(Operation):
265 265
266 266 def run(self, dataOut, window):
267 267
268 268 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
269 269
270 270 if window == None:
271 271 window = (dataOut.radarControllerHeaderObj.txA/dataOut.radarControllerHeaderObj.nBaud) / deltaHeight
272 272
273 273 newdelta = deltaHeight * window
274 274 r = dataOut.nHeights % window
275 275 newheights = (dataOut.nHeights-r)/window
276 276
277 277 if newheights <= 1:
278 278 raise ValueError("filterByHeights: Too few heights. Current number of heights is %d and window is %d" %(dataOut.nHeights, window))
279 279
280 280 if dataOut.flagDataAsBlock:
281 281 """
282 282 If the data was read in blocks, dimension = [nChannels, nProfiles, nHeis]
283 283 """
284 284 buffer = dataOut.data[:, :, 0:int(dataOut.nHeights-r)]
285 285 buffer = buffer.reshape(dataOut.nChannels, dataOut.nProfiles, int(dataOut.nHeights/window), window)
286 286 buffer = numpy.sum(buffer,3)
287 287
288 288 else:
289 289 buffer = dataOut.data[:,0:int(dataOut.nHeights-r)]
290 290 buffer = buffer.reshape(dataOut.nChannels,int(dataOut.nHeights/window),int(window))
291 291 buffer = numpy.sum(buffer,2)
292 292
293 293 dataOut.data = buffer
294 294 dataOut.heightList = dataOut.heightList[0] + numpy.arange( newheights )*newdelta
295 295 dataOut.windowOfFilter = window
296 296
297 297 return dataOut
298 298
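A minimal sketch, with arbitrary shapes and window size, of the range decimation that filterByHeights performs in the profile-by-profile branch: trailing samples that do not fill a whole window are dropped and every group of `window` consecutive range samples is summed.

import numpy

nChannels, nHeights, window = 2, 1000, 4
data = numpy.random.randn(nChannels, nHeights) + 0j     # one profile of complex voltages (assumed)

r = nHeights % window                                   # drop the trailing remainder
buffer = data[:, :nHeights - r]
buffer = buffer.reshape(nChannels, (nHeights - r) // window, window)
decimated = buffer.sum(axis=2)                          # one integrated sample per window

print(decimated.shape)                                  # (2, 250)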
299 299
300 300 class setH0(Operation):
301 301
302 302 def run(self, dataOut, h0, deltaHeight = None):
303 303
304 304 if not deltaHeight:
305 305 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
306 306
307 307 nHeights = dataOut.nHeights
308 308
309 309 newHeiRange = h0 + numpy.arange(nHeights)*deltaHeight
310 310
311 311 dataOut.heightList = newHeiRange
312 312
313 313 return dataOut
314 314
315 315
316 316 class deFlip(Operation):
317 317
318 318 def run(self, dataOut, channelList = []):
319 319
320 320 data = dataOut.data.copy()
321 321
322 322 if dataOut.flagDataAsBlock:
323 323 flip = self.flip
324 324 profileList = list(range(dataOut.nProfiles))
325 325
326 326 if not channelList:
327 327 for thisProfile in profileList:
328 328 data[:,thisProfile,:] = data[:,thisProfile,:]*flip
329 329 flip *= -1.0
330 330 else:
331 331 for thisChannel in channelList:
332 332 if thisChannel not in dataOut.channelList:
333 333 continue
334 334
335 335 for thisProfile in profileList:
336 336 data[thisChannel,thisProfile,:] = data[thisChannel,thisProfile,:]*flip
337 337 flip *= -1.0
338 338
339 339 self.flip = flip
340 340
341 341 else:
342 342 if not channelList:
343 343 data[:,:] = data[:,:]*self.flip
344 344 else:
345 345 for thisChannel in channelList:
346 346 if thisChannel not in dataOut.channelList:
347 347 continue
348 348
349 349 data[thisChannel,:] = data[thisChannel,:]*self.flip
350 350
351 351 self.flip *= -1.
352 352
353 353 dataOut.data = data
354 354
355 355 return dataOut
356 356
357 357
358 358 class setAttribute(Operation):
359 359 '''
360 360 Set an arbitrary attribute(s) to dataOut
361 361 '''
362 362
363 363 def __init__(self):
364 364
365 365 Operation.__init__(self)
366 366 self._ready = False
367 367
368 368 def run(self, dataOut, **kwargs):
369 369
370 370 for key, value in kwargs.items():
371 371 setattr(dataOut, key, value)
372 372
373 373 return dataOut
374 374
375 375
376 376 @MPDecorator
377 377 class printAttribute(Operation):
378 378 '''
379 379 Print an arbitrary attribute of dataOut
380 380 '''
381 381
382 382 def __init__(self):
383 383
384 384 Operation.__init__(self)
385 385
386 386 def run(self, dataOut, attributes):
387 387
388 388 if isinstance(attributes, str):
389 389 attributes = [attributes]
390 390 for attr in attributes:
391 391 if hasattr(dataOut, attr):
392 392 log.log(getattr(dataOut, attr), attr)
393 393
394 394
395 395 class interpolateHeights(Operation):
396 396
397 397 def run(self, dataOut, topLim, botLim):
398 398 #69 to 72 for Julia
399 399 #82-84 for meteors
400 400 if len(numpy.shape(dataOut.data))==2:
401 401 sampInterp = (dataOut.data[:,botLim-1] + dataOut.data[:,topLim+1])/2
402 402 sampInterp = numpy.transpose(numpy.tile(sampInterp,(topLim-botLim + 1,1)))
403 403 #dataOut.data[:,botLim:limSup+1] = sampInterp
404 404 dataOut.data[:,botLim:topLim+1] = sampInterp
405 405 else:
406 406 nHeights = dataOut.data.shape[2]
407 407 x = numpy.hstack((numpy.arange(botLim),numpy.arange(topLim+1,nHeights)))
408 408 y = dataOut.data[:,:,list(range(botLim))+list(range(topLim+1,nHeights))]
409 409 f = interpolate.interp1d(x, y, axis = 2)
410 410 xnew = numpy.arange(botLim,topLim+1)
411 411 ynew = f(xnew)
412 412 dataOut.data[:,:,botLim:topLim+1] = ynew
413 413
414 414 return dataOut
415 415
416 416
417 417 class CohInt(Operation):
418 418
419 419 isConfig = False
420 420 __profIndex = 0
421 421 __byTime = False
422 422 __initime = None
423 423 __lastdatatime = None
424 424 __integrationtime = None
425 425 __buffer = None
426 426 __bufferStride = []
427 427 __dataReady = False
428 428 __profIndexStride = 0
429 429 __dataToPutStride = False
430 430 n = None
431 431
432 432 def __init__(self, **kwargs):
433 433
434 434 Operation.__init__(self, **kwargs)
435 435
436 436 def setup(self, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False):
437 437 """
438 438 Set the parameters of the integration class.
439 439
440 440 Inputs:
441 441
442 442 n : Number of coherent integrations
443 443 timeInterval : Time of integration. If the parameter "n" is selected this one does not work
444 444 overlapping :
445 445 """
446 446
447 447 self.__initime = None
448 448 self.__lastdatatime = 0
449 449 self.__buffer = None
450 450 self.__dataReady = False
451 451 self.byblock = byblock
452 452 self.stride = stride
453 453
454 454 if n == None and timeInterval == None:
455 455 raise ValueError("n or timeInterval should be specified ...")
456 456
457 457 if n != None:
458 458 self.n = n
459 459 self.__byTime = False
460 460 else:
461 461 self.__integrationtime = timeInterval #* 60. #if (type(timeInterval)!=integer) -> change this line
462 462 self.n = 9999
463 463 self.__byTime = True
464 464
465 465 if overlapping:
466 466 self.__withOverlapping = True
467 467 self.__buffer = None
468 468 else:
469 469 self.__withOverlapping = False
470 470 self.__buffer = 0
471 471
472 472 self.__profIndex = 0
473 473
474 474 def putData(self, data):
475 475
476 476 """
477 477 Add a profile to the __buffer and increase in one the __profileIndex
478 478
479 479 """
480 480
481 481 if not self.__withOverlapping:
482 482 self.__buffer += data.copy()
483 483 self.__profIndex += 1
484 484 return
485 485
486 486 #Overlapping data
487 487 nChannels, nHeis = data.shape
488 488 data = numpy.reshape(data, (1, nChannels, nHeis))
489 489
490 490 #If the buffer is empty then it takes the data value
491 491 if self.__buffer is None:
492 492 self.__buffer = data
493 493 self.__profIndex += 1
494 494 return
495 495
496 496 #If the buffer length is lower than n then stack the data value
497 497 if self.__profIndex < self.n:
498 498 self.__buffer = numpy.vstack((self.__buffer, data))
499 499 self.__profIndex += 1
500 500 return
501 501
502 502 #If the buffer length is equal to n then replacing the last buffer value with the data value
503 503 self.__buffer = numpy.roll(self.__buffer, -1, axis=0)
504 504 self.__buffer[self.n-1] = data
505 505 self.__profIndex = self.n
506 506 return
507 507
508 508
509 509 def pushData(self):
510 510 """
511 511 Return the sum of the last profiles and the profiles used in the sum.
512 512
513 513 Affected:
514 514
515 515 self.__profileIndex
516 516
517 517 """
518 518
519 519 if not self.__withOverlapping:
520 520 data = self.__buffer
521 521 n = self.__profIndex
522 522
523 523 self.__buffer = 0
524 524 self.__profIndex = 0
525 525
526 526 return data, n
527 527
528 528 #Integration with Overlapping
529 529 data = numpy.sum(self.__buffer, axis=0)
530 530 # print data
531 531 # raise
532 532 n = self.__profIndex
533 533
534 534 return data, n
535 535
536 536 def byProfiles(self, data):
537 537
538 538 self.__dataReady = False
539 539 avgdata = None
540 540 # n = None
541 541 # print data
542 542 # raise
543 543 self.putData(data)
544 544
545 545 if self.__profIndex == self.n:
546 546 avgdata, n = self.pushData()
547 547 self.__dataReady = True
548 548
549 549 return avgdata
550 550
551 551 def byTime(self, data, datatime):
552 552
553 553 self.__dataReady = False
554 554 avgdata = None
555 555 n = None
556 556
557 557 self.putData(data)
558 558
559 559 if (datatime - self.__initime) >= self.__integrationtime:
560 560 avgdata, n = self.pushData()
561 561 self.n = n
562 562 self.__dataReady = True
563 563
564 564 return avgdata
565 565
566 566 def integrateByStride(self, data, datatime):
567 567 # print data
568 568 if self.__profIndex == 0:
569 569 self.__buffer = [[data.copy(), datatime]]
570 570 else:
571 571 self.__buffer.append([data.copy(),datatime])
572 572 self.__profIndex += 1
573 573 self.__dataReady = False
574 574
575 575 if self.__profIndex == self.n * self.stride :
576 576 self.__dataToPutStride = True
577 577 self.__profIndexStride = 0
578 578 self.__profIndex = 0
579 579 self.__bufferStride = []
580 580 for i in range(self.stride):
581 581 current = self.__buffer[i::self.stride]
582 582 data = numpy.sum([t[0] for t in current], axis=0)
583 583 avgdatatime = numpy.average([t[1] for t in current])
584 584 # print data
585 585 self.__bufferStride.append((data, avgdatatime))
586 586
587 587 if self.__dataToPutStride:
588 588 self.__dataReady = True
589 589 self.__profIndexStride += 1
590 590 if self.__profIndexStride == self.stride:
591 591 self.__dataToPutStride = False
592 592 # print self.__bufferStride[self.__profIndexStride - 1]
593 593 # raise
594 594 return self.__bufferStride[self.__profIndexStride - 1]
595 595
596 596
597 597 return None, None
598 598
599 599 def integrate(self, data, datatime=None):
600 600
601 601 if self.__initime == None:
602 602 self.__initime = datatime
603 603
604 604 if self.__byTime:
605 605 avgdata = self.byTime(data, datatime)
606 606 else:
607 607 avgdata = self.byProfiles(data)
608 608
609 609
610 610 self.__lastdatatime = datatime
611 611
612 612 if avgdata is None:
613 613 return None, None
614 614
615 615 avgdatatime = self.__initime
616 616
617 617 deltatime = datatime - self.__lastdatatime
618 618
619 619 if not self.__withOverlapping:
620 620 self.__initime = datatime
621 621 else:
622 622 self.__initime += deltatime
623 623
624 624 return avgdata, avgdatatime
625 625
626 626 def integrateByBlock(self, dataOut):
627 627
628 628 times = int(dataOut.data.shape[1]/self.n)
629 629 avgdata = numpy.zeros((dataOut.nChannels, times, dataOut.nHeights), dtype=numpy.complex)
630 630
631 631 id_min = 0
632 632 id_max = self.n
633 633
634 634 for i in range(times):
635 635 junk = dataOut.data[:,id_min:id_max,:]
636 636 avgdata[:,i,:] = junk.sum(axis=1)
637 637 id_min += self.n
638 638 id_max += self.n
639 639
640 640 timeInterval = dataOut.ippSeconds*self.n
641 641 avgdatatime = (times - 1) * timeInterval + dataOut.utctime
642 642 self.__dataReady = True
643 643 return avgdata, avgdatatime
644 644
645 645 def run(self, dataOut, n=None, timeInterval=None, stride=None, overlapping=False, byblock=False, **kwargs):
646 646
647 647 if not self.isConfig:
648 648 self.setup(n=n, stride=stride, timeInterval=timeInterval, overlapping=overlapping, byblock=byblock, **kwargs)
649 649 self.isConfig = True
650 650
651 651 if dataOut.flagDataAsBlock:
652 652 """
653 653 If the data is read in blocks, dimension = [nChannels, nProfiles, nHeis]
654 654 """
655 655 avgdata, avgdatatime = self.integrateByBlock(dataOut)
656 656 dataOut.nProfiles /= self.n
657 657 else:
658 658 if stride is None:
659 659 avgdata, avgdatatime = self.integrate(dataOut.data, dataOut.utctime)
660 660 else:
661 661 avgdata, avgdatatime = self.integrateByStride(dataOut.data, dataOut.utctime)
662 662
663 663
664 664 # dataOut.timeInterval *= n
665 665 dataOut.flagNoData = True
666 666
667 667 if self.__dataReady:
668 668 dataOut.data = avgdata
669 669 if not dataOut.flagCohInt:
670 670 dataOut.nCohInt *= self.n
671 671 dataOut.flagCohInt = True
672 672 dataOut.utctime = avgdatatime
673 673 # print avgdata, avgdatatime
674 674 # raise
675 675 # dataOut.timeInterval = dataOut.ippSeconds * dataOut.nCohInt
676 676 dataOut.flagNoData = False
677 677 return dataOut
678 678
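A minimal sketch, with arbitrary shapes and n, of what CohInt does in its simplest configuration (fixed n, overlapping=False, profile-by-profile data): n consecutive complex profiles are summed before any further processing, so coherent echoes add in phase while noise does not.

import numpy

nChannels, nHeights, n = 2, 500, 4
profiles = [numpy.random.randn(nChannels, nHeights)
            + 1j * numpy.random.randn(nChannels, nHeights) for _ in range(n)]

buffer = 0
for profile in profiles:        # putData(): accumulate n consecutive profiles
    buffer = buffer + profile

avgdata = buffer                # pushData(): the coherently integrated profile
print(avgdata.shape)            # (2, 500)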
679 679 class Decoder(Operation):
680 680
681 681 isConfig = False
682 682 __profIndex = 0
683 683
684 684 code = None
685 685
686 686 nCode = None
687 687 nBaud = None
688 688
689 689 def __init__(self, **kwargs):
690 690
691 691 Operation.__init__(self, **kwargs)
692 692
693 693 self.times = None
694 694 self.osamp = None
695 695 # self.__setValues = False
696 696 self.isConfig = False
697 697 self.setupReq = False
698 698 def setup(self, code, osamp, dataOut):
699 699
700 700 self.__profIndex = 0
701 701
702 702 self.code = code
703 703
704 704 self.nCode = len(code)
705 705 self.nBaud = len(code[0])
706 706
707 707 if (osamp != None) and (osamp >1):
708 708 self.osamp = osamp
709 709 self.code = numpy.repeat(code, repeats=self.osamp, axis=1)
710 710 self.nBaud = self.nBaud*self.osamp
711 711
712 712 self.__nChannels = dataOut.nChannels
713 713 self.__nProfiles = dataOut.nProfiles
714 714 self.__nHeis = dataOut.nHeights
715 715
716 716 if self.__nHeis < self.nBaud:
717 717 raise ValueError('Number of heights (%d) should be greater than number of bauds (%d)' %(self.__nHeis, self.nBaud))
718 718
719 719 #Frequency
720 720 __codeBuffer = numpy.zeros((self.nCode, self.__nHeis), dtype=numpy.complex)
721 721
722 722 __codeBuffer[:,0:self.nBaud] = self.code
723 723
724 724 self.fft_code = numpy.conj(numpy.fft.fft(__codeBuffer, axis=1))
725 725
726 726 if dataOut.flagDataAsBlock:
727 727
728 728 self.ndatadec = self.__nHeis #- self.nBaud + 1
729 729
730 730 self.datadecTime = numpy.zeros((self.__nChannels, self.__nProfiles, self.ndatadec), dtype=numpy.complex)
731 731
732 732 else:
733 733
734 734 #Time
735 735 self.ndatadec = self.__nHeis #- self.nBaud + 1
736 736
737 737 self.datadecTime = numpy.zeros((self.__nChannels, self.ndatadec), dtype=numpy.complex)
738 738
739 739 def __convolutionInFreq(self, data):
740 740
741 741 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
742 742
743 743 fft_data = numpy.fft.fft(data, axis=1)
744 744
745 745 conv = fft_data*fft_code
746 746
747 747 data = numpy.fft.ifft(conv,axis=1)
748 748
749 749 return data
750 750
751 751 def __convolutionInFreqOpt(self, data):
752 752
753 753 raise NotImplementedError
754 754
755 755 def __convolutionInTime(self, data):
756 756
757 757 code = self.code[self.__profIndex]
758 758 for i in range(self.__nChannels):
759 759 self.datadecTime[i,:] = numpy.correlate(data[i,:], code, mode='full')[self.nBaud-1:]
760 760
761 761 return self.datadecTime
762 762
763 763 def __convolutionByBlockInTime(self, data):
764 764
765 765 repetitions = int(self.__nProfiles / self.nCode)
766 766 junk = numpy.lib.stride_tricks.as_strided(self.code, (repetitions, self.code.size), (0, self.code.itemsize))
767 767 junk = junk.flatten()
768 768 code_block = numpy.reshape(junk, (self.nCode*repetitions, self.nBaud))
769 769 profilesList = range(self.__nProfiles)
770 770
771 771 for i in range(self.__nChannels):
772 772 for j in profilesList:
773 773 self.datadecTime[i,j,:] = numpy.correlate(data[i,j,:], code_block[j,:], mode='full')[self.nBaud-1:]
774 774 return self.datadecTime
775 775
776 776 def __convolutionByBlockInFreq(self, data):
777 777
778 778 raise NotImplementedError("Decoder by frequency for blocks not implemented")
779 779
780 780
781 781 fft_code = self.fft_code[self.__profIndex].reshape(1,-1)
782 782
783 783 fft_data = numpy.fft.fft(data, axis=2)
784 784
785 785 conv = fft_data*fft_code
786 786
787 787 data = numpy.fft.ifft(conv,axis=2)
788 788
789 789 return data
790 790
791 791
792 792 def run(self, dataOut, code=None, nCode=None, nBaud=None, mode = 0, osamp=None, times=None):
793 793
794 794 if dataOut.flagDecodeData:
795 795 print("This data is already decoded, recoding again ...")
796 796
797 797 if not self.isConfig:
798 798
799 799 if code is None:
800 800 if dataOut.code is None:
801 801 raise ValueError("Code could not be read from %s instance. Enter a value in Code parameter" %dataOut.type)
802 802
803 803 code = dataOut.code
804 804 else:
805 805 code = numpy.array(code).reshape(nCode,nBaud)
806 806 self.setup(code, osamp, dataOut)
807 807
808 808 self.isConfig = True
809 809
810 810 if mode == 3:
811 811 sys.stderr.write("Decoder Warning: mode=%d is not valid, using mode=0\n" %mode)
812 812
813 813 if times != None:
814 814 sys.stderr.write("Decoder Warning: Argument 'times' in not used anymore\n")
815 815
816 816 if self.code is None:
817 817 print("Fail decoding: Code is not defined.")
818 818 return
819 819
820 820 self.__nProfiles = dataOut.nProfiles
821 821 datadec = None
822 822
823 823 if mode == 3:
824 824 mode = 0
825 825
826 826 if dataOut.flagDataAsBlock:
827 827 """
828 828 Decoding when data have been read as block,
829 829 """
830 830
831 831 if mode == 0:
832 832 datadec = self.__convolutionByBlockInTime(dataOut.data)
833 833 if mode == 1:
834 834 datadec = self.__convolutionByBlockInFreq(dataOut.data)
835 835 else:
836 836 """
837 837 Decoding when data have been read profile by profile
838 838 """
839 839 if mode == 0:
840 840 datadec = self.__convolutionInTime(dataOut.data)
841 841
842 842 if mode == 1:
843 843 datadec = self.__convolutionInFreq(dataOut.data)
844 844
845 845 if mode == 2:
846 846 datadec = self.__convolutionInFreqOpt(dataOut.data)
847 847
848 848 if datadec is None:
849 849 raise ValueError("Codification mode selected is not valid: mode=%d. Try selecting 0 or 1" %mode)
850 850
851 851 dataOut.code = self.code
852 852 dataOut.nCode = self.nCode
853 853 dataOut.nBaud = self.nBaud
854 854
855 855 dataOut.data = datadec
856 856
857 857 dataOut.heightList = dataOut.heightList[0:datadec.shape[-1]]
858 858
859 859 dataOut.flagDecodeData = True #assume the data is now decoded
860 860
861 861 if self.__profIndex == self.nCode-1:
862 862 self.__profIndex = 0
863 863 return dataOut
864 864
865 865 self.__profIndex += 1
866 866
867 867 return dataOut
868 868 # dataOut.flagDeflipData = True #assume the data has not been de-flipped
869 869
870 870
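A minimal sketch of the per-profile time-domain decoding used by __convolutionInTime above: each channel is cross-correlated with the transmitted code and only the part starting at sample nBaud-1 is kept, so the decoded series keeps the original number of heights. The 13-baud Barker code and the shapes are illustrative assumptions.

import numpy

code = numpy.array([1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1], dtype=float)
nBaud = code.size
nChannels, nHeights = 2, 200
data = (numpy.random.randn(nChannels, nHeights)
        + 1j * numpy.random.randn(nChannels, nHeights))

datadec = numpy.zeros((nChannels, nHeights), dtype=complex)
for i in range(nChannels):
    # keep the causal part of the full cross-correlation
    datadec[i, :] = numpy.correlate(data[i, :], code, mode='full')[nBaud - 1:]

print(datadec.shape)            # (2, 200)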
871 871 class ProfileConcat(Operation):
872 872
873 873 isConfig = False
874 874 buffer = None
875 875
876 876 def __init__(self, **kwargs):
877 877
878 878 Operation.__init__(self, **kwargs)
879 879 self.profileIndex = 0
880 880
881 881 def reset(self):
882 882 self.buffer = numpy.zeros_like(self.buffer)
883 883 self.start_index = 0
884 884 self.times = 1
885 885
886 886 def setup(self, data, m, n=1):
887 887 self.buffer = numpy.zeros((data.shape[0],data.shape[1]*m),dtype=type(data[0,0]))
888 888 self.nHeights = data.shape[1]#.nHeights
889 889 self.start_index = 0
890 890 self.times = 1
891 891
892 892 def concat(self, data):
893 893
894 894 self.buffer[:,self.start_index:self.nHeights*self.times] = data.copy()
895 895 self.start_index = self.start_index + self.nHeights
896 896
897 897 def run(self, dataOut, m):
898 898 dataOut.flagNoData = True
899 899
900 900 if not self.isConfig:
901 901 self.setup(dataOut.data, m, 1)
902 902 self.isConfig = True
903 903
904 904 if dataOut.flagDataAsBlock:
905 905 raise ValueError("ProfileConcat can only be used when voltage have been read profile by profile, getBlock = False")
906 906
907 907 else:
908 908 self.concat(dataOut.data)
909 909 self.times += 1
910 910 if self.times > m:
911 911 dataOut.data = self.buffer
912 912 self.reset()
913 913 dataOut.flagNoData = False
914 914 # more header and dataOut properties should be updated, for example the heights
915 915 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
916 916 xf = dataOut.heightList[0] + dataOut.nHeights * deltaHeight * m
917 917 dataOut.heightList = numpy.arange(dataOut.heightList[0], xf, deltaHeight)
918 918 dataOut.ippSeconds *= m
919 919 return dataOut
920 920
921 921 class ProfileSelector(Operation):
922 922
923 923 profileIndex = None
924 924 # Total number of profiles
925 925 nProfiles = None
926 926
927 927 def __init__(self, **kwargs):
928 928
929 929 Operation.__init__(self, **kwargs)
930 930 self.profileIndex = 0
931 931
932 932 def incProfileIndex(self):
933 933
934 934 self.profileIndex += 1
935 935
936 936 if self.profileIndex >= self.nProfiles:
937 937 self.profileIndex = 0
938 938
939 939 def isThisProfileInRange(self, profileIndex, minIndex, maxIndex):
940 940
941 941 if profileIndex < minIndex:
942 942 return False
943 943
944 944 if profileIndex > maxIndex:
945 945 return False
946 946
947 947 return True
948 948
949 949 def isThisProfileInList(self, profileIndex, profileList):
950 950
951 951 if profileIndex not in profileList:
952 952 return False
953 953
954 954 return True
955 955
956 956 def run(self, dataOut, profileList=None, profileRangeList=None, beam=None, byblock=False, rangeList = None, nProfiles=None):
957
957 #print("before",dataOut.data.shape)
958 958 """
959 959 ProfileSelector:
960 960
961 961 Inputs:
962 962 profileList : Index of profiles selected. Example: profileList = (0,1,2,7,8)
963 963
964 964 profileRangeList : Minimum and maximum profile indexes. Example: profileRangeList = (4, 30)
965 965
966 966 rangeList : List of profile ranges. Example: rangeList = ((4, 30), (32, 64), (128, 256))
967 967
968 968 """
969 969
970 970 if rangeList is not None:
971 971 if type(rangeList[0]) not in (tuple, list):
972 972 rangeList = [rangeList]
973 973
974 974 dataOut.flagNoData = True
975 975
976 976 if dataOut.flagDataAsBlock:
977 977 """
978 978 data dimension = [nChannels, nProfiles, nHeis]
979 979 """
980 980 if profileList != None:
981 981 dataOut.data = dataOut.data[:,profileList,:]
982 982
983 983 if profileRangeList != None:
984 984 minIndex = profileRangeList[0]
985 985 maxIndex = profileRangeList[1]
986 986 profileList = list(range(minIndex, maxIndex+1))
987 987
988 988 dataOut.data = dataOut.data[:,minIndex:maxIndex+1,:]
989 989
990 990 if rangeList != None:
991 991
992 992 profileList = []
993 993
994 994 for thisRange in rangeList:
995 995 minIndex = thisRange[0]
996 996 maxIndex = thisRange[1]
997 997
998 998 profileList.extend(list(range(minIndex, maxIndex+1)))
999 999
1000 1000 dataOut.data = dataOut.data[:,profileList,:]
1001 1001
1002 1002 dataOut.nProfiles = len(profileList)
1003 1003 dataOut.profileIndex = dataOut.nProfiles - 1
1004 1004 dataOut.flagNoData = False
1005
1005 #print(dataOut.data.shape)
1006 1006 return dataOut
1007 1007
1008 1008 """
1009 1009 data dimension = [nChannels, nHeis]
1010 1010 """
1011 1011
1012 1012 if profileList != None:
1013 1013
1014 1014 if self.isThisProfileInList(dataOut.profileIndex, profileList):
1015 1015
1016 1016 self.nProfiles = len(profileList)
1017 1017 dataOut.nProfiles = self.nProfiles
1018 1018 dataOut.profileIndex = self.profileIndex
1019 1019 dataOut.flagNoData = False
1020 1020
1021 1021 self.incProfileIndex()
1022 1022 return dataOut
1023 1023
1024 1024 if profileRangeList != None:
1025 1025
1026 1026 minIndex = profileRangeList[0]
1027 1027 maxIndex = profileRangeList[1]
1028 1028
1029 1029 if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):
1030 1030
1031 1031 self.nProfiles = maxIndex - minIndex + 1
1032 1032 dataOut.nProfiles = self.nProfiles
1033 1033 dataOut.profileIndex = self.profileIndex
1034 1034 dataOut.flagNoData = False
1035 1035
1036 1036 self.incProfileIndex()
1037 1037 return dataOut
1038 1038
1039 1039 if rangeList != None:
1040 1040
1041 1041 nProfiles = 0
1042 1042
1043 1043 for thisRange in rangeList:
1044 1044 minIndex = thisRange[0]
1045 1045 maxIndex = thisRange[1]
1046 1046
1047 1047 nProfiles += maxIndex - minIndex + 1
1048 1048
1049 1049 for thisRange in rangeList:
1050 1050
1051 1051 minIndex = thisRange[0]
1052 1052 maxIndex = thisRange[1]
1053 1053
1054 1054 if self.isThisProfileInRange(dataOut.profileIndex, minIndex, maxIndex):
1055 1055
1056 1056 self.nProfiles = nProfiles
1057 1057 dataOut.nProfiles = self.nProfiles
1058 1058 dataOut.profileIndex = self.profileIndex
1059 1059 dataOut.flagNoData = False
1060 1060
1061 1061 self.incProfileIndex()
1062 1062
1063 1063 break
1064 1064
1065 1065 return dataOut
1066 1066
1067 1067
1068 1068 if beam != None: #beam is only for AMISR data
1069 1069 if self.isThisProfileInList(dataOut.profileIndex, dataOut.beamRangeDict[beam]):
1070 1070 dataOut.flagNoData = False
1071 1071 dataOut.profileIndex = self.profileIndex
1072 1072
1073 1073 self.incProfileIndex()
1074 1074
1075 1075 return dataOut
1076 1076
1077 1077 raise ValueError("ProfileSelector needs profileList, profileRangeList or rangeList parameter")
1078 1078
1079 1079
1080 1080 class Reshaper(Operation):
1081 1081
1082 1082 def __init__(self, **kwargs):
1083 1083
1084 1084 Operation.__init__(self, **kwargs)
1085 1085
1086 1086 self.__buffer = None
1087 1087 self.__nitems = 0
1088 1088
1089 1089 def __appendProfile(self, dataOut, nTxs):
1090 1090
1091 1091 if self.__buffer is None:
1092 1092 shape = (dataOut.nChannels, int(dataOut.nHeights/nTxs) )
1093 1093 self.__buffer = numpy.empty(shape, dtype = dataOut.data.dtype)
1094 1094
1095 1095 ini = dataOut.nHeights * self.__nitems
1096 1096 end = ini + dataOut.nHeights
1097 1097
1098 1098 self.__buffer[:, ini:end] = dataOut.data
1099 1099
1100 1100 self.__nitems += 1
1101 1101
1102 1102 return int(self.__nitems*nTxs)
1103 1103
1104 1104 def __getBuffer(self):
1105 1105
1106 1106 if self.__nitems == int(1./self.__nTxs):
1107 1107
1108 1108 self.__nitems = 0
1109 1109
1110 1110 return self.__buffer.copy()
1111 1111
1112 1112 return None
1113 1113
1114 1114 def __checkInputs(self, dataOut, shape, nTxs):
1115 1115
1116 1116 if shape is None and nTxs is None:
1117 1117 raise ValueError("Reshaper: shape of factor should be defined")
1118 1118
1119 1119 if nTxs:
1120 1120 if nTxs < 0:
1121 1121 raise ValueError("nTxs should be greater than 0")
1122 1122
1123 1123 if nTxs < 1 and dataOut.nProfiles % (1./nTxs) != 0:
1124 1124 raise ValueError("nProfiles= %d is not divisibled by (1./nTxs) = %f" %(dataOut.nProfiles, (1./nTxs)))
1125 1125
1126 1126 shape = [dataOut.nChannels, dataOut.nProfiles*nTxs, dataOut.nHeights/nTxs]
1127 1127
1128 1128 return shape, nTxs
1129 1129
1130 1130 if len(shape) != 2 and len(shape) != 3:
1131 1131 raise ValueError("shape dimension should be equal to 2 or 3. shape = (nProfiles, nHeis) or (nChannels, nProfiles, nHeis). Actually shape = (%d, %d, %d)" %(dataOut.nChannels, dataOut.nProfiles, dataOut.nHeights))
1132 1132
1133 1133 if len(shape) == 2:
1134 1134 shape_tuple = [dataOut.nChannels]
1135 1135 shape_tuple.extend(shape)
1136 1136 else:
1137 1137 shape_tuple = list(shape)
1138 1138
1139 1139 nTxs = 1.0*shape_tuple[1]/dataOut.nProfiles
1140 1140
1141 1141 return shape_tuple, nTxs
1142 1142
1143 1143 def run(self, dataOut, shape=None, nTxs=None):
1144 1144
1145 1145 shape_tuple, self.__nTxs = self.__checkInputs(dataOut, shape, nTxs)
1146 1146
1147 1147 dataOut.flagNoData = True
1148 1148 profileIndex = None
1149 1149
1150 1150 if dataOut.flagDataAsBlock:
1151 1151
1152 1152 dataOut.data = numpy.reshape(dataOut.data, shape_tuple)
1153 1153 dataOut.flagNoData = False
1154 1154
1155 1155 profileIndex = int(dataOut.nProfiles*self.__nTxs) - 1
1156 1156
1157 1157 else:
1158 1158
1159 1159 if self.__nTxs < 1:
1160 1160
1161 1161 self.__appendProfile(dataOut, self.__nTxs)
1162 1162 new_data = self.__getBuffer()
1163 1163
1164 1164 if new_data is not None:
1165 1165 dataOut.data = new_data
1166 1166 dataOut.flagNoData = False
1167 1167
1168 1168 profileIndex = dataOut.profileIndex*nTxs
1169 1169
1170 1170 else:
1171 1171 raise ValueError("nTxs should be greater than 0 and lower than 1, or use VoltageReader(..., getblock=True)")
1172 1172
1173 1173 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1174 1174
1175 1175 dataOut.heightList = numpy.arange(dataOut.nHeights/self.__nTxs) * deltaHeight + dataOut.heightList[0]
1176 1176
1177 1177 dataOut.nProfiles = int(dataOut.nProfiles*self.__nTxs)
1178 1178
1179 1179 dataOut.profileIndex = profileIndex
1180 1180
1181 1181 dataOut.ippSeconds /= self.__nTxs
1182 1182
1183 1183 return dataOut
1184 1184
1185 1185 class SplitProfiles(Operation):
1186 1186
1187 1187 def __init__(self, **kwargs):
1188 1188
1189 1189 Operation.__init__(self, **kwargs)
1190 1190
1191 1191 def run(self, dataOut, n):
1192 1192
1193 1193 dataOut.flagNoData = True
1194 1194 profileIndex = None
1195 1195
1196 1196 if dataOut.flagDataAsBlock:
1197 1197
1198 1198 #nchannels, nprofiles, nsamples
1199 1199 shape = dataOut.data.shape
1200 1200
1201 1201 if shape[2] % n != 0:
1202 1202 raise ValueError("Could not split the data, n=%d has to be multiple of %d" %(n, shape[2]))
1203 1203
1204 1204 new_shape = shape[0], shape[1]*n, int(shape[2]/n)
1205 1205
1206 1206 dataOut.data = numpy.reshape(dataOut.data, new_shape)
1207 1207 dataOut.flagNoData = False
1208 1208
1209 1209 profileIndex = int(dataOut.nProfiles/n) - 1
1210 1210
1211 1211 else:
1212 1212
1213 1213 raise ValueError("Could not split the data when is read Profile by Profile. Use VoltageReader(..., getblock=True)")
1214 1214
1215 1215 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1216 1216
1217 1217 dataOut.heightList = numpy.arange(dataOut.nHeights/n) * deltaHeight + dataOut.heightList[0]
1218 1218
1219 1219 dataOut.nProfiles = int(dataOut.nProfiles*n)
1220 1220
1221 1221 dataOut.profileIndex = profileIndex
1222 1222
1223 1223 dataOut.ippSeconds /= n
1224 1224
1225 1225 return dataOut
1226 1226
1227 1227 class CombineProfiles(Operation):
1228 1228 def __init__(self, **kwargs):
1229 1229
1230 1230 Operation.__init__(self, **kwargs)
1231 1231
1232 1232 self.__remData = None
1233 1233 self.__profileIndex = 0
1234 1234
1235 1235 def run(self, dataOut, n):
1236 1236
1237 1237 dataOut.flagNoData = True
1238 1238 profileIndex = None
1239 1239
1240 1240 if dataOut.flagDataAsBlock:
1241 1241
1242 1242 #nchannels, nprofiles, nsamples
1243 1243 shape = dataOut.data.shape
1244 1244 new_shape = shape[0], int(shape[1]/n), shape[2]*n
1245 1245
1246 1246 if shape[1] % n != 0:
1247 1247 raise ValueError("Could not combine the data, the number of profiles (%d) must be a multiple of n=%d" %(shape[1], n))
1248 1248
1249 1249 dataOut.data = numpy.reshape(dataOut.data, new_shape)
1250 1250 dataOut.flagNoData = False
1251 1251
1252 1252 profileIndex = int(dataOut.nProfiles*n) - 1
1253 1253
1254 1254 else:
1255 1255
1256 1256 #nchannels, nsamples
1257 1257 if self.__remData is None:
1258 1258 newData = dataOut.data
1259 1259 else:
1260 1260 newData = numpy.concatenate((self.__remData, dataOut.data), axis=1)
1261 1261
1262 1262 self.__profileIndex += 1
1263 1263
1264 1264 if self.__profileIndex < n:
1265 1265 self.__remData = newData
1266 1266 #continue
1267 1267 return
1268 1268
1269 1269 self.__profileIndex = 0
1270 1270 self.__remData = None
1271 1271
1272 1272 dataOut.data = newData
1273 1273 dataOut.flagNoData = False
1274 1274
1275 1275 profileIndex = dataOut.profileIndex/n
1276 1276
1277 1277
1278 1278 deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1279 1279
1280 1280 dataOut.heightList = numpy.arange(dataOut.nHeights*n) * deltaHeight + dataOut.heightList[0]
1281 1281
1282 1282 dataOut.nProfiles = int(dataOut.nProfiles/n)
1283 1283
1284 1284 dataOut.profileIndex = profileIndex
1285 1285
1286 1286 dataOut.ippSeconds *= n
1287 1287
1288 1288 return dataOut
1289 1289
1290 1290 class PulsePair(Operation):
1291 1291 '''
1292 1292 Function PulsePair(Signal Power, Velocity)
1293 1293 The real part of lag[0] provides the intensity (power) information
1294 1294 The phase of lag[1] provides the Doppler velocity information
1295 1295
1296 1296 Configuration Parameters:
1297 1297 nPRF = number of PRFs (profiles) to integrate
1298 1298 theta = azimuth angle boundaries in degrees
1299 1299
1300 1300 Input:
1301 1301 self.dataOut
1302 1302 lag[N]
1303 1303 Affected:
1304 1304 self.dataOut.spc
1305 1305 '''
1306 1306 isConfig = False
1307 1307 __profIndex = 0
1308 1308 __initime = None
1309 1309 __lastdatatime = None
1310 1310 __buffer = None
1311 1311 noise = None
1312 1312 __dataReady = False
1313 1313 n = None
1314 1314 __nch = 0
1315 1315 __nHeis = 0
1316 1316 removeDC = False
1317 1317 ipp = None
1318 1318 lambda_ = 0
1319 1319
1320 1320 def __init__(self,**kwargs):
1321 1321 Operation.__init__(self,**kwargs)
1322 1322
1323 1323 def setup(self, dataOut, n = None, removeDC=False):
1324 1324 '''
1325 1325 n = number of input PRFs
1326 1326 '''
1327 1327 self.__initime = None
1328 1328 ####print("[INICIO]-setup del METODO PULSE PAIR")
1329 1329 self.__lastdatatime = 0
1330 1330 self.__dataReady = False
1331 1331 self.__buffer = 0
1332 1332 self.__profIndex = 0
1333 1333 self.noise = None
1334 1334 self.__nch = dataOut.nChannels
1335 1335 self.__nHeis = dataOut.nHeights
1336 1336 self.removeDC = removeDC
1337 1337 self.lambda_ = 3.0e8/(9345.0e6)
1338 1338 self.ippSec = dataOut.ippSeconds
1339 1339 self.nCohInt = dataOut.nCohInt
1340 1340 ####print("IPPseconds",dataOut.ippSeconds)
1341 1341 ####print("ELVALOR DE n es:", n)
1342 1342 if n == None:
1343 1343 raise ValueError("n should be specified.")
1344 1344
1345 1345 if n != None:
1346 1346 if n<2:
1347 1347 raise ValueError("n should be greater than 2")
1348 1348
1349 1349 self.n = n
1350 1350 self.__nProf = n
1351 1351
1352 1352 self.__buffer = numpy.zeros((dataOut.nChannels,
1353 1353 n,
1354 1354 dataOut.nHeights),
1355 1355 dtype='complex')
1356 1356
1357 1357 def putData(self,data):
1358 1358 '''
1359 1359 Add a profile to the __buffer and increase the __profIndex by one
1360 1360 '''
1361 1361 self.__buffer[:,self.__profIndex,:]= data
1362 1362 self.__profIndex += 1
1363 1363 return
1364 1364
1365 1365 def pushData(self,dataOut):
1366 1366 '''
1367 1367 Return the PULSEPAIR and the profiles used in the operation
1368 1368 Affected : self.__profileIndex
1369 1369 '''
1370 1370 #----------------- Remove DC-----------------------------------
1371 1371 if self.removeDC==True:
1372 1372 mean = numpy.mean(self.__buffer,1)
1373 1373 tmp = mean.reshape(self.__nch,1,self.__nHeis)
1374 1374 dc= numpy.tile(tmp,[1,self.__nProf,1])
1375 1375 self.__buffer = self.__buffer - dc
1376 1376 #------------------Power computation ------------------------
1377 1377 pair0 = self.__buffer*numpy.conj(self.__buffer)
1378 1378 pair0 = pair0.real
1379 1379 lag_0 = numpy.sum(pair0,1)
1380 1380 #-----------------Cross-spectrum (CSPC) computation------------ New
1381 1381 cspc_pair01 = self.__buffer[0]*self.__buffer[1]
1382 1382 #------------------Noise computation per channel---------------
1383 1383 self.noise = numpy.zeros(self.__nch)
1384 1384 for i in range(self.__nch):
1385 1385 daux = numpy.sort(pair0[i,:,:],axis= None)
1386 1386 self.noise[i]=hildebrand_sekhon( daux ,self.nCohInt)
1387 1387
1388 1388 self.noise = self.noise.reshape(self.__nch,1)
1389 1389 self.noise = numpy.tile(self.noise,[1,self.__nHeis])
1390 1390 noise_buffer = self.noise.reshape(self.__nch,1,self.__nHeis)
1391 1391 noise_buffer = numpy.tile(noise_buffer,[1,self.__nProf,1])
1392 1392 #------------------ Received power = P , signal power = S , noise = N--
1393 1393 #------------------ P= S+N ,P=lag_0/N ---------------------------------
1394 1394 #-------------------- Power --------------------------------------------------
1395 1395 data_power = lag_0/(self.n*self.nCohInt)
1396 1396 #--------------------CCF------------------------------------------------------
1397 1397 data_ccf =numpy.sum(cspc_pair01,axis=0)/(self.n*self.nCohInt)
1398 1398 #------------------ Signal ------------------------------------------------
1399 1399 data_intensity = pair0 - noise_buffer
1400 1400 data_intensity = numpy.sum(data_intensity,axis=1)*(self.n*self.nCohInt)#*self.nCohInt)
1401 1401 #data_intensity = (lag_0-self.noise*self.n)*(self.n*self.nCohInt)
1402 1402 for i in range(self.__nch):
1403 1403 for j in range(self.__nHeis):
1404 1404 if data_intensity[i][j] < 0:
1405 1405 data_intensity[i][j] = numpy.min(numpy.absolute(data_intensity[i][j]))
1406 1406
1407 1407 #----------------- Doppler frequency and velocity computation--------
1408 1408 pair1 = self.__buffer[:,:-1,:]*numpy.conjugate(self.__buffer[:,1:,:])
1409 1409 lag_1 = numpy.sum(pair1,1)
1410 1410 data_freq = (-1/(2.0*math.pi*self.ippSec*self.nCohInt))*numpy.angle(lag_1)
1411 1411 data_velocity = (self.lambda_/2.0)*data_freq
1412 1412
1413 1413 #---------------- Estimated mean signal power-----------
1414 1414 lag_0 = lag_0/self.n
1415 1415 S = lag_0-self.noise
1416 1416
1417 1417 #---------------- Mean Doppler frequency ---------------------
1418 1418 lag_1 = lag_1/(self.n-1)
1419 1419 R1 = numpy.abs(lag_1)
1420 1420
1421 1421 #---------------- SNR computation----------------------------------
1422 1422 data_snrPP = S/self.noise
1423 1423 for i in range(self.__nch):
1424 1424 for j in range(self.__nHeis):
1425 1425 if data_snrPP[i][j] < 1.e-20:
1426 1426 data_snrPP[i][j] = 1.e-20
1427 1427
1428 1428 #----------------- Spectral width computation ----------------------
1429 1429 L = S/R1
1430 1430 L = numpy.where(L<0,1,L)
1431 1431 L = numpy.log(L)
1432 1432 tmp = numpy.sqrt(numpy.absolute(L))
1433 1433 data_specwidth = (self.lambda_/(2*math.sqrt(2)*math.pi*self.ippSec*self.nCohInt))*tmp*numpy.sign(L)
1434 1434 n = self.__profIndex
1435 1435
1436 1436 self.__buffer = numpy.zeros((self.__nch, self.__nProf,self.__nHeis), dtype='complex')
1437 1437 self.__profIndex = 0
1438 1438 return data_power,data_intensity,data_velocity,data_snrPP,data_specwidth,data_ccf,n
1439 1439
1440 1440
1441 1441 def pulsePairbyProfiles(self,dataOut):
1442 1442
1443 1443 self.__dataReady = False
1444 1444 data_power = None
1445 1445 data_intensity = None
1446 1446 data_velocity = None
1447 1447 data_specwidth = None
1448 1448 data_snrPP = None
1449 1449 data_ccf = None
1450 1450 self.putData(data=dataOut.data)
1451 1451 if self.__profIndex == self.n:
1452 1452 data_power,data_intensity, data_velocity,data_snrPP,data_specwidth,data_ccf, n = self.pushData(dataOut=dataOut)
1453 1453 self.__dataReady = True
1454 1454
1455 1455 return data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf
1456 1456
1457 1457
1458 1458 def pulsePairOp(self, dataOut, datatime= None):
1459 1459
1460 1460 if self.__initime == None:
1461 1461 self.__initime = datatime
1462 1462 data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf = self.pulsePairbyProfiles(dataOut)
1463 1463 self.__lastdatatime = datatime
1464 1464
1465 1465 if data_power is None:
1466 1466 return None, None, None,None,None,None,None
1467 1467
1468 1468 avgdatatime = self.__initime
1469 1469 deltatime = datatime - self.__lastdatatime
1470 1470 self.__initime = datatime
1471 1471
1472 1472 return data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf, avgdatatime
1473 1473
1474 1474 def run(self, dataOut,n = None,removeDC= False, overlapping= False,**kwargs):
1475 1475 #print("hey")
1476 1476 #print(dataOut.data.shape)
1477 1477 #exit(1)
1478 1478 #print(self.__profIndex)
1479 1479 if not self.isConfig:
1480 1480 self.setup(dataOut = dataOut, n = n , removeDC=removeDC , **kwargs)
1481 1481 self.isConfig = True
1482 1482 data_power, data_intensity, data_velocity,data_snrPP,data_specwidth,data_ccf, avgdatatime = self.pulsePairOp(dataOut, dataOut.utctime)
1483 1483 dataOut.flagNoData = True
1484 1484
1485 1485 if self.__dataReady:
1486 1486 ###print("READY ----------------------------------")
1487 1487 dataOut.nCohInt *= self.n
1488 1488 dataOut.dataPP_POW = data_intensity # S
1489 1489 dataOut.dataPP_POWER = data_power # P, the value corresponding to the POWER moment
1490 1490 dataOut.dataPP_DOP = data_velocity
1491 1491 dataOut.dataPP_SNR = data_snrPP
1492 1492 dataOut.dataPP_WIDTH = data_specwidth
1493 1493 dataOut.dataPP_CCF = data_ccf
1494 1494 dataOut.PRFbyAngle = self.n #number of PRFs per rotated angle, equivalent to one time step
1495 1495 dataOut.nProfiles = int(dataOut.nProfiles/n)
1496 1496 dataOut.utctime = avgdatatime
1497 1497 dataOut.flagNoData = False
1498 1498 return dataOut
1499 1499
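A minimal sketch, with random data and assumed IPP, nCohInt and shapes, of the two core pulse-pair estimators computed in pushData(): mean power from lag 0 and Doppler velocity from the phase of lag 1. The wavelength matches the value hard-coded in setup(); everything else is illustrative.

import math
import numpy

lambda_ = 3.0e8 / 9345.0e6          # same wavelength as set in PulsePair.setup()
ippSec, nCohInt = 1e-3, 1           # assumed IPP and coherent integrations
nChannels, n, nHeights = 2, 8, 100
buf = (numpy.random.randn(nChannels, n, nHeights)
       + 1j * numpy.random.randn(nChannels, n, nHeights))

lag_0 = numpy.sum((buf * numpy.conj(buf)).real, axis=1)     # lag-0: total power
data_power = lag_0 / (n * nCohInt)

pair1 = buf[:, :-1, :] * numpy.conjugate(buf[:, 1:, :])     # lag-1 products
lag_1 = numpy.sum(pair1, axis=1)
data_freq = (-1 / (2.0 * math.pi * ippSec * nCohInt)) * numpy.angle(lag_1)
data_velocity = (lambda_ / 2.0) * data_freq                 # Doppler velocity

print(data_power.shape, data_velocity.shape)                # (2, 100) (2, 100)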
1500 1500 class PulsePair_vRF(Operation):
1501 1501 '''
1502 1502 Function PulsePair(Signal Power, Velocity)
1503 1503 The real part of lag[0] provides the intensity (power) information
1504 1504 The phase of lag[1] provides the Doppler velocity information
1505 1505
1506 1506 Configuration Parameters:
1507 1507 nPRF = number of PRFs (profiles) to integrate
1508 1508 theta = azimuth angle boundaries in degrees
1509 1509
1510 1510 Input:
1511 1511 self.dataOut
1512 1512 lag[N]
1513 1513 Affected:
1514 1514 self.dataOut.spc
1515 1515 '''
1516 1516 isConfig = False
1517 1517 __profIndex = 0
1518 1518 __initime = None
1519 1519 __lastdatatime = None
1520 1520 __buffer = None
1521 1521 noise = None
1522 1522 __dataReady = False
1523 1523 n = None
1524 1524 __nch = 0
1525 1525 __nHeis = 0
1526 1526 removeDC = False
1527 1527 ipp = None
1528 1528 lambda_ = 0
1529 1529
1530 1530 def __init__(self,**kwargs):
1531 1531 Operation.__init__(self,**kwargs)
1532 1532
1533 1533 def setup(self, dataOut, n = None, removeDC=False):
1534 1534 '''
1535 1535 n = number of input PRFs
1536 1536 '''
1537 1537 self.__initime = None
1538 1538 ####print("[INICIO]-setup del METODO PULSE PAIR")
1539 1539 self.__lastdatatime = 0
1540 1540 self.__dataReady = False
1541 1541 self.__buffer = 0
1542 1542 self.__profIndex = 0
1543 1543 self.noise = None
1544 1544 self.__nch = dataOut.nChannels
1545 1545 self.__nHeis = dataOut.nHeights
1546 1546 self.removeDC = removeDC
1547 1547 self.lambda_ = 3.0e8/(9345.0e6)
1548 1548 self.ippSec = dataOut.ippSeconds
1549 1549 self.nCohInt = dataOut.nCohInt
1550 1550 ####print("IPPseconds",dataOut.ippSeconds)
1551 1551 ####print("ELVALOR DE n es:", n)
1552 1552 if n == None:
1553 1553 raise ValueError("n should be specified.")
1554 1554
1555 1555 if n != None:
1556 1556 if n<2:
1557 1557 raise ValueError("n should be greater than 2")
1558 1558
1559 1559 self.n = n
1560 1560 self.__nProf = n
1561 1561
1562 1562 self.__buffer = numpy.zeros((dataOut.nChannels,
1563 1563 n,
1564 1564 dataOut.nHeights),
1565 1565 dtype='complex')
1566 1566
1567 1567 def putData(self,data):
1568 1568 '''
1569 1569 Add a profile to the __buffer and increase the __profIndex by one
1570 1570 '''
1571 1571 self.__buffer[:,self.__profIndex,:]= data
1572 1572 self.__profIndex += 1
1573 1573 return
1574 1574
1575 1575 def putDataByBlock(self,data,n):
1576 1576 '''
1577 1577 Add a whole block of profiles to the __buffer and set __profIndex to n
1578 1578 '''
1579 1579 self.__buffer[:]= data
1580 1580 self.__profIndex = n
1581 1581 return
1582 1582
1583 1583 def pushData(self,dataOut):
1584 1584 '''
1585 1585 Return the pulse-pair estimates and the number of profiles used in the operation
1586 1586 Affected : self.__profIndex
1587 1587 '''
1588 1588 #----------------- Remove DC-----------------------------------
1589 1589 if self.removeDC==True:
1590 1590 mean = numpy.mean(self.__buffer,1)
1591 1591 tmp = mean.reshape(self.__nch,1,self.__nHeis)
1592 1592 dc= numpy.tile(tmp,[1,self.__nProf,1])
1593 1593 self.__buffer = self.__buffer - dc
1594 1594 #------------------ Power computation --------------------------
1595 1595 pair0 = self.__buffer*numpy.conj(self.__buffer)
1596 1596 pair0 = pair0.real
1597 1597 lag_0 = numpy.sum(pair0,1)
1598 1598 #----------------- Cross-spectral power (CSPC) computation ----- New
1599 1599 cspc_pair01 = self.__buffer[0]*self.__buffer[1]
1600 1600 #------------------ Noise computation per channel --------------
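        # Per-channel noise floor estimated from the sorted lag-0 powers with the
        # Hildebrand-Sekhon criterion, using nCohInt as the number of coherent integrations.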
1601 1601 self.noise = numpy.zeros(self.__nch)
1602 1602 for i in range(self.__nch):
1603 1603 daux = numpy.sort(pair0[i,:,:],axis= None)
1604 1604 self.noise[i]=hildebrand_sekhon( daux ,self.nCohInt)
1605 1605
1606 1606 self.noise = self.noise.reshape(self.__nch,1)
1607 1607 self.noise = numpy.tile(self.noise,[1,self.__nHeis])
1608 1608 noise_buffer = self.noise.reshape(self.__nch,1,self.__nHeis)
1609 1609 noise_buffer = numpy.tile(noise_buffer,[1,self.__nProf,1])
1610 1610 #------------------ Received power = P, Signal power = S, Noise = N ------
1611 1611 #------------------ P = S + N, P = lag_0/N -------------------------------
1612 1612 #-------------------- Power --------------------------------------------------
1613 1613 data_power = lag_0/(self.n*self.nCohInt)
1614 1614 #--------------------CCF------------------------------------------------------
1615 1615 data_ccf =numpy.sum(cspc_pair01,axis=0)/(self.n*self.nCohInt)
1616 1616 #------------------ Signal --------------------------------------------------
1617 1617 data_intensity = pair0 - noise_buffer
1618 1618 data_intensity = numpy.sum(data_intensity,axis=1)*(self.n*self.nCohInt)#*self.nCohInt)
1619 1619 #data_intensity = (lag_0-self.noise*self.n)*(self.n*self.nCohInt)
1620 1620 for i in range(self.__nch):
1621 1621 for j in range(self.__nHeis):
1622 1622 if data_intensity[i][j] < 0:
1623 1623 data_intensity[i][j] = numpy.min(numpy.absolute(data_intensity[i][j]))
1624 1624
1625 1625 #----------------- Doppler frequency and velocity computation ------
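        # Pulse-pair Doppler estimate: f_d = -arg(R(T)) / (2*pi*T_eff) with
        # T_eff = ippSec * nCohInt, and radial velocity v = (lambda/2) * f_d.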
1626 1626 pair1 = self.__buffer[:,:-1,:]*numpy.conjugate(self.__buffer[:,1:,:])
1627 1627 lag_1 = numpy.sum(pair1,1)
1628 1628 data_freq = (-1/(2.0*math.pi*self.ippSec*self.nCohInt))*numpy.angle(lag_1)
1629 1629 data_velocity = (self.lambda_/2.0)*data_freq
1630 1630
1631 1631 #---------------- Estimated mean signal power ----------------------
1632 1632 lag_0 = lag_0/self.n
1633 1633 S = lag_0-self.noise
1634 1634
1635 1635 #---------------- Mean Doppler frequency ---------------------------
1636 1636 lag_1 = lag_1/(self.n-1)
1637 1637 R1 = numpy.abs(lag_1)
1638 1638
1639 1639 #---------------- SNR computation ----------------------------------
1640 1640 data_snrPP = S/self.noise
1641 1641 for i in range(self.__nch):
1642 1642 for j in range(self.__nHeis):
1643 1643 if data_snrPP[i][j] < 1.e-20:
1644 1644 data_snrPP[i][j] = 1.e-20
1645 1645
1646 1646 #----------------- Spectral width computation ----------------------
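        # Spectral width from the signal-to-|lag-1| ratio: sigma_v =
        # (lambda / (2*sqrt(2)*pi*T_eff)) * sqrt(|ln(S/R1)|), with the sign of
        # ln(S/R1) preserved, under the usual Gaussian-spectrum assumption.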
1647 1647 L = S/R1
1648 1648 L = numpy.where(L<0,1,L)
1649 1649 L = numpy.log(L)
1650 1650 tmp = numpy.sqrt(numpy.absolute(L))
1651 1651 data_specwidth = (self.lambda_/(2*math.sqrt(2)*math.pi*self.ippSec*self.nCohInt))*tmp*numpy.sign(L)
1652 1652 n = self.__profIndex
1653 1653
1654 1654 self.__buffer = numpy.zeros((self.__nch, self.__nProf,self.__nHeis), dtype='complex')
1655 1655 self.__profIndex = 0
1656 1656 return data_power,data_intensity,data_velocity,data_snrPP,data_specwidth,data_ccf,n
1657 1657
1658 1658
1659 1659 def pulsePairbyProfiles(self,dataOut,n):
1660 1660
1661 1661 self.__dataReady = False
1662 1662 data_power = None
1663 1663 data_intensity = None
1664 1664 data_velocity = None
1665 1665 data_specwidth = None
1666 1666 data_snrPP = None
1667 1667 data_ccf = None
1668 1668
1669 1669 if dataOut.flagDataAsBlock:
1670 1670 self.putDataByBlock(data=dataOut.data,n=n)
1671 1671 else:
1672 1672 self.putData(data=dataOut.data)
1673 1673 if self.__profIndex == self.n:
1674 1674 data_power,data_intensity, data_velocity,data_snrPP,data_specwidth,data_ccf, n = self.pushData(dataOut=dataOut)
1675 1675 self.__dataReady = True
1676 1676
1677 1677 return data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf
1678 1678
1679 1679
1680 1680 def pulsePairOp(self, dataOut, n, datatime= None):
1681 1681
1682 1682 if self.__initime is None:
1683 1683 self.__initime = datatime
1684 1684 data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf = self.pulsePairbyProfiles(dataOut,n)
1685 1685 self.__lastdatatime = datatime
1686 1686
1687 1687 if data_power is None:
1688 1688 return None, None, None,None,None,None,None
1689 1689
1690 1690 avgdatatime = self.__initime
1691 1691 deltatime = datatime - self.__lastdatatime
1692 1692 self.__initime = datatime
1693 1693
1694 1694 return data_power, data_intensity, data_velocity, data_snrPP,data_specwidth,data_ccf, avgdatatime
1695 1695
1696 1696 def run(self, dataOut,n = None,removeDC= False, overlapping= False,**kwargs):
1697 #print("hey")
1698 #print(dataOut.data.shape)
1699 #exit(1)
1697
1700 1698 if dataOut.flagDataAsBlock:
1701 n = dataOut.nProfileBlocks
1702 #print(self.__profIndex)
1699 n = dataOut.nProfiles
1700
1703 1701 if not self.isConfig:
1704 1702 self.setup(dataOut = dataOut, n = n , removeDC=removeDC , **kwargs)
1705 1703 self.isConfig = True
1706 1704
1707 1705
1708 1706 data_power, data_intensity, data_velocity,data_snrPP,data_specwidth,data_ccf, avgdatatime = self.pulsePairOp(dataOut, n, dataOut.utctime)
1709 1707
1710 1708
1711 1709 dataOut.flagNoData = True
1712 1710
1713 1711 if self.__dataReady:
1714 1712 ###print("READY ----------------------------------")
1715 1713 dataOut.nCohInt *= self.n
1716 1714 dataOut.dataPP_POW = data_intensity # S
1717 1715 dataOut.dataPP_POWER = data_power # P, the value corresponding to the power moment
1718 1716 dataOut.dataPP_DOP = data_velocity
1719 1717 dataOut.dataPP_SNR = data_snrPP
1720 1718 dataOut.dataPP_WIDTH = data_specwidth
1721 1719 dataOut.dataPP_CCF = data_ccf
1722 1720 dataOut.PRFbyAngle = self.n # number of PRFs per rotated angle, which corresponds to one time interval.
1723 1721 dataOut.nProfiles = int(dataOut.nProfiles/n)
1724 1722 dataOut.utctime = avgdatatime
1725 1723 dataOut.flagNoData = False
1726 1724 return dataOut
1727 1725
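# --- Editor's sketch (not part of the processing chain) ---------------------
# Rough sketch of the Hildebrand & Sekhon (1974) noise-floor criterion that
# the hildebrand_sekhon() call in pushData relies on: for power samples that
# are pure noise averaged over `navg` independent samples, mean**2 / variance
# stays >= navg. The function name and the scan direction are assumptions;
# the module's own hildebrand_sekhon implementation may differ in detail.
def _hildebrand_sekhon_sketch(sorted_power, navg):
    """sorted_power: 1-D array of power samples sorted in ascending order."""
    import numpy
    noise = float(numpy.mean(sorted_power))
    # Drop the largest samples one by one until the remainder looks like noise.
    for k in range(len(sorted_power), 1, -1):
        subset = sorted_power[:k]
        mean = float(numpy.mean(subset))
        var = float(numpy.var(subset))
        if var > 0.0 and mean * mean / var >= navg:
            noise = mean
            break
    return noise
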
1728 1726 # import collections
1729 1727 # from scipy.stats import mode
1730 1728 #
1731 1729 # class Synchronize(Operation):
1732 1730 #
1733 1731 # isConfig = False
1734 1732 # __profIndex = 0
1735 1733 #
1736 1734 # def __init__(self, **kwargs):
1737 1735 #
1738 1736 # Operation.__init__(self, **kwargs)
1739 1737 # # self.isConfig = False
1740 1738 # self.__powBuffer = None
1741 1739 # self.__startIndex = 0
1742 1740 # self.__pulseFound = False
1743 1741 #
1744 1742 # def __findTxPulse(self, dataOut, channel=0, pulse_with = None):
1745 1743 #
1746 1744 # #Read data
1747 1745 #
1748 1746 # powerdB = dataOut.getPower(channel = channel)
1749 1747 # noisedB = dataOut.getNoise(channel = channel)[0]
1750 1748 #
1751 1749 # self.__powBuffer.extend(powerdB.flatten())
1752 1750 #
1753 1751 # dataArray = numpy.array(self.__powBuffer)
1754 1752 #
1755 1753 # filteredPower = numpy.correlate(dataArray, dataArray[0:self.__nSamples], "same")
1756 1754 #
1757 1755 # maxValue = numpy.nanmax(filteredPower)
1758 1756 #
1759 1757 # if maxValue < noisedB + 10:
1760 1758 # #No transmission pulse was found
1761 1759 # return None
1762 1760 #
1763 1761 # maxValuesIndex = numpy.where(filteredPower > maxValue - 0.1*abs(maxValue))[0]
1764 1762 #
1765 1763 # if len(maxValuesIndex) < 2:
1766 1764 # #Only a single one-baud transmission pulse was found; waiting for the next TX
1767 1765 # return None
1768 1766 #
1769 1767 # phasedMaxValuesIndex = maxValuesIndex - self.__nSamples
1770 1768 #
1771 1769 # #Keep only values spaced nSamples apart
1772 1770 # pulseIndex = numpy.intersect1d(maxValuesIndex, phasedMaxValuesIndex)
1773 1771 #
1774 1772 # if len(pulseIndex) < 2:
1775 1773 # #Only one transmission pulse wider than 1 was found
1776 1774 # return None
1777 1775 #
1778 1776 # spacing = pulseIndex[1:] - pulseIndex[:-1]
1779 1777 #
1781 1779 # #Remove signals spaced less than 10 units or samples apart
1782 1780 # #(IPPs shorter than 10 units should not exist)
1782 1780 #
1783 1781 # realIndex = numpy.where(spacing > 10 )[0]
1784 1782 #
1785 1783 # if len(realIndex) < 2:
1786 1784 # #Only one transmission pulse wider than 1 was found
1787 1785 # return None
1788 1786 #
1789 1787 # #Discard wide pulses (keep only the spacing between IPPs)
1790 1788 # realPulseIndex = pulseIndex[realIndex]
1791 1789 #
1792 1790 # period = mode(realPulseIndex[1:] - realPulseIndex[:-1])[0][0]
1793 1791 #
1794 1792 # print "IPP = %d samples" %period
1795 1793 #
1796 1794 # self.__newNSamples = dataOut.nHeights #int(period)
1797 1795 # self.__startIndex = int(realPulseIndex[0])
1798 1796 #
1799 1797 # return 1
1800 1798 #
1801 1799 #
1802 1800 # def setup(self, nSamples, nChannels, buffer_size = 4):
1803 1801 #
1804 1802 # self.__powBuffer = collections.deque(numpy.zeros( buffer_size*nSamples,dtype=numpy.float),
1805 1803 # maxlen = buffer_size*nSamples)
1806 1804 #
1807 1805 # bufferList = []
1808 1806 #
1809 1807 # for i in range(nChannels):
1810 1808 # bufferByChannel = collections.deque(numpy.zeros( buffer_size*nSamples, dtype=numpy.complex) + numpy.NAN,
1811 1809 # maxlen = buffer_size*nSamples)
1812 1810 #
1813 1811 # bufferList.append(bufferByChannel)
1814 1812 #
1815 1813 # self.__nSamples = nSamples
1816 1814 # self.__nChannels = nChannels
1817 1815 # self.__bufferList = bufferList
1818 1816 #
1819 1817 # def run(self, dataOut, channel = 0):
1820 1818 #
1821 1819 # if not self.isConfig:
1822 1820 # nSamples = dataOut.nHeights
1823 1821 # nChannels = dataOut.nChannels
1824 1822 # self.setup(nSamples, nChannels)
1825 1823 # self.isConfig = True
1826 1824 #
1827 1825 # #Append new data to internal buffer
1828 1826 # for thisChannel in range(self.__nChannels):
1829 1827 # bufferByChannel = self.__bufferList[thisChannel]
1830 1828 # bufferByChannel.extend(dataOut.data[thisChannel])
1831 1829 #
1832 1830 # if self.__pulseFound:
1833 1831 # self.__startIndex -= self.__nSamples
1834 1832 #
1835 1833 # #Finding Tx Pulse
1836 1834 # if not self.__pulseFound:
1837 1835 # indexFound = self.__findTxPulse(dataOut, channel)
1838 1836 #
1839 1837 # if indexFound == None:
1840 1838 # dataOut.flagNoData = True
1841 1839 # return
1842 1840 #
1843 1841 # self.__arrayBuffer = numpy.zeros((self.__nChannels, self.__newNSamples), dtype = numpy.complex)
1844 1842 # self.__pulseFound = True
1845 1843 # self.__startIndex = indexFound
1846 1844 #
1847 1845 # #If pulse was found ...
1848 1846 # for thisChannel in range(self.__nChannels):
1849 1847 # bufferByChannel = self.__bufferList[thisChannel]
1850 1848 # #print self.__startIndex
1851 1849 # x = numpy.array(bufferByChannel)
1852 1850 # self.__arrayBuffer[thisChannel] = x[self.__startIndex:self.__startIndex+self.__newNSamples]
1853 1851 #
1854 1852 # deltaHeight = dataOut.heightList[1] - dataOut.heightList[0]
1855 1853 # dataOut.heightList = numpy.arange(self.__newNSamples)*deltaHeight
1856 1854 # # dataOut.ippSeconds = (self.__newNSamples / deltaHeight)/1e6
1857 1855 #
1858 1856 # dataOut.data = self.__arrayBuffer
1859 1857 #
1860 1858 # self.__startIndex += self.__newNSamples
1861 1859 #
1862 1860 # return